feat: Implement World Architect

This commit is contained in:
2026-04-12 11:29:06 +05:30
parent 86d9bfa746
commit 079fe3ff22
11 changed files with 431 additions and 443 deletions

3
.gitignore vendored
View File

@@ -1,3 +1,5 @@
# sortedcord's files
dev_thoughts.md
# Python-generated files
__pycache__/
*.py[oc]
@@ -8,3 +10,4 @@ wheels/
# Virtual environments
.venv
.obsidian/

View File

@@ -4,7 +4,7 @@
"title": "Barnaby and Sybil",
"description": "A small village gate and a shadowed alley conversation.",
"player_id": "player",
"world_time": "1999-05-14 20:20",
"world_time": "1999-05-14 20:35",
"location": "Village"
},
"entities": [
@@ -47,6 +47,15 @@
"entities": [
"player"
]
},
{
"content": "Past Conversation Summary: Hmph. The man's greeting was an unwelcome interruption, nothing more than a waste of breath and time near my post. He asked how I am; the answer is that duty demands it so be said\u2014standing guard in this village muck while people like him wander about making noise instead of keeping to themselves. Honestly, some folk have no sense for proper decorum or where they ought *not* to stand when a gate needs watching.",
"event_type": "reflection",
"timestamp": "1999-05-14 20:20",
"location": "Village",
"entities": [
"player"
]
}
]
},
@@ -109,6 +118,33 @@
"entities": [
"player"
]
},
{
"content": "Past Conversation Summary: The air is heavy today... like the smell of wet earth. What brings you to this shadow?He calls me out on my posture, as if a mere curve in bone were an offense against some unseen order here tonight. And then he questions it\u2014*me*, too loud?\u2014as though his own presence isn't already vibrating with unnecessary noise for the quiet I crave... His sudden defensiveness is almost audible; brittle and sharp like broken glass underfoot. It leaves a residue of irritation, thin but persistent in this deepening gloom around us both.",
"event_type": "reflection",
"timestamp": "1999-05-14 20:20",
"location": "Village",
"entities": [
"player"
]
},
{
"content": "Past Conversation Summary: The air remains heavy... like the smell of wet earth. His question, a mere echo against this quiet backdrop: *what did I just say*. It was so\u2026 bright in its simplicity, yet utterly hollow when measured against whatever weight hangs here now. He seeks definition from whispers; he wants me to catalogue his fleeting sounds as if they held some tangible meaning for him alone. They are nothing more than smoke curling... and the effort it takes simply *being* near such trivialities is exhausting enough without having to dissect them into neat little facts of speech.",
"event_type": "reflection",
"timestamp": "1999-05-14 20:20",
"location": "Village",
"entities": [
"player"
]
},
{
"content": "Past Conversation Summary: The accusation... it hangs there, a brittle thing in the air. *All my fault.* Such sweeping pronouncements are so tiresome; they suggest an understanding of consequence that I find utterly lacking on others' parts as well. He speaks with such certainty, yet his voice seems to echo from some place far removed from truth itself. The weight remains\u2014the oppressive quiet after a sudden burst like this... it settles back in around my bones.",
"event_type": "reflection",
"timestamp": "1999-05-14 20:35",
"location": "Village",
"entities": [
"player"
]
}
]
}

435
engine.py
View File

@@ -1,435 +0,0 @@
from datetime import datetime, timedelta
import logging
import multiprocessing
from langchain_community.chat_models import ChatLlamaCpp
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_core.messages import HumanMessage, SystemMessage
# Local GGUF checkpoint used for every LLM call in this module.
DEFAULT_MODEL_PATH = "/home/sortedcord/.cache/huggingface/hub/models--ggml-org--gemma-4-E4B-it-GGUF/snapshots/6b352c53e1d2e4bb974d9f8cafcf85887c224219/gemma-4-e4b-it-Q4_K_M.gguf"
logger = logging.getLogger(__name__)
# Sentence embedder backing every entity's FAISS memory store.
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
# Shared llama.cpp chat model; low temperature keeps NPC replies consistent.
llm = ChatLlamaCpp(
    temperature=0.2,
    model_path=DEFAULT_MODEL_PATH,
    n_ctx=4096,
    n_gpu_layers=8,
    max_tokens=512,
    n_threads=multiprocessing.cpu_count() - 1,  # leave one core for the game loop
    repeat_penalty=1.5,
)
def _format_prompt(messages):
formatted = []
for message in messages:
formatted.append(f"{message.__class__.__name__}:\n{message.content}")
return "\n\n".join(formatted)
def _normalize_llm_output(text: str) -> str:
return text.replace("\r", "").replace("\n", "").strip()
def _time_of_day_label(hour: int, *, for_today: bool) -> str:
if 5 <= hour < 12:
return "morning"
if 12 <= hour < 17:
return "afternoon"
return "tonight" if for_today else "night"
def describe_relative_time(
    timestamp_str: str,
    reference_time: datetime,
    *,
    prefer_day_part_for_today: bool = False,
) -> str:
    """Turn an absolute 'YYYY-MM-DD HH:MM' stamp into casual relative phrasing.

    Unparseable stamps read as "a long time ago"; future stamps as "just now".
    When prefer_day_part_for_today is set, the fine-grained sub-day buckets
    are skipped so same-day times read as day parts ("today tonight").
    """
    try:
        then = datetime.strptime(timestamp_str, "%Y-%m-%d %H:%M")
    except ValueError:
        return "a long time ago"
    elapsed = (reference_time - then).total_seconds()
    if elapsed < 0:
        return "just now"
    if not prefer_day_part_for_today:
        # Fine-grained buckets for recent events, ordered by threshold.
        for limit, label in (
            (120, "just now"),
            (15 * 60, "a few minutes ago"),
            (90 * 60, "an hour ago"),
            (3 * 60 * 60, "a couple hours ago"),
        ):
            if elapsed < limit:
                return label
    days = (reference_time.date() - then.date()).days
    if days == 0:
        return f"today {_time_of_day_label(then.hour, for_today=True)}"
    if days == 1:
        return f"yesterday {_time_of_day_label(then.hour, for_today=False)}"
    # Coarse calendar buckets, ordered by inclusive upper bound in days.
    for limit, label in (
        (2, "2 days ago"),
        (3, "3 days ago"),
        (6, "a couple days ago"),
        (10, "a week ago"),
        (20, "a couple weeks ago"),
        (45, "a month ago"),
        (75, "a couple months ago"),
        (420, "a year ago"),
    ):
        if days <= limit:
            return label
    return "a long time ago"
class WorldClock:
def __init__(self, start_year=1999, month=5, day=14, hour=18, minute=0):
# We use a standard datetime object for easy math
self.current_time = datetime(start_year, month, day, hour, minute)
def advance_time(self, minutes=0, hours=0, days=0):
self.current_time += timedelta(minutes=minutes, hours=hours, days=days)
def get_time_str(self):
# 1999-05-14 18:00
return self.current_time.strftime("%Y-%m-%d %H:%M")
def get_vibe(self):
"""Helper to tell the LLM the 'feel' of the time."""
hour = self.current_time.hour
if 5 <= hour < 12:
return "Morning"
if 12 <= hour < 17:
return "Afternoon"
if 17 <= hour < 21:
return "Evening"
return "Night"
@classmethod
def from_time_str(cls, time_str: str | None):
if not time_str:
return cls()
parsed = datetime.strptime(time_str, "%Y-%m-%d %H:%M")
return cls(
start_year=parsed.year,
month=parsed.month,
day=parsed.day,
hour=parsed.hour,
minute=parsed.minute,
)
class MemoryEntry:
    """One archived event in an entity's long-term memory."""

    def __init__(self, content, event_type, timestamp_str, location, entities):
        self.content = content
        # One of: 'dialogue', 'observation', 'reflection'.
        self.event_type = event_type
        self.timestamp = timestamp_str
        self.location = location
        self.entities = entities

    def __repr__(self):
        return f"[{self.timestamp}] ({self.location}): {self.content}"

    def to_dict(self):
        """JSON-friendly snapshot of every field."""
        return dict(
            content=self.content,
            event_type=self.event_type,
            timestamp=self.timestamp,
            location=self.location,
            entities=list(self.entities),
        )

    def to_vector_text(self):
        """Flattened text form used when embedding this entry."""
        who = ", ".join(self.entities) if self.entities else "Unknown"
        lines = [
            self.content,
            f"Time: {self.timestamp}",
            f"Location: {self.location}",
            f"Entities: {who}",
            f"Type: {self.event_type}",
        ]
        return "\n".join(lines)

    def to_relative_string(self, reference_time: datetime):
        """Like repr(), but with a human relative-time label instead of a stamp."""
        when = describe_relative_time(self.timestamp, reference_time)
        return f"[{when}] ({self.location}): {self.content}"
class EntityMemory:
    """FAISS-backed long-term memory for a single entity."""

    def __init__(self):
        # Lazily created on first save; None means "nothing stored yet".
        self.vector_store = None
        # Parallel plain-Python log of every MemoryEntry ever saved.
        self.entries = []

    def save(self, entry: MemoryEntry):
        """Append an entry and index its flattened text in the vector store."""
        self.entries.append(entry)
        text = entry.to_vector_text()
        meta = [{"entry_index": len(self.entries) - 1}]
        if self.vector_store is None:
            self.vector_store = FAISS.from_texts([text], embeddings, metadatas=meta)
        else:
            self.vector_store.add_texts([text], metadatas=meta)

    def retrieve(self, query: str, k=2, reference_time: datetime | None = None):
        """Return the k most similar memories as newline-joined text.

        With a reference_time, entries are rendered with relative time labels;
        otherwise with their absolute timestamps.
        """
        if self.vector_store is None:
            return "No long-term memories relevant."
        rendered = []
        for doc in self.vector_store.similarity_search(query, k=k):
            idx = doc.metadata.get("entry_index")
            if idx is None:
                # Document without a back-reference; fall back to raw text.
                rendered.append(doc.page_content)
            elif reference_time is None:
                rendered.append(repr(self.entries[idx]))
            else:
                rendered.append(self.entries[idx].to_relative_string(reference_time))
        return "\n".join(rendered)

    def dump_entries(self):
        """Shallow copy of the chronological entry log."""
        return list(self.entries)
class Entity:
    """An NPC with traits, mood, a voice sample, and a two-tier memory."""

    def __init__(
        self,
        name,
        traits,
        stats,
        voice_sample,
        current_mood="Neutral",
        entity_id=None,
    ):
        self.name = name
        self.traits = traits
        self.stats = stats
        self.current_mood = current_mood
        # TIER 2: long-term subjective memory (vector-searchable).
        self.memory = EntityMemory()
        # TIER 1: The Short-Term Buffer (Verbatim)
        self.chat_buffer = []
        # Short writing sample used to steer the LLM toward this voice.
        self.voice_sample = voice_sample
        self.entity_id = entity_id

    def perceive(self, entry: MemoryEntry):
        """Commit a MemoryEntry straight into long-term memory."""
        self.memory.save(entry)

    def reflect_and_summarize(self, world_clock: WorldClock, location: str):
        """Converts Tier 1 (Buffer) into Tier 2 (Long-term Subjective Memory)."""
        if not self.chat_buffer:
            return
        dialogue_text = "\n".join(
            [f"{m['role_name']}: {m['content']}" for m in self.chat_buffer]
        )
        # The Subjective Filter Prompt
        summary_prompt = [
            SystemMessage(
                content=f"""
You are the private inner thoughts of {self.name}.
Traits: {", ".join(self.traits)}.
Mood: {self.current_mood}.
Voice Reference: {self.voice_sample}
Think about what just happened.
- No META-TALK, Do not use 'player', 'interaction', 'entity', or 'dialogue'
- BE SUBJECTIVE. If you hated the talk or loved it, then express that.
- USE YOUR VOICE. Match the style of your Voice Reference
- Focus only on facts learned or feelings toward the person"""
            ),
            HumanMessage(
                content=f"""
What just happened? Context:\n{dialogue_text}"""
            ),
        ]
        logger.info("LLM prompt (reflection):\n%s", _format_prompt(summary_prompt))
        summary = _normalize_llm_output(llm.invoke(summary_prompt).content)
        logger.info("SYSTEM: %s reflected on the talk: '%s'", self.name, summary)
        # Everyone who spoke in the buffer, except this entity itself.
        chat_entities = sorted(
            {
                m["role_id"]
                for m in self.chat_buffer
                if m.get("role_id") and m.get("role_id") != self.entity_id
            }
        )
        reflection = MemoryEntry(
            content=f"Past Conversation Summary: {summary}",
            event_type="reflection",
            timestamp_str=world_clock.get_time_str(),
            location=location,
            entities=chat_entities,
        )
        self.perceive(reflection)
        self.chat_buffer = []  # Clear buffer after archiving
class Player(Entity):
    """The human-controlled participant; shares all Entity behavior."""

    pass
def ask_entity(
    entity: Entity,
    player: Entity,
    player_query: str,
    world_clock: WorldClock,
    location: str,
):
    """Send one player line to an NPC, get the LLM reply, and log the exchange.

    The query/response pair is appended to BOTH participants' chat buffers so
    each side can later reflect on the conversation from its own perspective.
    """
    # Long-term memories relevant to what the player just said.
    facts = entity.memory.retrieve(
        player_query,
        reference_time=world_clock.current_time,
    )
    # Last five verbatim exchanges give the LLM short-term context.
    recent_context = "\n".join(
        [f"{m['role_name']}: {m['content']}" for m in entity.chat_buffer[-5:]]
    )
    # NOTE(review): this compares "now" against itself, so it always yields a
    # same-day phrase such as "today tonight" -- confirm that is intended.
    world_time_label = describe_relative_time(
        world_clock.get_time_str(),
        world_clock.current_time,
        prefer_day_part_for_today=True,
    )
    prompt = [
        SystemMessage(content=f"WORLD TIME: {world_time_label}"),
        SystemMessage(
            content=f"""
### ROLE
You are {entity.name}. Persona: {", ".join(entity.traits)}.
Current Mood: {entity.current_mood}.
Vibe Time: {world_clock.get_vibe()}.
Location: {location}.
### WRITING STYLE RULES
1. NO META-TALK. Never mention "memory," "records," "claims," or "narratives."
2. ACT, DON'T EXPLAIN. If you don't know something, just say "Never heard of it" or "I wasn't there." Do not explain WHY you don't know.
### KNOWLEDGE
MEMORIES: {facts}
RECENT CHAT: {recent_context}
"""
        ),
        HumanMessage(content=f"{player.name} speaks to you: {player_query}"),
    ]
    logger.info("LLM prompt (dialogue):\n%s", _format_prompt(prompt))
    response = _normalize_llm_output(llm.invoke(prompt).content)
    # Record the exchange verbatim in the NPC's short-term buffer...
    entity.chat_buffer.append(
        {
            "role_id": player.entity_id,
            "role_name": player.name,
            "content": player_query,
        }
    )
    entity.chat_buffer.append(
        {
            "role_id": entity.entity_id,
            "role_name": entity.name,
            "content": response,
        }
    )
    # ...and mirror the same two messages in the player's buffer.
    player.chat_buffer.append(
        {
            "role_id": player.entity_id,
            "role_name": player.name,
            "content": player_query,
        }
    )
    player.chat_buffer.append(
        {
            "role_id": entity.entity_id,
            "role_name": entity.name,
            "content": response,
        }
    )
    logger.info("[%s]: %s", entity.name.upper(), response)
def _build_name_lookup(entities):
name_lookup = {}
for entity_key, entity in entities.items():
name_lookup[entity_key.lower()] = entity_key
name_lookup[entity.name.lower()] = entity_key
return name_lookup
def start_game(entities, player_id=None, world_time=None, location="Unknown"):
    """Run the interactive talk loop until the player types exit/quit.

    Args:
        entities: mapping of entity_id -> Entity (may include the player).
        player_id: key of the player's entity; a default Player is created
            when omitted.
        world_time: optional 'YYYY-MM-DD HH:MM' start time for the clock.
        location: display name of the shared scene location.

    Raises:
        ValueError: if player_id is given but absent from entities.
    """
    player = None
    if player_id:
        player = entities.get(player_id)
        if player is None:
            raise ValueError(f"Player entity '{player_id}' not found in scenario.")
    else:
        # No scenario-defined player: fall back to a generic one.
        player = Player(
            name="Player",
            traits=["Curious"],
            stats={},
            voice_sample="Voice: 'Direct and concise.'",
            entity_id="player",
        )
    # NPCs the player can address (everyone except themselves).
    available_entities = {
        entity_id: entity
        for entity_id, entity in entities.items()
        if entity_id != player_id
    }
    world_clock = WorldClock.from_time_str(world_time)
    current_entity = None
    name_lookup = _build_name_lookup(available_entities)
    entity_names = "/".join(
        [entity.name for entity in available_entities.values()] + ["Exit"]
    )
    logger.info("--- WORLD INITIALIZED ---")
    logger.info("World initialized with %s active entities.", len(available_entities))
    logger.info("Current location: %s", location)
    logger.info(
        "World time: %s (%s)", world_clock.get_time_str(), world_clock.get_vibe()
    )
    while True:
        target_name = (
            input(f"\nWho do you want to talk to? ({entity_names}): ").lower().strip()
        )
        if target_name in ["exit", "quit"]:
            # Let the current partner archive the talk before shutdown.
            if current_entity:
                current_entity.reflect_and_summarize(world_clock, location)
            break
        target_key = name_lookup.get(target_name)
        if target_key is None:
            logger.warning("Target not found.")
            continue
        new_entity = available_entities[target_key]
        # Switching partners: the one we leave reflects on the conversation.
        if current_entity and current_entity != new_entity:
            logger.info(
                "You leave %s and approach %s.", current_entity.name, new_entity.name
            )
            current_entity.reflect_and_summarize(world_clock, location)
        current_entity = new_entity
        user_msg = input(f"You to {current_entity.name}: ")
        ask_entity(current_entity, player, user_msg, world_clock, location)

View File

@@ -32,7 +32,7 @@ class Entity:
def perceive(self, entry: MemoryEntry):
self.memory.save(entry)
def reflect_and_summarize(self, world_clock: WorldClock, location: str):
def reflect_and_summarize(self, world_clock: WorldClock, location: str, world_state=None):
"""Converts Tier 1 (Buffer) into Tier 2 (Long-term Subjective Memory)."""
if not self.chat_buffer:
return

View File

@@ -3,6 +3,7 @@ import logging
from entities import Player
from interaction import ask_entity
from time_utils import WorldClock
from world_architect import WorldState
logger = logging.getLogger(__name__)
@@ -15,7 +16,13 @@ def _build_name_lookup(entities):
return name_lookup
def start_game(entities, player_id=None, world_time=None, location="Unknown"):
def start_game(
entities,
player_id=None,
world_time=None,
location="Unknown",
world_state=None,
):
player = None
if player_id:
player = entities.get(player_id)
@@ -37,6 +44,26 @@ def start_game(entities, player_id=None, world_time=None, location="Unknown"):
}
world_clock = WorldClock.from_time_str(world_time)
# Initialize world state if not provided
if world_state is None:
world_state = WorldState()
world_state.world_clock = world_clock
for entity_id, entity in entities.items():
world_state.entities[entity_id] = {
"name": entity.name,
"location": location,
"health": 100,
"status": "calm",
"mood": entity.current_mood,
}
world_state.locations[location.lower().replace(" ", "_")] = {
"name": location,
"description": f"The {location}",
"occupants": len(entities),
"visibility": "clear",
}
current_entity = None
name_lookup = _build_name_lookup(available_entities)
entity_names = "/".join(
@@ -56,7 +83,7 @@ def start_game(entities, player_id=None, world_time=None, location="Unknown"):
if target_name in ["exit", "quit"]:
if current_entity:
current_entity.reflect_and_summarize(world_clock, location)
current_entity.reflect_and_summarize(world_clock, location, world_state)
break
target_key = name_lookup.get(target_name)
@@ -69,9 +96,16 @@ def start_game(entities, player_id=None, world_time=None, location="Unknown"):
logger.info(
"You leave %s and approach %s.", current_entity.name, new_entity.name
)
current_entity.reflect_and_summarize(world_clock, location)
current_entity.reflect_and_summarize(world_clock, location, world_state)
current_entity = new_entity
user_msg = input(f"You to {current_entity.name}: ")
ask_entity(current_entity, player, user_msg, world_clock, location)
ask_entity(
current_entity,
player,
user_msg,
world_clock,
location,
world_state=world_state,
)

View File

@@ -5,6 +5,7 @@ from langchain_core.messages import HumanMessage, SystemMessage
from entities import Entity
from llm_runtime import _format_prompt, _normalize_llm_output, llm
from time_utils import WorldClock, describe_relative_time
from world_architect import invoke_architect, apply_state_delta, WorldState
logger = logging.getLogger(__name__)
@@ -15,6 +16,7 @@ def ask_entity(
player_query: str,
world_clock: WorldClock,
location: str,
world_state: WorldState | None = None,
):
facts = entity.memory.retrieve(
player_query,
@@ -87,3 +89,22 @@ RECENT CHAT: {recent_context}
)
logger.info("[%s]: %s", entity.name.upper(), response)
# Invoke World Architect to process entity action
if world_state:
logger.info("Invoking World Architect for action processing...")
state_delta = invoke_architect(
entity_id=entity.entity_id,
action=response,
current_state=world_state.to_dict(),
entity_name=entity.name,
)
if state_delta:
logger.info("Applying state delta to world...")
apply_state_delta(world_state, state_delta)
logger.info(
"World time now: %s", world_state.world_clock.get_time_str()
)
else:
logger.info("No state changes from architect")

View File

@@ -14,10 +14,11 @@ llm = ChatLlamaCpp(
temperature=0.2,
model_path=DEFAULT_MODEL_PATH,
n_ctx=4096,
n_gpu_layers=8,
n_gpu_layers=11,
max_tokens=512,
n_threads=multiprocessing.cpu_count() - 1,
repeat_penalty=1.5,
verbose=False,
)

View File

@@ -15,5 +15,6 @@ if __name__ == "__main__":
scenario.player_id,
world_time=scenario.metadata.get("world_time"),
location=scenario.metadata.get("location", "Unknown"),
world_state=scenario.world_state,
)
save_scenario(SCENARIO_PATH, scenario)

View File

@@ -5,6 +5,8 @@ from pathlib import Path
from entities import Entity, Player
from memory import MemoryEntry
from time_utils import WorldClock
from world_architect import WorldState
logger = logging.getLogger(__name__)
@@ -13,6 +15,7 @@ class Scenario:
metadata: dict
entities: dict
player_id: str | None = None
world_state: WorldState | None = None
def load_scenario(path: Path) -> Scenario:
@@ -68,7 +71,37 @@ def load_scenario(path: Path) -> Scenario:
entities[entity_id] = entity
logger.info("Loaded %s entities from scenario.", len(entities))
return Scenario(metadata=metadata, entities=entities, player_id=player_id)
# Initialize world state
world_state = WorldState()
world_state.world_clock = WorldClock.from_time_str(world_time)
# Populate world state with entity data
for entity_id, entity in entities.items():
world_state.entities[entity_id] = {
"name": entity.name,
"location": location,
"health": 100,
"status": "calm",
"mood": entity.current_mood,
}
# Add location to world state
world_state.locations[location.lower().replace(" ", "_")] = {
"name": location,
"description": f"The {location}",
"occupants": len(entities),
"visibility": "clear",
}
logger.info("Initialized WorldState for scenario")
return Scenario(
metadata=metadata,
entities=entities,
player_id=player_id,
world_state=world_state,
)
def dump_scenario(scenario: Scenario) -> dict:

View File

@@ -67,6 +67,10 @@ class WorldClock:
def advance_time(self, minutes=0, hours=0, days=0):
self.current_time += timedelta(minutes=minutes, hours=hours, days=days)
def advance_minutes(self, minutes: int):
"""Convenience method to advance clock by minutes."""
self.advance_time(minutes=minutes)
def get_time_str(self):
# 1999-05-14 18:00
return self.current_time.strftime("%Y-%m-%d %H:%M")

290
world_architect.py Normal file
View File

@@ -0,0 +1,290 @@
import json
import logging
from datetime import datetime, timedelta
from typing import Any
from llm_runtime import llm, _normalize_llm_output, _format_prompt
from langchain_core.messages import HumanMessage, SystemMessage
logger = logging.getLogger(__name__)
class WorldState:
    """Objective reality - the source of truth about the game world."""

    def __init__(self):
        self.world_clock = None   # set to a WorldClock once the game starts
        self.locations = {}       # location_id -> property dict
        self.entities = {}        # entity_id -> property dict
        self.story_flags = {}     # flag name -> value
        self.ambient_events = []  # chronological list of event dicts

    def to_dict(self) -> dict:
        """Serialize to a JSON-compatible dict (only last 10 ambient events)."""
        clock = None
        if self.world_clock:
            clock = self.world_clock.get_time_str()
        return {
            "world_clock": clock,
            "locations": self.locations,
            "entities": self.entities,
            "story_flags": self.story_flags,
            "ambient_events": self.ambient_events[-10:],
        }

    def from_dict(self, data: dict):
        """Load state from dict (typically from scenario); clock NOT restored."""
        self.locations = data.get("locations", {})
        self.entities = data.get("entities", {})
        self.story_flags = data.get("story_flags", {})
        self.ambient_events = data.get("ambient_events", [])
# Prompt contract for the Architect LLM: it acts as the impartial causality
# engine and must reply with a single JSON "state delta" (schema below).
# invoke_architect() parses this reply; apply_state_delta() consumes it.
ARCHITECT_SYSTEM_PROMPT = """You are the World Architect - the objective reality of the game world.
Your role is to:
1. Process entity actions and determine realistic consequences
2. Maintain the consistency and causality of the world
3. Manage world time advancement (typically 5-30 minutes per action)
4. Update entity states and location properties based on actions
5. Track story flags and significant events
6. Return ONLY a valid JSON response with state changes
You are NOT an NPC. You don't have opinions or feelings. You are pure logic.
CONSTRAINTS:
- Time advances by realistic amounts (5-30 minutes typically, more for extended actions)
- Changes must be causally connected to the action
- Property changes must make narrative sense
- Entity locations change only if action involves movement
- Story flags change only on significant narrative events
- Maintain causality: if A happens, B is realistic consequence
- Consider secondary effects: if someone leaves, location becomes emptier
RESPONSE FORMAT:
Return ONLY valid JSON (no markdown, no explanation) with this structure:
{
"clock_delta_minutes": <number 5-30>,
"rationale_clock": "<brief explanation of time advancement>",
"entity_deltas": {
"entity_id": {
"location": "<new location or null>",
"status": "<new status or null>",
"custom_property_name": "<new value>"
}
},
"location_deltas": {
"location_id": {
"description_append": "<add to description or null>",
"custom_property_name": "<new value>"
}
},
"story_flag_deltas": {
"flag_name": true
},
"ambient_events": [
{
"time": "<HH:MM format>",
"location": "location_id",
"description": "<What happened>",
"visible_to": ["entity_id"]
}
],
"rationale": "<1-2 sentence summary of world changes>"
}
All fields are optional. Only include fields that changed.
If something didn't change, omit it from the response.
If clock_delta_minutes is not provided, assume 10 minutes."""
# Note: The architect should consider:
# - Does the action move the entity? (update location)
# - Does it change entity status? (alert, calm, injured, tired)
# - Does it affect the location? (damage, objects, visibility)
# - Does it trigger story progression? (update flags)
# - Are there secondary effects? (other entities react, properties degrade)
def build_architect_prompt(
    entity_id: str,
    action: str,
    current_state: dict,
    entity_name: str = "",
) -> list:
    """Assemble the system+human message pair for one Architect LLM call."""
    serialized_state = json.dumps(current_state, indent=2)
    # Human turn: the full state snapshot plus the action to adjudicate.
    request = (
        "\n"
        "CURRENT WORLD STATE:\n"
        f"{serialized_state}\n"
        "ACTION TO PROCESS:\n"
        f"Entity: {entity_name or entity_id} (ID: {entity_id})\n"
        f"Action: {action}\n"
        "Determine what changes to the world state as a direct consequence of this action.\n"
        "Return ONLY the JSON response with changed fields."
    )
    return [
        SystemMessage(content=ARCHITECT_SYSTEM_PROMPT),
        HumanMessage(content=request),
    ]
def _strip_markdown_fences(text: str) -> str:
    """Remove a surrounding ```...``` code fence (optionally tagged 'json')."""
    stripped = text.strip()
    if stripped.startswith("```"):
        stripped = stripped[3:]
        if stripped.lower().startswith("json"):
            stripped = stripped[4:]
        if stripped.endswith("```"):
            stripped = stripped[:-3]
    return stripped.strip()


def invoke_architect(
    entity_id: str,
    action: str,
    current_state: dict,
    entity_name: str = "",
) -> dict:
    """
    Invoke the Architect LLM to determine world state changes.

    Args:
        entity_id: ID of the entity performing the action
        action: Description of what the entity did
        current_state: Current world state dict
        entity_name: Display name of entity (for context)

    Returns:
        State delta dict with only changed fields (empty dict on any failure)
    """
    logger.info("Architect processing action from %s: %s", entity_id, action[:80])
    messages = build_architect_prompt(entity_id, action, current_state, entity_name)
    # Log the prompt
    logger.debug("Architect prompt:\n%s", _format_prompt(messages))
    # Pre-bind so the JSON error handler can log it even on odd failures.
    response_text = ""
    try:
        response = llm.invoke(messages)
        response_text = _normalize_llm_output(response.content)
        logger.debug("Architect raw response: %s", response_text)
        # Models often wrap JSON in ```json fences despite the instructions;
        # strip them before parsing instead of discarding the whole delta.
        delta = json.loads(_strip_markdown_fences(response_text))
        logger.info("State delta: %s", json.dumps(delta, indent=2)[:200])
        return delta
    except json.JSONDecodeError as e:
        logger.error("Architect response was not valid JSON: %s", response_text)
        logger.error("JSON parse error: %s", e)
        # Return empty delta on parse failure (world continues unchanged)
        return {}
    except Exception:
        # logger.exception records the traceback, unlike a bare error log.
        logger.exception("Architect invocation failed")
        return {}
def apply_state_delta(world_state: WorldState, delta: dict) -> None:
    """
    Apply a state delta to the world state.

    Mutates world_state in place: advances the clock, merges entity and
    location property changes, records story flags and ambient events.

    Args:
        world_state: The WorldState object to modify
        delta: State delta returned by Architect
    """
    if not delta:
        return

    # Advance clock
    clock_delta_minutes = delta.get("clock_delta_minutes", 10)
    if world_state.world_clock and clock_delta_minutes:
        world_state.world_clock.advance_minutes(clock_delta_minutes)
        logger.info(
            "Clock advanced %d minutes to %s",
            clock_delta_minutes,
            world_state.world_clock.get_time_str(),
        )

    # Apply entity deltas (only non-None values overwrite)
    for entity_id, changes in delta.get("entity_deltas", {}).items():
        if entity_id not in world_state.entities:
            logger.warning("Entity delta for unknown entity: %s", entity_id)
            continue
        target = world_state.entities[entity_id]
        target.update(
            {key: value for key, value in changes.items() if value is not None}
        )
        logger.debug("Entity %s updated: %s", entity_id, changes)

    # Apply location deltas; unknown locations are created on demand
    for location_id, changes in delta.get("location_deltas", {}).items():
        location = world_state.locations.setdefault(location_id, {})
        for key, value in changes.items():
            if value is None:
                continue
            if key == "description_append" and value:
                # Append to description instead of replace
                location["description"] = location.get("description", "") + f" {value}"
            else:
                location[key] = value
        logger.debug("Location %s updated: %s", location_id, changes)

    # Apply story flag deltas
    for flag_name, value in delta.get("story_flag_deltas", {}).items():
        if value is not None:
            world_state.story_flags[flag_name] = value
            logger.info("Story flag '%s' set to %s", flag_name, value)

    # Record ambient events
    for event in delta.get("ambient_events", []):
        world_state.ambient_events.append(event)
        logger.info(
            "Ambient event at %s (%s): %s",
            event.get("time"),
            event.get("location"),
            event.get("description"),
        )
def get_world_context_for_entity(
    world_state: WorldState, entity_id: str
) -> dict:
    """
    Get the portion of world state that an entity might realistically perceive.

    Entities never see the full objective state - only their own record, the
    state of their current location, whoever else is there, and story flags.

    Args:
        world_state: The objective world state
        entity_id: The entity requesting context

    Returns:
        A filtered version of world state relevant to this entity
    """
    me = world_state.entities.get(entity_id, {})
    where = me.get("location")
    clock_str = (
        world_state.world_clock.get_time_str() if world_state.world_clock else None
    )
    local_state = world_state.locations.get(where, {}) if where else {}
    neighbors = {
        other_id: other
        for other_id, other in world_state.entities.items()
        if other_id != entity_id and other.get("location") == where
    }
    return {
        "world_clock": clock_str,
        "current_location": where,
        "self": me,
        "location_state": local_state,
        "other_entities_here": neighbors,
        "story_flags": world_state.story_flags,
    }