# omnia-langchain/interaction.py
# Entity dialogue handling: prompt construction, LLM invocation, and
# post-response world-state / spatial-perception side effects.

import logging
from langchain_core.messages import HumanMessage, SystemMessage
from entities import Entity
from llm_runtime import _format_prompt, _normalize_llm_output, llm
from spatial_graph import SpatialGraph
from time_utils import WorldClock, describe_relative_time
from world_architect import invoke_architect, apply_state_delta, WorldState
logger = logging.getLogger(__name__)
def ask_entity(
    entity: Entity,
    player: Entity,
    player_query: str,
    world_clock: WorldClock,
    location: str,
    world_state: WorldState | None = None,
    spatial_graph: SpatialGraph | None = None,
) -> str:
    """Generate an in-character reply from *entity* to *player_query*.

    Retrieves relevant memories, builds the dialogue prompt, invokes the
    LLM, records the exchange in both participants' chat buffers, and then
    optionally runs the World Architect and the spatial-perception
    broadcast as side effects.

    Args:
        entity: The NPC being addressed.
        player: The entity doing the speaking.
        player_query: Raw text spoken by the player.
        world_clock: Clock used for memory recency and the time label.
        location: Human-readable location name injected into the prompt.
        world_state: If truthy, the World Architect processes the response
            and any resulting state delta is applied to this state.
        spatial_graph: If truthy, the response is broadcast through portal
            connections to nearby entities.

    Returns:
        The normalized LLM response text. (Previously the response was
        discarded; returning it is backward-compatible for callers that
        ignored the old ``None`` result.)
    """
    facts = entity.memory.retrieve(
        player_query,
        reference_time=world_clock.current_time,
    )
    # Last five turns of this entity's conversation, oldest first.
    recent_context = "\n".join(
        f"{m['role_name']}: {m['content']}" for m in entity.chat_buffer[-5:]
    )
    world_time_label = describe_relative_time(
        world_clock.get_time_str(),
        world_clock.current_time,
        prefer_day_part_for_today=True,
    )
    prompt = [
        SystemMessage(content=f"WORLD TIME: {world_time_label}"),
        SystemMessage(
            content=f"""
### ROLE
You are {entity.name}. Persona: {", ".join(entity.traits)}.
Current Mood: {entity.current_mood}.
Vibe Time: {world_clock.get_vibe()}.
Location: {location}.
### WRITING STYLE RULES
1. NO META-TALK. Never mention "memory," "records," "claims," or "narratives."
2. ACT, DON'T EXPLAIN. If you don't know something, just say "Never heard of it" or "I wasn't there." Do not explain WHY you don't know.
### KNOWLEDGE
MEMORIES: {facts}
RECENT CHAT: {recent_context}
"""
        ),
        HumanMessage(content=f"{player.name} speaks to you: {player_query}"),
    ]
    logger.info("LLM prompt (dialogue):\n%s", _format_prompt(prompt))
    response = _normalize_llm_output(llm.invoke(prompt).content)
    _record_exchange(entity, player, player_query, response)
    logger.info("[%s]: %s", entity.name.upper(), response)
    # Invoke World Architect to process entity action
    if world_state:
        _apply_world_architect(entity, response, world_state)
    # Broadcast action through spatial graph
    if spatial_graph:
        _broadcast_perception(entity, response, spatial_graph)
    else:
        logger.debug("No spatial graph provided, skipping perception broadcast")
    return response


def _record_exchange(
    entity: Entity, player: Entity, player_query: str, response: str
) -> None:
    """Append the player's line and the entity's reply to both chat buffers."""
    player_turn = {
        "role_id": player.entity_id,
        "role_name": player.name,
        "content": player_query,
    }
    entity_turn = {
        "role_id": entity.entity_id,
        "role_name": entity.name,
        "content": response,
    }
    # Append copies so each buffer owns independent dicts, matching the
    # original behavior of building a fresh dict per append.
    entity.chat_buffer.append(dict(player_turn))
    entity.chat_buffer.append(dict(entity_turn))
    player.chat_buffer.append(dict(player_turn))
    player.chat_buffer.append(dict(entity_turn))


def _apply_world_architect(
    entity: Entity, response: str, world_state: WorldState
) -> None:
    """Run the World Architect on the entity's response and apply any delta."""
    logger.info("Invoking World Architect for action processing...")
    state_delta = invoke_architect(
        entity_id=entity.entity_id,
        action=response,
        current_state=world_state.to_dict(),
        entity_name=entity.name,
    )
    if state_delta:
        logger.info("Applying state delta to world...")
        apply_state_delta(world_state, state_delta)
        logger.info("World time now: %s", world_state.world_clock.get_time_str())
    else:
        logger.info("No state changes from architect")


def _broadcast_perception(
    entity: Entity, response: str, spatial_graph: SpatialGraph
) -> None:
    """Broadcast the entity's response through the spatial graph, logging results."""
    logger.info("Broadcasting action through spatial graph...")
    entity_pos = spatial_graph.get_entity_position(entity.entity_id)
    if not entity_pos:
        logger.warning("Entity %s has no spatial position", entity.entity_id)
        return
    # Get all perceiving entities
    perceptions = spatial_graph.bubble_up_broadcast(
        location_id=entity_pos.location_id,
        action=response,
        actor_id=entity.entity_id,
        llm_filter=_portal_filter_llm,
        escalation_check=_escalation_check_llm,
    )
    # NOTE(review): perceivers' memories are not updated here yet — the loop
    # only logs what each perceiver would see.
    logger.info("Perception broadcast: %d entities perceiving", len(perceptions))
    for perceiver_id, perception in perceptions.items():
        if perceiver_id == entity.entity_id:
            continue
        # Find perceiving entity in current session (would be in entities dict)
        if perception.perceivable:
            logger.debug(
                "%s perceives: %s", perceiver_id, perception.transformed_action
            )
def _portal_filter_llm(action, source_id, target_id, portal_conn):
"""
Simple portal filtering based on connection properties.
Can be enhanced with actual LLM calls if needed.
"""
vision = portal_conn.vision_prop
sound = portal_conn.sound_prop
if vision < 1 and sound < 1:
return None
if vision < 2 or sound < 2:
return f"You hear muffled sounds from {source_id}."
if vision < 5 or sound < 5:
words = action.split()
if len(words) > 2:
return f"You hear indistinct sounds from {source_id}..."
return f"You hear from {source_id}: {action}"
# Clear
return action
def _escalation_check_llm(action, source_id, parent_id):
"""
Simple escalation check based on keywords.
Can be enhanced with actual LLM calls if needed.
"""
escalation_keywords = [
"yell",
"scream",
"shout",
"cry",
"crash",
"bang",
"explosion",
"smash",
"break",
"attack",
"fight",
"combat",
"blood",
"murder",
"help",
"alarm",
"emergency",
]
action_lower = action.lower()
return any(kw in action_lower for kw in escalation_keywords)