Files
omnia-langchain/main.py

93 lines
3.1 KiB
Python

import multiprocessing
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_community.vectorstores import FAISS
from langchain_community.chat_models import ChatLlamaCpp
from langchain_core.messages import SystemMessage, HumanMessage
# Path to a locally cached GGUF quantized Gemma model (4-bit K_M quant).
local_model = "/home/sortedcord/.cache/huggingface/hub/models--ggml-org--gemma-4-E4B-it-GGUF/snapshots/6b352c53e1d2e4bb974d9f8cafcf85887c224219/gemma-4-e4b-it-Q4_K_M.gguf"
# Local llama.cpp-backed chat model used for all NPC responses.
llm = ChatLlamaCpp(
temperature=0.2, # Lower temperature for consistency in logic
model_path=local_model,
n_ctx=4096,  # context window (tokens)
n_gpu_layers=8,  # layers offloaded to GPU; rest run on CPU
max_tokens=256,  # cap on generated reply length
n_threads=multiprocessing.cpu_count() - 1,  # leave one core free for the OS
repeat_penalty=1.2,  # discourage the model from looping phrases
)
# Sentence-transformer embeddings backing each NPC's FAISS memory store.
embeddings = HuggingFaceEmbeddings(model_name="all-MiniLM-L6-v2")
class GameWorld:
    """Holds the objective world state and each NPC's subjective memory.

    NPCs never see ``global_state`` directly; they only "know" facts that
    were explicitly injected into their personal vector store.
    """

    def __init__(self):
        # Objective truth about the murder — hidden from every NPC unless
        # a matching observation is added to their memory.
        self.global_state = {
            "murderer": "The Bard",
            "weapon": "Poisoned Lute String",
            "location": "The Blue Tavern",
            "body_discovered": False,
        }
        # npc_name -> FAISS vector store of that NPC's observations.
        self.npc_memories = {}

    def add_npc_memory(self, npc_name, observation):
        """Injects a specific fact into an NPC's subjective reality."""
        store = self.npc_memories.get(npc_name)
        if store is None:
            # First fact for this NPC: create their memory store.
            self.npc_memories[npc_name] = FAISS.from_texts([observation], embeddings)
        else:
            store.add_texts([observation])

    def get_npc_context(self, npc_name, query):
        """Retrieves only what the NPC knows regarding a query."""
        if npc_name not in self.npc_memories:
            return "I don't know anything about that."
        # Top-2 most similar memories, concatenated into one context string.
        hits = self.npc_memories[npc_name].similarity_search(query, k=2)
        return " ".join(doc.page_content for doc in hits)
world = GameWorld()

# THE TRUTH: The Bard killed the Merchant.
# NPC "Guard Barnaby" only saw the Merchant enter the tavern.
# Seed each NPC's subjective memory; order matters only for store creation.
_seed_observations = [
    ("Barnaby", "I saw the Merchant enter the Blue Tavern at sunset. He looked happy."),
    ("Barnaby", "The Bard was tuning his instrument near the fireplace."),
    ("Sybil", "I smelled bitter almonds (poison) coming from the Bard's bag."),
]
for _npc, _fact in _seed_observations:
    world.add_npc_memory(_npc, _fact)
def ask_npc(npc_name, player_query):
    """Asks an NPC a question, grounded strictly in that NPC's own memories.

    Prints the exchange to stdout and returns the model's reply text.
    (Previously the reply was computed and printed but discarded, so
    callers could not use the answer programmatically; returning it is
    backward-compatible since the old return value was None.)

    Args:
        npc_name: Key into the world's per-NPC memory stores.
        player_query: The player's question, passed verbatim to the model.

    Returns:
        The stripped reply text from the LLM.
    """
    # Retrieve ONLY this NPC's memories — never the objective world state.
    subjective_knowledge = world.get_npc_context(npc_name, player_query)
    prompt = [
        SystemMessage(
            content=f"""
You are {npc_name}, a character in a fantasy world.
Strict Rule: You ONLY know what is in your 'Memory' block.
If the information isn't there, you must honestly say you don't know or speculate based ONLY on your memory.
Do not use outside knowledge.
Your Memory: {subjective_knowledge}
"""
        ),
        HumanMessage(content=player_query),
    ]
    response = llm.invoke(prompt)
    answer = response.content.strip()
    print(f"\n--- {npc_name.upper()} ---")
    print(f"Player: {player_query}")
    print(f"Response: {answer}")
    return answer
# Demo queries: each NPC answers strictly from its own memory store,
# so neither should reveal the objective "murderer" fact directly.
# Ask the Guard about the murder (He shouldn't know it happened)
ask_npc("Barnaby", "Did you see Bard?")
# Ask the Witch about the Bard (She has a suspicious clue)
ask_npc("Sybil", "Do you know anything about bard? Did you see him?")