diff --git a/unfold_studio/static/player.js b/unfold_studio/static/player.js index 33992d5..b137fa0 100644 --- a/unfold_studio/static/player.js +++ b/unfold_studio/static/player.js @@ -14,6 +14,10 @@ function InkPlayer(containerSelector) { this.currentStoryPoint = 0; this.aiSeed = null; this.generateInProgress = false; + + this.agentFunctionCalled = false; + this.currentAgentCharacter = null; + this.currentAgentTarget = null; } InkPlayer.prototype = { @@ -50,6 +54,21 @@ InkPlayer.prototype = { this.scheduleInputBoxForContinue() return ''; }.bind(this)); + +//AGENT + + story.BindExternalFunction("agent_call", function(characterKnot, targetKnot) { + var characterKnotName = characterKnot._componentsString; + var targetKnotName = targetKnot._componentsString; + this.agentFunctionCalled = true; + this.currentAgentCharacter = characterKnotName; + this.currentAgentTarget = targetKnotName; + + this.scheduleAgentInputBox("Talk to " + this.currentAgentCharacter); + return ''; + }.bind(this)); + + story.BindExternalFunction("input", function (placeholder = "Enter text...", variableName) { this.inputFunctionCalled = true; this.scheduleInputBox(placeholder, variableName); @@ -58,7 +77,7 @@ InkPlayer.prototype = { - + // TODO: There is a race condition here: the ajax query is sent off // with a callback for when it returns. Meanwhile, a temporary span // is created with text "Loading..." and a unique ID. 
Once the query @@ -150,6 +169,12 @@ InkPlayer.prototype = { this.continueFunctionCalled = false; return; } + + if (this.agentFunctionCalled) { + this.agentFunctionCalled = false; + this.events.renderScheduledInputBox.bind(this)(); + return; + } if (this.generateInProgress) { await this.generateAndInsertInDOM(this.generatePrompt); } @@ -257,6 +282,28 @@ InkPlayer.prototype = { ); this.inputBoxToInsert = formContainer; }, + + scheduleAgentInputBox: function(placeholder = "Enter text....") { + const eventHandler = (userInput) => { + this.createStoryPlayRecord( + this.getStoryPlayInstanceUUID(), + "READERS_AGENT_ENTERED_TEXT", + userInput + ); + + this.handleUserInputForAgent(userInput); + }; + + const formContainer = this.createInputForm( + "AUTHORS_AGENT_INPUT_BOX", + eventHandler, + placeholder + ); + + this.inputBoxToInsert = formContainer; + this.events.renderScheduledInputBox.bind(this)(); + }, + createInputForm: function(formType, eventHandler, placeholder, variableName=null) { const formContainer = document.createElement("div"); formContainer.classList.add("input-container"); @@ -349,6 +396,88 @@ InkPlayer.prototype = { break; } }, + + // AGENT HANDLING INPUT + handleUserInputForAgent: async function(userInput) { + try { + const response = await $.ajax("/agent/", { + beforeSend: function(xhr) { + xhr.setRequestHeader("X-CSRFToken", CSRF); + }, + method: "POST", + data: JSON.stringify({ + user_input: userInput, + story_play_instance_uuid: this.getStoryPlayInstanceUUID(), + character_knot_name: this.currentAgentCharacter, + target_knot_name: this.currentAgentTarget, + ai_seed: this.aiSeed + }), + contentType: "application/json" + }); + + const agentResult = response.result || {}; + const characterText = agentResult.character_text || ""; + const decision = agentResult.continue_decision || {}; + const direction = decision.direction || "NEEDS_INPUT"; + const content = decision.content || {}; + + if (characterText) { + this.events.addContent.bind(this)({ + text: 
characterText, + tags: ["agent"] + }); + } + + switch (direction) { + case "NEEDS_INPUT": + this.scheduleAgentInputBox(); + this.events.renderScheduledInputBox.bind(this)(); + break; + + case "INVALID_USER_INPUT": + this.scheduleAgentInputBox("Input was not valid... Try again"); + this.events.renderScheduledInputBox.bind(this)(); + break; + + case "BRIDGE_AND_CONTINUE": + if (content.bridge_text) { + this.events.addContent.bind(this)({ + text: content.bridge_text, + tags: ["bridge"] + }); + + this.createStoryPlayRecord( + this.getStoryPlayInstanceUUID(), + "AI_GENERATED_TEXT", + content.bridge_text + ); + } + this.continueStory(); + break; + + case "DIRECT_CONTINUE": + this.continueStory(); + break; + + default: + console.error("Unexpected agent direction:", direction, decision); + this.scheduleAgentInputBox("Something went wrong — try again"); + this.events.renderScheduledInputBox.bind(this)(); + break; + } + } catch (err) { + console.error("Agent request failed:", err); + + this.events.addContent.bind(this)({ + text: "Connection issue talking to the character. Please try again.", + tags: ["agent"] + }); + + this.scheduleAgentInputBox("Try again..."); + this.events.renderScheduledInputBox.bind(this)(); + } + }, + getStoryPlayInstanceUUID: function() { return this.storyPlayInstanceUUID; }, diff --git a/unfold_studio/text_generation/backends.py b/unfold_studio/text_generation/backends.py index 057f3c3..3ce8c5b 100644 --- a/unfold_studio/text_generation/backends.py +++ b/unfold_studio/text_generation/backends.py @@ -104,6 +104,7 @@ def _create_chat_completion(self, messages, seed, hit_cache=True, **extra_api_pa return result except APIError as err: log.error("Error calling OpenAI", error=str(err)) + print("OPENAI APIError:", repr(err)) return "...error generating text..." 
def generate(self, prompt, context_array, seed, hit_cache=True): @@ -114,17 +115,20 @@ def generate(self, prompt, context_array, seed, hit_cache=True): hit_cache=hit_cache ) - def get_ai_response_by_system_and_user_prompt(self, system_prompt, user_prompt, seed, hit_cache=True): + def get_ai_response_by_system_and_user_prompt(self, system_prompt, user_prompt, seed, hit_cache=True, force_json=False): messages = [ {"role": "system", "content": system_prompt}, {"role": "user", "content": user_prompt} ] - return self._create_chat_completion( - messages=messages, - seed=seed, - hit_cache=hit_cache, - response_format={"type": "json_object"} - ) + kwargs = { + "hit_cache": hit_cache, + "seed": seed, + "messages": messages, + } + if force_json: + kwargs["response_format"] = {"type": "json_object"} + + return self._create_chat_completion(**kwargs) diff --git a/unfold_studio/text_generation/constants.py b/unfold_studio/text_generation/constants.py index f01a9a9..1dc7b8e 100644 --- a/unfold_studio/text_generation/constants.py +++ b/unfold_studio/text_generation/constants.py @@ -141,4 +141,19 @@ class StoryContinueDirections(BaseConstant): { "score": 1-5, "reason": "detailed analysis" -}""" \ No newline at end of file +}""" + +AGENT_SYSTEM_PROMPT = """ +You are an in-story character speaking to the reader. Your job is to strictly stay in character and +give no spoilers to the reader. Keep replies concise (about one short paragraph). If the reader asks +questions or says anything unrelated, respond in character and bring the conversation back to the topic. +(For system compliance: output is not json.) After answering, ask a question that encourages the reader to take an action in the scene. 
+""" + +AGENT_USER_PROMPT_TEMPLATE = """ +### Character voice ### +Character knot: %(character_knot)s +History: %(history)s +User Input: %(user_input)s + +""" \ No newline at end of file diff --git a/unfold_studio/text_generation/urls.py b/unfold_studio/text_generation/urls.py index 727ab2b..df6ca84 100644 --- a/unfold_studio/text_generation/urls.py +++ b/unfold_studio/text_generation/urls.py @@ -2,7 +2,8 @@ from . import views urlpatterns = [ - path('generate', views.GenerateTextView.as_view(), name="generate"), - path('get_next_direction', views.GetNextDirectionView.as_view(), name="get_next_direction") + path('generate/', views.GenerateTextView.as_view(), name="generate"), + path('get_next_direction/', views.GetNextDirectionView.as_view(), name="get_next_direction"), + path('agent/', views.AgentView.as_view(), name="agent") ] diff --git a/unfold_studio/text_generation/views.py b/unfold_studio/text_generation/views.py index 0f9bd2c..e818b0b 100644 --- a/unfold_studio/text_generation/views.py +++ b/unfold_studio/text_generation/views.py @@ -6,6 +6,10 @@ from .models import StoryTransitionRecord from .services.unfold_studio import UnfoldStudioService from .constants import (StoryContinueDirections, CONTINUE_STORY_SYSTEM_PROMPT, CONTINUE_STORY_USER_PROMPT_TEMPLATE) +from .constants import AGENT_SYSTEM_PROMPT, AGENT_USER_PROMPT_TEMPLATE + +from django.utils.decorators import method_decorator +from django.views.decorators.csrf import csrf_exempt class GenerateTextView(BaseView): @@ -131,7 +135,7 @@ def get_next_direction_details_for_story(self, target_knot_data, story_history, backend = TextGenerationFactory.create(backend_config) system_prompt, user_prompt = self.build_system_and_user_prompt(target_knot_data, story_history, user_input) - response = backend.get_ai_response_by_system_and_user_prompt(system_prompt, user_prompt, seed, hit_cache=True) + response = backend.get_ai_response_by_system_and_user_prompt(system_prompt, user_prompt, seed, 
hit_cache=True,force_json=True) #print(response) parsed_response = self.parse_and_validate_ai_response(response) @@ -195,3 +199,122 @@ def post(self, request): except Exception as e: print(str(e)) return JsonResponse({"error": str(e)}, status=500) + +@method_decorator(csrf_exempt, name="dispatch") +class AgentView(BaseView): + + def validate_request(self, request_body): + required_fields = ['user_input', 'character_knot_name', 'target_knot_name', 'story_play_instance_uuid'] + for field in required_fields: + if not request_body.get(field): + return False, f"Missing required field: {field}" + return True, None + + def generate_character_text(self, character_knot_data, story_history, user_input, seed): + backend = TextGenerationFactory.create(settings.TEXT_GENERATION) + system_prompt, user_prompt = self.build_agent_prompts(character_knot_data, story_history, user_input) + + try: + text = backend.get_ai_response_by_system_and_user_prompt( + system_prompt, user_prompt, seed, hit_cache=True, + ) + return text + except Exception as e: + print("ERROR in generate_character_text:", repr(e)) + # fallback that still behaves “in character-ish” + voice_lines = [ln.strip() for ln in character_knot_data.get("knotContents", []) if ln.strip()] + voice_hint = voice_lines[0] if voice_lines else "…" + return f"{voice_hint}\n\nWhat do you want?" 
+ + + def post(self, request): + try: + request_body = json.loads(request.body) + seed = request_body.get('ai_seed') or settings.DEFAULT_AI_SEED + + story_play_instance_uuid = request_body.get("story_play_instance_uuid") + character_knot_name = request_body.get("character_knot_name") + target_knot_name = request_body.get("target_knot_name") + user_input = request_body.get("user_input") + print("character_knot_name", character_knot_name) + print("target_knot_name", target_knot_name) + print("story_play_instance_uuid", story_play_instance_uuid) + print("user_input", user_input) + print("seed", seed) + + validation_successful, failure_reason = self.validate_request(request_body) + if not validation_successful: + return JsonResponse({"error": failure_reason}, status=400) + + story_id = UnfoldStudioService.get_story_id_from_play_instance_uuid(story_play_instance_uuid) + story_play_history = UnfoldStudioService.get_story_play_history(story_play_instance_uuid) + + character_knot_data = UnfoldStudioService.get_knot_data(story_id, character_knot_name) + if not character_knot_data: + return JsonResponse({"error": f"Character knot not found or empty: {character_knot_name}"}, status=404) + + target_knot_data = UnfoldStudioService.get_knot_data(story_id, target_knot_name) + if not target_knot_data: + return JsonResponse({"error": f"Target knot not found or empty: {target_knot_name}"}, status=404) + + character_text = self.generate_character_text( + character_knot_data=character_knot_data, + story_history=story_play_history, + user_input=user_input, + seed=seed + ) + + direction_view = GetNextDirectionView() + direction, content = direction_view.get_next_direction_details_for_story( + target_knot_data=target_knot_data, + story_history=story_play_history, + user_input=user_input, + seed=seed + ) + if direction in (StoryContinueDirections.NEEDS_INPUT, StoryContinueDirections.INVALID_USER_INPUT): + content["guidance_text"] = character_text + + + if direction in 
(StoryContinueDirections.BRIDGE_AND_CONTINUE): + # Blend the two: Maya speaks, then the narrator describes the movement + integrated_bridge = f"{character_text}\n\n{content['bridge_text']}" + content['bridge_text'] = integrated_bridge + # Clear character_text so the frontend doesn't show it twice + character_text = None + + result = { + "character_text": character_text, + "continue_decision": { + "direction": direction, + "content": content, + }, + } + return JsonResponse({"result": result}, status=200) + + except json.JSONDecodeError: + return JsonResponse({"error": "Invalid JSON in request body."}, status=400) + except Exception as e: + return JsonResponse({"error": str(e)}, status=500) + + def build_agent_prompts(self, character_knot_data, story_history, user_input): + + character_voice = [ln.strip() for ln in character_knot_data.get("knotContents", []) if ln.strip()] + voice_block = "\n".join(character_voice) + + timeline = story_history.get("timeline", []) + truncated_history = {"timeline": timeline[-10:]} + + system_prompt = AGENT_SYSTEM_PROMPT + user_prompt = AGENT_USER_PROMPT_TEMPLATE % { + "character_knot": voice_block, + "history": json.dumps(truncated_history, indent=2), + "user_input": user_input + } + print("Character voice:", len(character_voice)) + print("VOICE block length:", len(voice_block)) + print("HISTORY timeline length:", len(truncated_history.get("timeline", []))) + + return system_prompt, user_prompt + + def get(self, request): + return JsonResponse({"result": {"text": "agent endpoint: ok (GET)"}}) \ No newline at end of file diff --git a/unfold_studio/unfold_studio/models.py b/unfold_studio/unfold_studio/models.py index 5d25f9e..de8bec3 100644 --- a/unfold_studio/unfold_studio/models.py +++ b/unfold_studio/unfold_studio/models.py @@ -245,23 +245,43 @@ def include(base, new): inkText = self.inject_input_call_indicators(inkText) inkText = self.inject_generate_call_indicators(inkText) inkText = self.inject_static_continue_knot(inkText) + 
inkText = self.inject_static_agent_knot(inkText) + + debug_path = "/tmp/preprocessed_story.ink" + with open(debug_path, "w", encoding="utf-8") as f: + f.write(inkText) + print("WROTE PREPROCESSED INK TO:", debug_path) offset = ((len(variables) - initialVarLength) + len(directInclusions) - len(self.external_function_declarations())) return inkText, inclusions, variables, knots, offset - + def inject_static_continue_knot(self, inkText): """ Injects static continue knot text into the ink text. """ - continue_knot = """ - === continue(->target_knot) === - ~ continue_function(target_knot) - Continue was called above - -> target_knot - """ + continue_knot = ( + "\n" + "=== continue(->target_knot) ===\n" + "~ continue_function(target_knot)\n" + "Continue was called above\n" + "-> target_knot\n" + ) return inkText + continue_knot - + + def inject_static_agent_knot(self, inkText): + """ + Injects static agent knot text into the ink text. + """ + agent_knot = ( + "\n" + "=== agent(->character_knot, ->target_knot) ===\n" + "~ agent_call(character_knot, target_knot)\n" + "Agent was called above\n" + "-> target_knot\n" + ) + return inkText + agent_knot + def inject_input_call_indicators(self, inkText): """ Injects input call indicators into the ink text. 
@@ -305,6 +325,7 @@ def external_function_declarations(self): "EXTERNAL input(a,b)", "EXTERNAL SEED_AI(a)", "EXTERNAL continue_function(a)", + "EXTERNAL agent_call(a,b)", ] def ink_to_json(self, ink, offset=0): @@ -317,6 +338,13 @@ def ink_to_json(self, ink, offset=0): fqn = os.path.join(settings.INK_DIR, fn) with open(fqn, 'w', encoding='utf-8') as inkfile: inkfile.write(ink) + + debug_copy = "/tmp/inklecate_input.ink" + with open(debug_copy, "w", encoding="utf-8") as f: + f.write(ink) + print("WROTE INKLECATE INPUT TO:", debug_copy) + print("INKLECATE FQN:", fqn) + try: warnings = subprocess.check_output([settings.INKLECATE, fqn]).decode("utf-8-sig") for warning in warnings.split('\n'): @@ -442,6 +470,27 @@ def get_knot_data(self, knot_name): 'knotChoices': knot_choices } + def run_from_knot(self, knot_name: str) -> str: + # Only extracts text from self.ink + if not knot_name or not isinstance(knot_name,str): + raise ValueError("knot_name must be a non-empty string") + knots = self.get_knots() # OrderedDict(name -> (lineNum, knotText)) + name = knot_name.strip() + + if name not in knots: + raise KeyError(f"Knot '{knot_name}' not found") + + _, knot_text = knots[name] + + # knot_text includes the knot header line itself (e.g. "=== intro ===") + # We want everything AFTER that header. + if "\n" in knot_text: + _, content = knot_text.split("\n", 1) + else: + content = "" + return content.strip("\n") + # Does not handle choices, includes, or StoryPlayRecord updates. Only returns raw ink source + # Using Hacker News gravity algorithm: # https://medium.com/hacking-and-gonzo/how-hacker-news-ranking-algorithm-works-1d9b0cf2c08d def update_priority(self):