Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
131 changes: 130 additions & 1 deletion unfold_studio/static/player.js
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,10 @@ function InkPlayer(containerSelector) {
this.currentStoryPoint = 0;
this.aiSeed = null;
this.generateInProgress = false;

this.agentFunctionCalled = false;
this.currentAgentCharacter = null;
this.currentAgentTarget = null;
}

InkPlayer.prototype = {
Expand Down Expand Up @@ -50,6 +54,21 @@ InkPlayer.prototype = {
this.scheduleInputBoxForContinue()
return '';
}.bind(this));

//AGENT

story.BindExternalFunction("agent_call", function(characterKnot, targetKnot) {
var characterKnotName = characterKnot._componentsString;
var targetKnotName = targetKnot._componentsString;
this.agentFunctionCalled = true;
this.currentAgentCharacter = characterKnotName;
this.currentAgentTarget = targetKnotName;

this.scheduleAgentInputBox("Talk to " + this.currentAgentCharacter);
return '';
}.bind(this));


story.BindExternalFunction("input", function (placeholder = "Enter text...", variableName) {
this.inputFunctionCalled = true;
this.scheduleInputBox(placeholder, variableName);
Expand All @@ -58,7 +77,7 @@ InkPlayer.prototype = {




// TODO: There is a race condition here: the ajax query is sent off
// with a callback for when it returns. Meanwhile, a temporary span
// is created with text "Loading..." and a unique ID. Once the query
Expand Down Expand Up @@ -150,6 +169,12 @@ InkPlayer.prototype = {
this.continueFunctionCalled = false;
return;
}

if (this.agentFunctionCalled) {
this.agentFunctionCalled = false;
this.events.renderScheduledInputBox.bind(this)();
return;
}
if (this.generateInProgress) {
await this.generateAndInsertInDOM(this.generatePrompt);
}
Expand Down Expand Up @@ -257,6 +282,28 @@ InkPlayer.prototype = {
);
this.inputBoxToInsert = formContainer;
},

scheduleAgentInputBox: function(placeholder = "Enter text....") {
const eventHandler = (userInput) => {
this.createStoryPlayRecord(
this.getStoryPlayInstanceUUID(),
"READERS_AGENT_ENTERED_TEXT",
userInput
);

this.handleUserInputForAgent(userInput);
};

const formContainer = this.createInputForm(
"AUTHORS_AGENT_INPUT_BOX",
eventHandler,
placeholder
);

this.inputBoxToInsert = formContainer;
this.events.renderScheduledInputBox.bind(this)();
},

createInputForm: function(formType, eventHandler, placeholder, variableName=null) {
const formContainer = document.createElement("div");
formContainer.classList.add("input-container");
Expand Down Expand Up @@ -349,6 +396,88 @@ InkPlayer.prototype = {
break;
}
},

// AGENT HANDLING INPUT
handleUserInputForAgent: async function(userInput) {
try {
const response = await $.ajax("/agent/", {
beforeSend: function(xhr) {
xhr.setRequestHeader("X-CSRFToken", CSRF);
},
method: "POST",
data: JSON.stringify({
user_input: userInput,
story_play_instance_uuid: this.getStoryPlayInstanceUUID(),
character_knot_name: this.currentAgentCharacter,
target_knot_name: this.currentAgentTarget,
ai_seed: this.aiSeed
}),
contentType: "application/json"
});

const agentResult = response.result || {};
const characterText = agentResult.character_text || "";
const decision = agentResult.continue_decision || {};
const direction = decision.direction || "NEEDS_INPUT";
const content = decision.content || {};

if (characterText) {
this.events.addContent.bind(this)({
text: characterText,
tags: ["agent"]
});
}

switch (direction) {
case "NEEDS_INPUT":
this.scheduleAgentInputBox();
this.events.renderScheduledInputBox.bind(this)();
break;

case "INVALID_USER_INPUT":
this.scheduleAgentInputBox("Input was not valid... Try again");
this.events.renderScheduledInputBox.bind(this)();
break;

case "BRIDGE_AND_CONTINUE":
if (content.bridge_text) {
this.events.addContent.bind(this)({
text: content.bridge_text,
tags: ["bridge"]
});

this.createStoryPlayRecord(
this.getStoryPlayInstanceUUID(),
"AI_GENERATED_TEXT",
content.bridge_text
);
}
this.continueStory();
break;

case "DIRECT_CONTINUE":
this.continueStory();
break;

default:
console.error("Unexpected agent direction:", direction, decision);
this.scheduleAgentInputBox("Something went wrong — try again");
this.events.renderScheduledInputBox.bind(this)();
break;
}
} catch (err) {
console.error("Agent request failed:", err);

this.events.addContent.bind(this)({
text: "Connection issue talking to the character. Please try again.",
tags: ["agent"]
});

this.scheduleAgentInputBox("Try again...");
this.events.renderScheduledInputBox.bind(this)();
}
},

// Returns the UUID identifying this play-through; used to correlate the
// records and agent requests this player sends to the server.
// NOTE(review): storyPlayInstanceUUID is assigned elsewhere in this file —
// callers here assume it is set before any input/agent interaction occurs.
getStoryPlayInstanceUUID: function() {
return this.storyPlayInstanceUUID;
},
Expand Down
18 changes: 11 additions & 7 deletions unfold_studio/text_generation/backends.py
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,7 @@ def _create_chat_completion(self, messages, seed, hit_cache=True, **extra_api_pa
return result
except APIError as err:
log.error("Error calling OpenAI", error=str(err))
print("OPENAI APIError:", repr(err))
return "...error generating text..."

def generate(self, prompt, context_array, seed, hit_cache=True):
Expand All @@ -114,17 +115,20 @@ def generate(self, prompt, context_array, seed, hit_cache=True):
hit_cache=hit_cache
)

def get_ai_response_by_system_and_user_prompt(self, system_prompt, user_prompt, seed, hit_cache=True, force_json=False):
    """Run a two-message (system + user) chat completion.

    Args:
        system_prompt: content for the "system" role message.
        user_prompt: content for the "user" role message.
        seed: seed forwarded to the completion call for reproducibility.
        hit_cache: whether the completion layer may serve a cached result.
        force_json: when True, request JSON-object output via
            response_format={"type": "json_object"}; when False the model
            may return free-form text (needed for in-character agent replies).

    Returns:
        Whatever self._create_chat_completion returns (generated text, or
        its error fallback string on APIError).
    """
    messages = [
        {"role": "system", "content": system_prompt},
        {"role": "user", "content": user_prompt},
    ]
    kwargs = {
        "messages": messages,
        "seed": seed,
        "hit_cache": hit_cache,
    }
    # Only constrain the response format when explicitly requested.
    if force_json:
        kwargs["response_format"] = {"type": "json_object"}

    return self._create_chat_completion(**kwargs)



Expand Down
17 changes: 16 additions & 1 deletion unfold_studio/text_generation/constants.py
Original file line number Diff line number Diff line change
Expand Up @@ -141,4 +141,19 @@ class StoryContinueDirections(BaseConstant):
{
"score": 1-5,
"reason": "detailed analysis"
}"""
}"""

# System prompt for the in-story character ("agent") conversation.
# Fix: corrected typos in the instruction text ("You're job" -> "Your job",
# "stickly" -> "strictly") so the model receives clean instructions.
# NOTE(review): the "(For system compliance: output is not json.)" line looks
# like a workaround to steer the model away from JSON output — confirm it is
# still needed now that force_json defaults to False in the backend.
AGENT_SYSTEM_PROMPT = """
You are an in-story character speaking to the reader. Your job is to strictly stay in character and
give no spoilers to the reader. Keep replies concise (about one short paragraph). If the reader asks
questions or says anything unrelated, respond in character and bring the conversation back to the topic.
(For system compliance: output is not json.) After answering, ask a question that encourages the reader to take an action in the scene.
"""

# %-style template for the user message: the character's own knot text (voice
# sample), recent play history as JSON, and the reader's latest input.
AGENT_USER_PROMPT_TEMPLATE = """
### Character voice ###
Character knot: %(character_knot)s
History: %(history)s
User Input: %(user_input)s

"""
5 changes: 3 additions & 2 deletions unfold_studio/text_generation/urls.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,8 @@
from . import views

# Text-generation endpoints. All routes use a trailing slash so Django's
# APPEND_SLASH redirect (which drops POST bodies) is never triggered.
urlpatterns = [
    path('generate/', views.GenerateTextView.as_view(), name="generate"),
    path('get_next_direction/', views.GetNextDirectionView.as_view(), name="get_next_direction"),
    path('agent/', views.AgentView.as_view(), name="agent"),
]

125 changes: 124 additions & 1 deletion unfold_studio/text_generation/views.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,6 +6,10 @@
from .models import StoryTransitionRecord
from .services.unfold_studio import UnfoldStudioService
from .constants import (StoryContinueDirections, CONTINUE_STORY_SYSTEM_PROMPT, CONTINUE_STORY_USER_PROMPT_TEMPLATE)
from .constants import AGENT_SYSTEM_PROMPT, AGENT_USER_PROMPT_TEMPLATE

from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt

class GenerateTextView(BaseView):

Expand Down Expand Up @@ -131,7 +135,7 @@ def get_next_direction_details_for_story(self, target_knot_data, story_history,
backend = TextGenerationFactory.create(backend_config)

system_prompt, user_prompt = self.build_system_and_user_prompt(target_knot_data, story_history, user_input)
response = backend.get_ai_response_by_system_and_user_prompt(system_prompt, user_prompt, seed, hit_cache=True)
response = backend.get_ai_response_by_system_and_user_prompt(system_prompt, user_prompt, seed, hit_cache=True,force_json=True)
#print(response)

parsed_response = self.parse_and_validate_ai_response(response)
Expand Down Expand Up @@ -195,3 +199,122 @@ def post(self, request):
except Exception as e:
print(str(e))
return JsonResponse({"error": str(e)}, status=500)

@method_decorator(csrf_exempt, name="dispatch")
class AgentView(BaseView):
    """Backend for reader <-> in-story character ("agent") conversations.

    POST expects JSON with ``user_input``, ``character_knot_name``,
    ``target_knot_name``, ``story_play_instance_uuid`` and optional
    ``ai_seed``.  It generates the character's in-voice reply, asks
    GetNextDirectionView for a continue decision against the target knot,
    and returns both so the player frontend can either keep the
    conversation going or resume the story.

    NOTE(review): csrf_exempt disables CSRF protection here even though the
    frontend already sends an X-CSRFToken header — confirm the exemption is
    actually required before shipping.
    """

    def validate_request(self, request_body):
        """Return (ok, failure_reason); missing OR empty fields are rejected."""
        required_fields = ['user_input', 'character_knot_name', 'target_knot_name', 'story_play_instance_uuid']
        for field in required_fields:
            if not request_body.get(field):
                return False, f"Missing required field: {field}"
        return True, None

    def generate_character_text(self, character_knot_data, story_history, user_input, seed):
        """Ask the text-generation backend for an in-character reply.

        Falls back to the first non-empty line of the character knot's own
        contents if generation raises, so the reader always gets a response.
        """
        backend = TextGenerationFactory.create(settings.TEXT_GENERATION)
        system_prompt, user_prompt = self.build_agent_prompts(character_knot_data, story_history, user_input)

        try:
            return backend.get_ai_response_by_system_and_user_prompt(
                system_prompt, user_prompt, seed, hit_cache=True,
            )
        except Exception as e:
            print("ERROR in generate_character_text:", repr(e))
            # Fallback that still behaves "in character-ish": echo a line of
            # the character's own voice, then prompt the reader.
            voice_lines = [ln.strip() for ln in character_knot_data.get("knotContents", []) if ln.strip()]
            voice_hint = voice_lines[0] if voice_lines else "…"
            return f"{voice_hint}\n\nWhat do you want?"

    def post(self, request):
        """Run one agent turn: character reply + continue decision."""
        try:
            request_body = json.loads(request.body)
            seed = request_body.get('ai_seed') or settings.DEFAULT_AI_SEED

            validation_successful, failure_reason = self.validate_request(request_body)
            if not validation_successful:
                return JsonResponse({"error": failure_reason}, status=400)

            story_play_instance_uuid = request_body.get("story_play_instance_uuid")
            character_knot_name = request_body.get("character_knot_name")
            target_knot_name = request_body.get("target_knot_name")
            user_input = request_body.get("user_input")

            story_id = UnfoldStudioService.get_story_id_from_play_instance_uuid(story_play_instance_uuid)
            story_play_history = UnfoldStudioService.get_story_play_history(story_play_instance_uuid)

            character_knot_data = UnfoldStudioService.get_knot_data(story_id, character_knot_name)
            if not character_knot_data:
                return JsonResponse({"error": f"Character knot not found or empty: {character_knot_name}"}, status=404)

            target_knot_data = UnfoldStudioService.get_knot_data(story_id, target_knot_name)
            if not target_knot_data:
                return JsonResponse({"error": f"Target knot not found or empty: {target_knot_name}"}, status=404)

            character_text = self.generate_character_text(
                character_knot_data=character_knot_data,
                story_history=story_play_history,
                user_input=user_input,
                seed=seed
            )

            direction_view = GetNextDirectionView()
            direction, content = direction_view.get_next_direction_details_for_story(
                target_knot_data=target_knot_data,
                story_history=story_play_history,
                user_input=user_input,
                seed=seed
            )

            # When more reader input is needed, surface the character's reply
            # as guidance alongside the decision content.
            if direction in (StoryContinueDirections.NEEDS_INPUT, StoryContinueDirections.INVALID_USER_INPUT):
                content["guidance_text"] = character_text

            # BUG FIX: original used `direction in (X)` — without a trailing
            # comma that is a plain parenthesized value, and for string
            # constants `in` becomes a SUBSTRING test, not membership.
            if direction == StoryContinueDirections.BRIDGE_AND_CONTINUE:
                # Blend the two: the character speaks, then the narrator
                # describes the movement.
                content['bridge_text'] = f"{character_text}\n\n{content['bridge_text']}"
                # Clear character_text so the frontend doesn't show it twice.
                character_text = None

            result = {
                "character_text": character_text,
                "continue_decision": {
                    "direction": direction,
                    "content": content,
                },
            }
            return JsonResponse({"result": result}, status=200)

        except json.JSONDecodeError:
            return JsonResponse({"error": "Invalid JSON in request body."}, status=400)
        except Exception as e:
            return JsonResponse({"error": str(e)}, status=500)

    def build_agent_prompts(self, character_knot_data, story_history, user_input):
        """Build (system_prompt, user_prompt) for the character reply.

        Uses the character knot's non-empty lines as a voice sample and only
        the last 10 timeline entries to keep the prompt small.
        """
        character_voice = [ln.strip() for ln in character_knot_data.get("knotContents", []) if ln.strip()]
        voice_block = "\n".join(character_voice)

        timeline = story_history.get("timeline", [])
        truncated_history = {"timeline": timeline[-10:]}

        user_prompt = AGENT_USER_PROMPT_TEMPLATE % {
            "character_knot": voice_block,
            "history": json.dumps(truncated_history, indent=2),
            "user_input": user_input
        }
        return AGENT_SYSTEM_PROMPT, user_prompt

    def get(self, request):
        """Simple health-check so the route can be probed from a browser."""
        return JsonResponse({"result": {"text": "agent endpoint: ok (GET)"}})
Loading
Loading