-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathfeedback.py
More file actions
127 lines (89 loc) · 3.87 KB
/
feedback.py
File metadata and controls
127 lines (89 loc) · 3.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
from langgraph.graph import StateGraph, START, END, add_messages
from langgraph.types import Command, interrupt
from typing import TypedDict, Annotated, List
from langgraph.checkpoint.memory import MemorySaver
from main import llm
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
import uuid
class State(TypedDict):
    """Shared graph state for the feedback-driven LinkedIn post generator."""
    # Topic supplied by the user at startup.
    linkedin_topic: str
    # Generated drafts; the add_messages reducer appends each new entry
    # rather than replacing the list.
    generated_post: Annotated[List[str], add_messages]
    # Accumulated human feedback; also append-only via add_messages.
    human_feedback: Annotated[List[str], add_messages]
def model(state: State):
    """Generate a LinkedIn post on the requested topic, steering the LLM
    with the most recent piece of human feedback (if any).

    Reads:
        state["linkedin_topic"]: topic supplied by the user.
        state["human_feedback"]: accumulated feedback (may be absent on the
            very first pass, before the human node has run).

    Returns:
        A partial state update containing only the newly generated post.
        The feedback channel is NOT re-emitted: its reducer is
        ``add_messages``, which appends, so re-sending the existing list is
        redundant (and on the first pass would inject the local
        "No Feedback yet" placeholder into graph state).
    """
    print("[model] Generating content")
    linkedin_topic = state["linkedin_topic"]
    # .get() covers the first invocation, before any feedback exists.
    feedback = state.get("human_feedback", ["No Feedback yet"])

    # Prompt includes only the latest feedback entry; earlier feedback has
    # already been folded into prior drafts.
    prompt = f"""
    LinkedIn Topic: {linkedin_topic}
    Human Feedback: {feedback[-1] if feedback else "No feedback yet"}

    Generate a structured and well-written LinkedIn post based on the given topic.

    Consider previous human feedback to refine the response.
    """

    response = llm.invoke([
        SystemMessage(content="You are an expert LinkedIn content writer"),
        HumanMessage(content=prompt),
    ])

    generated_linkedin_post = response.content
    print(f"[model_node] Generated post:\n{generated_linkedin_post}\n")

    return {
        "generated_post": [AIMessage(content=generated_linkedin_post)],
    }
def human_node(state: State):
    """Human-in-the-loop node: pause for feedback, then route.

    Interrupts graph execution, surfacing the latest generated post to the
    caller. When the caller resumes with "done" (any casing) the graph moves
    to the terminal node; any other input is recorded as feedback and the
    flow loops back to the model for another revision.
    """
    print("\n [human_node] awaiting human feedback...")

    generated_post = state["generated_post"]

    # Pause here; execution resumes when the caller supplies a value via
    # Command(resume=...).
    user_feedback = interrupt(
        {
            "generated_post": generated_post,
            "message": "Provide feedback or type 'done' to finish"
        }
    )

    print(f"[human_node] Received human feedback: {user_feedback}")

    # The human_feedback channel uses the add_messages reducer, which
    # appends — emit only the NEW entry. (The original concatenated the full
    # existing history into the update, which only avoided duplication via
    # message-id dedup.)
    if user_feedback.lower() == "done":
        return Command(update={"human_feedback": ["Finalised"]}, goto="end_node")

    # Record the feedback and return to the model for regeneration.
    return Command(update={"human_feedback": [user_feedback]}, goto="model")
def end_node(state: State):
    """Terminal node — print a summary of the run and echo the final state."""
    posts = state["generated_post"]
    feedback = state["human_feedback"]

    print("\n[end_node] Process finished")
    print("Final Generated Post:", posts[-1])
    print("Final Human Feedback", feedback)

    # Echo both channels back unchanged as the node's state update.
    return {"generated_post": posts, "human_feedback": feedback}
# ---- Build the graph ----
graph = StateGraph(State)
graph.add_node("model", model)
graph.add_node("human_node", human_node)
graph.add_node("end_node", end_node)

# Linear flow: START -> model -> human_node. From human_node the route is
# decided dynamically via Command(goto=...), so no static edge is needed
# out of it. (add_edge(START, ...) already sets the entry point; the
# original's extra set_entry_point("model") was redundant.)
graph.add_edge(START, "model")
graph.add_edge("model", "human_node")
graph.set_finish_point("end_node")

# A checkpointer is required for interrupt()/resume to work across runs
# of the same thread.
checkpointer = MemorySaver()
app = graph.compile(checkpointer=checkpointer)

# thread_id is conventionally a string; a raw uuid.UUID object may not
# serialize consistently across checkpointer backends.
thread_config = {"configurable": {"thread_id": str(uuid.uuid4())}}

linkedin_topic = input("Enter your LinkedIn topic: ")
initial_state = {
    "linkedin_topic": linkedin_topic,
    "generated_post": [],
    "human_feedback": [],
}

for chunk in app.stream(initial_state, config=thread_config):
    for node_id, value in chunk.items():
        # When the graph pauses at interrupt(), keep collecting feedback
        # and resuming until the user finishes with "done".
        if node_id == "__interrupt__":
            while True:
                user_feedback = input("Provide feedback (or type 'done' when finished): ")

                # Resume the paused graph with the user's feedback.
                app.invoke(Command(resume=user_feedback), config=thread_config)

                # Exit the loop once the user signals completion.
                if user_feedback.lower() == "done":
                    break