-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathexample.py
More file actions
180 lines (139 loc) Β· 5.01 KB
/
example.py
File metadata and controls
180 lines (139 loc) Β· 5.01 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
"""
AIP Example - Demonstrating the AI Protocol SDK
This example shows how to:
1. Connect to an AIP server
2. Ask questions and receive streamed responses
3. Maintain conversation context across multiple questions
4. Cleanly disconnect
"""
import asyncio
from aip_sdk import AIPClient
async def main():
    """
    Main example demonstrating AIP SDK usage.

    Connects to an AIP server, streams the answers to three questions
    token-by-token (the second and third are follow-ups, demonstrating
    that the server retains conversation context), then disconnects.
    """
    print("=" * 70)
    print("AIP Example - AI Protocol Communication Demo")
    print("=" * 70)
    print()

    # Step 1: Connect to the AIP server.
    # Make sure the server is running at this address (python server.py).
    server_url = "ws://localhost:8000/aip"
    print(f"📡 Connecting to AIP server at {server_url}...")
    client = await AIPClient.connect(server_url)
    # NOTE(review): emoji restored from mojibake in the scraped source — confirm glyph choice.
    print(f"✅ Connected! Session ID: {client.session_id}")
    print()

    # Step 2: Ask the first question.
    # The response is streamed token-by-token to the console.
    print("-" * 70)
    print("Question 1: What is Python?")
    print("-" * 70)
    print("Assistant: ", end="", flush=True)

    # Callback invoked once per streamed token; print it without a newline
    # so the answer appears as a continuous stream.
    def print_token(token: str) -> None:
        print(token, end="", flush=True)

    # Send the question and stream the response.
    await client.ask(
        prompt="What is Python? Give me a brief answer in 2-3 sentences.",
        on_token=print_token,
    )
    print("\n")  # Blank line after the response.

    # Step 3: Ask a follow-up question.
    # The server should remember the previous context (that we were
    # talking about Python).
    print("-" * 70)
    print("Question 2: What are its main uses? (Follow-up question)")
    print("-" * 70)
    print("Assistant: ", end="", flush=True)
    await client.ask(
        prompt="What are its main uses?",
        on_token=print_token,
    )
    print("\n")

    # Step 4: Demonstrate context retention with another follow-up.
    print("-" * 70)
    print("Question 3: Can you give me a simple code example?")
    print("-" * 70)
    print("Assistant: ", end="", flush=True)
    await client.ask(
        prompt="Can you give me a simple code example?",
        on_token=print_token,
    )
    print("\n")

    # Step 5: Clean disconnect.
    print("-" * 70)
    print("👋 Disconnecting from server...")
    await client.disconnect()
    print("✅ Disconnected successfully!")
    print("=" * 70)
async def example_with_context_manager():
    """
    Alternative example using an async context manager for automatic cleanup.

    Using ``async with`` guarantees the connection is closed when the block
    exits, even if an error occurs inside it.
    """
    print("\n" + "=" * 70)
    print("Alternative Example - Using Context Manager")
    print("=" * 70)
    print()

    # 'async with' ensures the connection is automatically closed
    # even if an error occurs.
    async with await AIPClient.connect("ws://localhost:8000/aip") as client:
        # NOTE(review): emoji restored from mojibake in the scraped source.
        print(f"✅ Connected! Session ID: {client.session_id}")
        print()
        print("Question: Tell me a fun fact about space.")
        print("-" * 70)
        print("Assistant: ", end="", flush=True)
        await client.ask(
            prompt="Tell me a fun fact about space in one sentence.",
            on_token=lambda token: print(token, end="", flush=True),
        )
        print("\n")

    # The client is automatically disconnected on exiting the 'async with' block.
    print("✅ Automatically disconnected!")
    print("=" * 70)
async def example_accumulate_response():
    """
    Example showing how to accumulate the full response instead of printing it.

    Each streamed token is appended to a list via the ``on_token`` callback,
    and the complete answer is assembled with ``str.join`` afterwards.
    """
    print("\n" + "=" * 70)
    print("Advanced Example - Accumulating Response")
    print("=" * 70)
    print()

    client = await AIPClient.connect("ws://localhost:8000/aip")
    # NOTE(review): emoji restored from mojibake in the scraped source.
    print(f"✅ Connected! Session ID: {client.session_id}")
    print()

    # Collect every token as it arrives.
    response_tokens: list[str] = []

    print("Question: What is 2+2?")
    print("Streaming response...")

    # The callback appends each token to our list (list.append is itself
    # a valid single-argument callback, no lambda needed).
    await client.ask(
        prompt="What is 2+2? Just give me the answer briefly.",
        on_token=response_tokens.append,
    )

    # Join the tokens into the full response string.
    full_response = "".join(response_tokens)
    print()
    print("Full accumulated response:")
    print("-" * 70)
    print(full_response)
    print("-" * 70)
    print(f"Total tokens received: {len(response_tokens)}")
    print(f"Total characters: {len(full_response)}")

    await client.disconnect()
    print()
    print("=" * 70)
if __name__ == "__main__":
    # To run this script:
    #   1. Make sure the server is running: python server.py
    #   2. Set your OPENAI_API_KEY environment variable
    #   3. Run this script: python example.py

    # Run the main example.
    asyncio.run(main())

    # Uncomment to run additional examples:
    # asyncio.run(example_with_context_manager())
    # asyncio.run(example_accumulate_response())