From b303fb391c9de5f2863213d90577c3fbf77eaa5e Mon Sep 17 00:00:00 2001 From: AG2AI-Admin Date: Fri, 18 Jul 2025 15:42:11 -0400 Subject: [PATCH] Migrate from pyautogen to ag2 library --- autogen/autogen-gemini/instruction.md | 6 +++--- .../autogen_assistant/autogen_assistant.py | 20 +++++++++---------- autogen/autogen_assistant/readme.md | 10 +++++----- autogen/autogenra/readme.md | 8 ++++---- autogen/multiagents_groupchat/log.txt | 4 ++-- .../multiagents_groupchat.py | 6 +++--- autogen/multiagents_groupchat/readme.md | 10 +++++----- crewagent/crewagent.py | 2 +- 8 files changed, 33 insertions(+), 33 deletions(-) diff --git a/autogen/autogen-gemini/instruction.md b/autogen/autogen-gemini/instruction.md index 0718915..37d90fb 100755 --- a/autogen/autogen-gemini/instruction.md +++ b/autogen/autogen-gemini/instruction.md @@ -12,10 +12,10 @@ Google Gemini API=>https://cloud.google.com/vertex-ai/docs/generative-ai/multimo 2. Run your code local machine: ``` -python3.11 -m venv pyautogen -source pyautogen/bin/activate +python3.11 -m venv ag2 +source ag2/bin/activate # deactivate -pip install pyautogen google-generativeai +pip install ag2 google-generativeai pip install -q google-generativeai python autogen/autogen-gemini/autogen_gemini.py diff --git a/autogen/autogen_assistant/autogen_assistant.py b/autogen/autogen_assistant/autogen_assistant.py index 12d876f..6697d57 100755 --- a/autogen/autogen_assistant/autogen_assistant.py +++ b/autogen/autogen_assistant/autogen_assistant.py @@ -14,7 +14,7 @@ #base_url = "http://localhost:1234/v1" # You can specify API base URLs if needed. eg: localhost:8000 #http://10.9.150.174:1234/v1/models base_url = "http://192.168.40.229:1234/v1" #Sidan's Server Mistral-7B-Instruct-v0.1-GGUF/mistral-7b-instruct-v0.1.Q8_0.gguf -#base_url = "http://airedale-native-chicken.ngrok-free.app/v1" # Seems like OpenLLM server only support SKD is openai==0.28 pyautogen==0.1.14 . 
refer: https://colab.research.google.com/drive/1GKlfU7Fjq30oQPirHvCcQy_e_B8vNEDs?usp=sharing +#base_url = "http://airedale-native-chicken.ngrok-free.app/v1" # Seems like OpenLLM server only support SKD is openai==0.28 ag2==0.1.14 . refer: https://colab.research.google.com/drive/1GKlfU7Fjq30oQPirHvCcQy_e_B8vNEDs?usp=sharing api_type = "openai" # Type of API, e.g., "openai" or "aoai". api_version = None # Specify API version if needed. #api_model= "palm/chat-biso" @@ -111,15 +111,15 @@ def run_openai_completion(): -#=============autogen run_autogen_with_twoagent_pyautogen_latest_version============= -#refer:https://pypi.org/project/pyautogen/0.1.14/ +#=============autogen run_autogen_with_twoagent_ag2_latest_version============= +#refer:https://pypi.org/project/ag2/0.1.14/ ''' This code required the dependency follow these(also pay attention with api_base and api_key and api_model) : -pip install openai==0.28 pyautogen==0.1.14 +pip install openai==0.28 ag2==0.1.14 ''' -def run_autogen_with_twoagent_pyautogen_latest_version(): +def run_autogen_with_twoagent_ag2_latest_version(): try: - #Base on openai==0.28 pyautogen==0.1.14 ,If you want to use your own LLM,you must be override your openai.api_base and openai.api_key for autogen, otherwise won't be work. + #Base on openai==0.28 ag2==0.1.14 ,If you want to use your own LLM,you must be override your openai.api_base and openai.api_key for autogen, otherwise won't be work. 
openai.api_key = api_key # supply your API key however you choose openai.api_base= base_url # supply your api base URL If you have your own LLM assistant = AssistantAgent("assistant", llm_config={"api_key":api_key,"base_url": base_url,"api_model":api_model}) @@ -128,7 +128,7 @@ def run_autogen_with_twoagent_pyautogen_latest_version(): user_proxy.initiate_chat(assistant, message="Plot a chart of NVDA and TESLA stock price change YTD.") # This triggers automated chat to solve the task except Exception as e: - print(f"""run_autogen_with_twoagent_pyautogen_latest_version failed with Exception{e}. \n""") + print(f"""run_autogen_with_twoagent_ag2_latest_version failed with Exception{e}. \n""") # ##==================================================== @@ -155,7 +155,7 @@ def run_autogen_with_twoagent_pyautogen_latest_version(): # and OAI_CONFIG_LIST_sample ''' This code required the dependency follow these(also pay attention with config_list.api_base and config_list.api_key and config_list.api_model) : -pip install openai==0.28 pyautogen==0.1.14 +pip install openai==0.28 ag2==0.1.14 ''' def run_autogen_with_twoagent(): config_list = [ @@ -185,7 +185,7 @@ def run_autogen_with_twoagent(): #========================run_autogen_with_Assistant_and_userProxy============================ ''' This code required the dependency follow these(also pay attention with config_list.api_base and config_list.api_key and config_list.api_model) : -pip install openai==0.28 pyautogen==0.1.14 +pip install openai==0.28 ag2==0.1.14 ''' def run_autogen_with_Assistant_and_userProxy(): try: @@ -227,4 +227,4 @@ def run_autogen_with_Assistant_and_userProxy(): #run_autogen_with_Assistant_and_userProxy() #run_autogen_with_twoagent() #run_openai_completion() - run_autogen_with_twoagent_pyautogen_latest_version() \ No newline at end of file + run_autogen_with_twoagent_ag2_latest_version() \ No newline at end of file diff --git a/autogen/autogen_assistant/readme.md b/autogen/autogen_assistant/readme.md index 
22cd2b7..5988359 100755 --- a/autogen/autogen_assistant/readme.md +++ b/autogen/autogen_assistant/readme.md @@ -8,14 +8,14 @@ 2. Run your code local machine: ``` -python3.12 -m venv pyautogen -source pyautogen/bin/activate +python3.12 -m venv ag2 +source ag2/bin/activate # deactivate # old version with between Aug 2023 and Oct 2023 -pip install openai==0.28 pyautogen==0.1.14 +pip install openai==0.28 ag2==0.1.14 # latest -pip install openai==1.6.1 pyautogen==0.2.3 -pip show pyautogen +pip install openai==1.6.1 ag2==0.2.3 +pip show ag2 python autogen/autogen_assistant/autogen_assistant.py diff --git a/autogen/autogenra/readme.md b/autogen/autogenra/readme.md index 5d389c1..7ef72e3 100755 --- a/autogen/autogenra/readme.md +++ b/autogen/autogenra/readme.md @@ -12,13 +12,13 @@ Or use LM Studio build your local LLM => https://lmstudio.ai/ 2. Run your code local machine: ``` -python3.12 -m venv pyautogen -source pyautogen/bin/activate +python3.12 -m venv ag2 +source ag2/bin/activate # deactivate # old version with between Aug 2023 and Oct 2023 -pip install openai==0.28 pyautogen==0.1.14 +pip install openai==0.28 ag2==0.1.14 # latest -pip install openai==1.6.1 pyautogen==0.2.3 +pip install openai==1.6.1 ag2==0.2.3 pip install -r requirements.txt diff --git a/autogen/multiagents_groupchat/log.txt b/autogen/multiagents_groupchat/log.txt index 8e00f63..8c74767 100755 --- a/autogen/multiagents_groupchat/log.txt +++ b/autogen/multiagents_groupchat/log.txt @@ -12,7 +12,7 @@ BadRequestError: Error code: 400 - {'error': "'messages' array must only contain -------------------------------------------------------------------------------- -(pyautogen) root@1cfeec487615:/home/tmp/mingwork/llm_sample# python autogen/multiagents_groupchat/multiagents_groupchat.py +(ag2) root@1cfeec487615:/home/tmp/mingwork/llm_sample# python autogen/multiagents_groupchat/multiagents_groupchat.py ==================== openai.Model.list=> { "data": [ @@ -180,4 +180,4 @@ user_proxy (to chat_manager): 
-------------------------------------------------------------------------------- ==========completed run_agentchat_multiagents_groupchat============= -(pyautogen) root@1cfeec487615:/home/tmp/mingwork/llm_sample# +(ag2) root@1cfeec487615:/home/tmp/mingwork/llm_sample# diff --git a/autogen/multiagents_groupchat/multiagents_groupchat.py b/autogen/multiagents_groupchat/multiagents_groupchat.py index 140713c..1b05c84 100755 --- a/autogen/multiagents_groupchat/multiagents_groupchat.py +++ b/autogen/multiagents_groupchat/multiagents_groupchat.py @@ -7,8 +7,8 @@ import openai -#Initial your Custom llm_config configurations,LM Studio Server support SKD is openai==0.28 pyautogen==0.1.14 . refer: https://colab.research.google.com/drive/1GKlfU7Fjq30oQPirHvCcQy_e_B8vNEDs?usp=sharing -# base_url = "http://airedale-native-chicken.ngrok-free.app/v1" # Seems like OpenLLM server only support SKD is openai==0.28 pyautogen==0.1.14 . refer: https://colab.research.google.com/drive/1GKlfU7Fjq30oQPirHvCcQy_e_B8vNEDs?usp=sharing +#Initial your Custom llm_config configurations,LM Studio Server support SKD is openai==0.28 ag2==0.1.14 . refer: https://colab.research.google.com/drive/1GKlfU7Fjq30oQPirHvCcQy_e_B8vNEDs?usp=sharing +# base_url = "http://airedale-native-chicken.ngrok-free.app/v1" # Seems like OpenLLM server only support SKD is openai==0.28 ag2==0.1.14 . refer: https://colab.research.google.com/drive/1GKlfU7Fjq30oQPirHvCcQy_e_B8vNEDs?usp=sharing # api_model="TinyLlama--TinyLlama-1.1B-Chat-v1.0" base_url = "http://192.168.0.232:1234/v1" #Sidan's Server Mistral-7B-Instruct-v0.2-GGUF/mistral-7b-instruct-v0.2.Q6_K.gguf api_model="Mistral-7B-Instruct-v0.2-GGUF/mistral-7b-instruct-v0.2.Q6_K.gguf" @@ -37,7 +37,7 @@ def run_agentchat_multiagents_groupchat(): # try: - #Base on openai==0.28 pyautogen==0.1.14 ,If you want to use your own LLM,you must be override your openai.api_base and openai.api_key for autogen, otherwise won't be work. 
+ #Base on openai==0.28 ag2==0.1.14 ,If you want to use your own LLM,you must be override your openai.api_base and openai.api_key for autogen, otherwise won't be work. openai.api_key = api_key # supply your API key however you choose openai.api_base= base_url # supply your api base URL If you have your own LLM openai.model = api_model # supply your api base URL If you have your own model (PS:openLLM required change the model,LM Studio doesn't care about model) diff --git a/autogen/multiagents_groupchat/readme.md b/autogen/multiagents_groupchat/readme.md index 4a537f6..966c50d 100755 --- a/autogen/multiagents_groupchat/readme.md +++ b/autogen/multiagents_groupchat/readme.md @@ -9,14 +9,14 @@ Or use LM Studio build your local LLM => https://lmstudio.ai/ 2. Run your code local machine: ``` -python3.12 -m venv pyautogen -source pyautogen/bin/activate +python3.12 -m venv ag2 +source ag2/bin/activate # deactivate # old version with between Aug 2023 and Oct 2023 -pip install openai==0.28 pyautogen==0.1.14 +pip install openai==0.28 ag2==0.1.14 # latest -pip install openai==1.6.1 pyautogen==0.2.3 -pip show pyautogen +pip install openai==1.6.1 ag2==0.2.3 +pip show ag2 python autogen/multiagents_groupchat/multiagents_groupchat.py diff --git a/crewagent/crewagent.py b/crewagent/crewagent.py index 5c69c3c..3e673ea 100755 --- a/crewagent/crewagent.py +++ b/crewagent/crewagent.py @@ -20,7 +20,7 @@ #os.environ["OPENAI_API_KEY"] = "YOUR KEY" g_base_url = "http://192.168.137.176:1234/v1" #Sidan's Server Mistral-7B-Instruct-v0.1-GGUF/mistral-7b-instruct-v0.1.Q8_0.gguf g_api_key = "sk-llllllllllllllllllllll" # even your local don't use the authorization, but you need to fill something, otherwise will be get exception. -#g_base_url = "http://airedale-native-chicken.ngrok-free.app/v1" # Seems like OpenLLM server only support SKD is openai==0.28 pyautogen==0.1.14 . 
refer: https://colab.research.google.com/drive/1GKlfU7Fjq30oQPirHvCcQy_e_B8vNEDs?usp=sharing +#g_base_url = "http://airedale-native-chicken.ngrok-free.app/v1" # Seems like OpenLLM server only support SDK is openai==0.28 ag2==0.1.14 . refer: https://colab.research.google.com/drive/1GKlfU7Fjq30oQPirHvCcQy_e_B8vNEDs?usp=sharing #g_api_model="NousResearch--Nous-Hermes-llama-2-7b" g_api_model="TinyLlama--TinyLlama-1.1B-Chat-v1.0" os.environ["OPENAI_API_KEY"] = g_api_key