18 changes: 12 additions & 6 deletions examples/openai/README.md
@@ -24,20 +24,26 @@ This directory contains examples of using OpenAI's API with environmental impact

```bash
# Basic chat
python openai-chat.py --model "gpt-4" --message "What is artificial intelligence?" --max-tokens 100
uv run python -m examples.openai.openai-chat --model "gpt-4" --message "What is artificial intelligence?"

# Async streaming chat
python openai-async-stream-chat.py --model "gpt-4" --message "Explain quantum computing" --stream
uv run python -m examples.openai.openai-async-stream-chat --model "gpt-4" --message "Explain quantum computing"

# Generate an image
python openai-image.py --prompt "A beautiful sunset over mountains" --size "1024x1024"
uv run python -m examples.openai.openai-image --prompt "A beautiful sunset over mountains" --model "dall-e-2" --size "1024x1024"

# Async image generation
python openai-async-image.py --prompt "A futuristic city" --model "dall-e-3"
uv run python -m examples.openai.openai-async-image --prompt "A futuristic city" --model "dall-e-2" --size "1024x1024"

# Text to Speech
python openai-speech.py --text "Hello, welcome to the future!" --voice "alloy" --output speech.mp3
uv run python -m examples.openai.openai-speech --text "Hello, welcome to the future!" --model "tts-1" --response-format "mp3"

# Async Text to Speech
uv run python -m examples.openai.openai-async-speech --text "Hello, welcome to the future!" --model "tts-1" --response-format "mp3"

# Audio Transcription
python openai-transcription.py --file "recording.mp3" --model "whisper-1"
uv run python -m examples.openai.openai-transcription --filename "recording.mp3" --model "whisper-1"

# Async Audio Transcription
uv run python -m examples.openai.openai-async-transcription --filename "recording.mp3" --model "whisper-1"
```
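
All of these commands exercise the same tracing pattern, visible in the file diffs below: initialize the SDK, run the OpenAI call inside a `scope3.trace()` block, then read the aggregated impact from the tracer. A minimal sketch of that pattern, assuming `OPENAI_API_KEY` and any Scope3AI credentials are set in the environment (the model name is illustrative):

```python
from openai import OpenAI
from scope3ai import Scope3AI

client = OpenAI()
scope3 = Scope3AI.init()

with scope3.trace() as tracer:
    response = client.chat.completions.create(
        model="gpt-4",
        messages=[{"role": "user", "content": "What is artificial intelligence?"}],
    )
    print(response.choices[0].message.content)

    # Aggregate the environmental impact recorded for calls in this trace.
    impact = tracer.impact()
    print(f"Total Energy Wh: {impact.total_energy_wh}")
    print(f"Total GCO2e: {impact.total_gco2e}")
    print(f"Total MLH2O: {impact.total_mlh2o}")
```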
36 changes: 36 additions & 0 deletions examples/openai/openai-async-speech.py
@@ -0,0 +1,36 @@
import asyncio

from openai import AsyncOpenAI
from scope3ai import Scope3AI


async def main(text: str, model: str, response_format: str):
client = AsyncOpenAI()
scope3 = Scope3AI.init()

with scope3.trace() as tracer:
response = await client.audio.speech.create(
model=model,
voice="alloy",
input=text,
response_format=response_format,
)
print(response)
impact = tracer.impact()
print(impact)
print(f"Total Energy Wh: {impact.total_energy_wh}")
print(f"Total GCO2e: {impact.total_gco2e}")
print(f"Total MLH2O: {impact.total_mlh2o}")


if __name__ == "__main__":
import argparse

parser = argparse.ArgumentParser(description="OpenAI Text to Speech")
parser.add_argument("--model", type=str, default="tts-1", help="Model")
parser.add_argument(
    "--response-format", type=str, default="mp3", help="Response format"
)
parser.add_argument(
    "--text", type=str, required=True, help="The text to convert to speech"
)
args = parser.parse_args()
asyncio.run(main(**vars(args)))
29 changes: 10 additions & 19 deletions examples/openai/openai-async-stream-chat.py
@@ -5,26 +5,24 @@
from scope3ai import Scope3AI


async def main(model: str, message: str, role: str, stream: bool):
async def main(model: str, message: str, role: str):
client = AsyncOpenAI()
scope3 = Scope3AI.init()

with scope3.trace() as tracer:
chunk_count = 0
response = await client.chat.completions.create(
model=model,
messages=[{"role": role, "content": message}],
stream=stream,
stream=True,
)

if stream:
async for event in response:
if not event.choices:
continue
print(event.choices[0].delta.content, end="", flush=True)
print()
else:
print(response.choices[0].message.content.strip())

async for event in response:
chunk_count += 1
if not event.choices:
continue
print(event.choices[0].delta.content or "", end="", flush=True)
print()
print(f"Chunk count: {chunk_count}")
impact = tracer.impact()
print(impact)
print(f"Total Energy Wh: {impact.total_energy_wh}")
@@ -56,12 +54,5 @@ async def main(model: str, message: str, role: str, stream: bool):
default="user",
help="Role for the message (user, system, or assistant)",
)
parser.add_argument(
"--no-stream",
action="store_false",
dest="stream",
help="Disable streaming mode",
default=True,
)
args = parser.parse_args()
asyncio.run(main(**vars(args)))
36 changes: 36 additions & 0 deletions examples/openai/openai-async-transcription.py
@@ -0,0 +1,36 @@
from pathlib import Path
import asyncio

from openai import AsyncOpenAI
from scope3ai import Scope3AI


async def main(filename: Path, model: str, response_format: str):
client = AsyncOpenAI()
scope3 = Scope3AI.init()

with scope3.trace() as tracer:
response = await client.audio.transcriptions.create(
model=model,
file=filename,
response_format=response_format,
)
print(response)
impact = tracer.impact()
print(impact)
print(f"Total Energy Wh: {impact.total_energy_wh}")
print(f"Total GCO2e: {impact.total_gco2e}")
print(f"Total MLH2O: {impact.total_mlh2o}")


if __name__ == "__main__":
import argparse

parser = argparse.ArgumentParser(description="OpenAI Speech to Text")
parser.add_argument("--model", type=str, default="whisper-1", help="Model")
parser.add_argument(
"--response_format", type=str, default="json", help="Response format"
)
parser.add_argument("--filename", type=Path, help="The path to the input file")
args = parser.parse_args()
asyncio.run(main(**vars(args)))
35 changes: 35 additions & 0 deletions examples/openai/openai-speech.py
@@ -0,0 +1,35 @@
from openai import OpenAI
from scope3ai import Scope3AI


def main(text: str, model: str, response_format: str):
client = OpenAI()
scope3 = Scope3AI.init()

with scope3.trace() as tracer:
response = client.audio.speech.create(
model=model,
voice="alloy",
input=text,
response_format=response_format,
)
print(response)
print(response.scope3ai.request)
impact = tracer.impact()
print(impact)
print(f"Total Energy Wh: {impact.total_energy_wh}")
print(f"Total GCO2e: {impact.total_gco2e}")
print(f"Total MLH2O: {impact.total_mlh2o}")


if __name__ == "__main__":
import argparse

parser = argparse.ArgumentParser(description="OpenAI Text to Speech")
parser.add_argument("--model", type=str, default="tts-1", help="Model")
parser.add_argument(
    "--response-format", type=str, default="mp3", help="Response format"
)
parser.add_argument(
    "--text", type=str, required=True, help="The text to convert to speech"
)
args = parser.parse_args()
main(**vars(args))
2 changes: 1 addition & 1 deletion examples/openai/openai-transcription.py
@@ -31,6 +31,6 @@ def main(filename: Path, model: str, response_format: str):
parser.add_argument(
"--response_format", type=str, default="json", help="Response format"
)
parser.add_argument("filename", type=Path, help="The path to the input file")
parser.add_argument("--filename", type=Path, help="The path to the input file")
args = parser.parse_args()
main(**vars(args))
7 changes: 7 additions & 0 deletions scope3ai/lib.py
@@ -436,6 +436,13 @@ def set_only_if(row, field, *values):
)

# copy tracer or global metadata

set_only_if(
row,
"managed_service_id",
row.managed_service_id if row.managed_service_id else "",
)

set_only_if(
row,
"client_id",
8 changes: 0 additions & 8 deletions scope3ai/tracers/anthropic/chat.py
@@ -18,10 +18,8 @@
from typing_extensions import override

from scope3ai.api.types import Scope3AIContext, ImpactRow
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI

PROVIDER = PROVIDERS.ANTROPIC.value

MessageStreamT = TypeVar("MessageStreamT", bound=_MessageStream)
AsyncMessageStreamT = TypeVar("AsyncMessageStreamT", bound=_AsyncMessageStream)
@@ -63,7 +61,6 @@ def __stream_text__(self) -> Iterator[str]: # type: ignore[misc]
input_tokens=input_tokens,
output_tokens=output_tokens,
request_duration_ms=requests_latency * 1000,
managed_service_id=PROVIDER,
)
self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)

@@ -106,7 +103,6 @@ async def __stream_text__(self) -> AsyncIterator[str]: # type: ignore[misc]
input_tokens=input_tokens,
output_tokens=output_tokens,
request_duration_ms=requests_latency * 1000,
managed_service_id=PROVIDER,
)
self.scope3ai = await Scope3AI.get_instance().asubmit_impact(scope3_row)

@@ -177,7 +173,6 @@ def __stream__(self) -> Iterator[_T]:
input_tokens=input_tokens,
output_tokens=output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
self.scope3ai = Scope3AI.get_instance().submit_impact(scope3_row)

@@ -210,7 +205,6 @@ async def __stream__(self) -> AsyncIterator[_T]:
input_tokens=input_tokens,
output_tokens=output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
self.scope3ai = await Scope3AI.get_instance().asubmit_impact(scope3_row)

@@ -229,7 +223,6 @@ def _anthropic_chat_wrapper(response: Message, request_latency: float) -> Message:
input_tokens=response.usage.input_tokens,
output_tokens=response.usage.output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
if scope3ai_ctx is not None:
@@ -263,7 +256,6 @@ async def _anthropic_async_chat_wrapper(
input_tokens=response.usage.input_tokens,
output_tokens=response.usage.output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
if scope3ai_ctx is not None:
7 changes: 0 additions & 7 deletions scope3ai/tracers/cohere/chat.py
@@ -11,12 +11,9 @@
StreamEndStreamedChatResponse as _StreamEndStreamedChatResponse,
)

from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI
from scope3ai.api.types import Scope3AIContext, ImpactRow

PROVIDER = PROVIDERS.COHERE.value


class NonStreamedChatResponse(_NonStreamedChatResponse):
scope3ai: Optional[Scope3AIContext] = None
@@ -47,7 +44,6 @@ def cohere_chat_wrapper(
input_tokens=response.meta.tokens.input_tokens,
output_tokens=response.meta.tokens.output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
return NonStreamedChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
@@ -68,7 +64,6 @@ async def cohere_async_chat_wrapper(
input_tokens=response.meta.tokens.input_tokens,
output_tokens=response.meta.tokens.output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
return NonStreamedChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
@@ -93,7 +88,6 @@ def cohere_stream_chat_wrapper(
input_tokens=input_tokens,
output_tokens=output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
yield StreamEndStreamedChatResponse(**event.dict(), scope3ai=scope3ai_ctx)
@@ -120,7 +114,6 @@ async def cohere_async_stream_chat_wrapper(
input_tokens=input_tokens,
output_tokens=output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
yield StreamEndStreamedChatResponse(**event.dict(), scope3ai=scope3ai_ctx)
7 changes: 0 additions & 7 deletions scope3ai/tracers/cohere/chat_v2.py
@@ -11,11 +11,8 @@
)

from scope3ai.api.types import ImpactRow, Scope3AIContext
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI

PROVIDER = PROVIDERS.COHERE.value


class ChatResponse(_ChatResponse):
scope3ai: Optional[Scope3AIContext] = None
@@ -50,7 +47,6 @@ def cohere_chat_v2_wrapper(
input_tokens=response.usage.tokens.input_tokens,
output_tokens=response.usage.tokens.output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
return ChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
@@ -71,7 +67,6 @@ async def cohere_async_chat_v2_wrapper(
input_tokens=response.usage.tokens.input_tokens,
output_tokens=response.usage.tokens.output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
return ChatResponse(**response.dict(), scope3ai=scope3ai_ctx)
@@ -97,7 +92,6 @@ def cohere_stream_chat_v2_wrapper(
input_tokens=input_tokens,
output_tokens=output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = Scope3AI.get_instance().submit_impact(scope3_row)
yield Scope3AIStreamedChatResponseV2(type="scope3ai", scope3ai=scope3ai_ctx)
@@ -123,7 +117,6 @@ async def cohere_async_stream_chat_v2_wrapper(
input_tokens=input_tokens,
output_tokens=output_tokens,
request_duration_ms=request_latency * 1000,
managed_service_id=PROVIDER,
)
scope3ai_ctx = await Scope3AI.get_instance().asubmit_impact(scope3_row)
yield Scope3AIStreamedChatResponseV2(type="scope3ai", scope3ai=scope3ai_ctx)
4 changes: 0 additions & 4 deletions scope3ai/tracers/google_genai/chat.py
@@ -5,11 +5,8 @@

from scope3ai.api.types import Scope3AIContext
from scope3ai.api.typesgen import ImpactRow
from scope3ai.constants import PROVIDERS
from scope3ai.lib import Scope3AI

PROVIDER = PROVIDERS.GOOGLE_GENAI.value


class GenerateContentResponse(_GenerateContentResponse):
scope3ai: Optional[Scope3AIContext] = None
@@ -21,7 +18,6 @@ def get_impact_row(response: _GenerateContentResponse, duration_ms: float) -> ImpactRow:
input_tokens=response.usage_metadata.prompt_token_count,
output_tokens=response.usage_metadata.candidates_token_count or 0,
request_duration_ms=duration_ms * 1000,
managed_service_id=PROVIDER,
)

