Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
9 changes: 5 additions & 4 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ Simply run `llmpeg "with your instructions in quotes"`. See examples below for m

For more detailed information, run `llmpeg -h`:
```
usage: llmpeg [-h] [--backend {openai}] [--openai_model OPENAI_MODEL] instructions
Usage: main.py [-h] [-k KEY] [-b {openai}] [-m OPENAI_MODEL] instructions

Convert your instructions into an ffmpeg command.

Expand All @@ -52,10 +52,11 @@ positional arguments:

optional arguments:
-h, --help show this help message and exit
--backend {openai} The backend LLM API provider to use. Defaults to 'openai'.
--openai_model OPENAI_MODEL
-k KEY, --key KEY OpenAI API Key to use. Defaults to 'OPENAI_API_KEY' environment variable.
-b {openai}, --backend {openai}
The backend LLM API provider to use. Defaults to 'openai'.
-m OPENAI_MODEL, --openai_model OPENAI_MODEL
The OpenAI LLM Model that you would like to use.

```
### OpenAI
By default, `openai_model` is set to `gpt-3.5-turbo-0125`, which as of today is the strongest model that can be used with a free developer account with $5 of free credit. If you have a developer account and are willing to pay for credits, you can change this to one of the `gpt-4` models for better performance and more accurate command generation.
Expand Down
16 changes: 15 additions & 1 deletion llmpeg/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -278,16 +278,30 @@ def main():
type=str,
help="A string containing instructions about the desired ffmpeg use",
)

parser.add_argument(
"-k",
"--key",
dest="key",
type=str,
default=os.environ.get("OPENAI_API_KEY", None),
help="OpenAI API Key to use. Defaults to 'OPENAI_API_KEY' environment variable.",
)

parser.add_argument(
"-b",
"--backend",
dest="backend",
type=str,
default="openai",
choices=["openai"],
help="The backend LLM API provider to use. Defaults to 'openai'.",
)

parser.add_argument(
"-m",
"--openai_model",
dest="openai_model",
type=str,
help="The OpenAI LLM Model that you would like to use.",
default="gpt-3.5-turbo-0125",
Expand All @@ -296,7 +310,7 @@ def main():
# Parse the command line arguments
args = parser.parse_args()
if args.backend == "openai":
llm = OpenAILLMInterface(args.openai_model)
llm = OpenAILLMInterface(args.openai_model, args.key)
else:
raise NotImplementedError(
f"The LLM backend '{args.backend}' is not supported."
Expand Down
24 changes: 15 additions & 9 deletions llmpeg/openai_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -8,16 +8,17 @@
class OpenAILLMInterface(LLMInterface):
"""Implementation of LLMInterface for OpenAI LLM API."""

def __init__(self, model_string: str):
def __init__(self, model_string: str, api_key: str = None):
"""Initialize OpenAI API connection and create message history.

Args:
model_string (str): The specific model to be used in API.
api_key (str, optional): The OpenAI API key. Defaults to None.
"""
self._model_string = model_string

try:
self.client = OpenAI()
self.client = OpenAI(api_key=api_key)
except OpenAIError as e:
print(
f"OpenAI API Unable to initialize, likely due to missing API Key environment Variable: {e}", # noqa: E501
Expand Down Expand Up @@ -66,13 +67,18 @@ def invoke_model(self) -> str:
Returns:
str: The output generated by the language model.
"""
response = self.client.chat.completions.create(
model=self._model_string,
response_format={"type": "json_object"},
messages=self.history,
temperature=0.2,
top_p=0.2,
)
try:
response = self.client.chat.completions.create(
model=self._model_string,
response_format={"type": "json_object"},
messages=self.history,
temperature=0.2,
top_p=0.2,
)

except OpenAIError as e:
print(f"OpenAI API Error\nError code:- {e.code}\nMessage:- {e.body.get('message')}")
exit(1)

# Extract the LLM's decision from the response
raw_json_string = response.choices[0].message.content
Expand Down
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,7 +7,7 @@
packages=find_packages(),
include_package_data=True,
install_requires=[
# Dependencies here, e.g., 'requests'
'openai'
],
entry_points={
"console_scripts": [
Expand Down