@@ -42,10 +42,10 @@ Before syncing any components, you need to authenticate with your GenGuardX inst
4242### Initialize in Your Code
4343
4444``` python
45- import genguardx
45+ import genguardx as ggx
4646
4747# Initialize connection to your GenGuardX instance
48- genguardx .init(
48+ ggx.init(
4949 api_url = " https://devaisandbox.corridorplatforms.com" , # Change for your instance
5050 api_key = " your-api-key-here" ,
5151)
@@ -68,7 +68,8 @@ Prompts define system instructions, templates, or conversation guidelines.
6868### Decorator Syntax
6969
7070``` python
71- @genguardx.Prompt.declare (
71+ import genguardx as ggx
72+ @ggx.Prompt.declare (
7273 name = ' My Prompt Name' , # Optional: defaults to function name
7374 group = ' My Group' , # Optional: organizational grouping
7475 task_type = ' Question Answering' , # Optional: Classification | Summarization | etc.
@@ -81,7 +82,7 @@ def my_prompt_function(*, cache: dict = {}, prompt: str = "Your prompt text here
8182 return prompt
8283
8384# Sync to platform
84- genguardx .sync(my_prompt_function)
85+ ggx.sync(my_prompt_function)
8586
8687```
8788
@@ -94,7 +95,8 @@ Models wrap LLM API calls with consistent interfaces and cost tracking.
9495### Decorator Syntax
9596
9697``` python
97- @genguardx.Model.declare (
98+ import genguardx as ggx
99+ @ggx.Model.declare (
98100 name = ' My Model Name' , # Optional: defaults to function name
99101 group = ' My Group' , # Optional: organizational grouping
100102 ownership_type = ' Proprietary' , # Optional: Proprietary | Open Source
@@ -109,7 +111,7 @@ def my_model_function(text: str, temperature: float = 0.7, *, cache: dict = {}):
109111 return {" response" : " ..." , " cost" : " ..." }
110112
111113# Sync to platform
112- genguardx .sync(my_model_function)
114+ ggx.sync(my_model_function)
113115
114116```
115117
@@ -123,8 +125,8 @@ RAGs connect to knowledge bases, databases, or document stores to retrieve relev
123125
124126``` python
125127import pathlib
126-
127- @genguardx .Rag.declare (
128+ import genguardx as ggx
129+ @ggx.Rag.declare (
128130 name = ' My RAG System' , # Optional: defaults to function name
129131 group = ' My Group' , # Optional: organizational grouping
130132 knowledge_base_format = ' Relational Database' , # Optional: Vector Database | Graph Database | etc.
@@ -142,7 +144,7 @@ def my_rag_function(
142144 return {" retrieved_data" : " ..." , " query_used" : " ..." }
143145
144146# Sync to platform
145- genguardx .sync(my_rag_function)
147+ ggx.sync(my_rag_function)
146148
147149```
148150
@@ -156,8 +158,8 @@ Pipelines orchestrate multiple components into complete workflows.
156158
157159``` python
158160import typing as t
159-
160- @genguardx .Pipeline.declare (
161+ import genguardx as ggx
162+ @ggx.Pipeline.declare (
161163 name = ' My Pipeline' , # Optional: defaults to function name
162164 group = ' My Group' , # Optional: organizational grouping
163165 usecase_type = ' Question Answering' , # Optional: Summarization | Translation
@@ -179,7 +181,7 @@ def my_pipeline_function(
179181 return {" output" : " ..." , " context" : " ..." }
180182
181183# Sync to platform
182- genguardx .sync(my_pipeline_function)
184+ ggx.sync(my_pipeline_function)
183185
184186```
185187
@@ -192,7 +194,8 @@ Global Functions are reusable utility functions that can be referenced by other
192194### Decorator Syntax
193195
194196``` python
195- @genguardx.GlobalFunction.declare (
197+ import genguardx as ggx
198+ @ggx.GlobalFunction.declare (
196199 name = ' My Utility Function' , # Optional: defaults to function name
197200 group = ' My Group' , # Optional: organizational grouping
198201)
@@ -203,7 +206,7 @@ def my_utility_function(input_text: str, max_length: int = 100, *, cache: dict =
203206 return input_text[:max_length]
204207
205208# Sync to platform
206- genguardx .sync(my_utility_function)
209+ ggx.sync(my_utility_function)
207210
208211```
209212
@@ -221,8 +224,8 @@ Reports generate evaluation dashboards with metrics and visualizations for monit
221224
222225``` python
223226import typing as t
224-
225- @genguardx .Report.declare (
227+ import genguardx as ggx
228+ @ggx.Report.declare (
226229 name = ' My Evaluation Report' , # Optional: defaults to function name
227230 object_types = [' PIPELINE' , ' FOUNDATION_MODEL' ], # Required: list of object types this report evaluates
228231 group = ' My Group' , # Optional: organizational grouping
@@ -241,7 +244,7 @@ def my_evaluation_report(job: t.Any, data: t.Any, *, cache: dict = {}) -> t.Any:
241244 return metrics_dict, processed_data
242245
243246# Sync to platform
244- genguardx .sync(my_evaluation_report)
247+ ggx.sync(my_evaluation_report)
245248
246249```
247250
@@ -303,20 +306,21 @@ When you sync a Pipeline, the system:
303306
304307``` python
305308# Define components
306- @genguardx.Prompt.declare (name = ' System Prompt' )
309+ import genguardx as ggx
310+ @ggx.Prompt.declare (name = ' System Prompt' )
307311def system_prompt (* , cache : dict = {}, prompt : str = " You are a helpful assistant." ):
308312 """ System instruction for the assistant."""
309313 # -- BEGIN DEFINITION --
310314 return prompt
311315
312- @genguardx .Model.declare (name = ' GPT-4' , provider = ' openai' , model = ' gpt-4' , ownership_type = ' Proprietary' , model_type = ' LLM' )
316+ @ggx.Model.declare (name = ' GPT-4' , provider = ' openai' , model = ' gpt-4' , ownership_type = ' Proprietary' , model_type = ' LLM' )
313317def gpt4 (text : str , * , cache : dict = {}):
314318 """ GPT-4 model wrapper."""
315319 # -- BEGIN DEFINITION --
316320 # Implementation
317321 return {" response" : " ..." }
318322
319- @genguardx .Pipeline.declare (name = ' Q&A Pipeline' , pipeline_type = ' Chat based - OpenAI Spec' )
323+ @ggx.Pipeline.declare (name = ' Q&A Pipeline' , pipeline_type = ' Chat based - OpenAI Spec' )
320324def qa_pipeline (
321325 user_message : str ,
322326 history : list[t.TypedDict(" T" , {' role' : str , ' content' : str }, total = False )] = (),
@@ -331,7 +335,7 @@ def qa_pipeline(
331335 return {" output" : response[" response" ]}
332336
333337# Sync only the pipeline - dependencies sync automatically
334- genguardx .sync(qa_pipeline)
338+ ggx.sync(qa_pipeline)
335339
336340```
337341
@@ -345,14 +349,14 @@ You can declare and sync components **without** using them in a pipeline:
345349
346350``` python
347351# Declare a standalone prompt
348- @genguardx .Prompt.declare (name = ' Greeting Prompt' , group = ' Standalone' )
352+ @ggx.Prompt.declare (name = ' Greeting Prompt' , group = ' Standalone' )
349353def greeting_prompt (* , cache : dict = {}, prompt : str = " Hello! How can I help you today?" ):
350354 """ Greeting message prompt."""
351355 # -- BEGIN DEFINITION --
352356 return prompt
353357
354358# Sync it independently
355- genguardx .sync(greeting_prompt)
359+ ggx.sync(greeting_prompt)
356360
357361```
358362
@@ -361,7 +365,7 @@ genguardx.sync(greeting_prompt)
361365Or use components together in a pipeline (they'll sync automatically):
362366
363367``` python
364- @genguardx .Pipeline.declare (name = ' Greeter Bot' , pipeline_type = ' Chat based - OpenAI Spec' )
368+ @ggx.Pipeline.declare (name = ' Greeter Bot' , pipeline_type = ' Chat based - OpenAI Spec' )
365369def greeter_pipeline (
366370 user_message : str ,
367371 history : list[t.TypedDict(" T" , {' role' : str , ' content' : str }, total = False )] = (),
@@ -374,7 +378,7 @@ def greeter_pipeline(
374378 prompt = greeting_prompt() # References the prompt
375379 return {" output" : prompt}
376380
377- genguardx .sync(greeter_pipeline) # Syncs both prompt and pipeline
381+ ggx.sync(greeter_pipeline) # Syncs both prompt and pipeline
378382
379383```
380384
@@ -435,17 +439,17 @@ genguardx.sync(greeter_pipeline) # Syncs both prompt and pipeline
435439## Complete Workflow Example
436440
437441``` python
438- import genguardx
442+ import genguardx as ggx
439443import typing as t
440444
441445# Step 1: Initialize
442- genguardx .init(
446+ ggx.init(
443447 api_url = " https://devaisandbox.corridorplatforms.com" ,
444448 api_key = " your-api-key-here" ,
445449)
446450
447451# Step 2: Declare components
448- @genguardx .Prompt.declare (
452+ @ggx.Prompt.declare (
449453 name = ' FAQ Prompt' ,
450454 group = ' Support' ,
451455 task_type = ' Question Answering' ,
@@ -456,7 +460,7 @@ def faq_prompt(*, cache: dict = {}, prompt: str = "Answer FAQs concisely and pro
456460 # -- BEGIN DEFINITION --
457461 return prompt
458462
459- @genguardx .Model.declare (
463+ @ggx.Model.declare (
460464 name = ' GPT-3.5' ,
461465 group = ' Models' ,
462466 ownership_type = ' Proprietary' ,
@@ -470,7 +474,7 @@ def gpt35(text: str, *, cache: dict = {}):
470474 # Implementation here
471475 return {" response" : " ..." }
472476
473- @genguardx .Pipeline.declare (
477+ @ggx.Pipeline.declare (
474478 name = ' FAQ Bot' ,
475479 group = ' Support' ,
476480 usecase_type = ' Question Answering' ,
@@ -493,7 +497,7 @@ def faq_pipeline(
493497 return {" output" : response[" response" ]}
494498
495499# Step 3: Sync (syncs all dependencies automatically)
496- genguardx .sync(faq_pipeline)
500+ ggx.sync(faq_pipeline)
497501
498502```
499503
@@ -548,7 +552,8 @@ genguardx.sync(faq_pipeline)
548552** Solution** : Add the anchor comment before your function's core logic:
549553
550554``` python
551- @genguardx.Prompt.declare (name = ' My Prompt' )
555+ import genguardx as ggx
556+ @ggx.Prompt.declare (name = ' My Prompt' )
552557def my_prompt (* , cache : dict = {}, prompt : str = " Hello" ):
553558 """ Docstring here"""
554559 # -- BEGIN DEFINITION -- # <-- Add this line
@@ -567,12 +572,13 @@ def my_prompt(*, cache: dict = {}, prompt: str = "Hello"):
567572** Solution** : Add the appropriate decorator to the referenced function:
568573
569574``` python
575+ import genguardx as ggx
570576# Before (causes warning)
571577def helper_function ():
572578 return " result"
573579
574580# After (no warning)
575- @genguardx .Prompt.declare (name = ' Helper' )
581+ @ggx.Prompt.declare (name = ' Helper' )
576582def helper_function (* , cache : dict = {}, prompt : str = " result" ):
577583 # -- BEGIN DEFINITION --
578584 return prompt
@@ -610,7 +616,8 @@ def helper_function(*, cache: dict = {}, prompt: str = "result"):
610616** Solution** : Add ` prompt: str = "your template" ` as a keyword-only parameter:
611617
612618``` python
613- @genguardx.Prompt.declare (name = ' My Prompt' )
619+ import genguardx as ggx
620+ @ggx.Prompt.declare (name = ' My Prompt' )
614621def my_prompt (* , cache : dict = {}, prompt : str = " Your prompt text here" ):
615622 """ Docstring"""
616623 # -- BEGIN DEFINITION --
0 commit comments