diff --git a/ai/go-groq/go.mod b/ai/go-groq/go.mod index 2c6be36..4168d6e 100644 --- a/ai/go-groq/go.mod +++ b/ai/go-groq/go.mod @@ -1,3 +1,3 @@ module go-groq -go 1.22.2 +go 1.22 diff --git a/htmx/README.md b/htmx/README.md new file mode 100644 index 0000000..02cd69a --- /dev/null +++ b/htmx/README.md @@ -0,0 +1,25 @@ +# HTMX and Go +This is a simple example of how to use HTMX and Go. + +![Go + HTMX example](screenshot-tagalog.png) + +Although translation might not be the best example to use with LLMs since +sometimes it has translation errors, this example is just to demonstrate how to create a simple +web application which uses htmx and calls a Go backend. + +I am using Groq APIs for this example. + +## Architecture +HTML --> form post using HTMX --> Go + +## Files +- go.mod - Optional. I just added it since I am using Goland to debug. +- groq_client.go - Client code which calls the Groq API, which we need for our LLM requirements +- index.html - This is the entry point of our web app. It contains the htmx client code. +- main.go - This runs the http server and serves index.html and APIs called by our web app +- translation.html - This is the html template we use to show the translated text in the app. + +## How to run +```bash +$ go run . 
+``` diff --git a/htmx/go.mod b/htmx/go.mod new file mode 100644 index 0000000..ce1bda2 --- /dev/null +++ b/htmx/go.mod @@ -0,0 +1,3 @@ +module htmx + +go 1.22.2 diff --git a/htmx/groq_client.go b/htmx/groq_client.go new file mode 100644 index 0000000..64ff04c --- /dev/null +++ b/htmx/groq_client.go @@ -0,0 +1,162 @@ +package main + +import ( + "bytes" + "encoding/json" + "fmt" + "io" + "net/http" +) + +const ( + apiBaseUrl = "https://api.groq.com/openai" + SYSTEM = "system" + USER = "user" + + LLMModelLlama38b = "llama3-8b-8192" + LLMModelLlama370b = "llama3-70b-8192" + LLMModelMixtral8x7b32k = "mixtral-8x7b-32768" + LLMModelGemma7b = "gemma-7b-it" +) + +type GroqClient struct { + ApiKey string +} + +type GroqMessage struct { + Role string `json:"role"` + Content string `json:"content"` +} + +type ChatCompletionRequest struct { + Messages []GroqMessage `json:"messages"` + Model string `json:"model"` + Temperature int `json:"temperature"` + MaxTokens int `json:"max_tokens"` + TopP int `json:"top_p"` + Stream bool `json:"stream"` + Stop interface{} `json:"stop"` +} + +type ChatCompletionResponse struct { + Id string `json:"id"` + Object string `json:"object"` + Created int `json:"created"` + Model string `json:"model"` + Choices []struct { + Index int `json:"index"` + Message struct { + Role string `json:"role"` + Content string `json:"content"` + } `json:"message"` + Logprobs interface{} `json:"logprobs"` + FinishReason string `json:"finish_reason"` + } `json:"choices"` + Usage struct { + PromptTokens int `json:"prompt_tokens"` + PromptTime float64 `json:"prompt_time"` + CompletionTokens int `json:"completion_tokens"` + CompletionTime float64 `json:"completion_time"` + TotalTokens int `json:"total_tokens"` + TotalTime float64 `json:"total_time"` + } `json:"usage"` + SystemFingerprint string `json:"system_fingerprint"` + XGroq struct { + Id string `json:"id"` + } `json:"x_groq"` +} + +func (g *GroqClient) ChatCompletion(llmModel string, systemPrompt string, prompt 
string) (string, error) { + + llm := llmModel + + if llmModel == "" { + //default to llama8B + llm = LLMModelLlama38b + } + groqMessages := make([]GroqMessage, 0) + + if systemPrompt != "" { + systemMessage := GroqMessage{ + Role: SYSTEM, + Content: systemPrompt, + } + groqMessages = append(groqMessages, systemMessage) + } + + if prompt != "" { + userMessage := GroqMessage{ + Role: USER, + Content: prompt, + } + groqMessages = append(groqMessages, userMessage) + } else { + return "", fmt.Errorf("prompt is required") + } + + chatCompletionRequest := &ChatCompletionRequest{ + Messages: groqMessages, + Model: llm, + Temperature: 0, + MaxTokens: 1024, + TopP: 1, + Stream: false, + Stop: nil, + } + + chatCompletionRequestJson, err := json.Marshal(chatCompletionRequest) + if err != nil { + return "", err + } + + //send http post request + chatCompletionUrl := "/v1/chat/completions" + finalUrl := fmt.Sprintf("%s%s", apiBaseUrl, chatCompletionUrl) + + req, err := http.NewRequest(http.MethodPost, finalUrl, bytes.NewBuffer(chatCompletionRequestJson)) + if err != nil { + return "", err + } + + //set headers + req.Header.Set("Content-Type", "application/json") + req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", g.ApiKey)) + + client := &http.Client{} + resp, err := client.Do(req) + if err != nil { + return "", err + } + + if resp.StatusCode != 200 { + return "", fmt.Errorf("unexpected status code: %d, reason: %s", resp.StatusCode, resp.Status) + } + + defer func(Body io.ReadCloser) { + err = Body.Close() + if err != nil { + fmt.Println("Error:", err) + } + }(resp.Body) + + body, err := io.ReadAll(resp.Body) + if err != nil { + return "", err + } + + chatCompletionResp := &ChatCompletionResponse{} + + err = json.Unmarshal(body, &chatCompletionResp) + if err != nil { + return "", err + } + + var content string + if chatCompletionResp.Choices != nil && len(chatCompletionResp.Choices) > 0 { + content = chatCompletionResp.Choices[0].Message.Content + } else { + return "", 
fmt.Errorf("no choices") + } + + return content, nil +} diff --git a/htmx/index.html b/htmx/index.html new file mode 100644 index 0000000..9d17177 --- /dev/null +++ b/htmx/index.html @@ -0,0 +1,44 @@ + + + HTMX + Go Example + + + + +
+

HTMX + Go

+

This simple example calls an LLM using Groq Completion API for the translation.

+
+
+ +

+ +

+ +
+
+
+ + diff --git a/htmx/main.go b/htmx/main.go new file mode 100644 index 0000000..18c1030 --- /dev/null +++ b/htmx/main.go @@ -0,0 +1,88 @@ +package main + +import ( + "embed" + "errors" + "fmt" + "html/template" + "net/http" + "os" +) + +//go:embed index.html translation.html +var htmlFiles embed.FS + +func main() { + + // Set up the http handlers + http.HandleFunc("/", handleHome) + http.HandleFunc("/translate", handleTranslate) + + // Start the server + err := http.ListenAndServe(":8080", nil) + if err != nil { + return + } + +} + +func handleHome(w http.ResponseWriter, r *http.Request) { + tmpl := template.Must(template.ParseFS(htmlFiles, "index.html")) + + err := tmpl.Execute(w, nil) + if err != nil { + return + } +} + +func handleTranslate(w http.ResponseWriter, r *http.Request) { + textToTranslate := r.FormValue("textToTranslate") + languageToTranslateTo := r.FormValue("languageToTranslateTo") + translatedText := translateText(textToTranslate, languageToTranslateTo) + + type Translation struct { + TextToTranslate string + TranslatedText string + } + + t := Translation{ + TextToTranslate: textToTranslate, + TranslatedText: translatedText, + } + + tmpl := template.Must(template.ParseFS(htmlFiles, "translation.html")) + err := tmpl.Execute(w, t) + if err != nil { + return + } + +} + +func translateText(textToTranslate string, languageToTranslateTo string) string { + apiKey := os.Getenv("GROQ_API_KEY") + if apiKey == "" { + err := errors.New("GROQ_API_KEY need to be set as an environment variable") + panic(err) + } + + groqClient := &GroqClient{ApiKey: apiKey} + + systemPrompt := "you are a professional language translator." + + "do not answer questions, just translate the text even if it is a question. " + + "only respond with the translated text and never explain the translation. " + + "first thing you do is understand the text you need to translate. " + + "check first the language of the text to translate. 
" + + "if the text to translate is already in the language to translate to," + + "just mention there is no need for translation." + + "if you are not able to translate just say sorry you are not able to translate." + prompt := fmt.Sprintf("translate this text to %s === text === %s === end text ===", + languageToTranslateTo, textToTranslate) + + translatedText, err := groqClient.ChatCompletion(LLMModelGemma7b, systemPrompt, prompt) + if err != nil { + fmt.Println(err) + return "No translation" + } + + return translatedText +} diff --git a/htmx/screenshot-tagalog.png b/htmx/screenshot-tagalog.png new file mode 100644 index 0000000..8f57850 Binary files /dev/null and b/htmx/screenshot-tagalog.png differ diff --git a/htmx/translation.html b/htmx/translation.html new file mode 100644 index 0000000..17e5ea9 --- /dev/null +++ b/htmx/translation.html @@ -0,0 +1,3 @@ +
+ {{.TranslatedText}} +
\ No newline at end of file