diff --git a/.claude-plugin/marketplace.json b/.claude-plugin/marketplace.json index a82ab1cc..b279f696 100644 --- a/.claude-plugin/marketplace.json +++ b/.claude-plugin/marketplace.json @@ -54,11 +54,6 @@ "source": "./product-management", "description": "Write feature specs, plan roadmaps, and synthesize user research faster. Keep stakeholders updated and stay ahead of the competitive landscape." }, - { - "name": "bio-research", - "source": "./bio-research", - "description": "Connect to preclinical research tools and databases (literature search, genomics analysis, target prioritization) to accelerate early-stage life sciences R&D" - }, { "name": "slack-by-salesforce", "source": "./partner-built/slack", diff --git a/README.md b/README.md index c902fcb2..255b5d26 100644 --- a/README.md +++ b/README.md @@ -17,11 +17,11 @@ We're open-sourcing 11 plugins built and inspired by our own work: | **[productivity](./productivity)** | Manage tasks, calendars, daily workflows, and personal context so you spend less time repeating yourself. | Slack, Notion, Asana, Linear, Jira, Monday, ClickUp, Microsoft 365 | | **[sales](./sales)** | Research prospects, prep for calls, review your pipeline, draft outreach, and build competitive battlecards. | Slack, HubSpot, Close, Clay, ZoomInfo, Notion, Jira, Fireflies, Microsoft 365 | | **[customer-support](./customer-support)** | Triage tickets, draft responses, package escalations, research customer context, and turn resolved issues into knowledge base articles. | Slack, Intercom, HubSpot, Guru, Jira, Notion, Microsoft 365 | -| **[product-management](./product-management)** | Write specs, plan roadmaps, synthesize user research, keep stakeholders updated, and track the competitive landscape. 
| Slack, Linear, Asana, Monday, ClickUp, Jira, Notion, Figma, Amplitude, Pendo, Intercom, Fireflies | -| **[marketing](./marketing)** | Draft content, plan campaigns, enforce brand voice, brief on competitors, and report on performance across channels. | Slack, Canva, Figma, HubSpot, Amplitude, Notion, Ahrefs, SimilarWeb, Klaviyo | +| **[product-management](./product-management)** | Write specs, plan roadmaps, synthesize user research, keep stakeholders updated, and track the competitive landscape. | Slack, Linear, Asana, Monday, ClickUp, Jira, Notion, Figma, Amplitude, Pendo, Mixpanel, Intercom, Fireflies | +| **[marketing](./marketing)** | Draft content, plan campaigns, enforce brand voice, brief on competitors, and report on performance across channels. | Slack, Canva, Figma, HubSpot, Amplitude, Mixpanel, Notion, Ahrefs, SimilarWeb, Klaviyo | | **[legal](./legal)** | Review contracts, triage NDAs, navigate compliance, assess risk, prep for meetings, and draft templated responses. | Slack, Box, Egnyte, Jira, Microsoft 365 | | **[finance](./finance)** | Prep journal entries, reconcile accounts, generate financial statements, analyze variances, manage close, and support audits. | Snowflake, Databricks, BigQuery, Slack, Microsoft 365 | -| **[data](./data)** | Query, visualize, and interpret datasets — write SQL, run statistical analysis, build dashboards, and validate your work before sharing. | Snowflake, Databricks, BigQuery, Hex, Amplitude, Jira | +| **[data](./data)** | Query, visualize, and interpret datasets — write SQL, run statistical analysis, build dashboards, and validate your work before sharing. | Snowflake, Databricks, BigQuery, Definite, Hex, Amplitude, Mixpanel, Jira | | **[enterprise-search](./enterprise-search)** | Find anything across email, chat, docs, and wikis — one query across all your company's tools. 
| Slack, Notion, Guru, Jira, Asana, Microsoft 365 | | **[bio-research](./bio-research)** | Connect to preclinical research tools and databases (literature search, genomics analysis, target prioritization) to accelerate early-stage life sciences R&D. | PubMed, BioRender, bioRxiv, ClinicalTrials.gov, ChEMBL, Synapse, Wiley, Owkin, Open Targets, Benchling | | **[cowork-plugin-management](./cowork-plugin-management)** | Create new plugins or customize existing ones for your organization's specific tools and workflows. | — | diff --git a/bio-research/.claude-plugin/plugin.json b/bio-research/.claude-plugin/plugin.json index 0783cd54..d494f8d1 100644 --- a/bio-research/.claude-plugin/plugin.json +++ b/bio-research/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "bio-research", - "version": "1.0.0", + "version": "1.1.0", "description": "Connect to preclinical research tools and databases (literature search, genomics analysis, target prioritization) to accelerate early-stage life sciences R&D", "author": { "name": "Anthropic" diff --git a/bio-research/skills/nextflow-development/scripts/check_environment.py b/bio-research/skills/nextflow-development/scripts/check_environment.py index 2e505d5b..efa5ac6e 100644 --- a/bio-research/skills/nextflow-development/scripts/check_environment.py +++ b/bio-research/skills/nextflow-development/scripts/check_environment.py @@ -260,7 +260,7 @@ def check_resources() -> CheckResult: ) if result.returncode == 0: mem_gb = int(result.stdout.strip()) / (1024**3) - except: + except Exception: pass # Disk space (current directory) @@ -268,7 +268,7 @@ def check_resources() -> CheckResult: try: statvfs = os.statvfs('.') disk_gb = (statvfs.f_frsize * statvfs.f_bavail) / (1024**3) - except: + except Exception: pass details = f"CPUs: {cpu_count}, Memory: {mem_gb:.1f}GB, Disk: {disk_gb:.1f}GB available" @@ -319,7 +319,7 @@ def check_network() -> CheckResult: req = urllib.request.Request("https://hub.docker.com", headers=headers) 
urllib.request.urlopen(req, timeout=10) docker_hub_ok = True - except: + except Exception: docker_hub_ok = False # Try nf-core (for pipeline downloads) @@ -327,7 +327,7 @@ def check_network() -> CheckResult: req = urllib.request.Request("https://nf-co.re", headers=headers) urllib.request.urlopen(req, timeout=10) nfcore_ok = True - except: + except Exception: nfcore_ok = False if docker_hub_ok and nfcore_ok: diff --git a/bio-research/skills/nextflow-development/scripts/detect_data_type.py b/bio-research/skills/nextflow-development/scripts/detect_data_type.py index 870b0643..e3acd711 100644 --- a/bio-research/skills/nextflow-development/scripts/detect_data_type.py +++ b/bio-research/skills/nextflow-development/scripts/detect_data_type.py @@ -71,7 +71,7 @@ def scan_directory(directory: str) -> Dict: try: size = os.path.getsize(os.path.join(root, filename)) info['total_size_gb'] += size / (1024**3) - except: + except Exception: pass return info diff --git a/bio-research/commands/start.md b/bio-research/skills/start/SKILL.md similarity index 91% rename from bio-research/commands/start.md rename to bio-research/skills/start/SKILL.md index d180433a..db816652 100644 --- a/bio-research/commands/start.md +++ b/bio-research/skills/start/SKILL.md @@ -1,10 +1,11 @@ --- -description: Set up your bio-research environment and explore available tools +name: start +description: Set up your bio-research environment and explore available tools. Use when first getting oriented with the plugin, checking which literature, drug-discovery, or visualization MCP servers are connected, or surveying available analysis skills before starting a new project. --- # Bio-Research Start -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). 
You are helping a biological researcher get oriented with the bio-research plugin. Walk through the following steps in order. diff --git a/cowork-plugin-management/skills/create-cowork-plugin/SKILL.md b/cowork-plugin-management/skills/create-cowork-plugin/SKILL.md index 51887597..ebd86a56 100644 --- a/cowork-plugin-management/skills/create-cowork-plugin/SKILL.md +++ b/cowork-plugin-management/skills/create-cowork-plugin/SKILL.md @@ -234,7 +234,14 @@ If the user says "whatever you think is best," provide specific recommendations 1. Summarize what was created — list each component and its purpose 2. Ask if the user wants any adjustments -3. Run `claude plugin validate `; fix any errors and warnings +3. Run `claude plugin validate ` to check the plugin structure. If this command is unavailable (e.g., when running inside Cowork), verify the structure manually: + - `.claude-plugin/plugin.json` exists and contains valid JSON with at least a `name` field + - The `name` field is kebab-case (lowercase letters, numbers, and hyphens only) + - Any component directories referenced by the plugin (`commands/`, `skills/`, `agents/`, `hooks/`) actually exist and contain files in the expected formats — `.md` for commands/skills/agents, `.json` for hooks + - Each skill subdirectory contains a `SKILL.md` + - Report what passed and what didn't, the same way the CLI validator would + + Fix any errors before proceeding. 4. Package as a `.plugin` file: ```bash diff --git a/customer-support/.claude-plugin/plugin.json b/customer-support/.claude-plugin/plugin.json index 63a69d46..7a3ff4d7 100644 --- a/customer-support/.claude-plugin/plugin.json +++ b/customer-support/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "customer-support", - "version": "1.1.0", + "version": "1.2.0", "description": "Triage tickets, draft responses, escalate issues, and build your knowledge base. 
Research customer context and turn resolved issues into self-service content.", "author": { "name": "Anthropic" diff --git a/customer-support/commands/draft-response.md b/customer-support/commands/draft-response.md deleted file mode 100644 index 349fcf23..00000000 --- a/customer-support/commands/draft-response.md +++ /dev/null @@ -1,154 +0,0 @@ ---- -description: Draft a professional customer-facing response tailored to the situation and relationship -argument-hint: "" ---- - -# Draft Response - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Draft a professional, customer-facing response tailored to the situation, customer relationship, and communication context. - -## Usage - -``` -/draft-response -``` - -Examples: -- `/draft-response Acme Corp is asking when the new dashboard feature will ship` -- `/draft-response Customer escalation — their integration has been down for 2 days` -- `/draft-response Responding to a feature request we won't be building` -- `/draft-response Customer hit a billing error and wants a resolution ASAP` - -## Workflow - -### 1. Understand the Context - -Parse the user's input to determine: - -- **Customer**: Who is the communication for? Look up account context if available. -- **Situation type**: Question, issue, escalation, announcement, negotiation, bad news, good news, follow-up -- **Urgency**: Is this time-sensitive? How long has the customer been waiting? -- **Channel**: Email, support ticket, chat, or other (adjust formality accordingly) -- **Relationship stage**: New customer, established, frustrated/escalated -- **Stakeholder level**: End user, manager, executive, technical, business - -### 2. 
Research Context - -Gather relevant background from available sources: - -**~~email:** -- Previous correspondence with this customer on this topic -- Any commitments or timelines previously shared -- Tone and style of the existing thread - -**~~chat:** -- Internal discussions about this customer or topic -- Any guidance from product, engineering, or leadership -- Similar situations and how they were handled - -**~~CRM (if connected):** -- Account details and plan level -- Contact information and key stakeholders -- Previous escalations or sensitive issues - -**~~support platform (if connected):** -- Related tickets and their resolution -- Known issues or workarounds -- SLA status and response time commitments - -**~~knowledge base:** -- Official documentation or help articles to reference -- Product roadmap information (if shareable) -- Policy or process documentation - -### 3. Generate the Draft - -Produce a response tailored to the situation: - -``` -## Draft Response - -**To:** [Customer contact name] -**Re:** [Subject/topic] -**Channel:** [Email / Ticket / Chat] -**Tone:** [Empathetic / Professional / Technical / Celebratory / Candid] - ---- - -[Draft response text] - ---- - -### Notes for You (internal — do not send) -- **Why this approach:** [Rationale for tone and content choices] -- **Things to verify:** [Any facts or commitments to confirm before sending] -- **Risk factors:** [Anything sensitive about this response] -- **Follow-up needed:** [Actions to take after sending] -- **Escalation note:** [If this should be reviewed by someone else first] -``` - -### 4. 
Situation-Specific Approaches - -**Answering a product question:** -- Lead with the direct answer -- Provide relevant documentation links -- Offer to connect them with the right resource if needed -- If you don't know the answer: say so honestly, commit to finding out, give a timeline - -**Responding to an issue or bug:** -- Acknowledge the impact on their work -- State what you know about the issue and its status -- Provide workaround if available -- Set expectations for resolution timeline -- Commit to updates at regular intervals - -**Handling an escalation:** -- Acknowledge the severity and their frustration -- Take ownership (no deflecting or excuse-making) -- Provide a clear action plan with timeline -- Identify the person accountable for resolution -- Offer a meeting or call if appropriate for the severity - -**Delivering bad news (feature sunset, delay, can't-fix):** -- Be direct — don't bury the news -- Explain the reasoning honestly -- Acknowledge the impact on them specifically -- Offer alternatives or mitigation -- Provide a clear path forward - -**Sharing good news (feature launch, milestone, recognition):** -- Lead with the positive outcome -- Connect it to their specific goals or use case -- Suggest next steps to capitalize on the good news -- Express genuine enthusiasm - -**Declining a request (feature request, discount, exception):** -- Acknowledge the request and its reasoning -- Be honest about the decision -- Explain the why without being dismissive -- Offer alternatives when possible -- Leave the door open for future conversation - -### 5. 
Response Quality Checks - -Before presenting the draft, verify: - -- [ ] Tone matches the situation and relationship -- [ ] No commitments beyond what's authorized -- [ ] No product roadmap details that shouldn't be shared externally -- [ ] Accurate references to previous conversations -- [ ] Clear next steps and ownership -- [ ] Appropriate for the stakeholder level (not too technical for executives, not too vague for engineers) -- [ ] Length is appropriate for the channel (shorter for chat, fuller for email) - -### 6. Offer Iterations - -After presenting the draft: -- "Want me to adjust the tone? (more formal, more casual, more empathetic, more direct)" -- "Should I add or remove any specific points?" -- "Want me to make this shorter/longer?" -- "Should I draft a version for a different stakeholder?" -- "Want me to draft the internal escalation note as well?" -- "Should I prepare a follow-up message to send after [X days] if no response?" diff --git a/customer-support/commands/escalate.md b/customer-support/commands/escalate.md deleted file mode 100644 index 3595b23e..00000000 --- a/customer-support/commands/escalate.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -description: Package an escalation for engineering, product, or leadership with full context -argument-hint: " [customer name]" ---- - -# Escalate - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Package a support issue into a structured escalation brief for engineering, product, or leadership. Gathers context, structures reproduction steps, assesses business impact, and identifies the right escalation target. 
- -## Usage - -``` -/escalate [customer name or account] -``` - -Examples: -- `/escalate API returning 500 errors intermittently for Acme Corp` -- `/escalate Data export is missing rows — 3 customers reported this week` -- `/escalate SSO login loop affecting all Enterprise customers` -- `/escalate Customer threatening to churn over missing audit log feature` - -## Workflow - -### 1. Understand the Issue - -Parse the input and determine: - -- **What's broken or needed**: The core technical or product issue -- **Who's affected**: Specific customer(s), segment, or all users -- **How long**: When did this start? How long has the customer been waiting? -- **What's been tried**: Any troubleshooting or workarounds attempted -- **Why escalate now**: What makes this need attention beyond normal support - -Use the "When to Escalate vs. Handle in Support" criteria from the **escalation** skill to confirm this warrants escalation. - -### 2. Gather Context - -Pull together relevant information from available sources: - -- **~~support platform**: Related tickets, timeline of communications, previous troubleshooting -- **~~CRM** (if connected): Account details, key contacts, previous escalations -- **~~chat**: Internal discussions about this issue, similar reports from other customers -- **~~project tracker** (if connected): Related bug reports or feature requests, engineering status -- **~~knowledge base**: Known issues or workarounds, relevant documentation - -### 3. Assess Business Impact - -Using the impact dimensions from the **escalation** skill, quantify: - -- **Breadth**: How many customers/users affected? Growing? -- **Depth**: Blocked vs. inconvenienced? -- **Duration**: How long has this been going on? -- **Revenue**: ARR at risk? Pending deals affected? -- **Time pressure**: Hard deadline? - -### 4. 
Determine Escalation Target - -Using the escalation tiers from the **escalation** skill, identify the right target: L2 Support, Engineering, Product, Security, or Leadership. - -### 5. Structure Reproduction Steps (for bugs) - -If the issue is a bug, follow the reproduction step best practices from the **escalation** skill to document clear repro steps with environment details and evidence. - -### 6. Generate Escalation Brief - -``` -## ESCALATION: [One-line summary] - -**Severity:** [Critical / High / Medium] -**Target team:** [Engineering / Product / Security / Leadership] -**Reported by:** [Your name/team] -**Date:** [Today's date] - -### Impact -- **Customers affected:** [Who and how many] -- **Workflow impact:** [What they can't do] -- **Revenue at risk:** [If applicable] -- **Time in queue:** [How long this has been an issue] - -### Issue Description -[Clear, concise description of the problem — 3-5 sentences] - -### What's Been Tried -1. [Troubleshooting step and result] -2. [Troubleshooting step and result] -3. [Troubleshooting step and result] - -### Reproduction Steps -[If applicable — follow the format from the escalation skill] - -### Customer Communication -- **Last update to customer:** [Date and what was communicated] -- **Customer expectation:** [What they're expecting and by when] -- **Escalation risk:** [Will they escalate further if not resolved by X?] - -### What's Needed -- [Specific ask — "investigate root cause", "prioritize fix", - "make product decision on X", "approve exception for Y"] -- **Deadline:** [When this needs resolution or an update] - -### Supporting Context -- [Related tickets or links] -- [Internal discussion threads] -- [Documentation or logs] -``` - -### 7. Offer Next Steps - -After generating the escalation: -- "Want me to post this in a ~~chat channel for the target team?" -- "Should I update the customer with an interim response?" -- "Want me to set a follow-up reminder to check on this?" 
-- "Should I draft a customer-facing update with the current status?" diff --git a/customer-support/commands/kb-article.md b/customer-support/commands/kb-article.md deleted file mode 100644 index 6b62d1df..00000000 --- a/customer-support/commands/kb-article.md +++ /dev/null @@ -1,83 +0,0 @@ ---- -description: Draft a knowledge base article from a resolved issue or common question -argument-hint: "" ---- - -# KB Article - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Draft a publish-ready knowledge base article from a resolved support issue, common question, or documented workaround. Structures the content for searchability and self-service. - -## Usage - -``` -/kb-article -``` - -Examples: -- `/kb-article How to configure SSO with Okta — resolved this for 3 customers last month` -- `/kb-article Ticket #4521 — customer couldn't export data over 10k rows` -- `/kb-article Common question: how to set up webhook notifications` -- `/kb-article Known issue: dashboard charts not loading on Safari 16` - -## Workflow - -### 1. Understand the Source Material - -Parse the input to identify: - -- **What was the problem?** The original issue, question, or error -- **What was the solution?** The resolution, workaround, or answer -- **Who does this affect?** User type, plan level, or configuration -- **How common is this?** One-off or recurring issue -- **What article type fits best?** Use the article types from the **knowledge-management** skill (how-to, troubleshooting, FAQ, known issue, reference) - -If a ticket reference is provided, look up the full context: - -- **~~support platform**: Pull the ticket thread, resolution, and any internal notes -- **~~knowledge base**: Check if a similar article already exists (update vs. create new) -- **~~project tracker**: Check if there's a related bug or feature request - -### 2. 
Draft the Article - -Using the article structure and formatting standards from the **knowledge-management** skill: - -- Follow the template for the chosen article type (how-to, troubleshooting, FAQ, known issue, or reference) -- Apply the searchability best practices: customer-language title, plain-language opening sentence, exact error messages, common synonyms -- Keep it scannable: headers, numbered steps, short paragraphs - -### 3. Generate the Article - -Present the draft with metadata: - -``` -## KB Article Draft - -**Title:** [Article title] -**Type:** [How-to / Troubleshooting / FAQ / Known Issue / Reference] -**Category:** [Product area or topic] -**Tags:** [Searchable tags] -**Audience:** [All users / Admins / Developers / Specific plan] - ---- - -[Full article content — using the appropriate template -from the knowledge-management skill] - ---- - -### Publishing Notes -- **Source:** [Ticket #, customer conversation, or internal discussion] -- **Existing articles to update:** [If this overlaps with existing content] -- **Review needed from:** [SME or team if technical accuracy needs verification] -- **Suggested review date:** [When to revisit for accuracy] -``` - -### 4. Offer Next Steps - -After generating the article: -- "Want me to check if a similar article already exists in your ~~knowledge base?" -- "Should I adjust the technical depth for a different audience?" -- "Want me to draft a companion article (e.g., a how-to to go with this troubleshooting guide)?" -- "Should I create an internal-only version with additional technical detail?" 
diff --git a/customer-support/commands/research.md b/customer-support/commands/research.md deleted file mode 100644 index 0c990e5a..00000000 --- a/customer-support/commands/research.md +++ /dev/null @@ -1,116 +0,0 @@ ---- -description: Multi-source research on a customer question or topic with source attribution -argument-hint: "" ---- - -# Research - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Multi-source research on a customer question, product topic, or account-related inquiry. Synthesizes findings from all available sources with clear attribution. - -## Usage - -``` -/research -``` - -## Workflow - -### 1. Parse the Research Request - -Identify what type of research is needed: -- **Customer question**: Something a customer has asked that needs an answer (e.g., "Does our product support SSO with Okta?") -- **Issue investigation**: Background on a reported problem (e.g., "Has this bug been reported before? What's the known workaround?") -- **Account context**: History with a specific customer (e.g., "What did we tell Acme Corp last time they asked about this?") -- **Topic research**: General topic relevant to support work (e.g., "Best practices for webhook retry logic") - -### 2. 
Search Available Sources - -Search in priority order, adapting to what is connected: - -**Tier 1 — Internal Documentation (highest confidence):** -- ~~knowledge base (if connected): product docs, runbooks, FAQs -- ~~cloud storage: internal documents, specs, guides, past research -- ~~CRM notes: previous answers to similar questions, account context - -**Tier 2 — Team Communications:** -- ~~chat: search for the topic in relevant channels; check if teammates have discussed or answered this before -- ~~email: search for previous correspondence on this topic -- ~~support platform (if connected): check if this has been asked/resolved before - -**Tier 3 — External Sources:** -- Web search: official documentation, blog posts, community forums -- Public knowledge bases, help centers, release notes - -### 3. Synthesize Findings - -Compile results into a structured research brief: - -``` -## Research: [Question/Topic] - -### Answer -[Clear, direct answer to the question — lead with the bottom line] - -**Confidence:** [High / Medium / Low] -[Explain what drives the confidence level] - -### Key Findings - -**From [Source 1]:** -- [Finding with specific detail] -- [Finding with specific detail] - -**From [Source 2]:** -- [Finding with specific detail] - -### Context & Nuance -[Any caveats, edge cases, or additional context that matters] - -### Sources -1. [Source name/link] — [what it contributed] -2. [Source name/link] — [what it contributed] -3. [Source name/link] — [what it contributed] - -### Gaps & Unknowns -- [What couldn't be confirmed] -- [What might need verification from a subject matter expert] - -### Recommended Next Steps -- [Action if the answer needs to go to a customer] -- [Action if further research is needed] -- [Who to consult for verification if needed] -``` - -### 4. 
Handle Insufficient Sources - -If no connected sources yield results: - -- Perform web research on the topic -- Ask the user for internal context: - - "I couldn't find this in connected sources. Do you have internal docs or knowledge base articles about this?" - - "Has your team discussed this topic before? Any ~~chat channels I should check?" - - "Is there a subject matter expert who would know the answer?" -- Be transparent about limitations: - - "This answer is based on web research only — please verify against your internal documentation before sharing with the customer." - - "I found a possible answer but couldn't confirm it from an authoritative internal source." - -### 5. Customer-Facing Considerations - -If the research is to answer a customer question: - -- Flag if the answer involves product roadmap, pricing, legal, or security topics that may need review -- Note if the answer differs from what may have been communicated previously -- Suggest appropriate caveats for the customer-facing response -- Offer to draft the customer response: "Want me to draft a response to the customer based on these findings?" - -### 6. Knowledge Capture - -After research is complete, suggest capturing the knowledge: - -- "Should I save these findings to your knowledge base for future reference?" -- "Want me to create a FAQ entry based on this research?" -- "This might be worth documenting — should I draft a runbook entry?" - -This helps build institutional knowledge and reduces duplicate research effort across the team. diff --git a/customer-support/commands/triage.md b/customer-support/commands/triage.md deleted file mode 100644 index 5b786f36..00000000 --- a/customer-support/commands/triage.md +++ /dev/null @@ -1,97 +0,0 @@ ---- -description: Triage and prioritize a support ticket or customer issue -argument-hint: "" ---- - -# Triage - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). 
- -Categorize, prioritize, and route an incoming support ticket or customer issue. Produces a structured triage assessment with a suggested initial response. - -## Usage - -``` -/triage -``` - -Examples: -- `/triage Customer says their dashboard has been showing a blank page since this morning` -- `/triage "I was charged twice for my subscription this month"` -- `/triage User can't connect their SSO — getting a 403 error on the callback URL` -- `/triage Feature request: they want to export reports as PDF` - -## Workflow - -### 1. Parse the Issue - -Read the input and extract: - -- **Core problem**: What is the customer actually experiencing? -- **Symptoms**: What specific behavior or error are they seeing? -- **Customer context**: Who is this? Any account details, plan level, or history available? -- **Urgency signals**: Are they blocked? Is this production? How many users affected? -- **Emotional state**: Frustrated, confused, matter-of-fact, escalating? - -### 2. Categorize and Prioritize - -Using the category taxonomy and priority framework from the **ticket-triage** skill: - -- Assign a **primary category** (bug, how-to, feature request, billing, account, integration, security, data, performance) and an optional secondary category -- Assign a **priority** (P1–P4) based on impact and urgency -- Identify the **product area** the issue maps to - -### 3. Check for Duplicates and Known Issues - -Before routing, check available sources: - -- **~~support platform**: Search for similar open or recently resolved tickets -- **~~knowledge base**: Check for known issues or existing documentation -- **~~project tracker**: Check if there's an existing bug report or feature request - -### 4. Determine Routing - -Using the routing rules from the **ticket-triage** skill, recommend which team or queue should handle this based on category and complexity. - -### 5. 
Generate Triage Output - -``` -## Triage: [One-line issue summary] - -**Category:** [Primary] / [Secondary if applicable] -**Priority:** [P1-P4] — [Brief justification] -**Product area:** [Area/team] - -### Issue Summary -[2-3 sentence summary of what the customer is experiencing] - -### Key Details -- **Customer:** [Name/account if known] -- **Impact:** [Who and what is affected] -- **Workaround:** [Available / Not available / Unknown] -- **Related tickets:** [Links to similar issues if found] -- **Known issue:** [Yes — link / No / Checking] - -### Routing Recommendation -**Route to:** [Team or queue] -**Why:** [Brief reasoning] - -### Suggested Initial Response -[Draft first response to the customer — acknowledge the issue, -set expectations, provide workaround if available. -Use the auto-response templates from the ticket-triage skill -as a starting point.] - -### Internal Notes -- [Any additional context for the agent picking this up] -- [Reproduction hints if it's a bug] -- [Escalation triggers to watch for] -``` - -### 6. Offer Next Steps - -After presenting the triage: -- "Want me to draft a full response to the customer?" -- "Should I search for more context on this issue?" -- "Want me to check if this is a known bug in the tracker?" -- "Should I escalate this? I can package it with /escalate." diff --git a/customer-support/skills/escalation/SKILL.md b/customer-support/skills/customer-escalation/SKILL.md similarity index 60% rename from customer-support/skills/escalation/SKILL.md rename to customer-support/skills/customer-escalation/SKILL.md index 0cf4b76e..069eae32 100644 --- a/customer-support/skills/escalation/SKILL.md +++ b/customer-support/skills/customer-escalation/SKILL.md @@ -1,11 +1,127 @@ --- -name: escalation -description: Structure and package support escalations for engineering, product, or leadership with full context, reproduction steps, and business impact. 
Use when an issue needs to go beyond support, when writing an escalation brief, or when assessing whether an issue warrants escalation. +name: customer-escalation +description: Package an escalation for engineering, product, or leadership with full context. Use when a bug needs engineering attention beyond normal support, multiple customers report the same issue, a customer is threatening to churn, or an issue has sat unresolved past its SLA. +argument-hint: "<issue description> [customer name]" --- -# Escalation Skill +# /customer-escalation -You are an expert at determining when and how to escalate support issues. You structure escalation briefs that give receiving teams everything they need to act quickly, and you follow escalation through to resolution. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Package a support issue into a structured escalation brief for engineering, product, or leadership. Gathers context, structures reproduction steps, assesses business impact, and identifies the right escalation target. + +## Usage + +``` +/customer-escalation <issue description> [customer name or account] +``` + +Examples: +- `/customer-escalation API returning 500 errors intermittently for Acme Corp` +- `/customer-escalation Data export is missing rows — 3 customers reported this week` +- `/customer-escalation SSO login loop affecting all Enterprise customers` +- `/customer-escalation Customer threatening to churn over missing audit log feature` + +## Workflow + +### 1. Understand the Issue + +Parse the input and determine: + +- **What's broken or needed**: The core technical or product issue +- **Who's affected**: Specific customer(s), segment, or all users +- **How long**: When did this start? How long has the customer been waiting? +- **What's been tried**: Any troubleshooting or workarounds attempted +- **Why escalate now**: What makes this need attention beyond normal support + +Use the "When to Escalate vs.
Handle in Support" criteria below to confirm this warrants escalation. + +### 2. Gather Context + +Pull together relevant information from available sources: + +- **~~support platform**: Related tickets, timeline of communications, previous troubleshooting +- **~~CRM** (if connected): Account details, key contacts, previous escalations +- **~~chat**: Internal discussions about this issue, similar reports from other customers +- **~~project tracker** (if connected): Related bug reports or feature requests, engineering status +- **~~knowledge base**: Known issues or workarounds, relevant documentation + +### 3. Assess Business Impact + +Using the impact dimensions below, quantify: + +- **Breadth**: How many customers/users affected? Growing? +- **Depth**: Blocked vs. inconvenienced? +- **Duration**: How long has this been going on? +- **Revenue**: ARR at risk? Pending deals affected? +- **Time pressure**: Hard deadline? + +### 4. Determine Escalation Target + +Using the escalation tiers below, identify the right target: L2 Support, Engineering, Product, Security, or Leadership. + +### 5. Structure Reproduction Steps (for bugs) + +If the issue is a bug, follow the reproduction step best practices below to document clear repro steps with environment details and evidence. + +### 6. Generate Escalation Brief + +``` +## ESCALATION: [One-line summary] + +**Severity:** [Critical / High / Medium] +**Target team:** [Engineering / Product / Security / Leadership] +**Reported by:** [Your name/team] +**Date:** [Today's date] + +### Impact +- **Customers affected:** [Who and how many] +- **Workflow impact:** [What they can't do] +- **Revenue at risk:** [If applicable] +- **Time in queue:** [How long this has been an issue] + +### Issue Description +[Clear, concise description of the problem — 3-5 sentences] + +### What's Been Tried +1. [Troubleshooting step and result] +2. [Troubleshooting step and result] +3. 
[Troubleshooting step and result] + +### Reproduction Steps +[If applicable — follow the format below] +1. [Step] +2. [Step] +3. [Step] +Expected: [X] +Actual: [Y] +Environment: [Details] + +### Customer Communication +- **Last update to customer:** [Date and what was communicated] +- **Customer expectation:** [What they're expecting and by when] +- **Escalation risk:** [Will they escalate further if not resolved by X?] + +### What's Needed +- [Specific ask — "investigate root cause", "prioritize fix", + "make product decision on X", "approve exception for Y"] +- **Deadline:** [When this needs resolution or an update] + +### Supporting Context +- [Related tickets or links] +- [Internal discussion threads] +- [Documentation or logs] +``` + +### 7. Offer Next Steps + +After generating the escalation: +- "Want me to post this in a ~~chat channel for the target team?" +- "Should I update the customer with an interim response?" +- "Want me to set a follow-up reminder to check on this?" +- "Should I draft a customer-facing update with the current status?" + +--- ## When to Escalate vs. Handle in Support @@ -57,53 +173,6 @@ You are an expert at determining when and how to escalate support issues. 
You st **When:** High-revenue customer threatening churn, SLA breach on critical account, cross-functional decision needed, exception to policy required, PR or legal risk **What to include:** Full business context, revenue at risk, what's been tried, specific decision or action needed, deadline -## Structured Escalation Format - -Every escalation should follow this structure: - -``` -ESCALATION: [One-line summary] -Severity: [Critical / High / Medium] -Target: [Engineering / Product / Security / Leadership] - -IMPACT -- Customers affected: [Number and names if relevant] -- Workflow impact: [What's broken for them] -- Revenue at risk: [If applicable] -- SLA status: [Within SLA / At risk / Breached] - -ISSUE DESCRIPTION -[3-5 sentences: what's happening, when it started, -how it manifests, scope of impact] - -REPRODUCTION STEPS (for bugs) -1. [Step] -2. [Step] -3. [Step] -Expected: [X] -Actual: [Y] -Environment: [Details] - -WHAT'S BEEN TRIED -1. [Action] → [Result] -2. [Action] → [Result] -3. [Action] → [Result] - -CUSTOMER COMMUNICATION -- Last update: [Date — what was said] -- Customer expectation: [What they expect and by when] -- Escalation risk: [Will they escalate further?] - -WHAT'S NEEDED -- [Specific ask: investigate, fix, decide, approve] -- Deadline: [Date/time] - -SUPPORTING CONTEXT -- [Ticket links] -- [Internal threads] -- [Logs or screenshots] -``` - ## Business Impact Assessment When escalating, quantify impact where possible: @@ -168,9 +237,7 @@ When de-escalating: - Inform the customer of the resolution - Document what was learned for future reference -## Using This Skill - -When handling escalations: +## Escalation Best Practices 1. Always quantify impact — vague escalations get deprioritized 2. 
Include reproduction steps for bugs — this is the #1 thing engineering needs diff --git a/customer-support/skills/customer-research/SKILL.md b/customer-support/skills/customer-research/SKILL.md index 50f1e087..eaa5e9b9 100644 --- a/customer-support/skills/customer-research/SKILL.md +++ b/customer-support/skills/customer-research/SKILL.md @@ -1,93 +1,151 @@ --- name: customer-research -description: Research customer questions by searching across documentation, knowledge bases, and connected sources, then synthesize a confidence-scored answer. Use when a customer asks a question you need to investigate, when building background on a customer situation, or when you need account context. +description: Multi-source research on a customer question or topic with source attribution. Use when a customer asks something you need to look up, investigating whether a bug has been reported before, checking what was previously told to a specific account, or gathering background before drafting a response. +argument-hint: "<question or topic>" --- -# Customer Research Skill +# /customer-research -You are an expert at conducting multi-source research to answer customer questions, investigate account contexts, and build comprehensive understanding of customer situations. You prioritize authoritative sources, synthesize across inputs, and clearly communicate confidence levels. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). -## Multi-Source Research Methodology +Multi-source research on a customer question, product topic, or account-related inquiry. Synthesizes findings from all available sources with clear attribution and confidence scoring. -### Research Process +## Usage + +``` +/customer-research <question or topic> +``` + +## Workflow + +### 1.
Parse the Research Request + +Identify what type of research is needed: +- **Customer question**: Something a customer has asked that needs an answer (e.g., "Does our product support SSO with Okta?") +- **Issue investigation**: Background on a reported problem (e.g., "Has this bug been reported before? What's the known workaround?") +- **Account context**: History with a specific customer (e.g., "What did we tell Acme Corp last time they asked about this?") +- **Topic research**: General topic relevant to support work (e.g., "Best practices for webhook retry logic") -**Step 1: Understand the Question** Before searching, clarify what you're actually trying to find: - Is this a factual question with a definitive answer? - Is this a contextual question requiring multiple perspectives? - Is this an exploratory question where the scope is still being defined? - Who is the audience for the answer (internal team, customer, leadership)? -**Step 2: Plan Your Search Strategy** -Map the question to likely source types: -- Product capability question → documentation, knowledge base, product specs -- Customer context question → CRM, email history, meeting notes, chat -- Process/policy question → internal wikis, runbooks, policy docs -- Technical question → documentation, engineering resources, support tickets -- Market/competitive question → web research, analyst reports, competitive intel +### 2. Search Available Sources -**Step 3: Execute Searches Systematically** -Search sources in priority order (see below). Don't stop at the first result — cross-reference across sources. +Search systematically through the source tiers below, adapting to what is connected. Don't stop at the first result — cross-reference across sources. -**Step 4: Synthesize and Validate** -Combine findings, check for contradictions, and assess overall confidence. 
+**Tier 1 — Official Internal Sources (highest confidence):** +- ~~knowledge base (if connected): product docs, runbooks, FAQs, policy documents +- ~~cloud storage: internal documents, specs, guides, past research +- Product roadmap (internal-facing): feature timelines, priorities -**Step 5: Present with Attribution** -Always cite sources and note confidence level. +**Tier 2 — Organizational Context:** +- ~~CRM notes: account notes, activity history, previous answers, opportunity details +- ~~support platform (if connected): previous resolutions, known issues, workarounds +- Meeting notes: previous discussions, decisions, commitments -## Source Prioritization +**Tier 3 — Team Communications:** +- ~~chat: search for the topic in relevant channels; check if teammates have discussed or answered this before +- ~~email: search for previous correspondence on this topic +- Calendar notes: meeting agendas and post-meeting notes -Search sources in this order, with decreasing authority: +**Tier 4 — External Sources:** +- Web search: official documentation, blog posts, community forums +- Public knowledge bases, help centers, release notes +- Third-party documentation: integration partners, complementary tools -### Tier 1 — Official Internal Sources (Highest Confidence) -These are authoritative and should be trusted unless outdated. +**Tier 5 — Inferred or Analogical (use when direct sources don't yield answers):** +- Similar situations: how similar questions were handled before +- Analogous customers: what worked for comparable accounts +- General best practices: industry standards and norms -- **Product documentation**: Official docs, specs, API references -- **Knowledge base / wiki**: Internal articles, runbooks, FAQs -- **Policy documents**: Official policies, terms, SLAs -- **Product roadmap** (internal-facing): Feature timelines, priorities +### 3. 
Synthesize Findings -Confidence level: **High** (unless clearly outdated — check dates) +Compile results into a structured research brief: -### Tier 2 — Organizational Context -These provide context but may reflect one perspective. +``` +## Research: [Question/Topic] -- **CRM records**: Account notes, activity history, opportunity details -- **Support tickets**: Previous resolutions, known issues, workarounds -- **Internal documents** (Drive, shared folders): Specs, plans, analyses -- **Meeting notes**: Previous discussions, decisions, commitments +### Answer +[Clear, direct answer to the question — lead with the bottom line] -Confidence level: **Medium-High** (may be subjective or incomplete) +**Confidence:** [High / Medium / Low] +[Explain what drives the confidence level] -### Tier 3 — Team Communications -Informal but often contain the most recent information. +### Key Findings -- **Chat history**: Team discussions, quick answers, context -- **Email threads**: Customer correspondence, internal discussions -- **Calendar notes**: Meeting agendas and post-meeting notes +**From [Source 1]:** +- [Finding with specific detail] +- [Finding with specific detail] -Confidence level: **Medium** (informal, may be out of context, could be speculative) +**From [Source 2]:** +- [Finding with specific detail] -### Tier 4 — External Sources -Useful for general knowledge but not authoritative for internal matters. +### Context & Nuance +[Any caveats, edge cases, or additional context that matters] -- **Web search**: Official websites, blog posts, industry resources -- **Community forums**: User discussions, workarounds, experiences -- **Third-party documentation**: Integration partners, complementary tools -- **News and analyst reports**: Market context, competitive intelligence +### Sources +1. [Source name/link] — [what it contributed] +2. [Source name/link] — [what it contributed] +3. 
[Source name/link] — [what it contributed] + +### Gaps & Unknowns +- [What couldn't be confirmed] +- [What might need verification from a subject matter expert] + +### Recommended Next Steps +- [Action if the answer needs to go to a customer] +- [Action if further research is needed] +- [Who to consult for verification if needed] +``` + +### 4. Handle Insufficient Sources + +If no connected sources yield results: -Confidence level: **Low-Medium** (may not reflect your specific situation) +- Perform web research on the topic +- Ask the user for internal context: + - "I couldn't find this in connected sources. Do you have internal docs or knowledge base articles about this?" + - "Has your team discussed this topic before? Any ~~chat channels I should check?" + - "Is there a subject matter expert who would know the answer?" +- Be transparent about limitations: + - "This answer is based on web research only — please verify against your internal documentation before sharing with the customer." + - "I found a possible answer but couldn't confirm it from an authoritative internal source." -### Tier 5 — Inferred or Analogical -Use when direct sources don't yield answers. +### 5. Customer-Facing Considerations -- **Similar situations**: How similar questions were handled before -- **Analogous customers**: What worked for comparable accounts -- **General best practices**: Industry standards and norms +If the research is to answer a customer question: -Confidence level: **Low** (clearly flag as inference, not fact) +- Flag if the answer involves product roadmap, pricing, legal, or security topics that may need review +- Note if the answer differs from what may have been communicated previously +- Suggest appropriate caveats for the customer-facing response +- Offer to draft the customer response: "Want me to draft a response to the customer based on these findings?" -## Answer Synthesis +### 6. 
Knowledge Capture + +After research is complete, suggest capturing the knowledge: + +- "Should I save these findings to your knowledge base for future reference?" +- "Want me to create a FAQ entry based on this research?" +- "This might be worth documenting — should I draft a runbook entry?" + +This helps build institutional knowledge and reduces duplicate research effort across the team. + +--- + +## Source Prioritization and Confidence + +### Confidence by Source Tier + +| Tier | Source Type | Confidence | Notes | +|------|-------------|------------|-------| +| 1 | Official internal docs, KB, policies | **High** | Trust unless clearly outdated — check dates | +| 2 | CRM, support tickets, meeting notes | **Medium-High** | May be subjective or incomplete | +| 3 | Chat, email, calendar notes | **Medium** | Informal, may be out of context or speculative | +| 4 | Web, forums, third-party docs | **Low-Medium** | May not reflect your specific situation | +| 5 | Inference, analogies, best practices | **Low** | Clearly flag as inference, not fact | ### Confidence Levels @@ -125,26 +183,6 @@ When sources disagree: 4. Recommend how to resolve the discrepancy 5. If going to a customer: use the most conservative/cautious answer until resolved -### Synthesis Structure - -``` -**Direct Answer:** [Bottom-line answer — lead with this] - -**Confidence:** [High / Medium / Low] - -**Supporting Evidence:** -- [Source 1]: [What it says] -- [Source 2]: [What it says — corroborates or adds nuance] - -**Caveats:** -- [Any limitations or conditions on the answer] -- [Anything that might change the answer in specific contexts] - -**Recommendation:** -- [Whether this is ready to share with customers] -- [Any verification steps recommended] -``` - ## When to Escalate vs. 
Answer Directly ### Answer Directly When: @@ -174,7 +212,7 @@ When sources disagree: ## Research Documentation for Team Knowledge Base -After completing research, capture the knowledge for future use: +After completing research, capture the knowledge for future use. ### When to Document: - Question has come up before or likely will again @@ -212,15 +250,3 @@ After completing research, capture the knowledge for future use: - Review and update entries quarterly - Archive entries that are no longer relevant - Tag entries for searchability (by topic, product area, customer segment) - -## Using This Skill - -When conducting customer research: - -1. Always start by clarifying what you're actually looking for -2. Search systematically — don't skip tiers even if you think you know where the answer is -3. Cross-reference findings across multiple sources -4. Be transparent about confidence levels — never present uncertain information as fact -5. When in doubt about whether to share with a customer, err on the side of verifying first -6. Document your research for future team benefit -7. If the research reveals a gap in your knowledge base, flag it for documentation diff --git a/customer-support/skills/response-drafting/SKILL.md b/customer-support/skills/draft-response/SKILL.md similarity index 60% rename from customer-support/skills/response-drafting/SKILL.md rename to customer-support/skills/draft-response/SKILL.md index 923494b3..3fd6fdc0 100644 --- a/customer-support/skills/response-drafting/SKILL.md +++ b/customer-support/skills/draft-response/SKILL.md @@ -1,11 +1,118 @@ --- -name: response-drafting -description: Draft professional, empathetic customer-facing responses adapted to the situation, urgency, and channel. Use when responding to customer tickets, escalations, outage notifications, bug reports, feature requests, or any customer-facing communication. 
+name: draft-response +description: Draft a professional customer-facing response tailored to the situation and relationship. Use when answering a product question, responding to an escalation or outage, delivering bad news like a delay or won't-fix, declining a feature request, or replying to a billing issue. +argument-hint: "<situation description>" --- -# Response Drafting Skill +# /draft-response -You are an expert at drafting professional, empathetic, and effective customer-facing communications. You adapt tone, structure, and content based on the situation, relationship stage, stakeholder level, and communication channel. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Draft a professional, customer-facing response tailored to the situation, customer relationship, and communication context. + +## Usage + +``` +/draft-response <situation description> +``` + +Examples: +- `/draft-response Acme Corp is asking when the new dashboard feature will ship` +- `/draft-response Customer escalation — their integration has been down for 2 days` +- `/draft-response Responding to a feature request we won't be building` +- `/draft-response Customer hit a billing error and wants a resolution ASAP` + +## Workflow + +### 1. Understand the Context + +Parse the user's input to determine: + +- **Customer**: Who is the communication for? Look up account context if available. +- **Situation type**: Question, issue, escalation, announcement, negotiation, bad news, good news, follow-up +- **Urgency**: Is this time-sensitive? How long has the customer been waiting? +- **Channel**: Email, support ticket, chat, or other (adjust formality accordingly) +- **Relationship stage**: New customer, established, frustrated/escalated +- **Stakeholder level**: End user, manager, executive, technical, business + +### 2.
Research Context + +Gather relevant background from available sources: + +**~~email:** +- Previous correspondence with this customer on this topic +- Any commitments or timelines previously shared +- Tone and style of the existing thread + +**~~chat:** +- Internal discussions about this customer or topic +- Any guidance from product, engineering, or leadership +- Similar situations and how they were handled + +**~~CRM (if connected):** +- Account details and plan level +- Contact information and key stakeholders +- Previous escalations or sensitive issues + +**~~support platform (if connected):** +- Related tickets and their resolution +- Known issues or workarounds +- SLA status and response time commitments + +**~~knowledge base:** +- Official documentation or help articles to reference +- Product roadmap information (if shareable) +- Policy or process documentation + +### 3. Generate the Draft + +Produce a response tailored to the situation: + +``` +## Draft Response + +**To:** [Customer contact name] +**Re:** [Subject/topic] +**Channel:** [Email / Ticket / Chat] +**Tone:** [Empathetic / Professional / Technical / Celebratory / Candid] + +--- + +[Draft response text] + +--- + +### Notes for You (internal — do not send) +- **Why this approach:** [Rationale for tone and content choices] +- **Things to verify:** [Any facts or commitments to confirm before sending] +- **Risk factors:** [Anything sensitive about this response] +- **Follow-up needed:** [Actions to take after sending] +- **Escalation note:** [If this should be reviewed by someone else first] +``` + +### 4. 
Run Quality Checks + +Before presenting the draft, verify: + +- [ ] Tone matches the situation and relationship +- [ ] No commitments beyond what's authorized +- [ ] No product roadmap details that shouldn't be shared externally +- [ ] Accurate references to previous conversations +- [ ] Clear next steps and ownership +- [ ] Appropriate for the stakeholder level (not too technical for executives, not too vague for engineers) +- [ ] Length is appropriate for the channel (shorter for chat, fuller for email) + +### 5. Offer Iterations + +After presenting the draft: +- "Want me to adjust the tone? (more formal, more casual, more empathetic, more direct)" +- "Should I add or remove any specific points?" +- "Want me to make this shorter/longer?" +- "Should I draft a version for a different stakeholder?" +- "Want me to draft the internal escalation note as well?" +- "Should I prepare a follow-up message to send after [X days] if no response?" + +--- ## Customer Communication Best Practices @@ -21,7 +128,7 @@ You are an expert at drafting professional, empathetic, and effective customer-f ### Response Structure -**For most customer communications, follow this structure:** +For most customer communications, follow this structure: ``` 1. 
Acknowledgment / Context (1-2 sentences) @@ -104,6 +211,48 @@ You are an expert at drafting professional, empathetic, and effective customer-f - CC people unnecessarily — only include those who need to be in the conversation - Use exclamation marks excessively (one per email max, if any) +## Situation-Specific Approaches + +**Answering a product question:** +- Lead with the direct answer +- Provide relevant documentation links +- Offer to connect them with the right resource if needed +- If you don't know the answer: say so honestly, commit to finding out, give a timeline + +**Responding to an issue or bug:** +- Acknowledge the impact on their work +- State what you know about the issue and its status +- Provide workaround if available +- Set expectations for resolution timeline +- Commit to updates at regular intervals + +**Handling an escalation:** +- Acknowledge the severity and their frustration +- Take ownership (no deflecting or excuse-making) +- Provide a clear action plan with timeline +- Identify the person accountable for resolution +- Offer a meeting or call if appropriate for the severity + +**Delivering bad news (feature sunset, delay, can't-fix):** +- Be direct — don't bury the news +- Explain the reasoning honestly +- Acknowledge the impact on them specifically +- Offer alternatives or mitigation +- Provide a clear path forward + +**Sharing good news (feature launch, milestone, recognition):** +- Lead with the positive outcome +- Connect it to their specific goals or use case +- Suggest next steps to capitalize on the good news +- Express genuine enthusiasm + +**Declining a request (feature request, discount, exception):** +- Acknowledge the request and its reasoning +- Be honest about the decision +- Explain the why without being dismissive +- Offer alternatives when possible +- Leave the door open for future conversation + ## Response Templates for Common Scenarios ### Acknowledging a Bug Report @@ -229,26 +378,6 @@ Best, [Your name] ``` -## 
Personalization Based on Customer Context - -### New Customer -- Include more context and explanation -- Reference onboarding milestones and goals -- Proactively share resources and best practices -- Introduce relevant self-service resources - -### Established Customer -- Reference their history and previous interactions -- Skip introductory explanations they already know -- Acknowledge their experience with the product -- Be more direct and efficient - -### Frustrated or Escalated Customer -- Increase empathy and acknowledgment -- Focus on solving their problem, not deflecting -- Provide concrete action plans with timelines -- Offer direct escalation paths if needed - ## Follow-up and Escalation Guidance ### Follow-up Cadence @@ -287,16 +416,3 @@ What I've tried: [Actions taken so far] What I need: [Specific help or decision needed] Deadline: [When this needs to be resolved by] ``` - -## Using This Skill - -When drafting customer responses: - -1. Identify the situation type first (good news, bad news, technical, etc.) -2. Consider the customer's relationship stage and stakeholder level -3. Match your tone to the situation — empathy first for problems, enthusiasm for wins -4. Be specific with dates, names, and commitments -5. Always include a clear next step -6. Read the draft from the customer's perspective before finalizing -7. If the response involves commitments or sensitive topics, get internal alignment first -8. 
Keep it concise — every sentence should earn its place diff --git a/customer-support/skills/knowledge-management/SKILL.md b/customer-support/skills/kb-article/SKILL.md similarity index 72% rename from customer-support/skills/knowledge-management/SKILL.md rename to customer-support/skills/kb-article/SKILL.md index 58df8277..d9f3274c 100644 --- a/customer-support/skills/knowledge-management/SKILL.md +++ b/customer-support/skills/kb-article/SKILL.md @@ -1,11 +1,88 @@ --- -name: knowledge-management -description: Write and maintain knowledge base articles from resolved support issues. Use when a ticket has been resolved and the solution should be documented, when updating existing KB articles, or when creating how-to guides, troubleshooting docs, or FAQ entries. +name: kb-article +description: Draft a knowledge base article from a resolved issue or common question. Use when a ticket resolution is worth documenting for self-service, the same question keeps coming up, a workaround needs to be published, or a known issue should be communicated to customers. +argument-hint: "<resolved issue or topic>" --- -# Knowledge Management Skill +# /kb-article -You are an expert at creating, organizing, and maintaining support knowledge base content. You write articles that are searchable, scannable, and solve customer problems on the first read. You understand that every good KB article reduces future ticket volume. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Draft a publish-ready knowledge base article from a resolved support issue, common question, or documented workaround. Structures the content for searchability and self-service.
+ +## Usage + +``` +/kb-article <resolved issue or topic> +``` + +Examples: +- `/kb-article How to configure SSO with Okta — resolved this for 3 customers last month` +- `/kb-article Ticket #4521 — customer couldn't export data over 10k rows` +- `/kb-article Common question: how to set up webhook notifications` +- `/kb-article Known issue: dashboard charts not loading on Safari 16` + +## Workflow + +### 1. Understand the Source Material + +Parse the input to identify: + +- **What was the problem?** The original issue, question, or error +- **What was the solution?** The resolution, workaround, or answer +- **Who does this affect?** User type, plan level, or configuration +- **How common is this?** One-off or recurring issue +- **What article type fits best?** How-to, troubleshooting, FAQ, known issue, or reference (see article types below) + +If a ticket reference is provided, look up the full context: + +- **~~support platform**: Pull the ticket thread, resolution, and any internal notes +- **~~knowledge base**: Check if a similar article already exists (update vs. create new) +- **~~project tracker**: Check if there's a related bug or feature request + +### 2. Draft the Article + +Using the article structure, formatting standards, and searchability best practices below: + +- Follow the template for the chosen article type (how-to, troubleshooting, FAQ, known issue, or reference) +- Apply the searchability best practices: customer-language title, plain-language opening sentence, exact error messages, common synonyms +- Keep it scannable: headers, numbered steps, short paragraphs + +### 3.
Generate the Article + +Present the draft with metadata: + +``` +## KB Article Draft + +**Title:** [Article title] +**Type:** [How-to / Troubleshooting / FAQ / Known Issue / Reference] +**Category:** [Product area or topic] +**Tags:** [Searchable tags] +**Audience:** [All users / Admins / Developers / Specific plan] + +--- + +[Full article content — using the appropriate template below] + +--- + +### Publishing Notes +- **Source:** [Ticket #, customer conversation, or internal discussion] +- **Existing articles to update:** [If this overlaps with existing content] +- **Review needed from:** [SME or team if technical accuracy needs verification] +- **Suggested review date:** [When to revisit for accuracy] +``` + +### 4. Offer Next Steps + +After generating the article: +- "Want me to check if a similar article already exists in your ~~knowledge base?" +- "Should I adjust the technical depth for a different audience?" +- "Want me to draft a companion article (e.g., a how-to to go with this troubleshooting guide)?" +- "Should I create an internal-only version with additional technical detail?" + +--- ## Article Structure and Formatting Standards @@ -61,7 +138,7 @@ Start every article with a sentence that restates the problem or task in plain l - **FAQ**: "[Question in the customer's words]? Here's the answer." - **Known issue**: "Some users are experiencing [symptom]. Here's what we know and how to work around it." -## Common Article Types +## Article Type Templates ### How-to Articles @@ -263,9 +340,7 @@ Billing & Account - **Use relative links** within the KB — they survive restructuring better than absolute URLs - **Avoid circular links** — if A links to B, B shouldn't link back to A unless both are genuinely useful entry points -## Using This Skill - -When creating and maintaining KB content: +## KB Writing Best Practices 1. Write for the customer who is frustrated and searching for an answer — be clear, direct, and helpful 2. 
Every article should be findable through search using the words a customer would type diff --git a/customer-support/skills/ticket-triage/SKILL.md b/customer-support/skills/ticket-triage/SKILL.md index 2d60c70b..d5ebee41 100644 --- a/customer-support/skills/ticket-triage/SKILL.md +++ b/customer-support/skills/ticket-triage/SKILL.md @@ -1,11 +1,104 @@ --- name: ticket-triage -description: Triage incoming support tickets by categorizing issues, assigning priority (P1-P4), and recommending routing. Use when a new ticket or customer issue comes in, when assessing severity, or when deciding which team should handle an issue. +description: Triage and prioritize a support ticket or customer issue. Use when a new ticket comes in and needs categorization, assigning P1-P4 priority, deciding which team should handle it, or checking whether it's a duplicate or known issue before routing. +argument-hint: "" --- -# Ticket Triage Skill +# /ticket-triage -You are an expert at rapidly categorizing, prioritizing, and routing customer support tickets. You assess issues systematically, identify urgency and impact, and ensure tickets reach the right team with the right context. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Categorize, prioritize, and route an incoming support ticket or customer issue. Produces a structured triage assessment with a suggested initial response. + +## Usage + +``` +/ticket-triage +``` + +Examples: +- `/ticket-triage Customer says their dashboard has been showing a blank page since this morning` +- `/ticket-triage "I was charged twice for my subscription this month"` +- `/ticket-triage User can't connect their SSO — getting a 403 error on the callback URL` +- `/ticket-triage Feature request: they want to export reports as PDF` + +## Workflow + +### 1. Parse the Issue + +Read the input and extract: + +- **Core problem**: What is the customer actually experiencing? 
+- **Symptoms**: What specific behavior or error are they seeing? +- **Customer context**: Who is this? Any account details, plan level, or history available? +- **Urgency signals**: Are they blocked? Is this production? How many users affected? +- **Emotional state**: Frustrated, confused, matter-of-fact, escalating? + +### 2. Categorize and Prioritize + +Using the category taxonomy and priority framework below: + +- Assign a **primary category** (bug, how-to, feature request, billing, account, integration, security, data, performance) and an optional secondary category +- Assign a **priority** (P1–P4) based on impact and urgency +- Identify the **product area** the issue maps to + +### 3. Check for Duplicates and Known Issues + +Before routing, check available sources: + +- **~~support platform**: Search for similar open or recently resolved tickets +- **~~knowledge base**: Check for known issues or existing documentation +- **~~project tracker**: Check if there's an existing bug report or feature request + +Apply the duplicate detection process below. + +### 4. Determine Routing + +Using the routing rules below, recommend which team or queue should handle this based on category and complexity. + +### 5. 
Generate Triage Output + +``` +## Triage: [One-line issue summary] + +**Category:** [Primary] / [Secondary if applicable] +**Priority:** [P1-P4] — [Brief justification] +**Product area:** [Area/team] + +### Issue Summary +[2-3 sentence summary of what the customer is experiencing] + +### Key Details +- **Customer:** [Name/account if known] +- **Impact:** [Who and what is affected] +- **Workaround:** [Available / Not available / Unknown] +- **Related tickets:** [Links to similar issues if found] +- **Known issue:** [Yes — link / No / Checking] + +### Routing Recommendation +**Route to:** [Team or queue] +**Why:** [Brief reasoning] + +### Suggested Initial Response +[Draft first response to the customer — acknowledge the issue, +set expectations, provide workaround if available. +Use the auto-response templates below as a starting point.] + +### Internal Notes +- [Any additional context for the agent picking this up] +- [Reproduction hints if it's a bug] +- [Escalation triggers to watch for] +``` + +### 6. Offer Next Steps + +After presenting the triage: +- "Want me to draft a full response to the customer?" +- "Should I search for more context on this issue?" +- "Want me to check if this is a known bug in the tracker?" +- "Should I escalate this? I can package it with /customer-escalation." + +--- ## Category Taxonomy @@ -171,9 +264,7 @@ We'll follow up with you within [timeframe] with our findings. [protective action]."] ``` -## Using This Skill - -When triaging tickets: +## Triage Best Practices 1. Read the full ticket before categorizing — context in later messages often changes the assessment 2. 
Categorize by **root cause**, not just the symptom described diff --git a/data/.claude-plugin/plugin.json b/data/.claude-plugin/plugin.json index b8b97ebb..fa4f3168 100644 --- a/data/.claude-plugin/plugin.json +++ b/data/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "data", - "version": "1.0.0", + "version": "1.1.0", "description": "Write SQL, explore datasets, and generate insights faster. Build visualizations and dashboards, and turn raw data into clear stories for stakeholders.", "author": { "name": "Anthropic" diff --git a/data/.mcp.json b/data/.mcp.json index a5ea8fe2..d0d6f2ac 100644 --- a/data/.mcp.json +++ b/data/.mcp.json @@ -20,9 +20,21 @@ "type": "http", "url": "https://mcp.amplitude.com/mcp" }, + "amplitude-eu": { + "type": "http", + "url": "https://mcp.eu.amplitude.com/mcp" + }, "atlassian": { "type": "http", "url": "https://mcp.atlassian.com/v1/mcp" + }, + "definite": { + "type": "http", + "url": "https://api.definite.app/v3/mcp/http" + }, + "mixpanel": { + "type": "http", + "url": "https://mcp.mixpanel.com/mcp" } } } diff --git a/data/CONNECTORS.md b/data/CONNECTORS.md index 927afe8b..2c0f2694 100644 --- a/data/CONNECTORS.md +++ b/data/CONNECTORS.md @@ -10,9 +10,9 @@ Plugins are **tool-agnostic** — they describe workflows in terms of categories | Category | Placeholder | Included servers | Other options | |----------|-------------|-----------------|---------------| -| Data warehouse | `~~data warehouse` | Snowflake\*, Databricks\*, BigQuery | Redshift, PostgreSQL, MySQL | +| Data warehouse | `~~data warehouse` | Snowflake\*, Databricks\*, BigQuery, Definite | Redshift, PostgreSQL, MySQL | | Notebook | `~~notebook` | Hex | Jupyter, Deepnote, Observable | -| Product analytics | `~~product analytics` | Amplitude | Mixpanel, Heap | +| Product analytics | `~~product analytics` | Amplitude, Mixpanel | Heap | | Project tracker | `~~project tracker` | Atlassian (Jira/Confluence) | Linear, Asana | \* Placeholder — MCP URL not yet configured diff --git 
a/data/README.md b/data/README.md index 735ba483..fc451a9f 100644 --- a/data/README.md +++ b/data/README.md @@ -110,8 +110,8 @@ Claude: [Reviews methodology] → [Checks for survivorship bias in churn analysi This plugin works best when connected to your data infrastructure. Add MCP servers for: -- **Data Warehouse**: Snowflake, Databricks, BigQuery, or any SQL-compatible database -- **Analytics/BI**: Amplitude, Looker, Tableau, or similar +- **Data Warehouse**: Snowflake, Databricks, BigQuery, Definite, or any SQL-compatible database +- **Analytics/BI**: Amplitude, Looker, Mixpanel, Tableau, or similar - **Notebooks**: Jupyter, Hex, or similar - **Spreadsheets**: Google Sheets, Excel - **Data Orchestration**: Airflow, dbt, Dagster, Prefect diff --git a/data/commands/build-dashboard.md b/data/commands/build-dashboard.md deleted file mode 100644 index 7d2114f9..00000000 --- a/data/commands/build-dashboard.md +++ /dev/null @@ -1,192 +0,0 @@ ---- -description: Build an interactive HTML dashboard with charts, filters, and tables -argument-hint: " [data source]" ---- - -# /build-dashboard - Build Interactive Dashboards - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Build a self-contained interactive HTML dashboard with charts, filters, tables, and professional styling. Opens directly in a browser -- no server or dependencies required. - -## Usage - -``` -/build-dashboard [data source] -``` - -## Workflow - -### 1. Understand the Dashboard Requirements - -Determine: - -- **Purpose**: Executive overview, operational monitoring, deep-dive analysis, team reporting -- **Audience**: Who will use this dashboard? -- **Key metrics**: What numbers matter most? -- **Dimensions**: What should users be able to filter or slice by? -- **Data source**: Live query, pasted data, CSV file, or sample data - -### 2. Gather the Data - -**If data warehouse is connected:** -1. Query the necessary data -2. 
Embed the results as JSON within the HTML file - -**If data is pasted or uploaded:** -1. Parse and clean the data -2. Embed as JSON in the dashboard - -**If working from a description without data:** -1. Create a realistic sample dataset matching the described schema -2. Note in the dashboard that it uses sample data -3. Provide instructions for swapping in real data - -### 3. Design the Dashboard Layout - -Follow a standard dashboard layout pattern: - -``` -┌──────────────────────────────────────────────────┐ -│ Dashboard Title [Filters ▼] │ -├────────────┬────────────┬────────────┬───────────┤ -│ KPI Card │ KPI Card │ KPI Card │ KPI Card │ -├────────────┴────────────┼────────────┴───────────┤ -│ │ │ -│ Primary Chart │ Secondary Chart │ -│ (largest area) │ │ -│ │ │ -├─────────────────────────┴────────────────────────┤ -│ │ -│ Detail Table (sortable, scrollable) │ -│ │ -└──────────────────────────────────────────────────┘ -``` - -**Adapt the layout to the content:** -- 2-4 KPI cards at the top for headline numbers -- 1-3 charts in the middle section for trends and breakdowns -- Optional detail table at the bottom for drill-down data -- Filters in the header or sidebar depending on complexity - -### 4. 
Build the HTML Dashboard - -Generate a single self-contained HTML file that includes: - -**Structure (HTML):** -- Semantic HTML5 layout -- Responsive grid using CSS Grid or Flexbox -- Filter controls (dropdowns, date pickers, toggles) -- KPI cards with values and labels -- Chart containers -- Data table with sortable headers - -**Styling (CSS):** -- Professional color scheme (clean whites, grays, with accent colors for data) -- Card-based layout with subtle shadows -- Consistent typography (system fonts for fast loading) -- Responsive design that works on different screen sizes -- Print-friendly styles - -**Interactivity (JavaScript):** -- Chart.js for interactive charts (included via CDN) -- Filter dropdowns that update all charts and tables simultaneously -- Sortable table columns -- Hover tooltips on charts -- Number formatting (commas, currency, percentages) - -**Data (embedded JSON):** -- All data embedded directly in the HTML as JavaScript variables -- No external data fetches required -- Dashboard works completely offline - -### 5. Implement Chart Types - -Use Chart.js for all charts. Common dashboard chart patterns: - -- **Line chart**: Time series trends -- **Bar chart**: Category comparisons -- **Doughnut chart**: Composition (when <6 categories) -- **Stacked bar**: Composition over time -- **Mixed (bar + line)**: Volume with rate overlay - -### 6. Add Interactivity - -**Filters:** -```javascript -// All filters update a central filter state -// Charts and tables re-render when filters change -function applyFilters() { - const filtered = data.filter(row => matchesFilters(row)); - updateKPIs(filtered); - updateCharts(filtered); - updateTable(filtered); -} -``` - -**Table sorting:** -- Click column headers to sort ascending/descending -- Visual indicator for current sort column and direction - -**Tooltips:** -- Charts show detailed values on hover -- KPI cards show comparison to previous period - -### 7. Save and Open - -1. 
Save the dashboard as an HTML file with a descriptive name (e.g., `sales_dashboard.html`) -2. Open it in the user's default browser -3. Confirm it renders correctly -4. Provide instructions for updating data or customizing - -## Output Template - -The generated HTML file follows this structure: - -```html - - - - - - [Dashboard Title] - - - - -
-
-
-
-
-
- - - -``` - -## Examples - -``` -/build-dashboard Monthly sales dashboard with revenue trend, top products, and regional breakdown. Data is in the orders table. -``` - -``` -/build-dashboard Here's our support ticket data [pastes CSV]. Build a dashboard showing volume by priority, response time trends, and resolution rates. -``` - -``` -/build-dashboard Create a template executive dashboard for a SaaS company showing MRR, churn, new customers, and NPS. Use sample data. -``` - -## Tips - -- Dashboards are fully self-contained HTML files -- share them with anyone by sending the file -- For real-time dashboards, consider connecting to a BI tool instead. These dashboards are point-in-time snapshots -- Request "dark mode" or "presentation mode" for different styling -- You can request a specific color scheme to match your brand diff --git a/data/commands/explore-data.md b/data/commands/explore-data.md deleted file mode 100644 index eb14f49a..00000000 --- a/data/commands/explore-data.md +++ /dev/null @@ -1,115 +0,0 @@ ---- -description: Profile and explore a dataset to understand its shape, quality, and patterns -argument-hint: "" ---- - -# /explore-data - Profile and Explore a Dataset - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Generate a comprehensive data profile for a table or uploaded file. Understand its shape, quality, and patterns before diving into analysis. - -## Usage - -``` -/explore-data -``` - -## Workflow - -### 1. Access the Data - -**If a data warehouse MCP server is connected:** - -1. Resolve the table name (handle schema prefixes, suggest matches if ambiguous) -2. Query table metadata: column names, types, descriptions if available -3. Run profiling queries against the live data - -**If a file is provided (CSV, Excel, Parquet, JSON):** - -1. Read the file and load into a working dataset -2. Infer column types from the data - -**If neither:** - -1. 
Ask the user to provide a table name (with their warehouse connected) or upload a file -2. If they describe a table schema, provide guidance on what profiling queries to run - -### 2. Generate Data Profile - -Run the following profiling checks: - -**Table-level metrics:** -- Total row count -- Column count and types breakdown -- Approximate table size (if available from metadata) -- Date range coverage (min/max of date columns) - -**Column-level metrics for each column:** -- Data type (and whether it matches expected type) -- Null count and null rate (%) -- Distinct count and cardinality (distinct / total) -- For numeric columns: min, max, mean, median, stddev, percentiles (p25, p50, p75, p95, p99) -- For string columns: min/max length, most common values (top 10), empty string count -- For date/timestamp columns: min, max, distribution by time period -- For boolean columns: true/false/null distribution - -**Present the profile as a clean summary table**, grouped by column type (dimensions, metrics, dates, IDs). - -### 3. Identify Data Quality Issues - -Flag potential problems: - -- **High null rates**: Columns with >5% nulls (warn), >20% nulls (alert) -- **Low cardinality surprises**: Columns that should be high-cardinality but aren't (e.g., a "user_id" with only 50 distinct values) -- **High cardinality surprises**: Columns that should be categorical but have too many distinct values -- **Suspicious values**: Negative amounts where only positive expected, future dates in historical data, obviously placeholder values (e.g., "N/A", "TBD", "test", "999999") -- **Duplicate detection**: Check if there's a natural key and whether it has duplicates -- **Distribution skew**: Extremely skewed numeric distributions that could affect averages -- **Encoding issues**: Mixed case in categorical fields, trailing whitespace, inconsistent formats - -### 4. 
Suggest Interesting Dimensions and Metrics - -Based on the column profile, recommend: - -- **Best dimension columns** for slicing data (categorical columns with reasonable cardinality, 3-50 values) -- **Key metric columns** for measurement (numeric columns with meaningful distributions) -- **Time columns** suitable for trend analysis -- **Natural groupings** or hierarchies apparent in the data -- **Potential join keys** linking to other tables (ID columns, foreign keys) - -### 5. Recommend Follow-Up Analyses - -Suggest 3-5 specific analyses the user could run next: - -- "Trend analysis on [metric] by [time_column] grouped by [dimension]" -- "Distribution deep-dive on [skewed_column] to understand outliers" -- "Data quality investigation on [problematic_column]" -- "Correlation analysis between [metric_a] and [metric_b]" -- "Cohort analysis using [date_column] and [status_column]" - -## Output Format - -``` -## Data Profile: [table_name] - -### Overview -- Rows: 2,340,891 -- Columns: 23 (8 dimensions, 6 metrics, 4 dates, 5 IDs) -- Date range: 2021-03-15 to 2024-01-22 - -### Column Details -[summary table] - -### Data Quality Issues -[flagged issues with severity] - -### Recommended Explorations -[numbered list of suggested follow-up analyses] -``` - -## Tips - -- For very large tables (100M+ rows), profiling queries use sampling by default -- mention if you need exact counts -- If exploring a new dataset for the first time, this command gives you the lay of the land before writing specific queries -- The quality flags are heuristic -- not every flag is a real problem, but each is worth a quick look diff --git a/data/commands/validate.md b/data/commands/validate.md deleted file mode 100644 index 68e65c49..00000000 --- a/data/commands/validate.md +++ /dev/null @@ -1,168 +0,0 @@ ---- -description: QA an analysis before sharing -- methodology, accuracy, and bias checks -argument-hint: "" ---- - -# /validate - Validate Analysis Before Sharing - -> If you see unfamiliar 
placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Review an analysis for accuracy, methodology, and potential biases before sharing with stakeholders. Generates a confidence assessment and improvement suggestions. - -## Usage - -``` -/validate -``` - -The analysis can be: -- A document or report in the conversation -- A file (markdown, notebook, spreadsheet) -- SQL queries and their results -- Charts and their underlying data -- A description of methodology and findings - -## Workflow - -### 1. Review Methodology and Assumptions - -Examine: - -- **Question framing**: Is the analysis answering the right question? Could the question be interpreted differently? -- **Data selection**: Are the right tables/datasets being used? Is the time range appropriate? -- **Population definition**: Is the analysis population correctly defined? Are there unintended exclusions? -- **Metric definitions**: Are metrics defined clearly and consistently? Do they match how stakeholders understand them? -- **Baseline and comparison**: Is the comparison fair? Are time periods, cohort sizes, and contexts comparable? - -### 2. Check for Common Analytical Errors - -Systematically review for: - -**Data completeness:** -- Missing data that could skew results (e.g., nulls in key fields, missing time periods) -- Data freshness issues (is the most recent data actually complete or still loading?) -- Survivorship bias (are you only looking at entities that "survived" to the analysis date?) - -**Statistical issues:** -- Simpson's paradox (trend reverses when data is aggregated vs. segmented) -- Correlation presented as causation without supporting evidence -- Small sample sizes leading to unreliable conclusions -- Outliers disproportionately affecting averages (should medians be used instead?) 
-- Multiple testing / cherry-picking significant results - -**Aggregation errors:** -- Double-counting from improper joins (many-to-many explosions) -- Incorrect denominators in rate calculations -- Mixing granularity levels (e.g., user-level metrics averaged with account-level) -- Revenue recognized vs. billed vs. collected confusion - -**Time-related issues:** -- Seasonality not accounted for in comparisons -- Incomplete periods included in averages (e.g., partial month compared to full months) -- Timezone inconsistencies between data sources -- Look-ahead bias (using future information to explain past events) - -**Selection and scope:** -- Cherry-picked time ranges that favor a particular narrative -- Excluded segments without justification -- Changing definitions mid-analysis - -### 3. Verify Calculations and Aggregations - -Where possible, spot-check: - -- Recalculate a few key numbers independently -- Verify that subtotals sum to totals -- Check that percentages sum to 100% (or close to it) where expected -- Confirm that YoY/MoM comparisons use the correct base periods -- Validate that filters are applied consistently across all metrics - -### 4. Assess Visualizations - -If the analysis includes charts: - -- Do axes start at appropriate values (zero for bar charts)? -- Are scales consistent across comparison charts? -- Do chart titles accurately describe what's shown? -- Could the visualization mislead a quick reader? -- Are there truncated axes, inconsistent intervals, or 3D effects that distort perception? - -### 5. Evaluate Narrative and Conclusions - -Review whether: - -- Conclusions are supported by the data shown -- Alternative explanations are acknowledged -- Uncertainty is communicated appropriately -- Recommendations follow logically from findings -- The level of confidence matches the strength of evidence - -### 6. 
Suggest Improvements - -Provide specific, actionable suggestions: - -- Additional analyses that would strengthen the conclusions -- Caveats or limitations that should be noted -- Better visualizations or framings for key points -- Missing context that stakeholders would want - -### 7. Generate Confidence Assessment - -Rate the analysis on a 3-level scale: - -**Ready to share** -- Analysis is methodologically sound, calculations verified, caveats noted. Minor suggestions for improvement but nothing blocking. - -**Share with noted caveats** -- Analysis is largely correct but has specific limitations or assumptions that must be communicated to stakeholders. List the required caveats. - -**Needs revision** -- Found specific errors, methodological issues, or missing analyses that should be addressed before sharing. List the required changes with priority order. - -## Output Format - -``` -## Validation Report - -### Overall Assessment: [Ready to share | Share with caveats | Needs revision] - -### Methodology Review -[Findings about approach, data selection, definitions] - -### Issues Found -1. [Severity: High/Medium/Low] [Issue description and impact] -2. ... - -### Calculation Spot-Checks -- [Metric]: [Verified / Discrepancy found] -- ... - -### Visualization Review -[Any issues with charts or visual presentation] - -### Suggested Improvements -1. [Improvement and why it matters] -2. ... - -### Required Caveats for Stakeholders -- [Caveat that must be communicated] -- ... -``` - -## Examples - -``` -/validate Review this quarterly revenue analysis before I send it to the exec team: [analysis] -``` - -``` -/validate Check my churn analysis -- I'm comparing Q4 churn rates to Q3 but Q4 has a shorter measurement window -``` - -``` -/validate Here's a SQL query and its results for our conversion funnel. Does the logic look right? 
[query + results] -``` - -## Tips - -- Run /validate before any high-stakes presentation or decision -- Even quick analyses benefit from a sanity check -- it takes a minute and can save your credibility -- If the validation finds issues, fix them and re-validate -- Share the validation output alongside your analysis to build stakeholder confidence diff --git a/data/commands/analyze.md b/data/skills/analyze/SKILL.md similarity index 94% rename from data/commands/analyze.md rename to data/skills/analyze/SKILL.md index a451075e..31ce9c58 100644 --- a/data/commands/analyze.md +++ b/data/skills/analyze/SKILL.md @@ -1,11 +1,12 @@ --- -description: Answer data questions -- from quick lookups to full analyses +name: analyze +description: Answer data questions -- from quick lookups to full analyses. Use when looking up a single metric, investigating what's driving a trend or drop, comparing segments over time, or preparing a formal data report for stakeholders. argument-hint: "" --- # /analyze - Answer Data Questions -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Answer a data question, from a quick lookup to a full analysis to a formal report. diff --git a/data/skills/interactive-dashboard-builder/SKILL.md b/data/skills/build-dashboard/SKILL.md similarity index 76% rename from data/skills/interactive-dashboard-builder/SKILL.md rename to data/skills/build-dashboard/SKILL.md index 6ccc7fcb..030c8aad 100644 --- a/data/skills/interactive-dashboard-builder/SKILL.md +++ b/data/skills/build-dashboard/SKILL.md @@ -1,15 +1,132 @@ --- -name: interactive-dashboard-builder -description: Build self-contained interactive HTML dashboards with Chart.js, dropdown filters, and professional styling. 
Use when creating dashboards, building interactive reports, or generating shareable HTML files with charts and filters that work without a server. +name: build-dashboard +description: Build an interactive HTML dashboard with charts, filters, and tables. Use when creating an executive overview with KPI cards, turning query results into a shareable self-contained report, building a team monitoring snapshot, or needing multiple charts with filters in one browser-openable file. +argument-hint: " [data source]" --- -# Interactive Dashboard Builder Skill +# /build-dashboard - Build Interactive Dashboards -Patterns and techniques for building self-contained HTML/JS dashboards with Chart.js, filters, interactivity, and professional styling. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). -## HTML/JS Dashboard Patterns +Build a self-contained interactive HTML dashboard with charts, filters, tables, and professional styling. Opens directly in a browser -- no server or dependencies required. -### Base Template +## Usage + +``` +/build-dashboard [data source] +``` + +## Workflow + +### 1. Understand the Dashboard Requirements + +Determine: + +- **Purpose**: Executive overview, operational monitoring, deep-dive analysis, team reporting +- **Audience**: Who will use this dashboard? +- **Key metrics**: What numbers matter most? +- **Dimensions**: What should users be able to filter or slice by? +- **Data source**: Live query, pasted data, CSV file, or sample data + +### 2. Gather the Data + +**If data warehouse is connected:** +1. Query the necessary data +2. Embed the results as JSON within the HTML file + +**If data is pasted or uploaded:** +1. Parse and clean the data +2. Embed as JSON in the dashboard + +**If working from a description without data:** +1. Create a realistic sample dataset matching the described schema +2. Note in the dashboard that it uses sample data +3. 
Provide instructions for swapping in real data + +### 3. Design the Dashboard Layout + +Follow a standard dashboard layout pattern: + +``` +┌──────────────────────────────────────────────────┐ +│ Dashboard Title [Filters ▼] │ +├────────────┬────────────┬────────────┬───────────┤ +│ KPI Card │ KPI Card │ KPI Card │ KPI Card │ +├────────────┴────────────┼────────────┴───────────┤ +│ │ │ +│ Primary Chart │ Secondary Chart │ +│ (largest area) │ │ +│ │ │ +├─────────────────────────┴────────────────────────┤ +│ │ +│ Detail Table (sortable, scrollable) │ +│ │ +└──────────────────────────────────────────────────┘ +``` + +**Adapt the layout to the content:** +- 2-4 KPI cards at the top for headline numbers +- 1-3 charts in the middle section for trends and breakdowns +- Optional detail table at the bottom for drill-down data +- Filters in the header or sidebar depending on complexity + +### 4. Build the HTML Dashboard + +Generate a single self-contained HTML file using the base template below. The file includes: + +**Structure (HTML):** +- Semantic HTML5 layout +- Responsive grid using CSS Grid or Flexbox +- Filter controls (dropdowns, date pickers, toggles) +- KPI cards with values and labels +- Chart containers +- Data table with sortable headers + +**Styling (CSS):** +- Professional color scheme (clean whites, grays, with accent colors for data) +- Card-based layout with subtle shadows +- Consistent typography (system fonts for fast loading) +- Responsive design that works on different screen sizes +- Print-friendly styles + +**Interactivity (JavaScript):** +- Chart.js for interactive charts (included via CDN) +- Filter dropdowns that update all charts and tables simultaneously +- Sortable table columns +- Hover tooltips on charts +- Number formatting (commas, currency, percentages) + +**Data (embedded JSON):** +- All data embedded directly in the HTML as JavaScript variables +- No external data fetches required +- Dashboard works completely offline + +### 5. 
Implement Chart Types + +Use Chart.js for all charts. Common dashboard chart patterns: + +- **Line chart**: Time series trends +- **Bar chart**: Category comparisons +- **Doughnut chart**: Composition (when <6 categories) +- **Stacked bar**: Composition over time +- **Mixed (bar + line)**: Volume with rate overlay + +Use the Chart.js integration patterns below for each chart type. + +### 6. Add Interactivity + +Use the filter and interactivity implementation patterns below for dropdown filters, date range filters, combined filter logic, sortable tables, and chart updates. + +### 7. Save and Open + +1. Save the dashboard as an HTML file with a descriptive name (e.g., `sales_dashboard.html`) +2. Open it in the user's default browser +3. Confirm it renders correctly +4. Provide instructions for updating data or customizing + +--- + +## Base Template Every dashboard follows this structure: @@ -92,7 +209,7 @@ Every dashboard follows this structure: ``` -### KPI Card Pattern +## KPI Card Pattern ```html
@@ -137,6 +254,8 @@ function formatValue(value, format) { } ``` +## Chart.js Integration + ### Chart Container Pattern ```html @@ -146,8 +265,6 @@ function formatValue(value, format) {
``` -## Chart.js Integration - ### Line Chart ```javascript @@ -784,3 +901,24 @@ function renderTablePage(data, page, pageSize = 50) { // Show pagination controls: "Showing 1-50 of 2,340" } ``` + +## Examples + +``` +/build-dashboard Monthly sales dashboard with revenue trend, top products, and regional breakdown. Data is in the orders table. +``` + +``` +/build-dashboard Here's our support ticket data [pastes CSV]. Build a dashboard showing volume by priority, response time trends, and resolution rates. +``` + +``` +/build-dashboard Create a template executive dashboard for a SaaS company showing MRR, churn, new customers, and NPS. Use sample data. +``` + +## Tips + +- Dashboards are fully self-contained HTML files -- share them with anyone by sending the file +- For real-time dashboards, consider connecting to a BI tool instead. These dashboards are point-in-time snapshots +- Request "dark mode" or "presentation mode" for different styling +- You can request a specific color scheme to match your brand diff --git a/data/commands/create-viz.md b/data/skills/create-viz/SKILL.md similarity index 94% rename from data/commands/create-viz.md rename to data/skills/create-viz/SKILL.md index 4aa81cdd..54840e67 100644 --- a/data/commands/create-viz.md +++ b/data/skills/create-viz/SKILL.md @@ -1,11 +1,12 @@ --- -description: Create publication-quality visualizations with Python +name: create-viz +description: Create publication-quality visualizations with Python. Use when turning query results or a DataFrame into a chart, selecting the right chart type for a trend or comparison, generating a plot for a report or presentation, or needing an interactive chart with hover and zoom. argument-hint: " [chart type]" --- # /create-viz - Create Visualizations -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). 
+> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Create publication-quality data visualizations using Python. Generates charts from data with best practices for clarity, accuracy, and design. diff --git a/data/skills/data-visualization/SKILL.md b/data/skills/data-visualization/SKILL.md index d3c0ace9..409cce60 100644 --- a/data/skills/data-visualization/SKILL.md +++ b/data/skills/data-visualization/SKILL.md @@ -1,6 +1,7 @@ --- name: data-visualization description: Create effective data visualizations with Python (matplotlib, seaborn, plotly). Use when building charts, choosing the right chart type for a dataset, creating publication-quality figures, or applying design principles like accessibility and color theory. +user-invocable: false --- # Data Visualization Skill diff --git a/data/skills/data-exploration/SKILL.md b/data/skills/explore-data/SKILL.md similarity index 62% rename from data/skills/data-exploration/SKILL.md rename to data/skills/explore-data/SKILL.md index cf4b3029..f1e7032d 100644 --- a/data/skills/data-exploration/SKILL.md +++ b/data/skills/explore-data/SKILL.md @@ -1,15 +1,42 @@ --- -name: data-exploration -description: Profile and explore datasets to understand their shape, quality, and patterns before analysis. Use when encountering a new dataset, assessing data quality, discovering column distributions, identifying nulls and outliers, or deciding which dimensions to analyze. +name: explore-data +description: Profile and explore a dataset to understand its shape, quality, and patterns. Use when encountering a new table or file, checking null rates and column distributions, spotting data quality issues like duplicates or suspicious values, or deciding which dimensions and metrics to analyze. +argument-hint: "
" --- -# Data Exploration Skill +# /explore-data - Profile and Explore a Dataset -Systematic methodology for profiling datasets, assessing data quality, discovering patterns, and understanding schemas. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). -## Data Profiling Methodology +Generate a comprehensive data profile for a table or uploaded file. Understand its shape, quality, and patterns before diving into analysis. -### Phase 1: Structural Understanding +## Usage + +``` +/explore-data +``` + +## Workflow + +### 1. Access the Data + +**If a data warehouse MCP server is connected:** + +1. Resolve the table name (handle schema prefixes, suggest matches if ambiguous) +2. Query table metadata: column names, types, descriptions if available +3. Run profiling queries against the live data + +**If a file is provided (CSV, Excel, Parquet, JSON):** + +1. Read the file and load into a working dataset +2. Infer column types from the data + +**If neither:** + +1. Ask the user to provide a table name (with their warehouse connected) or upload a file +2. If they describe a table schema, provide guidance on what profiling queries to run + +### 2. Understand Structure Before analyzing any data, understand its structure: @@ -20,8 +47,7 @@ Before analyzing any data, understand its structure: - When was the data last updated? - How far back does the data go? -**Column classification:** -Categorize each column as one of: +**Column classification** — categorize each column as one of: - **Identifier**: Unique keys, foreign keys, entity IDs - **Dimension**: Categorical attributes for grouping/filtering (status, type, region, category) - **Metric**: Quantitative values for measurement (revenue, count, duration, score) @@ -30,9 +56,15 @@ Categorize each column as one of: - **Boolean**: True/false flags - **Structural**: JSON, arrays, nested structures -### Phase 2: Column-Level Profiling +### 3. 
Generate Data Profile -For each column, compute: +Run the following profiling checks: + +**Table-level metrics:** +- Total row count +- Column count and types breakdown +- Approximate table size (if available from metadata) +- Date range coverage (min/max of date columns) **All columns:** - Null count and null rate @@ -73,7 +105,21 @@ true count, false count, null count true rate ``` -### Phase 3: Relationship Discovery +**Present the profile as a clean summary table**, grouped by column type (dimensions, metrics, dates, IDs). + +### 4. Identify Data Quality Issues + +Apply the quality assessment framework below. Flag potential problems: + +- **High null rates**: Columns with >5% nulls (warn), >20% nulls (alert) +- **Low cardinality surprises**: Columns that should be high-cardinality but aren't (e.g., a "user_id" with only 50 distinct values) +- **High cardinality surprises**: Columns that should be categorical but have too many distinct values +- **Suspicious values**: Negative amounts where only positive expected, future dates in historical data, obviously placeholder values (e.g., "N/A", "TBD", "test", "999999") +- **Duplicate detection**: Check if there's a natural key and whether it has duplicates +- **Distribution skew**: Extremely skewed numeric distributions that could affect averages +- **Encoding issues**: Mixed case in categorical fields, trailing whitespace, inconsistent formats + +### 5. Discover Relationships and Patterns After profiling individual columns: @@ -83,6 +129,48 @@ After profiling individual columns: - **Derived columns**: Columns that appear to be computed from others - **Redundant columns**: Columns with identical or near-identical information +### 6. 
Suggest Interesting Dimensions and Metrics + +Based on the column profile, recommend: + +- **Best dimension columns** for slicing data (categorical columns with reasonable cardinality, 3-50 values) +- **Key metric columns** for measurement (numeric columns with meaningful distributions) +- **Time columns** suitable for trend analysis +- **Natural groupings** or hierarchies apparent in the data +- **Potential join keys** linking to other tables (ID columns, foreign keys) + +### 7. Recommend Follow-Up Analyses + +Suggest 3-5 specific analyses the user could run next: + +- "Trend analysis on [metric] by [time_column] grouped by [dimension]" +- "Distribution deep-dive on [skewed_column] to understand outliers" +- "Data quality investigation on [problematic_column]" +- "Correlation analysis between [metric_a] and [metric_b]" +- "Cohort analysis using [date_column] and [status_column]" + +## Output Format + +``` +## Data Profile: [table_name] + +### Overview +- Rows: 2,340,891 +- Columns: 23 (8 dimensions, 6 metrics, 4 dates, 5 IDs) +- Date range: 2021-03-15 to 2024-01-22 + +### Column Details +[summary table] + +### Data Quality Issues +[flagged issues with severity] + +### Recommended Explorations +[numbered list of suggested follow-up analyses] +``` + +--- + ## Quality Assessment Framework ### Completeness Score @@ -229,3 +317,9 @@ When exploring an unfamiliar data environment: 3. Identify raw/staging/mart layers 4. Map the transformation chain from raw data to analytical tables 5. 
Note where data is enriched, filtered, or aggregated + +## Tips + +- For very large tables (100M+ rows), profiling queries use sampling by default -- mention if you need exact counts +- If exploring a new dataset for the first time, this command gives you the lay of the land before writing specific queries +- The quality flags are heuristic -- not every flag is a real problem, but each is worth a quick look diff --git a/data/skills/sql-queries/SKILL.md b/data/skills/sql-queries/SKILL.md index e2225c2b..f92e232c 100644 --- a/data/skills/sql-queries/SKILL.md +++ b/data/skills/sql-queries/SKILL.md @@ -1,6 +1,7 @@ --- name: sql-queries description: Write correct, performant SQL across all major data warehouse dialects (Snowflake, BigQuery, Databricks, PostgreSQL, etc.). Use when writing queries, optimizing slow SQL, translating between dialects, or building complex analytical queries with CTEs, window functions, or aggregations. +user-invocable: false --- # SQL Queries Skill diff --git a/data/skills/statistical-analysis/SKILL.md b/data/skills/statistical-analysis/SKILL.md index c408d856..59cc87de 100644 --- a/data/skills/statistical-analysis/SKILL.md +++ b/data/skills/statistical-analysis/SKILL.md @@ -1,6 +1,7 @@ --- name: statistical-analysis description: Apply statistical methods including descriptive stats, trend analysis, outlier detection, and hypothesis testing. Use when analyzing distributions, testing for significance, detecting anomalies, computing correlations, or interpreting statistical results. 
+user-invocable: false --- # Statistical Analysis Skill diff --git a/data/skills/data-validation/SKILL.md b/data/skills/validate-data/SKILL.md similarity index 60% rename from data/skills/data-validation/SKILL.md rename to data/skills/validate-data/SKILL.md index 06a4d6fa..30a1bac4 100644 --- a/data/skills/data-validation/SKILL.md +++ b/data/skills/validate-data/SKILL.md @@ -1,11 +1,130 @@ --- -name: data-validation -description: QA an analysis before sharing with stakeholders — methodology checks, accuracy verification, and bias detection. Use when reviewing an analysis for errors, checking for survivorship bias, validating aggregation logic, or preparing documentation for reproducibility. +name: validate-data +description: QA an analysis before sharing -- methodology, accuracy, and bias checks. Use when reviewing an analysis before a stakeholder presentation, spot-checking calculations and aggregation logic, verifying a SQL query's results look right, or assessing whether conclusions are actually supported by the data. +argument-hint: "" --- -# Data Validation Skill +# /validate-data - Validate Analysis Before Sharing -Pre-delivery QA checklist, common data analysis pitfalls, result sanity checking, and documentation standards for reproducibility. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Review an analysis for accuracy, methodology, and potential biases before sharing with stakeholders. Generates a confidence assessment and improvement suggestions. + +## Usage + +``` +/validate-data +``` + +The analysis can be: +- A document or report in the conversation +- A file (markdown, notebook, spreadsheet) +- SQL queries and their results +- Charts and their underlying data +- A description of methodology and findings + +## Workflow + +### 1. Review Methodology and Assumptions + +Examine: + +- **Question framing**: Is the analysis answering the right question? 
Could the question be interpreted differently? +- **Data selection**: Are the right tables/datasets being used? Is the time range appropriate? +- **Population definition**: Is the analysis population correctly defined? Are there unintended exclusions? +- **Metric definitions**: Are metrics defined clearly and consistently? Do they match how stakeholders understand them? +- **Baseline and comparison**: Is the comparison fair? Are time periods, cohort sizes, and contexts comparable? + +### 2. Run the Pre-Delivery QA Checklist + +Work through the checklist below — data quality, calculation, reasonableness, and presentation checks. + +### 3. Check for Common Analytical Pitfalls + +Systematically review against the detailed pitfall catalog below (join explosion, survivorship bias, incomplete period comparison, denominator shifting, average of averages, timezone mismatches, selection bias). + +### 4. Verify Calculations and Aggregations + +Where possible, spot-check: + +- Recalculate a few key numbers independently +- Verify that subtotals sum to totals +- Check that percentages sum to 100% (or close to it) where expected +- Confirm that YoY/MoM comparisons use the correct base periods +- Validate that filters are applied consistently across all metrics + +Apply the result sanity-checking techniques below (magnitude checks, cross-validation, red-flag detection). + +### 5. Assess Visualizations + +If the analysis includes charts: + +- Do axes start at appropriate values (zero for bar charts)? +- Are scales consistent across comparison charts? +- Do chart titles accurately describe what's shown? +- Could the visualization mislead a quick reader? +- Are there truncated axes, inconsistent intervals, or 3D effects that distort perception? + +### 6. 
Evaluate Narrative and Conclusions + +Review whether: + +- Conclusions are supported by the data shown +- Alternative explanations are acknowledged +- Uncertainty is communicated appropriately +- Recommendations follow logically from findings +- The level of confidence matches the strength of evidence + +### 7. Suggest Improvements + +Provide specific, actionable suggestions: + +- Additional analyses that would strengthen the conclusions +- Caveats or limitations that should be noted +- Better visualizations or framings for key points +- Missing context that stakeholders would want + +### 8. Generate Confidence Assessment + +Rate the analysis on a 3-level scale: + +**Ready to share** -- Analysis is methodologically sound, calculations verified, caveats noted. Minor suggestions for improvement but nothing blocking. + +**Share with noted caveats** -- Analysis is largely correct but has specific limitations or assumptions that must be communicated to stakeholders. List the required caveats. + +**Needs revision** -- Found specific errors, methodological issues, or missing analyses that should be addressed before sharing. List the required changes with priority order. + +## Output Format + +``` +## Validation Report + +### Overall Assessment: [Ready to share | Share with caveats | Needs revision] + +### Methodology Review +[Findings about approach, data selection, definitions] + +### Issues Found +1. [Severity: High/Medium/Low] [Issue description and impact] +2. ... + +### Calculation Spot-Checks +- [Metric]: [Verified / Discrepancy found] +- ... + +### Visualization Review +[Any issues with charts or visual presentation] + +### Suggested Improvements +1. [Improvement and why it matters] +2. ... + +### Required Caveats for Stakeholders +- [Caveat that must be communicated] +- ... 
+``` + +--- ## Pre-Delivery QA Checklist @@ -126,6 +245,16 @@ SELECT COUNT(*) FROM table_a a JOIN table_b b ON a.id = b.a_id; -- 3,500 (uh oh **How to prevent**: Define segments based on pre-treatment characteristics, not outcomes. +### Other Statistical Traps + +- **Simpson's paradox**: Trend reverses when data is aggregated vs. segmented +- **Correlation presented as causation** without supporting evidence +- **Small sample sizes** leading to unreliable conclusions +- **Outliers disproportionately affecting averages** (should medians be used instead?) +- **Multiple testing / cherry-picking** significant results +- **Look-ahead bias**: Using future information to explain past events +- **Cherry-picked time ranges** that favor a particular narrative + ## Result Sanity Checking ### Magnitude Checks @@ -231,3 +360,24 @@ Output: - Note the date of the data snapshot used - If an analysis is re-run with updated data, document what changed and why - Link to prior versions of recurring analyses for trend comparison + +## Examples + +``` +/validate-data Review this quarterly revenue analysis before I send it to the exec team: [analysis] +``` + +``` +/validate-data Check my churn analysis -- I'm comparing Q4 churn rates to Q3 but Q4 has a shorter measurement window +``` + +``` +/validate-data Here's a SQL query and its results for our conversion funnel. Does the logic look right? 
[query + results] +``` + +## Tips + +- Run /validate-data before any high-stakes presentation or decision +- Even quick analyses benefit from a sanity check -- it takes a minute and can save your credibility +- If the validation finds issues, fix them and re-validate +- Share the validation output alongside your analysis to build stakeholder confidence diff --git a/data/commands/write-query.md b/data/skills/write-query/SKILL.md similarity index 93% rename from data/commands/write-query.md rename to data/skills/write-query/SKILL.md index 2b0611b7..ae8333dd 100644 --- a/data/commands/write-query.md +++ b/data/skills/write-query/SKILL.md @@ -1,11 +1,12 @@ --- -description: Write optimized SQL for your dialect with best practices +name: write-query +description: Write optimized SQL for your dialect with best practices. Use when translating a natural-language data need into SQL, building a multi-CTE query with joins and aggregations, optimizing a query against a large partitioned table, or getting dialect-specific syntax for Snowflake, BigQuery, Postgres, etc. argument-hint: "" --- # /write-query - Write Optimized SQL -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Write a SQL query from a natural language description, optimized for your specific SQL dialect and following best practices. diff --git a/design/.claude-plugin/plugin.json b/design/.claude-plugin/plugin.json index a946b434..90175dd4 100644 --- a/design/.claude-plugin/plugin.json +++ b/design/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "design", - "version": "1.1.0", + "version": "1.2.0", "description": "Accelerate design workflows — critique, design system management, UX writing, accessibility audits, research synthesis, and dev handoff. 
From exploration to pixel-perfect specs.", "author": { "name": "Anthropic" diff --git a/design/.mcp.json b/design/.mcp.json index 098d35b4..d18c712c 100644 --- a/design/.mcp.json +++ b/design/.mcp.json @@ -24,6 +24,10 @@ "type": "http", "url": "https://mcp.notion.com/mcp" }, + "mixpanel": { + "type": "http", + "url": "https://mcp.mixpanel.com/mcp" + }, "intercom": { "type": "http", "url": "https://mcp.intercom.com/mcp" diff --git a/design/CONNECTORS.md b/design/CONNECTORS.md index 1443063b..a835c735 100644 --- a/design/CONNECTORS.md +++ b/design/CONNECTORS.md @@ -15,4 +15,4 @@ Plugins are **tool-agnostic** — they describe workflows in terms of categories | Knowledge base | `~~knowledge base` | Notion | Confluence, Guru, Coda | | Project tracker | `~~project tracker` | Linear, Asana, Atlassian (Jira/Confluence) | Shortcut, ClickUp | | User feedback | `~~user feedback` | Intercom | Productboard, Canny, UserVoice, Dovetail | -| Product analytics | `~~product analytics` | — | Amplitude, Mixpanel, Heap, FullStory | +| Product analytics | `~~product analytics` | Mixpanel | Amplitude, Heap, FullStory | diff --git a/design/commands/accessibility.md b/design/commands/accessibility.md deleted file mode 100644 index 324d0af8..00000000 --- a/design/commands/accessibility.md +++ /dev/null @@ -1,86 +0,0 @@ ---- -description: Run a WCAG accessibility audit on a design or page -argument-hint: "" ---- - -# /accessibility - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Audit a design or page for WCAG 2.1 AA accessibility compliance. See the **accessibility-review** skill for WCAG criteria reference and common issues checklist. 
- -## Usage - -``` -/accessibility $ARGUMENTS -``` - -Audit for accessibility: @$1 - -## Output - -```markdown -## Accessibility Audit: [Design/Page Name] -**Standard:** WCAG 2.1 AA | **Date:** [Date] - -### Summary -**Issues found:** [X] | **Critical:** [X] | **Major:** [X] | **Minor:** [X] - -### Findings - -#### Perceivable -| # | Issue | WCAG Criterion | Severity | Recommendation | -|---|-------|---------------|----------|----------------| -| 1 | [Issue] | [1.4.3 Contrast] | 🔴 Critical | [Fix] | - -#### Operable -| # | Issue | WCAG Criterion | Severity | Recommendation | -|---|-------|---------------|----------|----------------| -| 1 | [Issue] | [2.1.1 Keyboard] | 🟡 Major | [Fix] | - -#### Understandable -| # | Issue | WCAG Criterion | Severity | Recommendation | -|---|-------|---------------|----------|----------------| -| 1 | [Issue] | [3.3.2 Labels] | 🟢 Minor | [Fix] | - -#### Robust -| # | Issue | WCAG Criterion | Severity | Recommendation | -|---|-------|---------------|----------|----------------| -| 1 | [Issue] | [4.1.2 Name, Role, Value] | 🟡 Major | [Fix] | - -### Color Contrast Check -| Element | Foreground | Background | Ratio | Required | Pass? | -|---------|-----------|------------|-------|----------|-------| -| [Body text] | [color] | [color] | [X]:1 | 4.5:1 | ✅/❌ | - -### Keyboard Navigation -| Element | Tab Order | Enter/Space | Escape | Arrow Keys | -|---------|-----------|-------------|--------|------------| -| [Element] | [Order] | [Behavior] | [Behavior] | [Behavior] | - -### Screen Reader -| Element | Announced As | Issue | -|---------|-------------|-------| -| [Element] | [What SR says] | [Problem if any] | - -### Priority Fixes -1. **[Critical fix]** — Affects [who] and blocks [what] -2. **[Major fix]** — Improves [what] for [who] -3. 
**[Minor fix]** — Nice to have -``` - -## If Connectors Available - -If **~~design tool** is connected: -- Inspect color values, font sizes, and touch targets directly from Figma -- Check component ARIA roles and keyboard behavior in the design spec - -If **~~project tracker** is connected: -- Create tickets for each accessibility finding with severity and WCAG criterion -- Link findings to existing accessibility remediation epics - -## Tips - -1. **Start with contrast and keyboard** — These catch the most common and impactful issues. -2. **Test with real assistive technology** — My audit is a great start, but manual testing with VoiceOver/NVDA catches things I can't. -3. **Prioritize by impact** — Fix issues that block users first, polish later. diff --git a/design/commands/critique.md b/design/commands/critique.md deleted file mode 100644 index 85efb925..00000000 --- a/design/commands/critique.md +++ /dev/null @@ -1,79 +0,0 @@ ---- -description: Get structured design feedback on usability, hierarchy, and consistency -argument-hint: "" ---- - -# /critique - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Get structured design feedback across multiple dimensions. See the **design-critique** skill for the evaluation framework and feedback principles. - -## Usage - -``` -/critique $ARGUMENTS -``` - -Review the design: @$1 - -If a Figma URL is provided, pull the design from Figma. If a file is referenced, read it. Otherwise, ask the user to describe or share their design. - -## What I Need From You - -- **The design**: Figma URL, screenshot, or detailed description -- **Context**: What is this? Who is it for? What stage (exploration, refinement, final)? 
-- **Focus** (optional): "Focus on mobile" or "Focus on the onboarding flow" - -## Output - -```markdown -## Design Critique: [Design Name] - -### Overall Impression -[1-2 sentence first reaction — what works, what's the biggest opportunity] - -### Usability -| Finding | Severity | Recommendation | -|---------|----------|----------------| -| [Issue] | 🔴 Critical / 🟡 Moderate / 🟢 Minor | [Fix] | - -### Visual Hierarchy -- **What draws the eye first**: [Element] — [Is this correct?] -- **Reading flow**: [How does the eye move through the layout?] -- **Emphasis**: [Are the right things emphasized?] - -### Consistency -| Element | Issue | Recommendation | -|---------|-------|----------------| -| [Typography/spacing/color] | [Inconsistency] | [Fix] | - -### Accessibility -- **Color contrast**: [Pass/fail for key text] -- **Touch targets**: [Adequate size?] -- **Text readability**: [Font size, line height] - -### What Works Well -- [Positive observation 1] -- [Positive observation 2] - -### Priority Recommendations -1. **[Most impactful change]** — [Why and how] -2. **[Second priority]** — [Why and how] -3. **[Third priority]** — [Why and how] -``` - -## If Connectors Available - -If **~~design tool** is connected: -- Pull the design directly from Figma and inspect components, tokens, and layers -- Compare against the existing design system for consistency - -If **~~user feedback** is connected: -- Cross-reference design decisions with recent user feedback and support tickets - -## Tips - -1. **Share the context** — "This is a checkout flow for a B2B SaaS" helps me give relevant feedback. -2. **Specify your stage** — Early exploration gets different feedback than final polish. -3. **Ask me to focus** — "Just look at the navigation" gives you more depth on one area. 
diff --git a/design/commands/handoff.md b/design/commands/handoff.md deleted file mode 100644 index 551e2830..00000000 --- a/design/commands/handoff.md +++ /dev/null @@ -1,90 +0,0 @@ ---- -description: Generate developer handoff specs from a design -argument-hint: "" ---- - -# /handoff - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Generate comprehensive developer handoff documentation from a design. See the **design-handoff** skill for guidance on what to include and handoff principles. - -## Usage - -``` -/handoff $ARGUMENTS -``` - -Generate handoff specs for: @$1 - -If a Figma URL is provided, pull the design from Figma. Otherwise, work from the provided description or screenshot. - -## Output - -```markdown -## Handoff Spec: [Feature/Screen Name] - -### Overview -[What this screen/feature does, user context] - -### Layout -[Grid system, breakpoints, responsive behavior] - -### Design Tokens Used -| Token | Value | Usage | -|-------|-------|-------| -| `color-primary` | #[hex] | CTA buttons, links | -| `spacing-md` | [X]px | Between sections | -| `font-heading-lg` | [size/weight/family] | Page title | - -### Components -| Component | Variant | Props | Notes | -|-----------|---------|-------|-------| -| [Component] | [Variant] | [Props] | [Special behavior] | - -### States and Interactions -| Element | State | Behavior | -|---------|-------|----------| -| [CTA Button] | Hover | [Background darken 10%] | -| [CTA Button] | Loading | [Spinner, disabled] | -| [Form] | Error | [Red border, error message below] | - -### Responsive Behavior -| Breakpoint | Changes | -|------------|---------| -| Desktop (>1024px) | [Default layout] | -| Tablet (768-1024px) | [What changes] | -| Mobile (<768px) | [What changes] | - -### Edge Cases -- **Empty state**: [What to show when no data] -- **Long text**: [Truncation rules] -- **Loading**: [Skeleton or spinner] -- **Error**: [Error state appearance] - 
-### Animation / Motion -| Element | Trigger | Animation | Duration | Easing | -|---------|---------|-----------|----------|--------| -| [Element] | [Trigger] | [Description] | [ms] | [easing] | - -### Accessibility Notes -- [Focus order] -- [ARIA labels needed] -- [Keyboard interactions] -``` - -## If Connectors Available - -If **~~design tool** is connected: -- Pull exact measurements, tokens, and component specs from Figma -- Export assets and generate a complete spec sheet - -If **~~project tracker** is connected: -- Link the handoff to the implementation ticket -- Create sub-tasks for each section of the spec - -## Tips - -1. **Share the Figma link** — I can pull exact measurements, tokens, and component info. -2. **Mention edge cases** — "What happens with 100 items?" helps me spec boundary conditions. -3. **Specify the tech stack** — "We use React + Tailwind" helps me give relevant implementation notes. diff --git a/design/commands/ux-copy.md b/design/commands/ux-copy.md deleted file mode 100644 index 7ba5ac13..00000000 --- a/design/commands/ux-copy.md +++ /dev/null @@ -1,71 +0,0 @@ ---- -description: Write or review UX copy — microcopy, error messages, empty states, CTAs -argument-hint: "" ---- - -# /ux-copy - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Write or review UX copy for any interface context. See the **ux-writing** skill for copy principles, patterns, and voice/tone guidance. - -## Usage - -``` -/ux-copy $ARGUMENTS -``` - -## What I Need From You - -- **Context**: What screen, flow, or feature? -- **User state**: What is the user trying to do? How are they feeling? -- **Tone**: Formal, friendly, playful, reassuring? -- **Constraints**: Character limits, platform guidelines? 
- -## Output - -```markdown -## UX Copy: [Context] - -### Recommended Copy -**[Element]**: [Copy] - -### Alternatives -| Option | Copy | Tone | Best For | -|--------|------|------|----------| -| A | [Copy] | [Tone] | [When to use] | -| B | [Copy] | [Tone] | [When to use] | -| C | [Copy] | [Tone] | [When to use] | - -### Rationale -[Why this copy works — user context, clarity, action-orientation] - -### Localization Notes -[Anything translators should know — idioms to avoid, character expansion, cultural context] -``` - -## Common UX Copy Types - -- **CTAs**: Clear, specific, action-oriented ("Start free trial" not "Submit") -- **Error messages**: What happened, why, and how to fix it -- **Empty states**: Guide the user to take their first action -- **Confirmation dialogs**: Make the consequences clear -- **Onboarding**: Progressive disclosure, one concept at a time -- **Tooltips**: Concise, helpful, never obvious -- **Loading states**: Set expectations, reduce anxiety - -## If Connectors Available - -If **~~knowledge base** is connected: -- Pull your brand voice guidelines and content style guide -- Check for existing copy patterns and terminology standards - -If **~~design tool** is connected: -- View the screen context in Figma to understand the full user flow -- Check character limits and layout constraints from the design - -## Tips - -1. **Be specific about context** — "Error message when payment fails" is better than "error message." -2. **Share your brand voice** — "We're professional but warm" helps me match your tone. -3. **Consider the user's emotional state** — Error messages need empathy. Success messages can celebrate. 
diff --git a/design/skills/accessibility-review/SKILL.md b/design/skills/accessibility-review/SKILL.md index 6aaa100b..3ef332cd 100644 --- a/design/skills/accessibility-review/SKILL.md +++ b/design/skills/accessibility-review/SKILL.md @@ -1,11 +1,22 @@ --- name: accessibility-review -description: Audit designs and code for WCAG 2.1 AA compliance. Trigger with "is this accessible", "accessibility check", "WCAG audit", "can screen readers use this", "color contrast", or when the user asks about making designs or code accessible to all users. +description: Run a WCAG 2.1 AA accessibility audit on a design or page. Trigger with "audit accessibility", "check a11y", "is this accessible?", or when reviewing a design for color contrast, keyboard navigation, touch target size, or screen reader behavior before handoff. +argument-hint: "" --- -# Accessibility Review +# /accessibility-review -Evaluate designs and implementations against WCAG 2.1 AA standards. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Audit a design or page for WCAG 2.1 AA accessibility compliance. + +## Usage + +``` +/accessibility-review $ARGUMENTS +``` + +Audit for accessibility: @$1 ## WCAG 2.1 AA Quick Reference @@ -47,3 +58,71 @@ Evaluate designs and implementations against WCAG 2.1 AA standards. 3. Screen reader testing (VoiceOver, NVDA) 4. Color contrast verification 5. Zoom to 200% — does layout break? 
+ +## Output + +```markdown +## Accessibility Audit: [Design/Page Name] +**Standard:** WCAG 2.1 AA | **Date:** [Date] + +### Summary +**Issues found:** [X] | **Critical:** [X] | **Major:** [X] | **Minor:** [X] + +### Findings + +#### Perceivable +| # | Issue | WCAG Criterion | Severity | Recommendation | +|---|-------|---------------|----------|----------------| +| 1 | [Issue] | [1.4.3 Contrast] | 🔴 Critical | [Fix] | + +#### Operable +| # | Issue | WCAG Criterion | Severity | Recommendation | +|---|-------|---------------|----------|----------------| +| 1 | [Issue] | [2.1.1 Keyboard] | 🟡 Major | [Fix] | + +#### Understandable +| # | Issue | WCAG Criterion | Severity | Recommendation | +|---|-------|---------------|----------|----------------| +| 1 | [Issue] | [3.3.2 Labels] | 🟢 Minor | [Fix] | + +#### Robust +| # | Issue | WCAG Criterion | Severity | Recommendation | +|---|-------|---------------|----------|----------------| +| 1 | [Issue] | [4.1.2 Name, Role, Value] | 🟡 Major | [Fix] | + +### Color Contrast Check +| Element | Foreground | Background | Ratio | Required | Pass? | +|---------|-----------|------------|-------|----------|-------| +| [Body text] | [color] | [color] | [X]:1 | 4.5:1 | ✅/❌ | + +### Keyboard Navigation +| Element | Tab Order | Enter/Space | Escape | Arrow Keys | +|---------|-----------|-------------|--------|------------| +| [Element] | [Order] | [Behavior] | [Behavior] | [Behavior] | + +### Screen Reader +| Element | Announced As | Issue | +|---------|-------------|-------| +| [Element] | [What SR says] | [Problem if any] | + +### Priority Fixes +1. **[Critical fix]** — Affects [who] and blocks [what] +2. **[Major fix]** — Improves [what] for [who] +3. 
**[Minor fix]** — Nice to have +``` + +## If Connectors Available + +If **~~design tool** is connected: +- Inspect color values, font sizes, and touch targets directly from Figma +- Check component ARIA roles and keyboard behavior in the design spec + +If **~~project tracker** is connected: +- Create tickets for each accessibility finding with severity and WCAG criterion +- Link findings to existing accessibility remediation epics + +## Tips + +1. **Start with contrast and keyboard** — These catch the most common and impactful issues. +2. **Test with real assistive technology** — My audit is a great start, but manual testing with VoiceOver/NVDA catches things I can't. +3. **Prioritize by impact** — Fix issues that block users first, polish later. diff --git a/design/skills/design-critique/SKILL.md b/design/skills/design-critique/SKILL.md index fc01b765..b6eaac15 100644 --- a/design/skills/design-critique/SKILL.md +++ b/design/skills/design-critique/SKILL.md @@ -1,11 +1,30 @@ --- name: design-critique -description: Evaluate designs for usability, visual hierarchy, consistency, and adherence to design principles. Trigger with "what do you think of this design", "give me feedback on", "critique this", "review this mockup", or when the user shares a design and asks for opinions. +description: Get structured design feedback on usability, hierarchy, and consistency. Trigger with "review this design", "critique this mockup", "what do you think of this screen?", or when sharing a Figma link or screenshot for feedback at any stage from exploration to final polish. +argument-hint: "" --- -# Design Critique +# /design-critique -Provide structured, actionable design feedback. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Get structured design feedback across multiple dimensions. 
+ +## Usage + +``` +/design-critique $ARGUMENTS +``` + +Review the design: @$1 + +If a Figma URL is provided, pull the design from Figma. If a file is referenced, read it. Otherwise, ask the user to describe or share their design. + +## What I Need From You + +- **The design**: Figma URL, screenshot, or detailed description +- **Context**: What is this? Who is it for? What stage (exploration, refinement, final)? +- **Focus** (optional): "Focus on mobile" or "Focus on the onboarding flow" ## Critique Framework @@ -44,3 +63,56 @@ Provide structured, actionable design feedback. - **Suggest alternatives**: Don't just identify problems, propose solutions - **Acknowledge what works**: Good feedback includes positive observations - **Match the stage**: Early exploration gets different feedback than final polish + +## Output + +```markdown +## Design Critique: [Design Name] + +### Overall Impression +[1-2 sentence first reaction — what works, what's the biggest opportunity] + +### Usability +| Finding | Severity | Recommendation | +|---------|----------|----------------| +| [Issue] | 🔴 Critical / 🟡 Moderate / 🟢 Minor | [Fix] | + +### Visual Hierarchy +- **What draws the eye first**: [Element] — [Is this correct?] +- **Reading flow**: [How does the eye move through the layout?] +- **Emphasis**: [Are the right things emphasized?] + +### Consistency +| Element | Issue | Recommendation | +|---------|-------|----------------| +| [Typography/spacing/color] | [Inconsistency] | [Fix] | + +### Accessibility +- **Color contrast**: [Pass/fail for key text] +- **Touch targets**: [Adequate size?] +- **Text readability**: [Font size, line height] + +### What Works Well +- [Positive observation 1] +- [Positive observation 2] + +### Priority Recommendations +1. **[Most impactful change]** — [Why and how] +2. **[Second priority]** — [Why and how] +3. 
**[Third priority]** — [Why and how] +``` + +## If Connectors Available + +If **~~design tool** is connected: +- Pull the design directly from Figma and inspect components, tokens, and layers +- Compare against the existing design system for consistency + +If **~~user feedback** is connected: +- Cross-reference design decisions with recent user feedback and support tickets + +## Tips + +1. **Share the context** — "This is a checkout flow for a B2B SaaS" helps me give relevant feedback. +2. **Specify your stage** — Early exploration gets different feedback than final polish. +3. **Ask me to focus** — "Just look at the navigation" gives you more depth on one area. diff --git a/design/skills/design-handoff/SKILL.md b/design/skills/design-handoff/SKILL.md index 1068916a..74d7d9ee 100644 --- a/design/skills/design-handoff/SKILL.md +++ b/design/skills/design-handoff/SKILL.md @@ -1,11 +1,24 @@ --- name: design-handoff -description: Create comprehensive developer handoff documentation from designs. Trigger with "handoff to engineering", "developer specs", "implementation notes", "design specs for developers", or when a design needs to be translated into detailed implementation guidance. +description: Generate developer handoff specs from a design. Use when a design is ready for engineering and needs a spec sheet covering layout, design tokens, component props, interaction states, responsive breakpoints, edge cases, and animation details. +argument-hint: "" --- -# Design Handoff +# /design-handoff -Create clear, complete handoff documentation so developers can implement designs accurately. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Generate comprehensive developer handoff documentation from a design. + +## Usage + +``` +/design-handoff $ARGUMENTS +``` + +Generate handoff specs for: @$1 + +If a Figma URL is provided, pull the design from Figma. 
Otherwise, work from the provided description or screenshot. ## What to Include @@ -46,3 +59,73 @@ Create clear, complete handoff documentation so developers can implement designs 2. **Use tokens, not values** — Reference `spacing-md` not `16px`. 3. **Show all states** — Default, hover, active, disabled, loading, error, empty. 4. **Describe the why** — "This collapses on mobile because users primarily use one-handed" helps developers make good judgment calls. + +## Output + +```markdown +## Handoff Spec: [Feature/Screen Name] + +### Overview +[What this screen/feature does, user context] + +### Layout +[Grid system, breakpoints, responsive behavior] + +### Design Tokens Used +| Token | Value | Usage | +|-------|-------|-------| +| `color-primary` | #[hex] | CTA buttons, links | +| `spacing-md` | [X]px | Between sections | +| `font-heading-lg` | [size/weight/family] | Page title | + +### Components +| Component | Variant | Props | Notes | +|-----------|---------|-------|-------| +| [Component] | [Variant] | [Props] | [Special behavior] | + +### States and Interactions +| Element | State | Behavior | +|---------|-------|----------| +| [CTA Button] | Hover | [Background darken 10%] | +| [CTA Button] | Loading | [Spinner, disabled] | +| [Form] | Error | [Red border, error message below] | + +### Responsive Behavior +| Breakpoint | Changes | +|------------|---------| +| Desktop (>1024px) | [Default layout] | +| Tablet (768-1024px) | [What changes] | +| Mobile (<768px) | [What changes] | + +### Edge Cases +- **Empty state**: [What to show when no data] +- **Long text**: [Truncation rules] +- **Loading**: [Skeleton or spinner] +- **Error**: [Error state appearance] + +### Animation / Motion +| Element | Trigger | Animation | Duration | Easing | +|---------|---------|-----------|----------|--------| +| [Element] | [Trigger] | [Description] | [ms] | [easing] | + +### Accessibility Notes +- [Focus order] +- [ARIA labels needed] +- [Keyboard interactions] +``` + +## If 
Connectors Available + +If **~~design tool** is connected: +- Pull exact measurements, tokens, and component specs from Figma +- Export assets and generate a complete spec sheet + +If **~~project tracker** is connected: +- Link the handoff to the implementation ticket +- Create sub-tasks for each section of the spec + +## Tips + +1. **Share the Figma link** — I can pull exact measurements, tokens, and component info. +2. **Mention edge cases** — "What happens with 100 items?" helps me spec boundary conditions. +3. **Specify the tech stack** — "We use React + Tailwind" helps me give relevant implementation notes. diff --git a/design/skills/design-system-management/SKILL.md b/design/skills/design-system-management/SKILL.md deleted file mode 100644 index 02626641..00000000 --- a/design/skills/design-system-management/SKILL.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: design-system-management -description: Manage design tokens, component libraries, and pattern documentation. Trigger with "design system", "component library", "design tokens", "style guide", or when the user asks about maintaining consistency across designs. ---- - -# Design System Management - -Help build, maintain, and evolve design systems. 
- -## Components of a Design System - -### Design Tokens -Atomic values that define the visual language: -- Colors (brand, semantic, neutral) -- Typography (scale, weights, line heights) -- Spacing (scale, component padding) -- Borders (radius, width) -- Shadows (elevation levels) -- Motion (durations, easings) - -### Components -Reusable UI elements with defined: -- Variants (primary, secondary, ghost) -- States (default, hover, active, disabled, loading, error) -- Sizes (sm, md, lg) -- Behavior (interactions, animations) -- Accessibility (ARIA, keyboard) - -### Patterns -Common UI solutions combining components: -- Forms (input groups, validation, submission) -- Navigation (sidebar, tabs, breadcrumbs) -- Data display (tables, cards, lists) -- Feedback (toasts, modals, inline messages) - -## Principles - -1. **Consistency over creativity** — The system exists so teams don't reinvent the wheel -2. **Flexibility within constraints** — Components should be composable, not rigid -3. **Document everything** — If it's not documented, it doesn't exist -4. **Version and migrate** — Breaking changes need migration paths diff --git a/design/commands/design-system.md b/design/skills/design-system/SKILL.md similarity index 73% rename from design/commands/design-system.md rename to design/skills/design-system/SKILL.md index b5863bf4..0346fc13 100644 --- a/design/commands/design-system.md +++ b/design/skills/design-system/SKILL.md @@ -1,11 +1,12 @@ --- -description: Audit, document, or extend your design system +name: design-system +description: Audit, document, or extend your design system. Use when checking for naming inconsistencies or hardcoded values across components, writing documentation for a component's variants, states, and accessibility notes, or designing a new pattern that fits the existing system. 
argument-hint: "[audit | document | extend] " --- # /design-system -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Manage your design system — audit for consistency, document components, or design new patterns. @@ -17,6 +18,39 @@ Manage your design system — audit for consistency, document components, or des /design-system extend [pattern] # Design a new component or pattern ``` +## Components of a Design System + +### Design Tokens +Atomic values that define the visual language: +- Colors (brand, semantic, neutral) +- Typography (scale, weights, line heights) +- Spacing (scale, component padding) +- Borders (radius, width) +- Shadows (elevation levels) +- Motion (durations, easings) + +### Components +Reusable UI elements with defined: +- Variants (primary, secondary, ghost) +- States (default, hover, active, disabled, loading, error) +- Sizes (sm, md, lg) +- Behavior (interactions, animations) +- Accessibility (ARIA, keyboard) + +### Patterns +Common UI solutions combining components: +- Forms (input groups, validation, submission) +- Navigation (sidebar, tabs, breadcrumbs) +- Data display (tables, cards, lists) +- Feedback (toasts, modals, inline messages) + +## Principles + +1. **Consistency over creativity** — The system exists so teams don't reinvent the wheel +2. **Flexibility within constraints** — Components should be composable, not rigid +3. **Document everything** — If it's not documented, it doesn't exist +4. **Version and migrate** — Breaking changes need migration paths + ## Output — Audit ```markdown @@ -139,8 +173,6 @@ Manage your design system — audit for consistency, document components, or des - [Edge case to resolve] ``` -See the **design-system-management** skill for guidance on token naming, component structure, and design system principles. 
- ## If Connectors Available If **~~design tool** is connected: diff --git a/design/commands/research-synthesis.md b/design/skills/research-synthesis/SKILL.md similarity index 90% rename from design/commands/research-synthesis.md rename to design/skills/research-synthesis/SKILL.md index 43f05426..4807340a 100644 --- a/design/commands/research-synthesis.md +++ b/design/skills/research-synthesis/SKILL.md @@ -1,11 +1,12 @@ --- -description: Synthesize user research into themes, insights, and recommendations +name: research-synthesis +description: Synthesize user research into themes, insights, and recommendations. Use when you have interview transcripts, survey results, usability test notes, support tickets, or NPS responses that need to be distilled into patterns, user segments, and prioritized next steps. argument-hint: "" --- # /research-synthesis -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Synthesize user research data into actionable insights. See the **user-research** skill for research methods, interview guides, and analysis frameworks. diff --git a/design/skills/ux-copy/SKILL.md b/design/skills/ux-copy/SKILL.md new file mode 100644 index 00000000..14ad66b9 --- /dev/null +++ b/design/skills/ux-copy/SKILL.md @@ -0,0 +1,107 @@ +--- +name: ux-copy +description: Write or review UX copy — microcopy, error messages, empty states, CTAs. Trigger with "write copy for", "what should this button say?", "review this error message", or when naming a CTA, wording a confirmation dialog, filling an empty state, or writing onboarding text. +argument-hint: "" +--- + +# /ux-copy + +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Write or review UX copy for any interface context. 
+ +## Usage + +``` +/ux-copy $ARGUMENTS +``` + +## What I Need From You + +- **Context**: What screen, flow, or feature? +- **User state**: What is the user trying to do? How are they feeling? +- **Tone**: Formal, friendly, playful, reassuring? +- **Constraints**: Character limits, platform guidelines? + +## Principles + +1. **Clear**: Say exactly what you mean. No jargon, no ambiguity. +2. **Concise**: Use the fewest words that convey the full meaning. +3. **Consistent**: Same terms for the same things everywhere. +4. **Useful**: Every word should help the user accomplish their goal. +5. **Human**: Write like a helpful person, not a robot. + +## Copy Patterns + +### CTAs +- Start with a verb: "Start free trial", "Save changes", "Download report" +- Be specific: "Create account" not "Submit" +- Match the outcome to the label + +### Error Messages +Structure: What happened + Why + How to fix +- "Payment declined. Your card was declined by your bank. Try a different card or contact your bank." + +### Empty States +Structure: What this is + Why it's empty + How to start +- "No projects yet. Create your first project to start collaborating with your team." + +### Confirmation Dialogs +- Make the action clear: "Delete 3 files?" not "Are you sure?" 
+- Describe consequences: "This can't be undone" +- Label buttons with the action: "Delete files" / "Keep files" not "OK" / "Cancel" + +### Tooltips +- Concise, helpful, never obvious + +### Loading States +- Set expectations, reduce anxiety + +### Onboarding +- Progressive disclosure, one concept at a time + +## Voice and Tone + +Adapt tone to context: +- **Success**: Celebratory but not over the top +- **Error**: Empathetic and helpful +- **Warning**: Clear and actionable +- **Neutral**: Informative and concise + +## Output + +```markdown +## UX Copy: [Context] + +### Recommended Copy +**[Element]**: [Copy] + +### Alternatives +| Option | Copy | Tone | Best For | +|--------|------|------|----------| +| A | [Copy] | [Tone] | [When to use] | +| B | [Copy] | [Tone] | [When to use] | +| C | [Copy] | [Tone] | [When to use] | + +### Rationale +[Why this copy works — user context, clarity, action-orientation] + +### Localization Notes +[Anything translators should know — idioms to avoid, character expansion, cultural context] +``` + +## If Connectors Available + +If **~~knowledge base** is connected: +- Pull your brand voice guidelines and content style guide +- Check for existing copy patterns and terminology standards + +If **~~design tool** is connected: +- View the screen context in Figma to understand the full user flow +- Check character limits and layout constraints from the design + +## Tips + +1. **Be specific about context** — "Error message when payment fails" is better than "error message." +2. **Share your brand voice** — "We're professional but warm" helps me match your tone. +3. **Consider the user's emotional state** — Error messages need empathy. Success messages can celebrate. 
diff --git a/design/skills/ux-writing/SKILL.md b/design/skills/ux-writing/SKILL.md deleted file mode 100644 index c0f738c2..00000000 --- a/design/skills/ux-writing/SKILL.md +++ /dev/null @@ -1,44 +0,0 @@ ---- -name: ux-writing -description: Write effective microcopy for user interfaces. Trigger with "write copy for", "help with UX copy", "what should this button say", "error message for", "empty state copy", or when the user needs help with any interface text. ---- - -# UX Writing - -Write clear, concise, and helpful interface copy. - -## Principles - -1. **Clear**: Say exactly what you mean. No jargon, no ambiguity. -2. **Concise**: Use the fewest words that convey the full meaning. -3. **Consistent**: Same terms for the same things everywhere. -4. **Useful**: Every word should help the user accomplish their goal. -5. **Human**: Write like a helpful person, not a robot. - -## Copy Patterns - -### CTAs -- Start with a verb: "Start free trial", "Save changes", "Download report" -- Be specific: "Create account" not "Submit" -- Match the outcome to the label - -### Error Messages -Structure: What happened + Why + How to fix -- "Payment declined. Your card was declined by your bank. Try a different card or contact your bank." - -### Empty States -Structure: What this is + Why it's empty + How to start -- "No projects yet. Create your first project to start collaborating with your team." - -### Confirmation Dialogs -- Make the action clear: "Delete 3 files?" not "Are you sure?" 
-- Describe consequences: "This can't be undone" -- Label buttons with the action: "Delete files" / "Keep files" not "OK" / "Cancel" - -## Voice and Tone - -Adapt tone to context: -- **Success**: Celebratory but not over the top -- **Error**: Empathetic and helpful -- **Warning**: Clear and actionable -- **Neutral**: Informative and concise diff --git a/engineering/.claude-plugin/plugin.json b/engineering/.claude-plugin/plugin.json index 58ebfcf5..040404e9 100644 --- a/engineering/.claude-plugin/plugin.json +++ b/engineering/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "engineering", - "version": "1.1.0", + "version": "1.2.0", "description": "Streamline engineering workflows — standups, code review, architecture decisions, incident response, and technical documentation. Works with your existing tools or standalone.", "author": { "name": "Anthropic" diff --git a/engineering/commands/incident.md b/engineering/commands/incident.md deleted file mode 100644 index ed7a9326..00000000 --- a/engineering/commands/incident.md +++ /dev/null @@ -1,144 +0,0 @@ ---- -description: Run an incident response workflow — triage, communicate, and write postmortem -argument-hint: "" ---- - -# /incident - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Manage an incident from detection through postmortem. - -## Usage - -``` -/incident $ARGUMENTS -``` - -## Modes - -``` -/incident new [description] # Start a new incident -/incident update [status] # Post a status update -/incident postmortem # Generate postmortem from incident data -``` - -If no mode is specified, ask what phase the incident is in. See the **incident-response** skill for severity definitions, response frameworks, and communication templates. 
- -## How It Works - -``` -┌─────────────────────────────────────────────────────────────────┐ -│ INCIDENT RESPONSE │ -├─────────────────────────────────────────────────────────────────┤ -│ Phase 1: TRIAGE │ -│ ✓ Assess severity (SEV1-4) │ -│ ✓ Identify affected systems and users │ -│ ✓ Assign roles (IC, comms, responders) │ -│ │ -│ Phase 2: COMMUNICATE │ -│ ✓ Draft internal status update │ -│ ✓ Draft customer communication (if needed) │ -│ ✓ Set up war room and cadence │ -│ │ -│ Phase 3: MITIGATE │ -│ ✓ Document mitigation steps taken │ -│ ✓ Track timeline of events │ -│ ✓ Confirm resolution │ -│ │ -│ Phase 4: POSTMORTEM │ -│ ✓ Blameless postmortem document │ -│ ✓ Timeline reconstruction │ -│ ✓ Root cause analysis (5 whys) │ -│ ✓ Action items with owners │ -└─────────────────────────────────────────────────────────────────┘ -``` - -## Output — Status Update - -```markdown -## Incident Update: [Title] -**Severity:** SEV[1-4] | **Status:** Investigating | Identified | Monitoring | Resolved -**Impact:** [Who/what is affected] -**Last Updated:** [Timestamp] - -### Current Status -[What we know now] - -### Actions Taken -- [Action 1] -- [Action 2] - -### Next Steps -- [What's happening next and ETA] - -### Timeline -| Time | Event | -|------|-------| -| [HH:MM] | [Event] | -``` - -## Output — Postmortem - -```markdown -## Postmortem: [Incident Title] -**Date:** [Date] | **Duration:** [X hours] | **Severity:** SEV[X] -**Authors:** [Names] | **Status:** Draft - -### Summary -[2-3 sentence plain-language summary] - -### Impact -- [Users affected] -- [Duration of impact] -- [Business impact if quantifiable] - -### Timeline -| Time (UTC) | Event | -|------------|-------| -| [HH:MM] | [Event] | - -### Root Cause -[Detailed explanation of what caused the incident] - -### 5 Whys -1. Why did [symptom]? → [Because...] -2. Why did [cause 1]? → [Because...] -3. Why did [cause 2]? → [Because...] -4. Why did [cause 3]? → [Because...] -5. Why did [cause 4]? 
→ [Root cause] - -### What Went Well -- [Things that worked] - -### What Went Poorly -- [Things that didn't work] - -### Action Items -| Action | Owner | Priority | Due Date | -|--------|-------|----------|----------| -| [Action] | [Person] | P0/P1/P2 | [Date] | - -### Lessons Learned -[Key takeaways for the team] -``` - -## If Connectors Available - -If **~~monitoring** is connected: -- Pull alert details and metrics -- Show graphs of affected metrics - -If **~~incident management** is connected: -- Create or update incident in PagerDuty/Opsgenie -- Page on-call responders - -If **~~chat** is connected: -- Post status updates to incident channel -- Create war room channel - -## Tips - -1. **Start writing immediately** — Don't wait for complete information. Update as you learn more. -2. **Keep updates factual** — What we know, what we've done, what's next. No speculation. -3. **Postmortems are blameless** — Focus on systems and processes, not individuals. diff --git a/engineering/commands/review.md b/engineering/commands/review.md deleted file mode 100644 index 0f759abb..00000000 --- a/engineering/commands/review.md +++ /dev/null @@ -1,95 +0,0 @@ ---- -description: Review code changes for security, performance, and correctness -argument-hint: "" ---- - -# /review - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Review code changes with a structured lens on security, performance, correctness, and maintainability. - -## Usage - -``` -/review -``` - -Review the provided code changes: @$1 - -If no specific file or URL is provided, ask what to review. 
- -## How It Works - -``` -┌─────────────────────────────────────────────────────────────────┐ -│ CODE REVIEW │ -├─────────────────────────────────────────────────────────────────┤ -│ STANDALONE (always works) │ -│ ✓ Paste a diff, PR URL, or point to files │ -│ ✓ Security audit (OWASP top 10, injection, auth) │ -│ ✓ Performance review (N+1, memory leaks, complexity) │ -│ ✓ Correctness (edge cases, error handling, race conditions) │ -│ ✓ Style (naming, structure, readability) │ -│ ✓ Actionable suggestions with code examples │ -├─────────────────────────────────────────────────────────────────┤ -│ SUPERCHARGED (when you connect your tools) │ -│ + Source control: Pull PR diff automatically │ -│ + Project tracker: Link findings to tickets │ -│ + Knowledge base: Check against team coding standards │ -└─────────────────────────────────────────────────────────────────┘ -``` - -## Output - -```markdown -## Code Review: [PR title or file] - -### Summary -[1-2 sentence overview of the changes and overall quality] - -### Critical Issues -| # | File | Line | Issue | Severity | -|---|------|------|-------|----------| -| 1 | [file] | [line] | [description] | 🔴 Critical | - -### Suggestions -| # | File | Line | Suggestion | Category | -|---|------|------|------------|----------| -| 1 | [file] | [line] | [description] | Performance | - -### What Looks Good -- [Positive observations] - -### Verdict -[Approve / Request Changes / Needs Discussion] -``` - -## Review Checklist - -See the **code-review** skill for detailed guidance on security patterns, performance anti-patterns, and maintainability heuristics. 
- -I check for: -- **Security**: SQL injection, XSS, auth bypass, secrets in code, insecure deserialization -- **Performance**: N+1 queries, unnecessary allocations, algorithmic complexity, missing indexes -- **Correctness**: Edge cases, null handling, race conditions, error propagation -- **Maintainability**: Naming clarity, single responsibility, test coverage, documentation - -## If Connectors Available - -If **~~source control** is connected: -- Pull the PR diff automatically from the URL -- Check CI status and test results - -If **~~project tracker** is connected: -- Link findings to related tickets -- Verify the PR addresses the stated requirements - -If **~~knowledge base** is connected: -- Check changes against team coding standards and style guides - -## Tips - -1. **Provide context** — "This is a hot path" or "This handles PII" helps me focus. -2. **Specify concerns** — "Focus on security" narrows the review. -3. **Include tests** — I'll check test coverage and quality too. diff --git a/engineering/commands/architecture.md b/engineering/skills/architecture/SKILL.md similarity index 87% rename from engineering/commands/architecture.md rename to engineering/skills/architecture/SKILL.md index 93697386..f316e466 100644 --- a/engineering/commands/architecture.md +++ b/engineering/skills/architecture/SKILL.md @@ -1,11 +1,12 @@ --- -description: Create or evaluate an architecture decision record (ADR) +name: architecture +description: Create or evaluate an architecture decision record (ADR). Use when choosing between technologies (e.g., Kafka vs SQS), documenting a design decision with trade-offs and consequences, reviewing a system design proposal, or designing a new component from requirements and constraints. argument-hint: "" --- # /architecture -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). 
+> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Create an Architecture Decision Record (ADR) or evaluate a system design. diff --git a/engineering/skills/code-review/SKILL.md b/engineering/skills/code-review/SKILL.md index 58fb5939..8b98fe4c 100644 --- a/engineering/skills/code-review/SKILL.md +++ b/engineering/skills/code-review/SKILL.md @@ -1,11 +1,45 @@ --- name: code-review -description: Review code for bugs, security vulnerabilities, performance issues, and maintainability. Trigger with "review this code", "check this PR", "look at this diff", "is this code safe?", or when the user shares code and asks for feedback. +description: Review code changes for security, performance, and correctness. Trigger with a PR URL or diff, "review this before I merge", "is this code safe?", or when checking a change for N+1 queries, injection risks, missing edge cases, or error handling gaps. +argument-hint: "" --- -# Code Review +# /code-review -Structured code review covering security, performance, correctness, and maintainability. Works on diffs, PRs, files, or pasted code snippets. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Review code changes with a structured lens on security, performance, correctness, and maintainability. + +## Usage + +``` +/code-review +``` + +Review the provided code changes: @$1 + +If no specific file or URL is provided, ask what to review. 
+ +## How It Works + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ CODE REVIEW │ +├─────────────────────────────────────────────────────────────────┤ +│ STANDALONE (always works) │ +│ ✓ Paste a diff, PR URL, or point to files │ +│ ✓ Security audit (OWASP top 10, injection, auth) │ +│ ✓ Performance review (N+1, memory leaks, complexity) │ +│ ✓ Correctness (edge cases, error handling, race conditions) │ +│ ✓ Style (naming, structure, readability) │ +│ ✓ Actionable suggestions with code examples │ +├─────────────────────────────────────────────────────────────────┤ +│ SUPERCHARGED (when you connect your tools) │ +│ + Source control: Pull PR diff automatically │ +│ + Project tracker: Link findings to tickets │ +│ + Knowledge base: Check against team coding standards │ +└─────────────────────────────────────────────────────────────────┘ +``` ## Review Dimensions @@ -39,6 +73,46 @@ Structured code review covering security, performance, correctness, and maintain - Test coverage - Documentation for non-obvious logic -## Output Format +## Output + +```markdown +## Code Review: [PR title or file] + +### Summary +[1-2 sentence overview of the changes and overall quality] + +### Critical Issues +| # | File | Line | Issue | Severity | +|---|------|------|-------|----------| +| 1 | [file] | [line] | [description] | 🔴 Critical | + +### Suggestions +| # | File | Line | Suggestion | Category | +|---|------|------|------------|----------| +| 1 | [file] | [line] | [description] | Performance | + +### What Looks Good +- [Positive observations] + +### Verdict +[Approve / Request Changes / Needs Discussion] +``` + +## If Connectors Available + +If **~~source control** is connected: +- Pull the PR diff automatically from the URL +- Check CI status and test results + +If **~~project tracker** is connected: +- Link findings to related tickets +- Verify the PR addresses the stated requirements + +If **~~knowledge base** is connected: +- Check changes against 
team coding standards and style guides + +## Tips -Rate each dimension and provide specific, actionable findings with file and line references. Prioritize critical issues first. Always include positive observations alongside issues. +1. **Provide context** — "This is a hot path" or "This handles PII" helps me focus. +2. **Specify concerns** — "Focus on security" narrows the review. +3. **Include tests** — I'll check test coverage and quality too. diff --git a/engineering/commands/debug.md b/engineering/skills/debug/SKILL.md similarity index 93% rename from engineering/commands/debug.md rename to engineering/skills/debug/SKILL.md index 55315975..6763b0b1 100644 --- a/engineering/commands/debug.md +++ b/engineering/skills/debug/SKILL.md @@ -1,11 +1,12 @@ --- -description: Structured debugging session — reproduce, isolate, diagnose, and fix +name: debug +description: Structured debugging session — reproduce, isolate, diagnose, and fix. Trigger with an error message or stack trace, "this works in staging but not prod", "something broke after the deploy", or when behavior diverges from expected and the cause isn't obvious. argument-hint: "" --- # /debug -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Run a structured debugging session to find and fix issues systematically. diff --git a/engineering/commands/deploy-checklist.md b/engineering/skills/deploy-checklist/SKILL.md similarity index 85% rename from engineering/commands/deploy-checklist.md rename to engineering/skills/deploy-checklist/SKILL.md index 4b75dce9..91171a09 100644 --- a/engineering/commands/deploy-checklist.md +++ b/engineering/skills/deploy-checklist/SKILL.md @@ -1,11 +1,12 @@ --- -description: Pre-deployment verification checklist +name: deploy-checklist +description: Pre-deployment verification checklist. 
Use when about to ship a release, deploying a change with database migrations or feature flags, verifying CI status and approvals before going to production, or documenting rollback triggers ahead of time. argument-hint: "[service or release name]" --- # /deploy-checklist -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Generate a pre-deployment checklist to verify readiness before shipping. diff --git a/engineering/skills/incident-response/SKILL.md b/engineering/skills/incident-response/SKILL.md index 64e26b25..b483514f 100644 --- a/engineering/skills/incident-response/SKILL.md +++ b/engineering/skills/incident-response/SKILL.md @@ -1,11 +1,59 @@ --- name: incident-response -description: Triage and manage production incidents. Trigger with "we have an incident", "production is down", "something is broken", "there's an outage", "SEV1", or when the user describes a production issue needing immediate response. +description: Run an incident response workflow — triage, communicate, and write postmortem. Trigger with "we have an incident", "production is down", an alert that needs severity assessment, a status update mid-incident, or when writing a blameless postmortem after resolution. +argument-hint: "" --- -# Incident Response +# /incident-response -Guide incident response from detection through resolution and postmortem. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Manage an incident from detection through postmortem. 
+ +## Usage + +``` +/incident-response $ARGUMENTS +``` + +## Modes + +``` +/incident-response new [description] # Start a new incident +/incident-response update [status] # Post a status update +/incident-response postmortem # Generate postmortem from incident data +``` + +If no mode is specified, ask what phase the incident is in. + +## How It Works + +``` +┌─────────────────────────────────────────────────────────────────┐ +│ INCIDENT RESPONSE │ +├─────────────────────────────────────────────────────────────────┤ +│ Phase 1: TRIAGE │ +│ ✓ Assess severity (SEV1-4) │ +│ ✓ Identify affected systems and users │ +│ ✓ Assign roles (IC, comms, responders) │ +│ │ +│ Phase 2: COMMUNICATE │ +│ ✓ Draft internal status update │ +│ ✓ Draft customer communication (if needed) │ +│ ✓ Set up war room and cadence │ +│ │ +│ Phase 3: MITIGATE │ +│ ✓ Document mitigation steps taken │ +│ ✓ Track timeline of events │ +│ ✓ Confirm resolution │ +│ │ +│ Phase 4: POSTMORTEM │ +│ ✓ Blameless postmortem document │ +│ ✓ Timeline reconstruction │ +│ ✓ Root cause analysis (5 whys) │ +│ ✓ Action items with owners │ +└─────────────────────────────────────────────────────────────────┘ +``` ## Severity Classification @@ -16,18 +64,95 @@ Guide incident response from detection through resolution and postmortem. | SEV3 | Minor feature issue, some users affected | Within 1 hour | | SEV4 | Cosmetic or low-impact issue | Next business day | -## Response Framework +## Communication Guidance -1. **Triage**: Classify severity, identify scope, assign incident commander -2. **Communicate**: Status page, internal updates, customer comms if needed -3. **Mitigate**: Stop the bleeding first, root cause later -4. **Resolve**: Implement fix, verify, confirm resolution -5. **Postmortem**: Blameless review, 5 whys, action items +Provide clear, factual updates at regular cadence. Include: what's happening, who's affected, what we're doing, when the next update is. 
-## Communication Templates +## Output — Status Update -Provide clear, factual updates at regular cadence. Include: what's happening, who's affected, what we're doing, when the next update is. +```markdown +## Incident Update: [Title] +**Severity:** SEV[1-4] | **Status:** Investigating | Identified | Monitoring | Resolved +**Impact:** [Who/what is affected] +**Last Updated:** [Timestamp] + +### Current Status +[What we know now] + +### Actions Taken +- [Action 1] +- [Action 2] + +### Next Steps +- [What's happening next and ETA] + +### Timeline +| Time | Event | +|------|-------| +| [HH:MM] | [Event] | +``` + +## Output — Postmortem + +```markdown +## Postmortem: [Incident Title] +**Date:** [Date] | **Duration:** [X hours] | **Severity:** SEV[X] +**Authors:** [Names] | **Status:** Draft + +### Summary +[2-3 sentence plain-language summary] + +### Impact +- [Users affected] +- [Duration of impact] +- [Business impact if quantifiable] + +### Timeline +| Time (UTC) | Event | +|------------|-------| +| [HH:MM] | [Event] | + +### Root Cause +[Detailed explanation of what caused the incident] + +### 5 Whys +1. Why did [symptom]? → [Because...] +2. Why did [cause 1]? → [Because...] +3. Why did [cause 2]? → [Because...] +4. Why did [cause 3]? → [Because...] +5. Why did [cause 4]? 
→ [Root cause]
+
+### What Went Well
+- [Things that worked]
+
+### What Went Poorly
+- [Things that didn't work]
+
+### Action Items
+| Action | Owner | Priority | Due Date |
+|--------|-------|----------|----------|
+| [Action] | [Person] | P0/P1/P2 | [Date] |
+
+### Lessons Learned
+[Key takeaways for the team]
+```
+
+## If Connectors Available
+
+If **~~monitoring** is connected:
+- Pull alert details and metrics
+- Show graphs of affected metrics
+
+If **~~incident management** is connected:
+- Create or update incident in PagerDuty/Opsgenie
+- Page on-call responders
+
+If **~~chat** is connected:
+- Post status updates to incident channel
+- Create war room channel
-## Postmortem Format
+## Tips
-Blameless. Focus on systems and processes. Include timeline, root cause analysis (5 whys), what went well, what went poorly, and action items with owners and due dates.
+1. **Start writing immediately** — Don't wait for complete information. Update as you learn more.
+2. **Keep updates factual** — What we know, what we've done, what's next. No speculation.
+3. **Postmortems are blameless** — Focus on systems and processes, not individuals.
diff --git a/engineering/commands/standup.md b/engineering/skills/standup/SKILL.md
similarity index 89%
rename from engineering/commands/standup.md
rename to engineering/skills/standup/SKILL.md
index 72721be7..22fb33ef 100644
--- a/engineering/commands/standup.md
+++ b/engineering/skills/standup/SKILL.md
@@ -1,11 +1,12 @@
 ---
-description: Generate a standup update from recent activity
+name: standup
+description: Generate a standup update from recent activity. Use when preparing for daily standup, summarizing yesterday's commits, PRs, and ticket moves, formatting work into yesterday/today/blockers, or structuring a few rough notes into a shareable update. 
argument-hint: "[yesterday | today | blockers]" --- # /standup -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Generate a standup update by pulling together recent activity across your tools. diff --git a/enterprise-search/.claude-plugin/plugin.json b/enterprise-search/.claude-plugin/plugin.json index 317a2d92..0252dc9c 100644 --- a/enterprise-search/.claude-plugin/plugin.json +++ b/enterprise-search/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "enterprise-search", - "version": "1.1.0", + "version": "1.2.0", "description": "Search across all of your company's tools in one place. Find anything across email, chat, documents, and wikis without switching between apps.", "author": { "name": "Anthropic" diff --git a/enterprise-search/commands/digest.md b/enterprise-search/skills/digest/SKILL.md similarity index 95% rename from enterprise-search/commands/digest.md rename to enterprise-search/skills/digest/SKILL.md index 28012ec3..c13f52b0 100644 --- a/enterprise-search/commands/digest.md +++ b/enterprise-search/skills/digest/SKILL.md @@ -1,11 +1,12 @@ --- -description: Generate a daily or weekly digest of activity across all connected sources +name: digest +description: Generate a daily or weekly digest of activity across all connected sources. Use when catching up after time away, starting the day and wanting a summary of mentions and action items, or reviewing a week's decisions and document updates grouped by project. argument-hint: "[--daily | --weekly | --since ]" --- # Digest Command -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). 
Scan recent activity across all connected sources and generate a structured digest highlighting what matters. diff --git a/enterprise-search/skills/knowledge-synthesis/SKILL.md b/enterprise-search/skills/knowledge-synthesis/SKILL.md index b70510a2..a5e816fb 100644 --- a/enterprise-search/skills/knowledge-synthesis/SKILL.md +++ b/enterprise-search/skills/knowledge-synthesis/SKILL.md @@ -1,6 +1,7 @@ --- name: knowledge-synthesis description: Combines search results from multiple sources into coherent, deduplicated answers with source attribution. Handles confidence scoring based on freshness and authority, and summarizes large result sets effectively. +user-invocable: false --- # Knowledge Synthesis diff --git a/enterprise-search/skills/search-strategy/SKILL.md b/enterprise-search/skills/search-strategy/SKILL.md index 902a5345..d42cddb9 100644 --- a/enterprise-search/skills/search-strategy/SKILL.md +++ b/enterprise-search/skills/search-strategy/SKILL.md @@ -1,6 +1,7 @@ --- name: search-strategy description: Query decomposition and multi-source search orchestration. Breaks natural language questions into targeted searches per source, translates queries into source-specific syntax, ranks results by relevance, and handles ambiguity and fallback strategies. +user-invocable: false --- # Search Strategy diff --git a/enterprise-search/commands/search.md b/enterprise-search/skills/search/SKILL.md similarity index 94% rename from enterprise-search/commands/search.md rename to enterprise-search/skills/search/SKILL.md index 3d3d2bca..5a0dab22 100644 --- a/enterprise-search/commands/search.md +++ b/enterprise-search/skills/search/SKILL.md @@ -1,11 +1,12 @@ --- -description: Search across all connected sources in one query +name: search +description: Search across all connected sources in one query. 
Trigger with "find that doc about...", "what did we decide on...", "where was the conversation about...", or when looking for a decision, document, or discussion that could live in chat, email, cloud storage, or a project tracker. argument-hint: "" --- # Search Command -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Search across all connected MCP sources in a single query. Decompose the user's question, run parallel searches, and synthesize results. diff --git a/enterprise-search/skills/source-management/SKILL.md b/enterprise-search/skills/source-management/SKILL.md index 30968574..8df82355 100644 --- a/enterprise-search/skills/source-management/SKILL.md +++ b/enterprise-search/skills/source-management/SKILL.md @@ -1,6 +1,7 @@ --- name: source-management description: Manages connected MCP sources for enterprise search. Detects available sources, guides users to connect new ones, handles source priority ordering, and manages rate limiting awareness. +user-invocable: false --- # Source Management diff --git a/finance/.claude-plugin/plugin.json b/finance/.claude-plugin/plugin.json index 8fb8a5a5..fe42b230 100644 --- a/finance/.claude-plugin/plugin.json +++ b/finance/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "finance", - "version": "1.1.0", + "version": "1.2.0", "description": "Streamline finance and accounting workflows, from journal entries and reconciliation to financial statements and variance analysis. 
Speed up audit prep, month-end close, and keeping your books clean.", "author": { "name": "Anthropic" diff --git a/finance/commands/income-statement.md b/finance/commands/income-statement.md deleted file mode 100644 index 4f1c4c4b..00000000 --- a/finance/commands/income-statement.md +++ /dev/null @@ -1,138 +0,0 @@ ---- -description: Generate an income statement with period-over-period comparison and variance analysis -argument-hint: " " ---- - -# Income Statement Generation - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -**Important**: This command assists with financial statement workflows but does not provide financial advice. All statements should be reviewed by qualified financial professionals before use in reporting or filings. - -Generate an income statement with period-over-period comparison and variance analysis. Highlight material variances for investigation. - -## Usage - -``` -/income-statement -``` - -### Arguments - -- `period-type` — The reporting period type: - - `monthly` — Single month P&L with prior month and prior year month comparison - - `quarterly` — Quarter P&L with prior quarter and prior year quarter comparison - - `annual` — Full year P&L with prior year comparison - - `ytd` — Year-to-date P&L with prior year YTD comparison -- `period` — The period to report (e.g., `2024-12`, `2024-Q4`, `2024`) - -## Workflow - -### 1. Gather Financial Data - -If ~~erp or ~~data warehouse is connected: -- Pull trial balance or income statement data for the specified period -- Pull comparison period data (prior period, prior year, budget/forecast) -- Pull account hierarchy and groupings for presentation - -If no data source is connected: -> Connect ~~erp or ~~data warehouse to pull financial data automatically. You can also paste trial balance data, upload a spreadsheet, or provide income statement data for analysis. 
- -Prompt the user to provide: -- Current period revenue and expense data (by account or category) -- Comparison period data (prior period, prior year, and/or budget) -- Any known adjustments or reclassifications - -### 2. Generate Income Statement - -Present in standard multi-column format: - -``` -INCOME STATEMENT -Period: [Period description] -(in thousands, unless otherwise noted) - - Current Prior Variance Variance Budget Budget - Period Period ($) (%) Amount Var ($) - -------- -------- -------- -------- -------- -------- -REVENUE - Product revenue $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - Service revenue $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - Other revenue $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - -------- -------- -------- -------- -------- -TOTAL REVENUE $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - -COST OF REVENUE - [Cost items] $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - -------- -------- -------- -------- -------- -GROSS PROFIT $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - Gross Margin XX.X% XX.X% - -OPERATING EXPENSES - Research & development $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - Sales & marketing $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - General & administrative $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - -------- -------- -------- -------- -------- -TOTAL OPERATING EXPENSES $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - -OPERATING INCOME (LOSS) $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - Operating Margin XX.X% XX.X% - -OTHER INCOME (EXPENSE) - Interest income $XX,XXX $XX,XXX $X,XXX X.X% - Interest expense ($XX,XXX) ($XX,XXX) $X,XXX X.X% - Other, net $XX,XXX $XX,XXX $X,XXX X.X% - -------- -------- -------- -TOTAL OTHER INCOME (EXPENSE) $XX,XXX $XX,XXX $X,XXX X.X% - -INCOME BEFORE TAXES $XX,XXX $XX,XXX $X,XXX X.X% - Income tax expense $XX,XXX $XX,XXX $X,XXX X.X% - -------- -------- -------- - -NET INCOME (LOSS) $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX - Net Margin XX.X% XX.X% -``` - -### 3. 
Variance Analysis - -For each line item, calculate and flag material variances: - -**Materiality thresholds** (flag if either condition met): -- Dollar variance exceeds a defined threshold (e.g., $50K, $100K — ask user for their threshold) -- Percentage variance exceeds 10% (or user-defined threshold) - -For flagged variances, provide: -- Direction and magnitude of the variance -- Possible drivers (if data is available to decompose) -- Questions to investigate -- Whether the variance is favorable or unfavorable - -### 4. Key Metrics Summary - -``` -KEY METRICS - Current Prior Change -Revenue growth (%) X.X% -Gross margin (%) XX.X% XX.X% X.X pp -Operating margin (%) XX.X% XX.X% X.X pp -Net margin (%) XX.X% XX.X% X.X pp -OpEx as % of revenue XX.X% XX.X% X.X pp -Effective tax rate (%) XX.X% XX.X% X.X pp -``` - -### 5. Material Variance Summary - -List all material variances requiring investigation: - -| Line Item | Variance ($) | Variance (%) | Direction | Preliminary Driver | Action | -|-----------|-------------|-------------|-----------|-------------------|--------| -| [Item] | $X,XXX | X.X% | Unfav. | [If known] | Investigate | - -### 6. Output - -Provide: -1. Formatted income statement with comparisons -2. Key metrics summary -3. Material variance listing with investigation flags -4. Suggested follow-up questions for unexplained variances -5. Offer to drill into any specific variance with `/flux` diff --git a/finance/commands/reconciliation.md b/finance/commands/reconciliation.md deleted file mode 100644 index 319725d9..00000000 --- a/finance/commands/reconciliation.md +++ /dev/null @@ -1,147 +0,0 @@ ---- -description: Reconcile GL balances to subledger, bank, or third-party balances -argument-hint: " [period]" ---- - -# Account Reconciliation - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). 
- -**Important**: This command assists with reconciliation workflows but does not provide financial advice. All reconciliations should be reviewed by qualified financial professionals before sign-off. - -Reconcile GL account balances to subledger, bank, or third-party balances. Identify and categorize reconciling items and generate a reconciliation workpaper. - -## Usage - -``` -/recon -``` - -### Arguments - -- `account` — The account or account category to reconcile. Examples: - - `cash` or `bank` — Bank reconciliation (GL cash to bank statement) - - `accounts-receivable` or `ar` — AR subledger reconciliation - - `accounts-payable` or `ap` — AP subledger reconciliation - - `fixed-assets` or `fa` — Fixed asset subledger reconciliation - - `intercompany` or `ic` — Intercompany balance reconciliation - - `prepaid` — Prepaid expense schedule reconciliation - - `accrued-liabilities` — Accrued liabilities detail reconciliation - - Any specific GL account code (e.g., `1010`, `2100`) -- `period` — The accounting period end date (e.g., `2024-12`, `2024-12-31`) - -## Workflow - -### 1. Gather Both Sides of the Reconciliation - -If ~~erp or ~~data warehouse is connected: -- Pull the GL balance for the specified account(s) as of period end -- Pull the subledger, bank statement, or third-party balance for comparison -- Pull prior period reconciliation (if available) for outstanding item carryforward - -If no data source is connected: -> Connect ~~erp or ~~data warehouse to pull account balances automatically. To reconcile manually, provide: -> 1. **GL side:** The general ledger balance for the account as of period end -> 2. **Other side:** The subledger balance, bank statement balance, or third-party confirmation balance -> 3. **Prior period outstanding items** (optional): Any reconciling items from the prior period reconciliation - -### 2. 
Compare Balances - -Calculate the difference between the two sides: - -``` -GL Balance: $XX,XXX.XX -Subledger/Bank/Other Balance: $XX,XXX.XX - ---------- -Difference: $XX,XXX.XX -``` - -### 3. Identify Reconciling Items - -Analyze the difference and categorize reconciling items: - -**Timing Differences** (items that will clear in subsequent periods): -- Outstanding checks / payments issued but not yet cleared -- Deposits in transit / receipts recorded but not yet credited -- Invoices posted in one system but pending in the other -- Accruals awaiting reversal - -**Permanent Differences** (items requiring adjustment): -- Errors in recording (wrong amount, wrong account, duplicate entries) -- Missing entries (transactions in one system but not the other) -- Bank fees or charges not yet recorded -- Foreign currency translation differences - -**Prior Period Items** (carryforward from prior reconciliation): -- Items from prior period that have now cleared (remove from reconciliation) -- Items from prior period still outstanding (carry forward with aging) - -### 4. Generate Reconciliation Workpaper - -``` -ACCOUNT RECONCILIATION -Account: [Account code] — [Account name] -Period End: [Date] -Prepared by: [User] -Date Prepared: [Today] - -RECONCILIATION SUMMARY -======================= - -Balance per General Ledger: $XX,XXX.XX - -Add: Reconciling items increasing GL - [Item description] $X,XXX.XX - [Item description] $X,XXX.XX - --------- - Subtotal additions: $X,XXX.XX - -Less: Reconciling items decreasing GL - [Item description] ($X,XXX.XX) - [Item description] ($X,XXX.XX) - --------- - Subtotal deductions: ($X,XXX.XX) - -Adjusted GL Balance: $XX,XXX.XX - -Balance per [Subledger/Bank/Other]: $XX,XXX.XX - -Add: Reconciling items - [Item description] $X,XXX.XX - -Less: Reconciling items - [Item description] ($X,XXX.XX) - -Adjusted [Other] Balance: $XX,XXX.XX - -DIFFERENCE: $0.00 -``` - -### 5. 
Reconciling Items Detail - -Present each reconciling item with: - -| # | Description | Amount | Category | Age (Days) | Status | Action Required | -|---|-------------|--------|----------|------------|--------|-----------------| -| 1 | [Detail] | $X,XXX | Timing | 5 | Expected to clear | Monitor | -| 2 | [Detail] | $X,XXX | Error | N/A | Requires correction | Post adjusting JE | - -### 6. Review and Escalation - -Flag items that require attention: - -- **Aged items:** Reconciling items outstanding more than 30/60/90 days -- **Large items:** Individual items exceeding materiality thresholds -- **Growing balances:** Reconciling item totals increasing period over period -- **Unresolved prior period items:** Items carried forward without resolution -- **Unexplained differences:** Amounts that cannot be tied to specific transactions - -### 7. Output - -Provide: -1. The formatted reconciliation workpaper -2. List of reconciling items with categorization and aging -3. Required adjusting entries (if any permanent differences identified) -4. Action items for items requiring follow-up -5. Comparison to prior period reconciliation (if available) -6. Sign-off section for preparer and reviewer diff --git a/finance/commands/variance-analysis.md b/finance/commands/variance-analysis.md deleted file mode 100644 index a48d9f78..00000000 --- a/finance/commands/variance-analysis.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -description: Decompose variances into drivers with narrative explanations and waterfall analysis -argument-hint: " vs " ---- - -# Variance / Flux Analysis - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -**Important**: This command assists with variance analysis workflows but does not provide financial advice. All analyses should be reviewed by qualified financial professionals before use in reporting. 
- -Decompose variances into underlying drivers, provide narrative explanations for significant variances, and generate waterfall analysis. - -## Usage - -``` -/flux -``` - -### Arguments - -- `area` — The area to analyze: - - `revenue` — Revenue variance by stream, product, geography, customer segment - - `opex` — Operating expense variance by category, department, cost center - - `capex` — Capital expenditure variance vs budget by project and asset class - - `headcount` — Headcount and compensation variance by department and role level - - `cogs` or `cost-of-revenue` — Cost of revenue variance by component - - `gross-margin` — Gross margin analysis with mix and rate effects - - Any specific GL account or account group -- `period-comparison` — The periods to compare. Formats: - - `2024-12 vs 2024-11` — Month over month - - `2024-12 vs 2023-12` — Year over year - - `2024-Q4 vs 2024-Q3` — Quarter over quarter - - `2024-12 vs budget` — Actual vs budget - - `2024-12 vs forecast` — Actual vs forecast - - `2024-Q4 vs 2024-Q3 vs 2023-Q4` — Three-way comparison - -## Workflow - -### 1. Gather Data - -If ~~erp or ~~data warehouse is connected: -- Pull actuals for both comparison periods at the detail level -- Pull budget/forecast data if comparing to plan -- Pull supporting operational metrics (headcount, volumes, rates) -- Pull prior variance analyses for context - -If no data source is connected: -> Connect ~~erp or ~~data warehouse to pull financial data automatically. To analyze manually, provide: -> 1. Actual data for both comparison periods (at account or line-item detail) -> 2. Budget/forecast data (if comparing to plan) -> 3. Any operational metrics that drive the financial results (headcount, volumes, pricing, etc.) - -### 2. Calculate Top-Level Variance - -``` -VARIANCE SUMMARY: [Area] — [Period 1] vs [Period 2] - - Period 1 Period 2 Variance ($) Variance (%) - -------- -------- ------------ ------------ -Total [Area] $XX,XXX $XX,XXX $X,XXX X.X% -``` - -### 3. 
Decompose Variance by Driver - -Break down the total variance into constituent drivers. Use the appropriate decomposition method for the area: - -**Revenue Decomposition:** -- **Volume effect:** Change in units/customers/transactions at prior period pricing -- **Price/rate effect:** Change in pricing/ASP applied to current period volume -- **Mix effect:** Shift between products/segments at different margin levels -- **New vs existing:** Revenue from new customers/products vs base business -- **Currency effect:** FX impact on international revenue (if applicable) - -**Operating Expense Decomposition:** -- **Headcount-driven:** Salary and benefits changes from headcount additions/reductions -- **Compensation changes:** Merit increases, promotions, bonus accruals -- **Volume-driven:** Expenses that scale with business activity (hosting, commissions, travel) -- **New programs/investments:** Incremental spend on new initiatives -- **One-time items:** Non-recurring expenses (severance, legal settlements, write-offs) -- **Timing:** Expenses shifted between periods (prepaid amortization changes, contract timing) - -**CapEx Decomposition:** -- **Project-level:** Variance by capital project vs approved budget -- **Timing:** Projects ahead of or behind schedule -- **Scope changes:** Approved scope expansions or reductions -- **Cost overruns:** Unit cost increases vs plan - -**Headcount Decomposition:** -- **Hiring pace:** Actual hires vs plan by department and level -- **Attrition:** Unplanned departures and backfill timing -- **Compensation mix:** Salary, bonus, equity, benefits variance -- **Contractor/temp:** Supplemental workforce changes - -### 4. 
Waterfall Analysis - -Generate a text-based waterfall showing how each driver contributes to the total variance: - -``` -WATERFALL: [Area] — [Period 1] vs [Period 2] - -[Period 2 Base] $XX,XXX - | - |--[+] [Driver 1 description] +$X,XXX - |--[+] [Driver 2 description] +$X,XXX - |--[-] [Driver 3 description] -$X,XXX - |--[+] [Driver 4 description] +$X,XXX - |--[-] [Driver 5 description] -$X,XXX - | -[Period 1 Actual] $XX,XXX - -Variance Reconciliation: - Driver 1: +$X,XXX (XX% of total variance) - Driver 2: +$X,XXX (XX% of total variance) - Driver 3: -$X,XXX (XX% of total variance) - Driver 4: +$X,XXX (XX% of total variance) - Driver 5: -$X,XXX (XX% of total variance) - Unexplained: $X,XXX (XX% of total variance) - -------- - Total: $X,XXX (100%) -``` - -### 5. Narrative Explanations - -For each significant driver, generate a narrative explanation: - -> **[Driver name]** — [Favorable/Unfavorable] variance of $X,XXX (X.X%) -> -> [2-3 sentence explanation of what caused this variance, referencing specific operational factors, business events, or decisions. Include quantification where possible.] -> -> *Outlook:* [Whether this is expected to continue, reverse, or change in future periods] - -### 6. Identify Unexplained Variances - -If the decomposition does not fully explain the total variance, flag the residual: - -> **Unexplained variance:** $X,XXX (X.X% of total) -> -> Possible causes to investigate: -> - [Suggested area 1] -> - [Suggested area 2] -> - [Suggested area 3] - -Ask the user for additional context on unexplained variances: -- "Can you provide context on [specific unexplained item]?" -- "Were there any business events in [period] that would explain [variance area]?" -- "Is the [specific driver] variance expected or a surprise?" - -### 7. Output - -Provide: -1. Top-level variance summary -2. Detailed variance decomposition by driver -3. Waterfall analysis (text format, or suggest chart if spreadsheet tool is connected) -4. 
Narrative explanations for each significant driver -5. Unexplained variance flag with investigation suggestions -6. Trend context (is this variance new, growing, or consistent with recent periods?) -7. Suggested actions or follow-ups diff --git a/finance/skills/audit-support/SKILL.md b/finance/skills/audit-support/SKILL.md index a5ba6262..6480bcdd 100644 --- a/finance/skills/audit-support/SKILL.md +++ b/finance/skills/audit-support/SKILL.md @@ -1,6 +1,7 @@ --- name: audit-support description: Support SOX 404 compliance with control testing methodology, sample selection, and documentation standards. Use when generating testing workpapers, selecting audit samples, classifying control deficiencies, or preparing for internal or external audits. +user-invocable: false --- # Audit Support diff --git a/finance/skills/close-management/SKILL.md b/finance/skills/close-management/SKILL.md index 7edf7e26..f302f6e3 100644 --- a/finance/skills/close-management/SKILL.md +++ b/finance/skills/close-management/SKILL.md @@ -1,6 +1,7 @@ --- name: close-management description: Manage the month-end close process with task sequencing, dependencies, and status tracking. Use when planning the close calendar, tracking close progress, identifying blockers, or sequencing close activities by day. +user-invocable: false --- # Close Management diff --git a/finance/skills/financial-statements/SKILL.md b/finance/skills/financial-statements/SKILL.md index 452dcd98..5a9a8fc8 100644 --- a/finance/skills/financial-statements/SKILL.md +++ b/finance/skills/financial-statements/SKILL.md @@ -1,56 +1,182 @@ --- name: financial-statements -description: Generate income statements, balance sheets, and cash flow statements with GAAP presentation and period-over-period comparison. Use when preparing financial statements, running flux analysis, or creating P&L reports with variance commentary. 
+description: Generate financial statements (income statement, balance sheet, cash flow) with period-over-period comparison and variance analysis. Use when preparing a monthly or quarterly P&L, closing the books and need to flag material variances, comparing actuals to budget, building a financial summary for leadership review, or looking up GAAP presentation requirements and period-end adjustments.
+argument-hint: "<period-type> <period>"
---

-# Financial Statements
+# /financial-statements

-**Important**: This skill assists with financial statement workflows but does not provide financial advice. All statements should be reviewed by qualified financial professionals before use in reporting or filings.
+> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md).

-Formats, GAAP presentation requirements, common adjustments, and flux analysis methodology for income statements, balance sheets, and cash flow statements.
+**Important**: This command assists with financial statement workflows but does not provide financial advice. All statements should be reviewed by qualified financial professionals before use in reporting or filings.

-## Income Statement
+Generate financial statements with period-over-period comparison and variance analysis. The workflow below walks through income statement generation; balance sheet and cash flow statement reference formats, GAAP presentation requirements (ASC 220/210/230), and common period-end adjustments are included as supporting reference material.
-### Standard Format (Classification of Expenses by Function) +## Usage ``` -Revenue - Product revenue - Service revenue - Other revenue -Total Revenue - -Cost of Revenue - Product costs - Service costs -Total Cost of Revenue - -Gross Profit - -Operating Expenses - Research and development - Sales and marketing - General and administrative -Total Operating Expenses - -Operating Income (Loss) - -Other Income (Expense) - Interest income - Interest expense - Other income (expense), net -Total Other Income (Expense) - -Income (Loss) Before Income Taxes - Income tax expense (benefit) -Net Income (Loss) - -Earnings Per Share (if applicable) - Basic - Diluted +/financial-statements ``` -### GAAP Presentation Requirements (ASC 220 / IAS 1) +### Arguments + +- `period-type` — The reporting period type: + - `monthly` — Single month P&L with prior month and prior year month comparison + - `quarterly` — Quarter P&L with prior quarter and prior year quarter comparison + - `annual` — Full year P&L with prior year comparison + - `ytd` — Year-to-date P&L with prior year YTD comparison +- `period` — The period to report (e.g., `2024-12`, `2024-Q4`, `2024`) + +## Workflow + +### 1. Gather Financial Data + +If ~~erp or ~~data warehouse is connected: +- Pull trial balance or income statement data for the specified period +- Pull comparison period data (prior period, prior year, budget/forecast) +- Pull account hierarchy and groupings for presentation + +If no data source is connected: +> Connect ~~erp or ~~data warehouse to pull financial data automatically. You can also paste trial balance data, upload a spreadsheet, or provide income statement data for analysis. + +Prompt the user to provide: +- Current period revenue and expense data (by account or category) +- Comparison period data (prior period, prior year, and/or budget) +- Any known adjustments or reclassifications + +### 2. 
Generate Income Statement + +Present in standard multi-column format: + +``` +INCOME STATEMENT +Period: [Period description] +(in thousands, unless otherwise noted) + + Current Prior Variance Variance Budget Budget + Period Period ($) (%) Amount Var ($) + -------- -------- -------- -------- -------- -------- +REVENUE + Product revenue $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + Service revenue $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + Other revenue $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + -------- -------- -------- -------- -------- +TOTAL REVENUE $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + +COST OF REVENUE + [Cost items] $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + -------- -------- -------- -------- -------- +GROSS PROFIT $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + Gross Margin XX.X% XX.X% + +OPERATING EXPENSES + Research & development $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + Sales & marketing $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + General & administrative $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + -------- -------- -------- -------- -------- +TOTAL OPERATING EXPENSES $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + +OPERATING INCOME (LOSS) $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + Operating Margin XX.X% XX.X% + +OTHER INCOME (EXPENSE) + Interest income $XX,XXX $XX,XXX $X,XXX X.X% + Interest expense ($XX,XXX) ($XX,XXX) $X,XXX X.X% + Other, net $XX,XXX $XX,XXX $X,XXX X.X% + -------- -------- -------- +TOTAL OTHER INCOME (EXPENSE) $XX,XXX $XX,XXX $X,XXX X.X% + +INCOME BEFORE TAXES $XX,XXX $XX,XXX $X,XXX X.X% + Income tax expense $XX,XXX $XX,XXX $X,XXX X.X% + -------- -------- -------- + +NET INCOME (LOSS) $XX,XXX $XX,XXX $X,XXX X.X% $XX,XXX $X,XXX + Net Margin XX.X% XX.X% +``` + +### 3. Variance Analysis + +For each line item, calculate and flag material variances. 
+ +#### Variance Calculation + +For each line item, calculate: +- **Dollar variance:** Current period - Prior period (or current period - budget) +- **Percentage variance:** (Current - Prior) / |Prior| x 100 +- **Basis point change:** For margins and ratios, express change in basis points (1 bp = 0.01%) + +#### Materiality Thresholds + +Define what constitutes a "material" variance requiring investigation. Common approaches: + +- **Fixed dollar threshold:** Variances exceeding a set dollar amount (e.g., $50K, $100K) +- **Percentage threshold:** Variances exceeding a set percentage (e.g., 10%, 15%) +- **Combined:** Either the dollar OR percentage threshold is exceeded +- **Scaled:** Different thresholds for different line items based on their size and volatility + +*Example thresholds (adjust for your organization):* + +| Line Item Size | Dollar Threshold | Percentage Threshold | +|---------------|-----------------|---------------------| +| > $10M | $500K | 5% | +| $1M - $10M | $100K | 10% | +| < $1M | $50K | 15% | + +#### Variance Decomposition + +Break down total variance into component drivers: + +- **Volume/quantity effect:** Change in volume at prior period rates +- **Rate/price effect:** Change in rate/price at current period volume +- **Mix effect:** Shift in composition between items with different rates/margins +- **New/discontinued items:** Items present in one period but not the other +- **One-time/non-recurring items:** Items that are not expected to repeat +- **Timing effect:** Items shifting between periods (not a true change in run rate) +- **Currency effect:** Impact of FX rate changes on translated results + +#### Investigation and Narrative + +For each material variance: +1. Quantify the variance ($ and %) +2. Identify whether favorable or unfavorable +3. Decompose into drivers using the categories above +4. Provide a narrative explanation of the business reason +5. Assess whether the variance is temporary or represents a trend change +6. 
Note any actions required (further investigation, forecast update, process change) + +### 4. Key Metrics Summary + +``` +KEY METRICS + Current Prior Change +Revenue growth (%) X.X% +Gross margin (%) XX.X% XX.X% X.X pp +Operating margin (%) XX.X% XX.X% X.X pp +Net margin (%) XX.X% XX.X% X.X pp +OpEx as % of revenue XX.X% XX.X% X.X pp +Effective tax rate (%) XX.X% XX.X% X.X pp +``` + +### 5. Material Variance Summary + +List all material variances requiring investigation: + +| Line Item | Variance ($) | Variance (%) | Direction | Preliminary Driver | Action | +|-----------|-------------|-------------|-----------|-------------------|--------| +| [Item] | $X,XXX | X.X% | Unfav. | [If known] | Investigate | + +### 6. Output + +Provide: +1. Formatted income statement with comparisons +2. Key metrics summary +3. Material variance listing with investigation flags +4. Suggested follow-up questions for unexplained variances +5. Offer to drill into any specific variance with `/flux` + +## GAAP Presentation Requirements + +### Income Statement (ASC 220 / IAS 1) - Present all items of income and expense recognized in a period - Classify expenses either by nature (materials, labor, depreciation) or by function (COGS, R&D, S&M, G&A) — function is more common for US companies @@ -60,16 +186,32 @@ Earnings Per Share (if applicable) - Extraordinary items are prohibited under both US GAAP and IFRS - Discontinued operations presented separately, net of tax -### Common Presentation Considerations +**Common presentation considerations:** - **Revenue disaggregation:** ASC 606 requires disaggregation of revenue into categories that depict how the nature, amount, timing, and uncertainty of revenue are affected by economic factors - **Stock-based compensation:** Classify within the functional expense categories (R&D, S&M, G&A) with total SBC disclosed in notes - **Restructuring charges:** Present separately if material, or include in operating expenses with note disclosure - **Non-GAAP 
adjustments:** If presenting non-GAAP measures (common in earnings releases), clearly label and reconcile to GAAP -## Balance Sheet +### Balance Sheet (ASC 210 / IAS 1) -### Standard Format (Classified Balance Sheet) +- Distinguish between current and non-current assets and liabilities +- Current: expected to be realized, consumed, or settled within 12 months (or the operating cycle if longer) +- Present assets in order of liquidity (most liquid first) — standard US practice +- Accounts receivable shown net of allowance for credit losses (ASC 326) +- Property and equipment shown net of accumulated depreciation +- Goodwill is not amortized — tested for impairment annually (ASC 350) +- Leases: recognize right-of-use assets and lease liabilities for operating and finance leases (ASC 842) + +### Cash Flow Statement (ASC 230 / IAS 7) + +- Indirect method is most common (start with net income, adjust for non-cash items) +- Direct method is permitted but rarely used (requires supplemental indirect reconciliation) +- Interest paid and income taxes paid must be disclosed (either on the face or in notes) +- Non-cash investing and financing activities disclosed separately (e.g., assets acquired under leases, stock issued for acquisitions) +- Cash equivalents: short-term, highly liquid investments with original maturities of 3 months or less + +## Balance Sheet Reference Format ``` ASSETS @@ -122,19 +264,7 @@ Total Stockholders' Equity TOTAL LIABILITIES AND STOCKHOLDERS' EQUITY ``` -### GAAP Presentation Requirements (ASC 210 / IAS 1) - -- Distinguish between current and non-current assets and liabilities -- Current: expected to be realized, consumed, or settled within 12 months (or the operating cycle if longer) -- Present assets in order of liquidity (most liquid first) — standard US practice -- Accounts receivable shown net of allowance for credit losses (ASC 326) -- Property and equipment shown net of accumulated depreciation -- Goodwill is not amortized — tested for 
impairment annually (ASC 350) -- Leases: recognize right-of-use assets and lease liabilities for operating and finance leases (ASC 842) - -## Cash Flow Statement - -### Standard Format (Indirect Method) +## Cash Flow Statement Reference Format (Indirect Method) ``` CASH FLOWS FROM OPERATING ACTIVITIES @@ -182,14 +312,6 @@ Cash and cash equivalents, beginning of period Cash and cash equivalents, end of period ``` -### GAAP Presentation Requirements (ASC 230 / IAS 7) - -- Indirect method is most common (start with net income, adjust for non-cash items) -- Direct method is permitted but rarely used (requires supplemental indirect reconciliation) -- Interest paid and income taxes paid must be disclosed (either on the face or in notes) -- Non-cash investing and financing activities disclosed separately (e.g., assets acquired under leases, stock issued for acquisitions) -- Cash equivalents: short-term, highly liquid investments with original maturities of 3 months or less - ## Common Adjustments and Reclassifications ### Period-End Adjustments @@ -211,51 +333,3 @@ Cash and cash equivalents, end of period 4. **Discontinued operations:** Reclassify results of discontinued operations to a separate line item 5. **Equity method adjustments:** Record share of investee income/loss for equity method investments 6. **Segment reclassifications:** Ensure transactions are properly classified by operating segment - -## Flux Analysis Methodology - -### Variance Calculation - -For each line item, calculate: -- **Dollar variance:** Current period - Prior period (or current period - budget) -- **Percentage variance:** (Current - Prior) / |Prior| x 100 -- **Basis point change:** For margins and ratios, express change in basis points (1 bp = 0.01%) - -### Materiality Thresholds - -Define what constitutes a "material" variance requiring investigation. 
Common approaches: - -- **Fixed dollar threshold:** Variances exceeding a set dollar amount (e.g., $50K, $100K) -- **Percentage threshold:** Variances exceeding a set percentage (e.g., 10%, 15%) -- **Combined:** Either the dollar OR percentage threshold is exceeded -- **Scaled:** Different thresholds for different line items based on their size and volatility - -*Example thresholds (adjust for your organization):* - -| Line Item Size | Dollar Threshold | Percentage Threshold | -|---------------|-----------------|---------------------| -| > $10M | $500K | 5% | -| $1M - $10M | $100K | 10% | -| < $1M | $50K | 15% | - -### Variance Decomposition - -Break down total variance into component drivers: - -- **Volume/quantity effect:** Change in volume at prior period rates -- **Rate/price effect:** Change in rate/price at current period volume -- **Mix effect:** Shift in composition between items with different rates/margins -- **New/discontinued items:** Items present in one period but not the other -- **One-time/non-recurring items:** Items that are not expected to repeat -- **Timing effect:** Items shifting between periods (not a true change in run rate) -- **Currency effect:** Impact of FX rate changes on translated results - -### Investigation and Narrative - -For each material variance: -1. Quantify the variance ($ and %) -2. Identify whether favorable or unfavorable -3. Decompose into drivers using the categories above -4. Provide a narrative explanation of the business reason -5. Assess whether the variance is temporary or represents a trend change -6. 
Note any actions required (further investigation, forecast update, process change) diff --git a/finance/skills/journal-entry-prep/SKILL.md b/finance/skills/journal-entry-prep/SKILL.md index 266c6a3f..27f443c7 100644 --- a/finance/skills/journal-entry-prep/SKILL.md +++ b/finance/skills/journal-entry-prep/SKILL.md @@ -1,6 +1,7 @@ --- name: journal-entry-prep description: Prepare journal entries with proper debits, credits, and supporting documentation for month-end close. Use when booking accruals, prepaid amortization, fixed asset depreciation, payroll entries, revenue recognition, or any manual journal entry. +user-invocable: false --- # Journal Entry Preparation diff --git a/finance/commands/journal-entry.md b/finance/skills/journal-entry/SKILL.md similarity index 94% rename from finance/commands/journal-entry.md rename to finance/skills/journal-entry/SKILL.md index 99c73512..c152f027 100644 --- a/finance/commands/journal-entry.md +++ b/finance/skills/journal-entry/SKILL.md @@ -1,11 +1,12 @@ --- -description: Prepare journal entries with proper debits, credits, and supporting detail +name: journal-entry +description: Prepare journal entries with proper debits, credits, and supporting detail. Use when booking month-end accruals (AP, payroll, prepaid), recording depreciation or amortization, posting revenue recognition or deferred revenue adjustments, or documenting an entry for audit review. argument-hint: " [period]" --- # Journal Entry Preparation -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). **Important**: This command assists with journal entry workflows but does not provide financial advice. All entries should be reviewed by qualified financial professionals before posting. 
diff --git a/finance/skills/reconciliation/SKILL.md b/finance/skills/reconciliation/SKILL.md index 8e324db3..5f212646 100644 --- a/finance/skills/reconciliation/SKILL.md +++ b/finance/skills/reconciliation/SKILL.md @@ -1,6 +1,7 @@ --- name: reconciliation description: Reconcile accounts by comparing GL balances to subledgers, bank statements, or third-party data. Use when performing bank reconciliations, GL-to-subledger recs, intercompany reconciliations, or identifying and categorizing reconciling items. +argument-hint: " [period]" --- # Reconciliation diff --git a/finance/commands/sox-testing.md b/finance/skills/sox-testing/SKILL.md similarity index 96% rename from finance/commands/sox-testing.md rename to finance/skills/sox-testing/SKILL.md index 79b4688d..97d7b4e8 100644 --- a/finance/commands/sox-testing.md +++ b/finance/skills/sox-testing/SKILL.md @@ -1,11 +1,12 @@ --- -description: Generate SOX sample selections, testing workpapers, and control assessments +name: sox-testing +description: Generate SOX sample selections, testing workpapers, and control assessments. Use when planning quarterly or annual SOX 404 testing, pulling a sample for a control (revenue, P2P, ITGC, close), building a testing workpaper template, or evaluating and classifying a control deficiency. argument-hint: " [period]" --- # SOX Compliance Testing -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). **Important**: This command assists with SOX compliance workflows but does not provide audit or legal advice. All testing workpapers and assessments should be reviewed by qualified financial professionals before use in audit documentation. 
diff --git a/finance/skills/variance-analysis/SKILL.md b/finance/skills/variance-analysis/SKILL.md
index 1bfccf66..4a430511 100644
--- a/finance/skills/variance-analysis/SKILL.md
+++ b/finance/skills/variance-analysis/SKILL.md
@@ -1,6 +1,7 @@
---
name: variance-analysis
description: Decompose financial variances into drivers with narrative explanations and waterfall analysis. Use when analyzing budget vs. actual, period-over-period changes, revenue or expense variances, or preparing variance commentary for leadership.
+argument-hint: "<period> vs <period>"
---

# Variance Analysis
diff --git a/human-resources/.claude-plugin/plugin.json b/human-resources/.claude-plugin/plugin.json
index dbc69b53..b6a9dff6 100644
--- a/human-resources/.claude-plugin/plugin.json
+++ b/human-resources/.claude-plugin/plugin.json
@@ -1,6 +1,6 @@
{
"name": "human-resources",
- "version": "1.1.0",
+ "version": "1.2.0",
"description": "Streamline people operations — recruiting, onboarding, performance reviews, compensation analysis, and policy guidance. Maintain compliance and keep your team running smoothly.",
"author": {
"name": "Anthropic"
diff --git a/human-resources/README.md b/human-resources/README.md
index 2d259143..4bc0a260 100644
--- a/human-resources/README.md
+++ b/human-resources/README.md
@@ -111,7 +111,10 @@ See [CONNECTORS.md](CONNECTORS.md) for the full list of supported integrations.

## Settings

-Create a local settings file at `human-resources/.claude/settings.local.json` to personalize:
+Create a `settings.local.json` file to personalize:
+
+- **Cowork**: Save it in any folder you've shared with Cowork (via the folder picker). The plugin finds it automatically.
+- **Claude Code**: Save it at `human-resources/.claude/settings.local.json`.
```json { diff --git a/human-resources/commands/comp-analysis.md b/human-resources/skills/comp-analysis/SKILL.md similarity index 62% rename from human-resources/commands/comp-analysis.md rename to human-resources/skills/comp-analysis/SKILL.md index 022419ee..f37f4c62 100644 --- a/human-resources/commands/comp-analysis.md +++ b/human-resources/skills/comp-analysis/SKILL.md @@ -1,13 +1,14 @@ --- -description: Analyze compensation — benchmarking, band placement, and equity modeling +name: comp-analysis +description: Analyze compensation — benchmarking, band placement, and equity modeling. Trigger with "what should we pay a [role]", "is this offer competitive", "model this equity grant", or when uploading comp data to find outliers and retention risks. argument-hint: "" --- # /comp-analysis -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). -Analyze compensation data for benchmarking, band placement, and planning. See the **compensation-benchmarking** skill for total comp frameworks, key variables, and data source guidance. +Analyze compensation data for benchmarking, band placement, and planning. Helps benchmark compensation against market data for hiring, retention, and equity planning. ## Usage @@ -26,8 +27,30 @@ Upload a CSV or paste your comp bands. I'll analyze placement, identify outliers **Option C: Equity modeling** "Model a refresh grant of 10K shares over 4 years at a $50 stock price." 
+## Compensation Framework + +### Components of Total Compensation +- **Base salary**: Cash compensation +- **Equity**: RSUs, stock options, or other equity +- **Bonus**: Annual target bonus, signing bonus +- **Benefits**: Health, retirement, perks (harder to quantify) + +### Key Variables +- **Role**: Function and specialization +- **Level**: IC levels, management levels +- **Location**: Geographic pay adjustments +- **Company stage**: Startup vs. growth vs. public +- **Industry**: Tech vs. finance vs. healthcare + +### Data Sources +- **With ~~compensation data**: Pull verified benchmarks +- **Without**: Use web research, public salary data, and user-provided context +- Always note data freshness and source limitations + ## Output +Provide percentile bands (25th, 50th, 75th, 90th) for base, equity, and total comp. Include location adjustments and company-stage context. + ```markdown ## Compensation Analysis: [Role/Scope] diff --git a/human-resources/skills/compensation-benchmarking/SKILL.md b/human-resources/skills/compensation-benchmarking/SKILL.md deleted file mode 100644 index 2d0ede2a..00000000 --- a/human-resources/skills/compensation-benchmarking/SKILL.md +++ /dev/null @@ -1,33 +0,0 @@ ---- -name: compensation-benchmarking -description: Benchmark compensation against market data. Trigger with "what should we pay", "comp benchmark", "market rate for", "salary range for", "is this offer competitive", or when the user needs help evaluating or setting compensation levels. ---- - -# Compensation Benchmarking - -Help benchmark compensation against market data for hiring, retention, and equity planning. 
- -## Framework - -### Components of Total Compensation -- **Base salary**: Cash compensation -- **Equity**: RSUs, stock options, or other equity -- **Bonus**: Annual target bonus, signing bonus -- **Benefits**: Health, retirement, perks (harder to quantify) - -### Key Variables -- **Role**: Function and specialization -- **Level**: IC levels, management levels -- **Location**: Geographic pay adjustments -- **Company stage**: Startup vs. growth vs. public -- **Industry**: Tech vs. finance vs. healthcare - -## Data Sources - -- **With ~~compensation data**: Pull verified benchmarks -- **Without**: Use web research, public salary data, and user-provided context -- Always note data freshness and source limitations - -## Output - -Provide percentile bands (25th, 50th, 75th, 90th) for base, equity, and total comp. Include location adjustments and company-stage context. diff --git a/human-resources/commands/draft-offer.md b/human-resources/skills/draft-offer/SKILL.md similarity index 85% rename from human-resources/commands/draft-offer.md rename to human-resources/skills/draft-offer/SKILL.md index 11752551..65cad48b 100644 --- a/human-resources/commands/draft-offer.md +++ b/human-resources/skills/draft-offer/SKILL.md @@ -1,11 +1,12 @@ --- -description: Draft an offer letter with comp details and terms +name: draft-offer +description: Draft an offer letter with comp details and terms. Use when a candidate is ready for an offer, assembling a total comp package (base, equity, signing bonus), writing the offer letter text itself, or prepping negotiation guidance for the hiring manager. argument-hint: "" --- # /draft-offer -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Draft a complete offer letter for a new hire. 
diff --git a/human-resources/skills/employee-handbook/SKILL.md b/human-resources/skills/employee-handbook/SKILL.md deleted file mode 100644 index c8f247d8..00000000 --- a/human-resources/skills/employee-handbook/SKILL.md +++ /dev/null @@ -1,32 +0,0 @@ ---- -name: employee-handbook -description: Answer questions about company policies, benefits, and procedures. Trigger with "what's our policy on", "how does PTO work", "benefits question", "expense policy", "remote work policy", or any question about company rules, perks, or procedures. ---- - -# Employee Handbook - -Answer employee questions about policies, benefits, and procedures by searching connected knowledge bases or using provided handbook content. - -## Common Topics - -- **PTO and Leave**: Vacation, sick leave, parental leave, bereavement, sabbatical -- **Benefits**: Health insurance, dental, vision, 401k, HSA/FSA, wellness -- **Compensation**: Pay schedule, bonus timing, equity vesting, expense reimbursement -- **Remote Work**: WFH policy, remote locations, equipment stipend, coworking -- **Travel**: Booking policy, per diem, expense reporting, approval process -- **Conduct**: Code of conduct, harassment policy, conflicts of interest -- **Growth**: Professional development budget, conference policy, tuition reimbursement - -## How to Answer - -1. Search ~~knowledge base for the relevant policy document -2. Provide a clear, plain-language answer -3. Quote the specific policy language -4. Note any exceptions or special cases -5. 
Point to who to contact for edge cases - -## Important - -- Always cite the source document and section -- If no policy is found, say so clearly rather than guessing -- For legal or compliance questions, recommend consulting HR or legal directly diff --git a/human-resources/commands/onboarding.md b/human-resources/skills/onboarding/SKILL.md similarity index 91% rename from human-resources/commands/onboarding.md rename to human-resources/skills/onboarding/SKILL.md index 630f6119..98a45db7 100644 --- a/human-resources/commands/onboarding.md +++ b/human-resources/skills/onboarding/SKILL.md @@ -1,11 +1,12 @@ --- -description: Generate an onboarding checklist and first-week plan for a new hire +name: onboarding +description: Generate an onboarding checklist and first-week plan for a new hire. Use when someone has a start date coming up, building the pre-start task list (accounts, equipment, buddy), scheduling Day 1 and Week 1, or setting 30/60/90-day goals for a new team member. argument-hint: "" --- # /onboarding -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Generate a comprehensive onboarding plan for a new team member. diff --git a/human-resources/skills/people-analytics/SKILL.md b/human-resources/skills/people-analytics/SKILL.md deleted file mode 100644 index 43843a18..00000000 --- a/human-resources/skills/people-analytics/SKILL.md +++ /dev/null @@ -1,41 +0,0 @@ ---- -name: people-analytics -description: Analyze workforce data — attrition, engagement, diversity, and productivity. Trigger with "attrition rate", "turnover analysis", "diversity metrics", "engagement data", "retention risk", or when the user wants to understand workforce trends from HR data. ---- - -# People Analytics - -Analyze workforce data to surface trends, risks, and opportunities. 
- -## Key Metrics - -### Retention -- Overall attrition rate (voluntary + involuntary) -- Regrettable attrition rate -- Average tenure -- Flight risk indicators - -### Diversity -- Representation by level, team, and function -- Pipeline diversity (hiring funnel by demographic) -- Promotion rates by group -- Pay equity analysis - -### Engagement -- Survey scores and trends -- eNPS (Employee Net Promoter Score) -- Participation rates -- Open-ended feedback themes - -### Productivity -- Revenue per employee -- Span of control efficiency -- Time to productivity for new hires - -## Approach - -1. Understand what question they're trying to answer -2. Identify the right data (upload, paste, or pull from ~~HRIS) -3. Analyze with appropriate statistical methods -4. Present findings with context and caveats -5. Recommend specific actions based on data diff --git a/human-resources/commands/people-report.md b/human-resources/skills/people-report/SKILL.md similarity index 55% rename from human-resources/commands/people-report.md rename to human-resources/skills/people-report/SKILL.md index a2085cf0..7ed98e63 100644 --- a/human-resources/commands/people-report.md +++ b/human-resources/skills/people-report/SKILL.md @@ -1,13 +1,14 @@ --- -description: Generate headcount, attrition, diversity, or org health reports +name: people-report +description: Generate headcount, attrition, diversity, or org health reports. Use when pulling a headcount snapshot for leadership, analyzing turnover trends by team, preparing diversity representation metrics, or assessing span of control and flight risk across the org. argument-hint: "" --- # /people-report -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). -Generate people analytics reports from your HR data. 
See the **people-analytics** skill for metric definitions, analysis frameworks, and benchmarks. +Generate people analytics reports from your HR data. Analyze workforce data to surface trends, risks, and opportunities. ## Usage @@ -22,6 +23,39 @@ Generate people analytics reports from your HR data. See the **people-analytics* **Diversity**: Representation metrics — by level, team, pipeline **Org Health**: Span of control, management layers, team sizes, flight risk +## Key Metrics + +### Retention +- Overall attrition rate (voluntary + involuntary) +- Regrettable attrition rate +- Average tenure +- Flight risk indicators + +### Diversity +- Representation by level, team, and function +- Pipeline diversity (hiring funnel by demographic) +- Promotion rates by group +- Pay equity analysis + +### Engagement +- Survey scores and trends +- eNPS (Employee Net Promoter Score) +- Participation rates +- Open-ended feedback themes + +### Productivity +- Revenue per employee +- Span of control efficiency +- Time to productivity for new hires + +## Approach + +1. Understand what question they're trying to answer +2. Identify the right data (upload, paste, or pull from ~~HRIS) +3. Analyze with appropriate statistical methods +4. Present findings with context and caveats +5. Recommend specific actions based on data + ## What I Need From You Upload a CSV or describe your data. 
Helpful fields: diff --git a/human-resources/commands/performance-review.md b/human-resources/skills/performance-review/SKILL.md similarity index 92% rename from human-resources/commands/performance-review.md rename to human-resources/skills/performance-review/SKILL.md index dd0ab63b..02310add 100644 --- a/human-resources/commands/performance-review.md +++ b/human-resources/skills/performance-review/SKILL.md @@ -1,11 +1,12 @@ --- -description: Structure a performance review with self-assessment, manager template, and calibration prep +name: performance-review +description: Structure a performance review with self-assessment, manager template, and calibration prep. Use when review season kicks off and you need a self-assessment template, writing a manager review for a direct report, prepping rating distributions and promotion cases for calibration, or turning vague feedback into specific behavioral examples. argument-hint: "" --- # /performance-review -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Generate performance review templates and help structure feedback. diff --git a/human-resources/commands/policy-lookup.md b/human-resources/skills/policy-lookup/SKILL.md similarity index 62% rename from human-resources/commands/policy-lookup.md rename to human-resources/skills/policy-lookup/SKILL.md index edb59fc8..d3a8e3ba 100644 --- a/human-resources/commands/policy-lookup.md +++ b/human-resources/skills/policy-lookup/SKILL.md @@ -1,13 +1,14 @@ --- -description: Find and explain company policies +name: policy-lookup +description: Find and explain company policies in plain language. Trigger with "what's our PTO policy", "can I work remotely from another country", "how do expenses work", or any plain-language question about benefits, travel, leave, or handbook rules. 
argument-hint: "" --- # /policy-lookup -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). -Look up and explain company policies in plain language. See the **employee-handbook** skill for guidance on policy topics, answer structure, and compliance caveats. +Look up and explain company policies in plain language. Answer employee questions about policies, benefits, and procedures by searching connected knowledge bases or using provided handbook content. ## Usage @@ -34,6 +35,29 @@ Search for policies matching: $ARGUMENTS └─────────────────────────────────────────────────────────────────┘ ``` +## Common Policy Topics + +- **PTO and Leave**: Vacation, sick leave, parental leave, bereavement, sabbatical +- **Benefits**: Health insurance, dental, vision, 401k, HSA/FSA, wellness +- **Compensation**: Pay schedule, bonus timing, equity vesting, expense reimbursement +- **Remote Work**: WFH policy, remote locations, equipment stipend, coworking +- **Travel**: Booking policy, per diem, expense reporting, approval process +- **Conduct**: Code of conduct, harassment policy, conflicts of interest +- **Growth**: Professional development budget, conference policy, tuition reimbursement + +## How to Answer + +1. Search ~~knowledge base for the relevant policy document +2. Provide a clear, plain-language answer +3. Quote the specific policy language +4. Note any exceptions or special cases +5. 
Point to who to contact for edge cases + +**Important guardrails:** +- Always cite the source document and section +- If no policy is found, say so clearly rather than guessing +- For legal or compliance questions, recommend consulting HR or legal directly + ## Output ```markdown diff --git a/legal/.claude-plugin/plugin.json b/legal/.claude-plugin/plugin.json index c175b5bb..330cc4d4 100644 --- a/legal/.claude-plugin/plugin.json +++ b/legal/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "legal", - "version": "1.1.0", + "version": "1.2.0", "description": "Speed up contract review, NDA triage, and compliance workflows for in-house legal teams. Draft legal briefs, organize precedent research, and manage institutional knowledge.", "author": { "name": "Anthropic" diff --git a/legal/README.md b/legal/README.md index 9ff2355c..085d4182 100644 --- a/legal/README.md +++ b/legal/README.md @@ -2,7 +2,7 @@ An AI-powered productivity plugin for in-house legal teams, primarily designed for [Cowork](https://claude.com/product/cowork), Anthropic's agentic desktop application — though it also works in Claude Code. Automates contract review, NDA triage, compliance workflows, legal briefings, and templated responses -- all configurable to your organization's specific playbook and risk tolerances. -> **Disclaimer:** This plugin assists with legal workflows but does not provide legal advice. Always verify conclusions with qualified legal professionals. AI-generated analysis should be reviewed by licensed attorneys before being relied upon for legal decisions. +> **Disclaimer:** This plugin assists with legal workflows but does not provide legal advice. Always verify conclusions with qualified legal professionals. AI-generated analysis should be reviewed by licensed attorneys before being relied upon for legal decisions. The default playbook examples in this plugin reflect U.S. legal positions and jurisdictions (Delaware, New York, California). 
If you operate under different legal systems (EU, UK, Netherlands, Australia, etc.), you must customize the playbook in .claude/legal.local.md to reflect your jurisdiction's specific legal requirements, standard contract terms, and compliance obligations before relying on the plugin's analysis. ## Target Personas @@ -29,7 +29,10 @@ claude plugins add knowledge-work-plugins/legal Create a local settings file to define your organization's standard positions. This is where you encode your team's negotiation playbook, risk tolerances, and standard terms. -In your project's `.claude/` directory, create a `legal.local.md` file: +Create a `legal.local.md` file where Claude can find it: + +- **Cowork**: Save it in any folder you've shared with Cowork (via the folder picker). The plugin finds it automatically. +- **Claude Code**: Save it in your project's `.claude/` directory. ```markdown # Legal Playbook Configuration diff --git a/legal/commands/compliance-check.md b/legal/commands/compliance-check.md deleted file mode 100644 index 2aba58fb..00000000 --- a/legal/commands/compliance-check.md +++ /dev/null @@ -1,69 +0,0 @@ ---- -description: Run a compliance check on a proposed action, product feature, or business initiative -argument-hint: "" ---- - -# /compliance-check -- Compliance Review - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Run a compliance check on a proposed action, product feature, marketing campaign, or business initiative. - -**Important**: This command assists with legal workflows but does not provide legal advice. Compliance assessments should be reviewed by qualified legal professionals. - -## Usage - -``` -/compliance-check $ARGUMENTS -``` - -## What I Need From You - -Describe what you're planning to do. 
Examples: -- "We want to launch a referral program with cash rewards" -- "We're adding biometric authentication to our mobile app" -- "We need to process EU customer data in our US data center" -- "Marketing wants to use customer testimonials in ads" - -## Output - -```markdown -## Compliance Check: [Initiative] - -### Summary -[Quick assessment: Proceed / Proceed with conditions / Requires further review] - -### Applicable Regulations and Policies -| Regulation/Policy | Relevance | Key Requirements | -|-------------------|-----------|-----------------| -| [GDPR / CCPA / HIPAA / etc.] | [How it applies] | [What you need to do] | - -### Requirements -| # | Requirement | Status | Action Needed | -|---|-------------|--------|---------------| -| 1 | [Requirement] | [Met / Not Met / Unknown] | [What to do] | - -### Risk Areas -| Risk | Severity | Mitigation | -|------|----------|------------| -| [Risk] | [High/Med/Low] | [How to address] | - -### Recommended Actions -1. [Most important action] -2. [Second priority] -3. [Third priority] - -### Approvals Needed -| Approver | Why | Status | -|----------|-----|--------| -| [Person/Team] | [Reason] | [Pending] | - -### Further Review Recommended -[Areas where outside counsel or specialist review is advised] -``` - -## Tips - -1. **Be specific** — "We want to email all our users" is better than "marketing campaign." -2. **Include the geography** — Compliance requirements vary by jurisdiction. -3. **Mention the data** — What personal data is involved? This drives most compliance requirements. 
diff --git a/legal/commands/respond.md b/legal/commands/respond.md deleted file mode 100644 index 457438b0..00000000 --- a/legal/commands/respond.md +++ /dev/null @@ -1,197 +0,0 @@ ---- -description: Generate a response to a common legal inquiry using configured templates -argument-hint: "[inquiry-type]" ---- - -# /respond -- Generate Response from Templates - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Generate a response to a common legal inquiry using configured templates. Customizes the response with specific details and includes escalation triggers for situations that should not use a templated response. - -**Important**: This command assists with legal workflows but does not provide legal advice. Generated responses should be reviewed by qualified legal professionals before being sent. - -## Invocation - -``` -/respond [inquiry-type] -``` - -Common inquiry types: -- `dsr` or `data-subject-request` -- Data subject access/deletion/correction requests -- `hold` or `discovery-hold` -- Litigation hold notices -- `vendor` or `vendor-question` -- Vendor legal questions -- `nda` or `nda-request` -- NDA requests from business teams -- `privacy` or `privacy-inquiry` -- Privacy-related questions -- `subpoena` -- Subpoena or legal process responses -- `insurance` -- Insurance claim notifications -- `custom` -- Use a custom template - -If no inquiry type is provided, ask the user what type of response they need and show available categories. - -## Workflow - -### Step 1: Identify Inquiry Type - -Accept the inquiry type from the user. If the type is ambiguous, show available categories and ask for clarification. - -### Step 2: Load Template - -Look for templates in local settings (e.g., `legal.local.md` or a templates directory). 
- -**If templates are configured:** -- Load the appropriate template for the inquiry type -- Identify required variables (recipient name, dates, specific details) - -**If no templates are configured:** -- Inform the user that no templates were found for this inquiry type -- Offer to help create a template (see Step 6) -- Provide a reasonable default response structure based on the inquiry type - -### Step 3: Check Escalation Triggers - -Before generating the response, evaluate whether this situation has characteristics that should NOT use a templated response: - -#### Data Subject Request Escalation Triggers -- Request involves a minor's data -- Request is from a regulatory authority (not an individual) -- Request involves data that is subject to a litigation hold -- Requester is a current or former employee with an active dispute -- Request scope is unusually broad or appears to be a fishing expedition -- Request involves data processed in a jurisdiction with unique requirements - -#### Discovery Hold Escalation Triggers -- The matter involves potential criminal liability -- The preservation scope is unclear or potentially overbroad -- There are questions about whether certain data is within scope -- Prior holds for the same or related matter exist -- The hold may affect ongoing business operations significantly - -#### Vendor Question Escalation Triggers -- The question involves a dispute or potential breach -- The vendor is threatening litigation or termination -- The question involves regulatory compliance (not just contract terms) -- The response could create a binding commitment or waiver - -#### NDA Request Escalation Triggers -- The counterparty is a competitor -- The NDA involves government classified information -- The business context suggests the NDA is for a potential M&A transaction -- The request involves unusual subject matter (AI training data, biometric data, etc.) 
- -**If an escalation trigger is detected:** -- Alert the user that this situation may not be appropriate for a templated response -- Explain which trigger was detected and why it matters -- Recommend the user consult with a senior team member or outside counsel -- Offer to draft a preliminary response for counsel review rather than a final response - -### Step 4: Gather Specific Details - -Prompt the user for the details needed to customize the response: - -**Data Subject Request:** -- Requester name and contact information -- Type of request (access, deletion, correction, portability, opt-out) -- What data is involved -- Applicable regulation (GDPR, CCPA, CPRA, other) -- Response deadline - -**Discovery Hold:** -- Matter name and reference number -- Custodians (who needs to preserve) -- Scope of preservation (date range, data types, systems) -- Outside counsel contact -- Effective date - -**Vendor Question:** -- Vendor name -- Reference agreement (if applicable) -- Specific question being addressed -- Relevant contract provisions - -**NDA Request:** -- Requesting business team and contact -- Counterparty name -- Purpose of the NDA -- Mutual or unilateral -- Any special requirements - -### Step 5: Generate Response - -Populate the template with the gathered details. Ensure the response: -- Uses appropriate tone (professional, clear, not overly legalistic for business audiences) -- Includes all required legal elements for the response type -- References specific dates, deadlines, and obligations -- Provides clear next steps for the recipient -- Includes appropriate disclaimers or caveats - -Present the draft response to the user for review before sending. - -### Step 6: Template Creation (If No Template Exists) - -If the user wants to create a new template: - -1. Ask what type of inquiry the template is for -2. Ask for key elements that should be included -3. Ask for tone and audience (internal vs. external, business vs. legal) -4. 
Draft a template with variable placeholders (e.g., `{{requester_name}}`, `{{deadline}}`, `{{matter_reference}}`) -5. Include escalation triggers appropriate for the category -6. Present the template for review -7. Suggest the user save the approved template to their local settings for future use - -#### Template Format - -```markdown -## Template: [Category Name] - -### Escalation Triggers -- [Trigger 1] -- [Trigger 2] - -### Variables -- {{variable_1}}: [description] -- {{variable_2}}: [description] - -### Subject Line -[Subject template] - -### Body -[Response body with {{variables}}] - -### Attachments -[Any standard attachments to include] - -### Follow-Up -[Standard follow-up actions after sending] -``` - -## Output Format - -``` -## Generated Response: [Inquiry Type] - -**To**: [recipient] -**Subject**: [subject line] - ---- - -[Response body] - ---- - -### Escalation Check -[Confirmation that no escalation triggers were detected, OR flagged triggers with recommendations] - -### Follow-Up Actions -1. [Post-send actions] -2. [Calendar reminders to set] -3. 
[Tracking or logging requirements] -``` - -## Notes - -- Always present the draft response for user review before suggesting it be sent -- If connected to email via MCP, offer to create a draft email with the response -- Track response deadlines and offer to set calendar reminders -- For regulated responses (DSRs, subpoenas), always note the applicable deadline and regulatory requirements -- Templates should be living documents; suggest updates when the user modifies a templated response, so the template can be improved over time diff --git a/legal/commands/review-contract.md b/legal/commands/review-contract.md deleted file mode 100644 index 647de04f..00000000 --- a/legal/commands/review-contract.md +++ /dev/null @@ -1,171 +0,0 @@ ---- -description: Review a contract against your organization's negotiation playbook — flag deviations, generate redlines, provide business impact analysis -argument-hint: "" ---- - -# /review-contract -- Contract Review Against Playbook - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Review a contract against your organization's negotiation playbook. Analyze each clause, flag deviations, generate redline suggestions, and provide business impact analysis. - -## Invocation - -``` -/review-contract -``` - -Review the contract: @$1 - -## Workflow - -### Step 1: Accept the Contract - -Accept the contract in any of these formats: -- **File upload**: PDF, DOCX, or other document format -- **URL**: Link to a contract in your CLM, cloud storage (e.g., Box, Egnyte, SharePoint), or other document system -- **Pasted text**: Contract text pasted directly into the conversation - -If no contract is provided, prompt the user to supply one. - -### Step 2: Gather Context - -Ask the user for context before beginning the review: - -1. **Which side are you on?** (vendor/supplier, customer/buyer, licensor, licensee, partner -- or other) -2. 
**Deadline**: When does this need to be finalized? (Affects prioritization of issues) -3. **Focus areas**: Any specific concerns? (e.g., "data protection is critical", "we need flexibility on term", "IP ownership is the key issue") -4. **Deal context**: Any relevant business context? (e.g., deal size, strategic importance, existing relationship) - -If the user provides partial context, proceed with what you have and note assumptions. - -### Step 3: Load the Playbook - -Look for the organization's contract review playbook in local settings (e.g., `legal.local.md` or similar configuration files). - -The playbook should define: -- **Standard positions**: The organization's preferred terms for each major clause type -- **Acceptable ranges**: Terms that can be agreed to without escalation -- **Escalation triggers**: Terms that require senior counsel review or outside counsel involvement - -**If no playbook is configured:** -- Inform the user that no playbook was found -- Offer two options: - 1. Help the user set up their playbook (walk through defining positions for key clauses) - 2. Proceed with a generic review using widely-accepted commercial standards as the baseline -- If proceeding generically, clearly note that the review is based on general commercial standards, not the organization's specific positions - -### Step 4: Clause-by-Clause Analysis - -Analyze the contract systematically, covering at minimum: - -| Clause Category | Key Review Points | -|----------------|-------------------| -| **Limitation of Liability** | Cap amount, carveouts, mutual vs. unilateral, consequential damages | -| **Indemnification** | Scope, mutual vs. 
unilateral, cap, IP infringement, data breach | -| **IP Ownership** | Pre-existing IP, developed IP, work-for-hire, license grants, assignment | -| **Data Protection** | DPA requirement, processing terms, sub-processors, breach notification, cross-border transfers | -| **Confidentiality** | Scope, term, carveouts, return/destruction obligations | -| **Representations & Warranties** | Scope, disclaimers, survival period | -| **Term & Termination** | Duration, renewal, termination for convenience, termination for cause, wind-down | -| **Governing Law & Dispute Resolution** | Jurisdiction, venue, arbitration vs. litigation | -| **Insurance** | Coverage requirements, minimums, evidence of coverage | -| **Assignment** | Consent requirements, change of control, exceptions | -| **Force Majeure** | Scope, notification, termination rights | -| **Payment Terms** | Net terms, late fees, taxes, price escalation | - -For each clause, assess against the playbook (or generic standards) and note whether it is present, absent, or unusual. - -### Step 5: Flag Deviations - -Classify each deviation from the playbook using a three-tier system: - -#### GREEN -- Acceptable -- Aligns with or is better than the organization's standard position -- Minor variations that are commercially reasonable -- No action needed; note for awareness - -#### YELLOW -- Negotiate -- Falls outside standard position but within negotiable range -- Common in the market but not the organization's preference -- Requires attention but not escalation -- **Include**: Specific redline language to bring the term back to standard position -- **Include**: Fallback position if the counterparty pushes back -- **Include**: Business impact of accepting as-is vs. 
negotiating - -#### RED -- Escalate -- Falls outside acceptable range or triggers an escalation criterion -- Unusual or aggressive terms that pose material risk -- Requires senior counsel review, outside counsel involvement, or business decision-maker sign-off -- **Include**: Why this is a RED flag (specific risk) -- **Include**: What the standard market position looks like -- **Include**: Business impact and potential exposure -- **Include**: Recommended escalation path - -### Step 6: Generate Redline Suggestions - -For each YELLOW and RED deviation, provide: -- **Current language**: Quote the relevant contract text -- **Suggested redline**: Specific alternative language -- **Rationale**: Brief explanation suitable for sharing with the counterparty -- **Priority**: Whether this is a must-have or nice-to-have in negotiation - -### Step 7: Business Impact Summary - -Provide a summary section covering: -- **Overall risk assessment**: High-level view of the contract's risk profile -- **Top 3 issues**: The most important items to address -- **Negotiation strategy**: Recommended approach (which issues to lead with, what to concede) -- **Timeline considerations**: Any urgency factors affecting the negotiation approach - -### Step 8: CLM Routing (If Connected) - -If a Contract Lifecycle Management system is connected via MCP: -- Recommend the appropriate approval workflow based on contract type and risk level -- Suggest the correct routing path (e.g., standard approval, senior counsel, outside counsel) -- Note any required approvals based on contract value or risk flags - -If no CLM is connected, skip this step. - -## Output Format - -Structure the output as: - -``` -## Contract Review Summary - -**Document**: [contract name/identifier] -**Parties**: [party names and roles] -**Your Side**: [vendor/customer/etc.] 
-**Deadline**: [if provided] -**Review Basis**: [Playbook / Generic Standards] - -## Key Findings - -[Top 3-5 issues with severity flags] - -## Clause-by-Clause Analysis - -### [Clause Category] -- [GREEN/YELLOW/RED] -**Contract says**: [summary of the provision] -**Playbook position**: [your standard] -**Deviation**: [description of gap] -**Business impact**: [what this means practically] -**Redline suggestion**: [specific language, if YELLOW or RED] - -[Repeat for each major clause] - -## Negotiation Strategy - -[Recommended approach, priorities, concession candidates] - -## Next Steps - -[Specific actions to take] -``` - -## Notes - -- If the contract is in a language other than English, note this and ask if the user wants a translation or review in the original language -- For very long contracts (50+ pages), offer to focus on the most material sections first and then do a complete review -- Always remind the user that this analysis should be reviewed by qualified legal counsel before being relied upon for legal decisions diff --git a/legal/commands/triage-nda.md b/legal/commands/triage-nda.md deleted file mode 100644 index b717844f..00000000 --- a/legal/commands/triage-nda.md +++ /dev/null @@ -1,158 +0,0 @@ ---- -description: Rapidly triage an incoming NDA — classify as standard approval, counsel review, or full legal review -argument-hint: "" ---- - -# /triage-nda -- NDA Pre-Screening - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Triage the NDA: @$1 - -Rapidly triage incoming NDAs against standard screening criteria. Classify the NDA for routing: standard approval, counsel review, or full legal review. 
- -## Invocation - -``` -/triage-nda -``` - -## Workflow - -### Step 1: Accept the NDA - -Accept the NDA in any format: -- **File upload**: PDF, DOCX, or other document format -- **URL**: Link to the NDA in a document system -- **Pasted text**: NDA text pasted directly - -If no NDA is provided, prompt the user to supply one. - -### Step 2: Load NDA Playbook - -Look for NDA screening criteria in local settings (e.g., `legal.local.md`). - -The NDA playbook should define: -- Mutual vs. unilateral requirements -- Acceptable term lengths -- Required carveouts -- Prohibited provisions -- Organization-specific requirements - -**If no NDA playbook is configured:** -- Proceed with reasonable market-standard defaults -- Note clearly that defaults are being used -- Defaults applied: - - Mutual obligations required (unless the organization is only disclosing) - - Term: 2-3 years standard, up to 5 years for trade secrets - - Standard carveouts required: independently developed, publicly available, rightfully received from third party, required by law - - No non-solicitation or non-compete provisions - - No residuals clause (or narrowly scoped if present) - - Governing law in a reasonable commercial jurisdiction - -### Step 3: Quick Screen - -Evaluate the NDA against each screening criterion: - -| Criterion | Check | -|-----------|-------| -| **Mutual vs. Unilateral** | Are obligations mutual? If unilateral, is that appropriate for the relationship? | -| **Definition of Confidential Information** | Reasonable scope? Not overbroad (e.g., "all information of any kind")? | -| **Term** | Within acceptable range? Reasonable for the type of information? | -| **Standard Carveouts** | All required carveouts present? (independent development, public knowledge, third-party receipt, legal compulsion) | -| **Permitted Disclosures** | Can share with employees, advisors, contractors who need to know? | -| **Return/Destruction** | Reasonable obligations on termination? 
Allows retention of legal/compliance copies? | -| **Residuals** | If present, narrowly scoped to unaided memory? | -| **Non-Solicitation** | Any non-solicit provisions embedded? | -| **Non-Compete** | Any non-compete provisions embedded? | -| **Injunctive Relief** | Reasonable or one-sided? Pre-determined damages? | -| **Governing Law** | Acceptable jurisdiction? | -| **Assignment** | Reasonable assignment provisions? | -| **Unusual Provisions** | Any non-standard clauses that don't belong in an NDA? | - -### Step 4: Classify - -Based on the screening results, assign a classification: - -#### GREEN -- Standard Approval -All criteria met. NDA is market-standard with no unusual provisions. -- **Route**: Can be approved and signed via standard process -- **Action**: Proceed to signature with standard delegation of authority - -#### YELLOW -- Counsel Review Needed -One or more criteria have minor deviations that need review but are potentially acceptable: -- Definition of confidential information is broader than ideal but not unreasonable -- Term is longer than standard but within market range -- Residuals clause present but narrowly scoped -- Minor jurisdiction preference issue -- Missing one standard carveout that could be added -- **Route**: Flag specific issues for counsel review -- **Action**: Counsel can likely resolve in a single review pass - -#### RED -- Significant Issues -One or more criteria have material deviations that pose risk: -- Unilateral obligations when mutual is required -- Missing critical carveouts (e.g., no independent development carveout) -- Non-solicitation or non-compete provisions embedded -- Unreasonable term (10+ years) without justification -- Overbroad definition that could capture public information -- Unusual provisions (exclusivity, audit rights, IP assignment) -- Highly unfavorable jurisdiction with no negotiation room -- **Route**: Full legal review required -- **Action**: Do not sign; requires negotiation or counterproposal - 
-### Step 5: Generate Triage Report - -Output a structured report: - -``` -## NDA Triage Report - -**Classification**: [GREEN / YELLOW / RED] -**Parties**: [party names] -**Type**: [Mutual / Unilateral (disclosing) / Unilateral (receiving)] -**Term**: [duration] -**Governing Law**: [jurisdiction] -**Review Basis**: [Playbook / Default Standards] - -## Screening Results - -| Criterion | Status | Notes | -|-----------|--------|-------| -| Mutual Obligations | [PASS/FLAG/FAIL] | [details] | -| Definition Scope | [PASS/FLAG/FAIL] | [details] | -| Term | [PASS/FLAG/FAIL] | [details] | -| Standard Carveouts | [PASS/FLAG/FAIL] | [details] | -| [etc.] | | | - -## Issues Found - -### [Issue 1 -- YELLOW/RED] -**What**: [description] -**Risk**: [what could go wrong] -**Suggested Fix**: [specific language or approach] - -[Repeat for each issue] - -## Recommendation - -[Specific next step: approve, send for review with specific notes, or reject/counter] - -## Next Steps - -1. [Action item 1] -2. [Action item 2] -``` - -### Step 6: Routing Suggestion - -Based on the classification: -- **GREEN**: Suggest the user proceed to signature under their standard delegation of authority -- **YELLOW**: Identify which specific issues need counsel attention and suggest the user route to the appropriate reviewer -- **RED**: Recommend the user engage counsel for a full review, and provide a counterproposal NDA if the organization has a standard form - -## Notes - -- If the document is not actually an NDA (e.g., it's labeled as an NDA but contains substantive commercial terms), flag this immediately as a RED and recommend full contract review instead -- For NDAs that are part of a larger agreement (e.g., confidentiality section in an MSA), note that the broader agreement context may affect the analysis -- Always note that this is a screening tool and counsel should review any items the user is uncertain about diff --git a/legal/commands/brief.md b/legal/skills/brief/SKILL.md similarity index 
94% rename from legal/commands/brief.md rename to legal/skills/brief/SKILL.md index d55a4269..fa80117a 100644 --- a/legal/commands/brief.md +++ b/legal/skills/brief/SKILL.md @@ -1,11 +1,12 @@ --- -description: Generate contextual briefings for legal work — daily summary, topic research, or incident response +name: brief +description: Generate contextual briefings for legal work — daily summary, topic research, or incident response. Use when starting your day and need a scan of legal-relevant items across email, calendar, and contracts, when researching a specific legal question across internal sources, or when a developing situation (data breach, litigation threat, regulatory inquiry) needs rapid context. argument-hint: "[daily | topic | incident]" --- # /brief -- Legal Team Briefing -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Generate contextual briefings for legal work. Supports three modes: daily brief, topic brief, and incident brief. diff --git a/legal/skills/compliance/SKILL.md b/legal/skills/compliance-check/SKILL.md similarity index 82% rename from legal/skills/compliance/SKILL.md rename to legal/skills/compliance-check/SKILL.md index 260474f8..688d8c96 100644 --- a/legal/skills/compliance/SKILL.md +++ b/legal/skills/compliance-check/SKILL.md @@ -1,13 +1,67 @@ --- -name: compliance -description: Navigate privacy regulations (GDPR, CCPA), review DPAs, and handle data subject requests. Use when reviewing data processing agreements, responding to data subject access or deletion requests, assessing cross-border data transfer requirements, or evaluating privacy compliance. +name: compliance-check +description: Run a compliance check on a proposed action, product feature, or business initiative, surfacing applicable regulations, required approvals, and risk areas. 
Use when launching a feature that touches personal data, when marketing or product proposes something with regulatory implications, or when you need to know which approvals and jurisdictional requirements apply before proceeding. +argument-hint: "" --- -# Compliance Skill +# /compliance-check -- Compliance Review -You are a compliance assistant for an in-house legal team. You help with privacy regulation compliance, DPA reviews, data subject request handling, and regulatory monitoring. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). -**Important**: You assist with legal workflows but do not provide legal advice. Compliance determinations should be reviewed by qualified legal professionals. Regulatory requirements change frequently; always verify current requirements with authoritative sources. +Run a compliance check on a proposed action, product feature, marketing campaign, or business initiative. + +**Important**: This command assists with legal workflows but does not provide legal advice. Compliance assessments should be reviewed by qualified legal professionals. Regulatory requirements change frequently; always verify current requirements with authoritative sources. + +## Usage + +``` +/compliance-check $ARGUMENTS +``` + +## What I Need From You + +Describe what you're planning to do. Examples: +- "We want to launch a referral program with cash rewards" +- "We're adding biometric authentication to our mobile app" +- "We need to process EU customer data in our US data center" +- "Marketing wants to use customer testimonials in ads" + +## Output + +```markdown +## Compliance Check: [Initiative] + +### Summary +[Quick assessment: Proceed / Proceed with conditions / Requires further review] + +### Applicable Regulations and Policies +| Regulation/Policy | Relevance | Key Requirements | +|-------------------|-----------|-----------------| +| [GDPR / CCPA / HIPAA / etc.] 
| [How it applies] | [What you need to do] | + +### Requirements +| # | Requirement | Status | Action Needed | +|---|-------------|--------|---------------| +| 1 | [Requirement] | [Met / Not Met / Unknown] | [What to do] | + +### Risk Areas +| Risk | Severity | Mitigation | +|------|----------|------------| +| [Risk] | [High/Med/Low] | [How to address] | + +### Recommended Actions +1. [Most important action] +2. [Second priority] +3. [Third priority] + +### Approvals Needed +| Approver | Why | Status | +|----------|-----|--------| +| [Person/Team] | [Reason] | [Pending] | + +### Further Review Recommended +[Areas where outside counsel or specialist review is advised] +``` ## Privacy Regulation Overview @@ -212,3 +266,9 @@ Escalate regulatory developments to senior counsel or leadership when: - A compliance deadline is approaching that requires organizational changes - A data transfer mechanism the organization relies on is challenged or invalidated - A regulatory authority initiates an inquiry or investigation involving the organization + +## Tips + +1. **Be specific** — "We want to email all our users" is better than "marketing campaign." +2. **Include the geography** — Compliance requirements vary by jurisdiction. +3. **Mention the data** — What personal data is involved? This drives most compliance requirements. diff --git a/legal/skills/canned-responses/SKILL.md b/legal/skills/legal-response/SKILL.md similarity index 57% rename from legal/skills/canned-responses/SKILL.md rename to legal/skills/legal-response/SKILL.md index 739f6866..ca96f064 100644 --- a/legal/skills/canned-responses/SKILL.md +++ b/legal/skills/legal-response/SKILL.md @@ -1,38 +1,179 @@ --- -name: canned-responses -description: Generate templated responses for common legal inquiries and identify when situations require individualized attention. 
Use when responding to routine legal questions — data subject requests, vendor inquiries, NDA requests, discovery holds — or when managing response templates. +name: legal-response +description: Generate a response to a common legal inquiry using configured templates, with built-in escalation checks for situations that shouldn't use a templated reply. Use when responding to data subject requests, litigation hold notices, vendor legal questions, NDA requests from business teams, or subpoenas. +argument-hint: "[inquiry-type]" --- -# Canned Responses Skill +# /legal-response -- Generate Response from Templates -You are a response template assistant for an in-house legal team. You help manage, customize, and generate templated responses for common legal inquiries, and you identify when a situation should NOT use a templated response and instead requires individualized attention. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). -**Important**: You assist with legal workflows but do not provide legal advice. Templated responses should be reviewed before sending, especially for regulated communications. +Generate a response to a common legal inquiry using configured templates. Customizes the response with specific details and includes escalation triggers for situations that should not use a templated response. -## Template Management Methodology +**Important**: This command assists with legal workflows but does not provide legal advice. Generated responses should be reviewed by qualified legal professionals before being sent, especially for regulated communications. -### Template Organization +## Invocation -Templates should be organized by category and maintained in the team's local settings. Each template should include: +``` +/legal-response [inquiry-type] +``` -1. **Category**: The type of inquiry the template addresses -2. **Template name**: A descriptive identifier -3. 
**Use case**: When this template is appropriate -4. **Escalation triggers**: When this template should NOT be used -5. **Required variables**: Information that must be customized for each use -6. **Template body**: The response text with variable placeholders -7. **Follow-up actions**: Standard steps after sending the response -8. **Last reviewed date**: When the template was last verified for accuracy +Common inquiry types: +- `dsr` or `data-subject-request` -- Data subject access/deletion/correction requests +- `hold` or `discovery-hold` -- Litigation hold notices +- `vendor` or `vendor-question` -- Vendor legal questions +- `nda` or `nda-request` -- NDA requests from business teams +- `privacy` or `privacy-inquiry` -- Privacy-related questions +- `subpoena` -- Subpoena or legal process responses +- `insurance` -- Insurance claim notifications +- `custom` -- Use a custom template -### Template Lifecycle +If no inquiry type is provided, ask the user what type of response they need and show available categories. -1. **Creation**: Draft template based on best practices and team input -2. **Review**: Legal team review and approval of template content -3. **Publication**: Add to template library with metadata -4. **Use**: Generate responses using the template -5. **Feedback**: Track when templates are modified during use to identify improvement opportunities -6. **Update**: Revise templates when laws, policies, or best practices change -7. **Retirement**: Archive templates that are no longer applicable +## Workflow + +### Step 1: Identify Inquiry Type + +Accept the inquiry type from the user. If the type is ambiguous, show available categories and ask for clarification. + +### Step 2: Load Template + +Look for templates in local settings (e.g., `legal.local.md` or a templates directory). 
+ +**If templates are configured:** +- Load the appropriate template for the inquiry type +- Identify required variables (recipient name, dates, specific details) + +**If no templates are configured:** +- Inform the user that no templates were found for this inquiry type +- Offer to help create a template (see Template Creation Guide below) +- Provide a reasonable default response structure based on the inquiry type + +### Step 3: Check Escalation Triggers + +Before generating any response, evaluate whether this situation has characteristics that should NOT use a templated response. + +#### Universal Escalation Triggers (Apply to All Categories) +- The matter involves potential litigation or regulatory investigation +- The inquiry is from a regulator, government agency, or law enforcement +- The response could create a binding legal commitment or waiver +- The matter involves potential criminal liability +- Media attention is involved or likely +- The situation is unprecedented (no prior handling by the team) +- Multiple jurisdictions are involved with conflicting requirements +- The matter involves executive leadership or board members + +#### Data Subject Request Escalation Triggers +- Request involves a minor's data, or is from/on behalf of a minor +- Request is from a regulatory authority (not an individual) +- Request involves data that is subject to a litigation hold +- Requester is a current or former employee with an active dispute or HR matter +- Request scope is unusually broad or appears to be a fishing expedition +- Request involves data processed in a jurisdiction with unique requirements +- Request involves special category data (health, biometric, genetic) + +#### Discovery Hold Escalation Triggers +- The matter involves potential criminal liability +- The preservation scope is unclear, disputed, or potentially overbroad +- There are questions about whether certain data is within scope +- Prior holds for the same or related matter exist +- The hold 
may affect ongoing business operations significantly +- Hold conflicts with regulatory deletion requirements +- Custodian objects to the hold scope + +#### Vendor Question Escalation Triggers +- The question involves a dispute or potential breach +- The vendor is threatening litigation or termination +- The question involves regulatory compliance (not just contract terms) +- The response could create a binding commitment or waiver +- Response could affect ongoing negotiation + +#### NDA Request Escalation Triggers +- The counterparty is a competitor +- The NDA involves government classified information +- The business context suggests the NDA is for a potential M&A transaction +- The request involves unusual subject matter (AI training data, biometric data, etc.) + +#### Subpoena / Legal Process Escalation Triggers +- **ALWAYS requires counsel review** (templates are starting points only) +- Privilege issues identified +- Third-party data involved +- Cross-border production issues +- Unreasonable timeline + +**When an escalation trigger is detected:** +1. **Stop**: Do not generate a templated response +2. **Alert**: Inform the user that an escalation trigger has been detected +3. **Explain**: Describe which trigger was detected and why it matters +4. **Recommend**: Suggest the appropriate escalation path (senior counsel, outside counsel, specific team member) +5. 
**Offer**: Provide a draft for counsel review (clearly marked as "DRAFT - FOR COUNSEL REVIEW ONLY") rather than a final response + +### Step 4: Gather Specific Details + +Prompt the user for the details needed to customize the response: + +**Data Subject Request:** +- Requester name and contact information +- Type of request (access, deletion, correction, portability, opt-out) +- What data is involved +- Applicable regulation (GDPR, CCPA, CPRA, other) +- Response deadline + +**Discovery Hold:** +- Matter name and reference number +- Custodians (who needs to preserve) +- Scope of preservation (date range, data types, systems) +- Outside counsel contact +- Effective date + +**Vendor Question:** +- Vendor name +- Reference agreement (if applicable) +- Specific question being addressed +- Relevant contract provisions + +**NDA Request:** +- Requesting business team and contact +- Counterparty name +- Purpose of the NDA +- Mutual or unilateral +- Any special requirements + +### Step 5: Generate Response + +Populate the template with the gathered details. Ensure the response: +- Uses appropriate tone (professional, clear, not overly legalistic for business audiences) +- Includes all required legal elements for the response type +- References specific dates, deadlines, and obligations +- Provides clear next steps for the recipient +- Includes appropriate disclaimers or caveats + +Present the draft response to the user for review before sending. + +#### Customization Guidelines + +**Required customization** — Every templated response MUST be customized with: +- Correct names, dates, and reference numbers +- Specific facts of the situation +- Applicable jurisdiction and regulation +- Correct response deadlines based on when the inquiry was received +- Appropriate signature block and contact information + +**Tone adjustment** — Adjust tone based on: +- **Audience**: Internal vs. external, business vs. legal, individual vs. 
regulatory authority +- **Relationship**: New counterparty vs. existing partner vs. adversarial party +- **Sensitivity**: Routine inquiry vs. contentious matter vs. regulatory investigation +- **Urgency**: Standard timeline vs. expedited response needed + +**Jurisdiction-specific adjustments:** +- Verify that cited regulations are correct for the requester's jurisdiction +- Adjust timelines to match applicable law +- Include jurisdiction-specific rights information +- Use jurisdiction-appropriate legal terminology + +### Step 6: Template Creation (If No Template Exists) + +If the user wants to create a new template, walk through the Template Creation Guide (see below) and present the finished template for review. Suggest the user save the approved template to their local settings for future use. ## Response Categories @@ -185,116 +326,64 @@ Contact {{legal_contact}} with any questions. - Timeline of events - Requested coverage confirmation -## Customization Guidelines - -When generating a response from a template: - -### Required Customization -Every templated response MUST be customized with: -- Correct names, dates, and reference numbers -- Specific facts of the situation -- Applicable jurisdiction and regulation -- Correct response deadlines based on when the inquiry was received -- Appropriate signature block and contact information - -### Tone Adjustment -Adjust tone based on: -- **Audience**: Internal vs. external, business vs. legal, individual vs. regulatory authority -- **Relationship**: New counterparty vs. existing partner vs. adversarial party -- **Sensitivity**: Routine inquiry vs. contentious matter vs. regulatory investigation -- **Urgency**: Standard timeline vs. 
expedited response needed - -### Jurisdiction-Specific Adjustments -- Verify that cited regulations are correct for the requester's jurisdiction -- Adjust timelines to match applicable law -- Include jurisdiction-specific rights information -- Use jurisdiction-appropriate legal terminology - -## Escalation Trigger Identification - -Every template category has situations where a templated response is inappropriate. Before generating any response, check for these escalation triggers: - -### Universal Escalation Triggers (Apply to All Categories) -- The matter involves potential litigation or regulatory investigation -- The inquiry is from a regulator, government agency, or law enforcement -- The response could create a binding legal commitment or waiver -- The matter involves potential criminal liability -- Media attention is involved or likely -- The situation is unprecedented (no prior handling by the team) -- Multiple jurisdictions are involved with conflicting requirements -- The matter involves executive leadership or board members - -### Category-Specific Escalation Triggers - -**Data Subject Requests**: -- Request from a minor or on behalf of a minor -- Request involves data subject to litigation hold -- Requester is in active litigation or dispute with the organization -- Request is from an employee with an active HR matter -- Request scope is so broad it appears to be a fishing expedition -- Request involves special category data (health, biometric, genetic) +## Template Management Methodology -**Discovery Holds**: -- Potential criminal liability -- Unclear or disputed preservation scope -- Hold conflicts with regulatory deletion requirements -- Prior holds exist for related matters -- Custodian objects to the hold scope +### Template Organization -**Vendor Questions**: -- Vendor is disputing contract terms -- Vendor is threatening litigation or termination -- Response could affect ongoing negotiation -- Question involves regulatory compliance (not just 
contract interpretation) +Templates should be organized by category and maintained in the team's local settings. Each template should include: -**Subpoena / Legal Process**: -- ALWAYS requires counsel review (templates are starting points only) -- Privilege issues identified -- Third-party data involved -- Cross-border production issues -- Unreasonable timeline +1. **Category**: The type of inquiry the template addresses +2. **Template name**: A descriptive identifier +3. **Use case**: When this template is appropriate +4. **Escalation triggers**: When this template should NOT be used +5. **Required variables**: Information that must be customized for each use +6. **Template body**: The response text with variable placeholders +7. **Follow-up actions**: Standard steps after sending the response +8. **Last reviewed date**: When the template was last verified for accuracy -### When an Escalation Trigger is Detected +### Template Lifecycle -1. **Stop**: Do not generate a templated response -2. **Alert**: Inform the user that an escalation trigger has been detected -3. **Explain**: Describe which trigger was detected and why it matters -4. **Recommend**: Suggest the appropriate escalation path (senior counsel, outside counsel, specific team member) -5. **Offer**: Provide a draft for counsel review (clearly marked as "DRAFT - FOR COUNSEL REVIEW ONLY") rather than a final response +1. **Creation**: Draft template based on best practices and team input +2. **Review**: Legal team review and approval of template content +3. **Publication**: Add to template library with metadata +4. **Use**: Generate responses using the template +5. **Feedback**: Track when templates are modified during use to identify improvement opportunities +6. **Update**: Revise templates when laws, policies, or best practices change +7. 
**Retirement**: Archive templates that are no longer applicable ## Template Creation Guide When helping users create new templates: -### Step 1: Define the Use Case +### 1. Define the Use Case - What type of inquiry does this address? - How frequently does this come up? - Who is the typical audience? - What is the typical urgency level? -### Step 2: Identify Required Elements +### 2. Identify Required Elements - What information must be included in every response? - What regulatory requirements apply? - What organizational policies govern this type of response? -### Step 3: Define Variables +### 3. Define Variables - What changes with each use? (names, dates, specifics) - What stays the same? (legal requirements, standard language) - Use clear variable names: `{{requester_name}}`, `{{response_deadline}}`, `{{matter_reference}}` -### Step 4: Draft the Template +### 4. Draft the Template - Write in clear, professional language - Avoid unnecessary legal jargon for business audiences - Include all legally required elements - Add placeholders for all variable content - Include a subject line template if for email use -### Step 5: Define Escalation Triggers +### 5. Define Escalation Triggers - What situations should NOT use this template? - What characteristics indicate the matter needs individualized attention? - Be specific: vague triggers are not useful -### Step 6: Add Metadata +### 6. Add Metadata - Template name and category - Version number and last reviewed date - Author and approver @@ -335,3 +424,34 @@ When helping users create new templates: ### Notes [Any special instructions for users of this template] ``` + +## Output Format + +``` +## Generated Response: [Inquiry Type] + +**To**: [recipient] +**Subject**: [subject line] + +--- + +[Response body] + +--- + +### Escalation Check +[Confirmation that no escalation triggers were detected, OR flagged triggers with recommendations] + +### Follow-Up Actions +1. [Post-send actions] +2. 
[Calendar reminders to set] +3. [Tracking or logging requirements] +``` + +## Notes + +- Always present the draft response for user review before suggesting it be sent +- If connected to email via MCP, offer to create a draft email with the response +- Track response deadlines and offer to set calendar reminders +- For regulated responses (DSRs, subpoenas), always note the applicable deadline and regulatory requirements +- Templates should be living documents; suggest updates when the user modifies a templated response, so the template can be improved over time diff --git a/legal/skills/contract-review/SKILL.md b/legal/skills/review-contract/SKILL.md similarity index 55% rename from legal/skills/contract-review/SKILL.md rename to legal/skills/review-contract/SKILL.md index 28cc090d..03020486 100644 --- a/legal/skills/contract-review/SKILL.md +++ b/legal/skills/review-contract/SKILL.md @@ -1,26 +1,66 @@ --- -name: contract-review -description: Review contracts against your organization's negotiation playbook, flagging deviations and generating redline suggestions. Use when reviewing vendor contracts, customer agreements, or any commercial agreement where you need clause-by-clause analysis against standard positions. +name: review-contract +description: Review a contract against your organization's negotiation playbook — flag deviations, generate redlines, provide business impact analysis. Use when reviewing vendor or customer agreements, when you need clause-by-clause analysis against standard positions, or when preparing a negotiation strategy with prioritized redlines and fallback positions. +argument-hint: "" --- -# Contract Review Skill +# /review-contract -- Contract Review Against Playbook -You are a contract review assistant for an in-house legal team. You analyze contracts against the organization's negotiation playbook, identify deviations, classify their severity, and generate actionable redline suggestions. 
+> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Review a contract against your organization's negotiation playbook. Analyze each clause, flag deviations, generate redline suggestions, and provide business impact analysis. **Important**: You assist with legal workflows but do not provide legal advice. All analysis should be reviewed by qualified legal professionals before being relied upon. -## Playbook-Based Review Methodology +## Invocation + +``` +/review-contract +``` + +Review the contract: @$1 + +## Workflow + +### Step 1: Accept the Contract + +Accept the contract in any of these formats: +- **File upload**: PDF, DOCX, or other document format +- **URL**: Link to a contract in your CLM, cloud storage (e.g., Box, Egnyte, SharePoint), or other document system +- **Pasted text**: Contract text pasted directly into the conversation + +If no contract is provided, prompt the user to supply one. + +### Step 2: Gather Context + +Ask the user for context before beginning the review: -### Loading the Playbook +1. **Which side are you on?** (vendor/supplier, customer/buyer, licensor, licensee, partner -- or other) +2. **Deadline**: When does this need to be finalized? (Affects prioritization of issues) +3. **Focus areas**: Any specific concerns? (e.g., "data protection is critical", "we need flexibility on term", "IP ownership is the key issue") +4. **Deal context**: Any relevant business context? (e.g., deal size, strategic importance, existing relationship) -Before reviewing any contract, check for a configured playbook in the user's local settings. The playbook defines the organization's standard positions, acceptable ranges, and escalation triggers for each major clause type. +If the user provides partial context, proceed with what you have and note assumptions. 
-If no playbook is available: -- Inform the user and offer to help create one -- If proceeding without a playbook, use widely-accepted commercial standards as a baseline -- Clearly label the review as "based on general commercial standards" rather than organizational positions +### Step 3: Load the Playbook -### Review Process +Look for the organization's contract review playbook in local settings (e.g., `legal.local.md` or similar configuration files). + +The playbook should define: +- **Standard positions**: The organization's preferred terms for each major clause type +- **Acceptable ranges**: Terms that can be agreed to without escalation +- **Escalation triggers**: Terms that require senior counsel review or outside counsel involvement + +**If no playbook is configured:** +- Inform the user that no playbook was found +- Offer two options: + 1. Help the user set up their playbook (walk through defining positions for key clauses) + 2. Proceed with a generic review using widely-accepted commercial standards as the baseline +- If proceeding generically, clearly note that the review is based on general commercial standards, not the organization's specific positions + +### Step 4: Clause-by-Clause Analysis + +Apply the following review process: 1. **Identify the contract type**: SaaS agreement, professional services, license, partnership, procurement, etc. The contract type affects which clauses are most material. 2. **Determine the user's side**: Vendor, customer, licensor, licensee, partner. This fundamentally changes the analysis (e.g., limitation of liability protections favor different parties). @@ -28,9 +68,28 @@ If no playbook is available: 4. **Analyze each material clause** against the playbook position. 5. **Consider the contract holistically**: Are the overall risk allocation and commercial terms balanced? 
-## Common Clause Analysis +Analyze the contract systematically, covering at minimum: + +| Clause Category | Key Review Points | +|----------------|-------------------| +| **Limitation of Liability** | Cap amount, carveouts, mutual vs. unilateral, consequential damages | +| **Indemnification** | Scope, mutual vs. unilateral, cap, IP infringement, data breach | +| **IP Ownership** | Pre-existing IP, developed IP, work-for-hire, license grants, assignment | +| **Data Protection** | DPA requirement, processing terms, sub-processors, breach notification, cross-border transfers | +| **Confidentiality** | Scope, term, carveouts, return/destruction obligations | +| **Representations & Warranties** | Scope, disclaimers, survival period | +| **Term & Termination** | Duration, renewal, termination for convenience, termination for cause, wind-down | +| **Governing Law & Dispute Resolution** | Jurisdiction, venue, arbitration vs. litigation | +| **Insurance** | Coverage requirements, minimums, evidence of coverage | +| **Assignment** | Consent requirements, change of control, exceptions | +| **Force Majeure** | Scope, notification, termination rights | +| **Payment Terms** | Net terms, late fees, taxes, price escalation | + +For each clause, assess against the playbook (or generic standards) and note whether it is present, absent, or unusual. 
-### Limitation of Liability +#### Detailed Clause Guidance + +##### Limitation of Liability **Key elements to review:** - Cap amount (fixed dollar amount, multiple of fees, or uncapped) @@ -47,7 +106,7 @@ If no playbook is available: - Broad carveouts that effectively eliminate the cap (e.g., "any breach of Section X" where Section X covers most obligations) - No consequential damages exclusion for one party's breaches -### Indemnification +##### Indemnification **Key elements to review:** - Whether indemnification is mutual or unilateral @@ -63,7 +122,7 @@ If no playbook is available: - No right to control defense of claims - Indemnification obligations that survive termination indefinitely -### Intellectual Property +##### Intellectual Property **Key elements to review:** - Ownership of pre-existing IP (each party should retain their own) @@ -79,7 +138,7 @@ If no playbook is available: - Unrestricted feedback clauses granting perpetual, irrevocable licenses - License scope broader than needed for the business relationship -### Data Protection +##### Data Protection **Key elements to review:** - Whether a Data Processing Agreement/Addendum (DPA) is required @@ -98,7 +157,7 @@ If no playbook is available: - No cross-border transfer protections when data moves internationally - Inadequate data deletion provisions -### Term and Termination +##### Term and Termination **Key elements to review:** - Initial term and renewal terms @@ -115,7 +174,7 @@ If no playbook is available: - Inadequate transition assistance provisions - Survival clauses that effectively extend the agreement indefinitely -### Governing Law and Dispute Resolution +##### Governing Law and Dispute Resolution **Key elements to review:** - Choice of law (governing jurisdiction) @@ -132,9 +191,11 @@ If no playbook is available: - Waiver of jury trial without corresponding protections - No escalation process before formal dispute resolution -## Deviation Severity Classification +### Step 5: Flag 
Deviations + +Classify each deviation from the playbook using a three-tier system: -### GREEN -- Acceptable +#### GREEN -- Acceptable The clause aligns with or is better than the organization's standard position. Minor variations that are commercially reasonable and do not increase risk materially. @@ -145,7 +206,7 @@ The clause aligns with or is better than the organization's standard position. M **Action**: Note for awareness. No negotiation needed. -### YELLOW -- Negotiate +#### YELLOW -- Negotiate The clause falls outside the standard position but within a negotiable range. The term is common in the market but not the organization's preference. Requires attention and likely negotiation, but not escalation. @@ -156,8 +217,11 @@ The clause falls outside the standard position but within a negotiable range. Th - Governing law in an acceptable but not preferred jurisdiction **Action**: Generate specific redline language. Provide fallback position. Estimate business impact of accepting vs. negotiating. +- **Include**: Specific redline language to bring the term back to standard position +- **Include**: Fallback position if the counterparty pushes back +- **Include**: Business impact of accepting as-is vs. negotiating -### RED -- Escalate +#### RED -- Escalate The clause falls outside acceptable range, triggers a defined escalation criterion, or poses material risk. Requires senior counsel review, outside counsel involvement, or business decision-maker sign-off. @@ -170,8 +234,20 @@ The clause falls outside acceptable range, triggers a defined escalation criteri - Governing law in a problematic jurisdiction with mandatory arbitration **Action**: Explain the specific risk. Provide market-standard alternative language. Estimate exposure. Recommend escalation path. 
+- **Include**: Why this is a RED flag (specific risk) +- **Include**: What the standard market position looks like +- **Include**: Business impact and potential exposure +- **Include**: Recommended escalation path + +### Step 6: Generate Redline Suggestions -## Redline Generation Best Practices +For each YELLOW and RED deviation, provide: +- **Current language**: Quote the relevant contract text +- **Suggested redline**: Specific alternative language +- **Rationale**: Brief explanation suitable for sharing with the counterparty +- **Priority**: Whether this is a must-have or nice-to-have in negotiation + +#### Redline Generation Best Practices When generating redline suggestions: @@ -182,7 +258,7 @@ When generating redline suggestions: 5. **Prioritize**: Not all redlines are equal. Indicate which are must-haves and which are nice-to-haves. 6. **Consider the relationship**: Adjust tone and approach based on whether this is a new vendor, strategic partner, or commodity supplier. -### Redline Format +#### Redline Format For each redline: ``` @@ -194,25 +270,33 @@ For each redline: **Fallback**: [Alternative position if primary redline is rejected] ``` -## Negotiation Priority Framework +### Step 7: Business Impact Summary + +Provide a summary section covering: +- **Overall risk assessment**: High-level view of the contract's risk profile +- **Top 3 issues**: The most important items to address +- **Negotiation strategy**: Recommended approach (which issues to lead with, what to concede) +- **Timeline considerations**: Any urgency factors affecting the negotiation approach + +#### Negotiation Priority Framework When presenting redlines, organize by negotiation priority: -### Tier 1 -- Must-Haves (Deal Breakers) +**Tier 1 -- Must-Haves (Deal Breakers)** Issues where the organization cannot proceed without resolution: - Uncapped or materially insufficient liability protections - Missing data protection requirements for regulated data - IP provisions that could 
jeopardize core assets - Terms that conflict with regulatory obligations -### Tier 2 -- Should-Haves (Strong Preferences) +**Tier 2 -- Should-Haves (Strong Preferences)** Issues that materially affect risk but have negotiation room: - Liability cap adjustments within range - Indemnification scope and mutuality - Termination flexibility - Audit and compliance rights -### Tier 3 -- Nice-to-Haves (Concession Candidates) +**Tier 3 -- Nice-to-Haves (Concession Candidates)** Issues that improve the position but can be conceded strategically: - Preferred governing law (if alternative is acceptable) - Notice period preferences @@ -220,3 +304,55 @@ Issues that improve the position but can be conceded strategically: - Insurance certificate requirements **Negotiation strategy**: Lead with Tier 1 items. Trade Tier 3 concessions to secure Tier 2 wins. Never concede on Tier 1 without escalation. + +### Step 8: CLM Routing (If Connected) + +If a Contract Lifecycle Management system is connected via MCP: +- Recommend the appropriate approval workflow based on contract type and risk level +- Suggest the correct routing path (e.g., standard approval, senior counsel, outside counsel) +- Note any required approvals based on contract value or risk flags + +If no CLM is connected, skip this step. + +## Output Format + +Structure the output as: + +``` +## Contract Review Summary + +**Document**: [contract name/identifier] +**Parties**: [party names and roles] +**Your Side**: [vendor/customer/etc.] 
+**Deadline**: [if provided] +**Review Basis**: [Playbook / Generic Standards] + +## Key Findings + +[Top 3-5 issues with severity flags] + +## Clause-by-Clause Analysis + +### [Clause Category] -- [GREEN/YELLOW/RED] +**Contract says**: [summary of the provision] +**Playbook position**: [your standard] +**Deviation**: [description of gap] +**Business impact**: [what this means practically] +**Redline suggestion**: [specific language, if YELLOW or RED] + +[Repeat for each major clause] + +## Negotiation Strategy + +[Recommended approach, priorities, concession candidates] + +## Next Steps + +[Specific actions to take] +``` + +## Notes + +- If the contract is in a language other than English, note this and ask if the user wants a translation or review in the original language +- For very long contracts (50+ pages), offer to focus on the most material sections first and then do a complete review +- Always remind the user that this analysis should be reviewed by qualified legal counsel before being relied upon for legal decisions diff --git a/legal/commands/signature-request.md b/legal/skills/signature-request/SKILL.md similarity index 87% rename from legal/commands/signature-request.md rename to legal/skills/signature-request/SKILL.md index 3f36bd63..242d64bd 100644 --- a/legal/commands/signature-request.md +++ b/legal/skills/signature-request/SKILL.md @@ -1,11 +1,12 @@ --- -description: Prepare and route a document for e-signature +name: signature-request +description: Prepare and route a document for e-signature — run a pre-signature checklist, configure signing order, and send for execution. Use when a contract is finalized and ready to sign, when verifying entity names, exhibits, and signature blocks before sending, or when setting up an envelope with sequential or parallel signers. 
argument-hint: "" --- # /signature-request -- E-Signature Routing -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Prepare a document for electronic signature — verify completeness, set signing order, and route for execution. diff --git a/legal/skills/nda-triage/SKILL.md b/legal/skills/triage-nda/SKILL.md similarity index 70% rename from legal/skills/nda-triage/SKILL.md rename to legal/skills/triage-nda/SKILL.md index 52e1067f..143c1113 100644 --- a/legal/skills/nda-triage/SKILL.md +++ b/legal/skills/triage-nda/SKILL.md @@ -1,36 +1,80 @@ --- -name: nda-triage -description: Screen incoming NDAs and classify them as GREEN (standard), YELLOW (needs review), or RED (significant issues). Use when a new NDA comes in from sales or business development, when assessing NDA risk level, or when deciding whether an NDA needs full counsel review. +name: triage-nda +description: Rapidly triage an incoming NDA and classify it as GREEN (standard approval), YELLOW (counsel review), or RED (full legal review). Use when a new NDA arrives from sales or business development, when screening for embedded non-solicits, non-competes, or missing carveouts, or when deciding whether an NDA can be signed under standard delegation. +argument-hint: "" --- -# NDA Triage Skill +# /triage-nda -- NDA Pre-Screening -You are an NDA screening assistant for an in-house legal team. You rapidly evaluate incoming NDAs against standard criteria, classify them by risk level, and provide routing recommendations. +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). + +Triage the NDA: @$1 + +Rapidly triage incoming NDAs against standard screening criteria. Classify the NDA for routing: standard approval, counsel review, or full legal review. 
**Important**: You assist with legal workflows but do not provide legal advice. All analysis should be reviewed by qualified legal professionals before being relied upon. -## NDA Screening Criteria and Checklist +## Invocation + +``` +/triage-nda +``` + +## Workflow + +### Step 1: Accept the NDA + +Accept the NDA in any format: +- **File upload**: PDF, DOCX, or other document format +- **URL**: Link to the NDA in a document system +- **Pasted text**: NDA text pasted directly + +If no NDA is provided, prompt the user to supply one. -When triaging an NDA, evaluate each of the following criteria systematically: +### Step 2: Load NDA Playbook -### 1. Agreement Structure +Look for NDA screening criteria in local settings (e.g., `legal.local.md`). + +The NDA playbook should define: +- Mutual vs. unilateral requirements +- Acceptable term lengths +- Required carveouts +- Prohibited provisions +- Organization-specific requirements + +**If no NDA playbook is configured:** +- Proceed with reasonable market-standard defaults +- Note clearly that defaults are being used +- Defaults applied: + - Mutual obligations required (unless the organization is only disclosing) + - Term: 2-3 years standard, up to 5 years for trade secrets + - Standard carveouts required: independently developed, publicly available, rightfully received from third party, required by law + - No non-solicitation or non-compete provisions + - No residuals clause (or narrowly scoped if present) + - Governing law in a reasonable commercial jurisdiction + +### Step 3: Quick Screen + +Evaluate the NDA against each screening criterion systematically. + +#### 1. Agreement Structure - [ ] **Type identified**: Mutual NDA, Unilateral (disclosing party), or Unilateral (receiving party) - [ ] **Appropriate for context**: Is the NDA type appropriate for the business relationship? 
(e.g., mutual for exploratory discussions, unilateral for one-way disclosures) - [ ] **Standalone agreement**: Confirm the NDA is a standalone agreement, not a confidentiality section embedded in a larger commercial agreement -### 2. Definition of Confidential Information +#### 2. Definition of Confidential Information - [ ] **Reasonable scope**: Not overbroad (avoid "all information of any kind whether or not marked as confidential") - [ ] **Marking requirements**: If marking is required, is it workable? (Written marking within 30 days of oral disclosure is standard) - [ ] **Exclusions present**: Standard exclusions defined (see Standard Carveouts below) - [ ] **No problematic inclusions**: Does not define publicly available information or independently developed materials as confidential -### 3. Obligations of Receiving Party +#### 3. Obligations of Receiving Party - [ ] **Standard of care**: Reasonable care or at least the same care as for own confidential information - [ ] **Use restriction**: Limited to the stated purpose - [ ] **Disclosure restriction**: Limited to those with need to know who are bound by similar obligations - [ ] **No onerous obligations**: No requirements that are impractical (e.g., encrypting all communications, maintaining physical logs) -### 4. Standard Carveouts +#### 4. Standard Carveouts All of the following carveouts should be present: - [ ] **Public knowledge**: Information that is or becomes publicly available through no fault of the receiving party - [ ] **Prior possession**: Information already known to the receiving party before disclosure @@ -38,29 +82,29 @@ All of the following carveouts should be present: - [ ] **Third-party receipt**: Information rightfully received from a third party without restriction - [ ] **Legal compulsion**: Right to disclose when required by law, regulation, or legal process (with notice to the disclosing party where legally permitted) -### 5. Permitted Disclosures +#### 5. 
Permitted Disclosures - [ ] **Employees**: Can share with employees who need to know - [ ] **Contractors/advisors**: Can share with contractors, advisors, and professional consultants under similar confidentiality obligations - [ ] **Affiliates**: Can share with affiliates (if needed for the business purpose) - [ ] **Legal/regulatory**: Can disclose as required by law or regulation -### 6. Term and Duration +#### 6. Term and Duration - [ ] **Agreement term**: Reasonable period for the business relationship (1-3 years is standard) - [ ] **Confidentiality survival**: Obligations survive for a reasonable period after termination (2-5 years is standard; trade secrets may be longer) - [ ] **Not perpetual**: Avoid indefinite or perpetual confidentiality obligations (exception: trade secrets, which may warrant longer protection) -### 7. Return and Destruction +#### 7. Return and Destruction - [ ] **Obligation triggered**: On termination or upon request - [ ] **Reasonable scope**: Return or destroy confidential information and all copies - [ ] **Retention exception**: Allows retention of copies required by law, regulation, or internal compliance/backup policies - [ ] **Certification**: Certification of destruction is reasonable; sworn affidavit is onerous -### 8. Remedies +#### 8. Remedies - [ ] **Injunctive relief**: Acknowledgment that breach may cause irreparable harm and equitable relief may be appropriate is standard - [ ] **No pre-determined damages**: Avoid liquidated damages clauses in NDAs - [ ] **Not one-sided**: Remedies provisions apply equally to both parties (in mutual NDAs) -### 9. Problematic Provisions to Flag +#### 9. 
Problematic Provisions to Flag - [ ] **No non-solicitation**: NDA should not contain employee non-solicitation provisions - [ ] **No non-compete**: NDA should not contain non-compete provisions - [ ] **No exclusivity**: NDA should not restrict either party from entering similar discussions with others @@ -69,14 +113,16 @@ All of the following carveouts should be present: - [ ] **No IP assignment or license**: NDA should not grant any intellectual property rights - [ ] **No audit rights**: Unusual in standard NDAs -### 10. Governing Law and Jurisdiction +#### 10. Governing Law and Jurisdiction - [ ] **Reasonable jurisdiction**: A well-established commercial jurisdiction - [ ] **Consistent**: Governing law and jurisdiction should be in the same or related jurisdictions - [ ] **No mandatory arbitration** (in standard NDAs): Litigation is generally preferred for NDA disputes -## GREEN / YELLOW / RED Classification Rules +### Step 4: Classify + +Based on the screening results, assign a classification: -### GREEN -- Standard Approval +#### GREEN -- Standard Approval **All** of the following must be true: - NDA is mutual (or unilateral in the appropriate direction) @@ -91,8 +137,9 @@ All of the following carveouts should be present: - Definition of confidential information is reasonably scoped **Routing**: Approve via standard delegation of authority. No counsel review required. +- **Action**: Proceed to signature with standard delegation of authority -### YELLOW -- Counsel Review Needed +#### YELLOW -- Counsel Review Needed **One or more** of the following are present, but the NDA is not fundamentally problematic: - Definition of confidential information is broader than preferred but not unreasonable @@ -106,8 +153,9 @@ All of the following carveouts should be present: - Unusual but non-harmful provisions (e.g., obligation to notify of potential breach) **Routing**: Flag specific issues for counsel review. 
Counsel can likely resolve with minor redlines in a single review pass. +- **Action**: Counsel can likely resolve in a single review pass -### RED -- Significant Issues +#### RED -- Significant Issues **One or more** of the following are present: - **Unilateral when mutual is required** (or wrong direction for the relationship) @@ -124,6 +172,65 @@ All of the following carveouts should be present: - **The document is not actually an NDA** (contains substantive commercial terms, exclusivity, or other obligations beyond confidentiality) **Routing**: Full legal review required. Do not sign. Requires negotiation, counterproposal with the organization's standard form NDA, or rejection. +- **Action**: Do not sign; requires negotiation or counterproposal + +### Step 5: Generate Triage Report + +Output a structured report: + +``` +## NDA Triage Report + +**Classification**: [GREEN / YELLOW / RED] +**Parties**: [party names] +**Type**: [Mutual / Unilateral (disclosing) / Unilateral (receiving)] +**Term**: [duration] +**Governing Law**: [jurisdiction] +**Review Basis**: [Playbook / Default Standards] + +## Screening Results + +| Criterion | Status | Notes | +|-----------|--------|-------| +| Mutual Obligations | [PASS/FLAG/FAIL] | [details] | +| Definition Scope | [PASS/FLAG/FAIL] | [details] | +| Term | [PASS/FLAG/FAIL] | [details] | +| Standard Carveouts | [PASS/FLAG/FAIL] | [details] | +| [etc.] | | | + +## Issues Found + +### [Issue 1 -- YELLOW/RED] +**What**: [description] +**Risk**: [what could go wrong] +**Suggested Fix**: [specific language or approach] + +[Repeat for each issue] + +## Recommendation + +[Specific next step: approve, send for review with specific notes, or reject/counter] + +## Next Steps + +1. [Action item 1] +2. 
[Action item 2] +``` + +### Step 6: Routing Suggestion + +Based on the classification, recommend the appropriate next step: + +| Classification | Recommended Action | Typical Timeline | +|---|---|---| +| GREEN | Approve and route for signature per delegation of authority | Same day | +| YELLOW | Send to designated reviewer with specific issues flagged | 1-2 business days | +| RED | Engage counsel for full review; prepare counterproposal or standard form | 3-5 business days | + +For YELLOW and RED classifications: +- Identify the specific person or role that should review (if the organization has defined routing rules) +- Include a brief summary of issues suitable for the reviewer to quickly understand the key points +- If the organization has a standard form NDA, recommend sending it as a counterproposal for RED-classified NDAs ## Common NDA Issues and Standard Positions @@ -148,17 +255,8 @@ All of the following carveouts should be present: **Standard position**: 2-5 years from disclosure or termination, whichever is later. Trade secrets may warrant protection for as long as they remain trade secrets. **Redline approach**: Replace perpetual obligation with a defined term. Offer a trade secret carveout for longer protection of qualifying information. 
-## Routing Recommendations - -After classification, recommend the appropriate next step: - -| Classification | Recommended Action | Typical Timeline | -|---|---|---| -| GREEN | Approve and route for signature per delegation of authority | Same day | -| YELLOW | Send to designated reviewer with specific issues flagged | 1-2 business days | -| RED | Engage counsel for full review; prepare counterproposal or standard form | 3-5 business days | +## Notes -For YELLOW and RED classifications: -- Identify the specific person or role that should review (if the organization has defined routing rules) -- Include a brief summary of issues suitable for the reviewer to quickly understand the key points -- If the organization has a standard form NDA, recommend sending it as a counterproposal for RED-classified NDAs +- If the document is not actually an NDA (e.g., it's labeled as an NDA but contains substantive commercial terms), flag this immediately as a RED and recommend full contract review instead +- For NDAs that are part of a larger agreement (e.g., confidentiality section in an MSA), note that the broader agreement context may affect the analysis +- Always note that this is a screening tool and counsel should review any items the user is uncertain about diff --git a/legal/commands/vendor-check.md b/legal/skills/vendor-check/SKILL.md similarity index 92% rename from legal/commands/vendor-check.md rename to legal/skills/vendor-check/SKILL.md index 147997c1..d21d7465 100644 --- a/legal/commands/vendor-check.md +++ b/legal/skills/vendor-check/SKILL.md @@ -1,11 +1,12 @@ --- -description: Check the status of existing agreements with a vendor across all connected systems +name: vendor-check +description: Check the status of existing agreements with a vendor across all connected systems — CLM, CRM, email, and document storage — with gap analysis and upcoming deadlines. 
Use when onboarding or renewing a vendor, when you need a consolidated view of what's signed and what's missing (MSA, DPA, SOW), or when checking for approaching expirations and surviving obligations. argument-hint: "[vendor name]" --- # /vendor-check -- Vendor Agreement Status -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). +> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../../CONNECTORS.md). Check the status of existing agreements with a vendor across all connected systems. Provides a consolidated view of the legal relationship. diff --git a/marketing/.claude-plugin/plugin.json b/marketing/.claude-plugin/plugin.json index 3e202c0f..b1485cd4 100644 --- a/marketing/.claude-plugin/plugin.json +++ b/marketing/.claude-plugin/plugin.json @@ -1,6 +1,6 @@ { "name": "marketing", - "version": "1.1.0", + "version": "1.2.0", "description": "Create content, plan campaigns, and analyze performance across marketing channels. 
Maintain brand voice consistency, track competitors, and report on what's working.", "author": { "name": "Anthropic" diff --git a/marketing/.mcp.json b/marketing/.mcp.json index ea26a900..c66b1bbe 100644 --- a/marketing/.mcp.json +++ b/marketing/.mcp.json @@ -20,6 +20,14 @@ "type": "http", "url": "https://mcp.amplitude.com/mcp" }, + "amplitude-eu": { + "type": "http", + "url": "https://mcp.eu.amplitude.com/mcp" + }, + "mixpanel": { + "type": "http", + "url": "https://mcp.mixpanel.com/mcp" + }, "notion": { "type": "http", "url": "https://mcp.notion.com/mcp" @@ -36,6 +44,10 @@ "type": "http", "url": "https://mcp.klaviyo.com/mcp" }, + "supermetrics": { + "type": "http", + "url": "https://mcp.supermetrics.com/mcp" + }, "google-calendar": { "type": "http", "url": "https://gcal.mcp.claude.com/mcp" diff --git a/marketing/CONNECTORS.md b/marketing/CONNECTORS.md index 5013d8ba..4486e303 100644 --- a/marketing/CONNECTORS.md +++ b/marketing/CONNECTORS.md @@ -13,7 +13,8 @@ Plugins are **tool-agnostic** — they describe workflows in terms of categories | Chat | `~~chat` | Slack | Microsoft Teams | | Design | `~~design` | Canva, Figma | Adobe Creative Cloud | | Marketing automation | `~~marketing automation` | HubSpot | Marketo, Pardot, Mailchimp | -| Product analytics | `~~product analytics` | Amplitude | Mixpanel, Google Analytics | +| Product analytics | `~~product analytics` | Amplitude, Mixpanel | Google Analytics | | Knowledge base | `~~knowledge base` | Notion | Confluence, Guru | | SEO | `~~SEO` | Ahrefs, Similarweb | Semrush, Moz | | Email marketing | `~~email marketing` | Klaviyo | Mailchimp, Brevo, Customer.io | +| Marketing analytics | `~~marketing analytics` | Supermetrics | Google Analytics, Mailchimp, Semrush | diff --git a/marketing/README.md b/marketing/README.md index 6adcdd27..f3598562 100644 --- a/marketing/README.md +++ b/marketing/README.md @@ -82,7 +82,9 @@ This plugin works with the following MCP servers: - **Figma** — Access design files and brand assets 
- **HubSpot** — Pull campaign data, manage contacts, and track marketing automation - **Amplitude** — Pull product analytics and user behavior data for performance reporting +- **Mixpanel** — Pull product analytics, funnels, and user behavior data for performance reporting - **Notion** — Access briefs, style guides, and campaign documents - **Ahrefs** — SEO keyword research, backlink analysis, and site audits - **Similarweb** — Competitive traffic analysis and market benchmarking - **Klaviyo** — Draft and review email marketing sequences and campaigns +- **Supermetrics** — Pull marketing data from multiple platforms for analytics and reporting diff --git a/marketing/commands/brand-review.md b/marketing/commands/brand-review.md deleted file mode 100644 index 436ee1a6..00000000 --- a/marketing/commands/brand-review.md +++ /dev/null @@ -1,122 +0,0 @@ ---- -description: Review content against your brand voice, style guide, and messaging pillars -argument-hint: "" ---- - -# Brand Review - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Review marketing content against brand voice, style guidelines, and messaging standards. Flag deviations and provide specific improvement suggestions. - -## Trigger - -User runs `/brand-review` or asks to review, check, or audit content against brand guidelines. - -## Inputs - -1. **Content to review** — accept content in any of these forms: - - Pasted directly into the conversation - - A file path or ~~knowledge base reference (e.g. Notion page, shared doc) - - A URL to a published page - - Multiple pieces for batch review - -2. **Brand guidelines source** (determined automatically): - - If a brand style guide is configured in local settings, use it automatically - - If not configured, ask: "Do you have a brand style guide or voice guidelines I should review against? You can paste them, share a file, or describe your brand voice. 
Otherwise, I'll do a general review for clarity, consistency, and professionalism." - -## Review Process - -### With Brand Guidelines Configured - -Evaluate the content against each of these dimensions: - -#### Voice and Tone -- Does the content match the defined brand voice attributes? -- Is the tone appropriate for the content type and audience? -- Are there shifts in voice that feel inconsistent? -- Flag specific sentences or phrases that deviate with an explanation of why - -#### Terminology and Language -- Are preferred brand terms used correctly? -- Are any "avoid" terms or phrases present? -- Is jargon level appropriate for the target audience? -- Are product names, feature names, and branded terms used correctly (capitalization, formatting)? - -#### Messaging Pillars -- Does the content align with defined messaging pillars or value propositions? -- Are claims consistent with approved messaging? -- Is the content reinforcing or contradicting brand positioning? - -#### Style Guide Compliance -- Grammar and punctuation per style guide (e.g., Oxford comma, title case vs. sentence case) -- Formatting conventions (headers, lists, emphasis) -- Number formatting, date formatting -- Acronym usage (defined on first use?) - -### Without Brand Guidelines (Generic Review) - -Evaluate the content for: - -#### Clarity -- Is the main message clear within the first paragraph? -- Are sentences concise and easy to understand? -- Is the structure logical and easy to follow? -- Are there ambiguous statements or unclear references? - -#### Consistency -- Is the tone consistent throughout? -- Are terms used consistently (no switching between synonyms for the same concept)? -- Is formatting consistent (headers, lists, capitalization)? - -#### Professionalism -- Is the content free of typos, grammatical errors, and awkward phrasing? -- Is the tone appropriate for the intended audience? -- Are claims supported or substantiated? 
- -### Legal and Compliance Flags (Always Checked) - -Regardless of whether brand guidelines are configured, flag: -- **Unsubstantiated claims** — superlatives ("best", "fastest", "only") without evidence or qualification -- **Missing disclaimers** — financial claims, health claims, or guarantees that may need legal disclaimers -- **Comparative claims** — comparisons to competitors that could be challenged -- **Regulatory language** — content that may need compliance review (financial services, healthcare, etc.) -- **Testimonial issues** — quotes or endorsements without attribution or disclosure -- **Copyright concerns** — content that appears to be closely paraphrased from other sources - -## Output Format - -Present the review as: - -### Summary -- Overall assessment: how well the content aligns with brand standards (or general quality) -- 1-2 sentence summary of the biggest strengths -- 1-2 sentence summary of the most important improvements - -### Detailed Findings - -For each issue found, provide: - -| Issue | Location | Severity | Suggestion | -|-------|----------|----------|------------| - -Where severity is: -- **High** — contradicts brand voice, contains compliance risk, or significantly undermines messaging -- **Medium** — inconsistent with guidelines but not damaging -- **Low** — minor style or preference issue - -### Revised Sections - -For the top 3-5 highest-severity issues, provide a before/after showing the original text and a suggested revision. - -### Legal/Compliance Flags - -List any legal or compliance concerns separately with recommended actions. - -## After Review - -Ask: "Would you like me to: -- Revise the full content with these suggestions applied? -- Focus on fixing just the high-severity issues? -- Review additional content against the same guidelines? -- Help you document your brand voice for future reviews?" 
diff --git a/marketing/commands/campaign-plan.md b/marketing/commands/campaign-plan.md deleted file mode 100644 index 36acebbb..00000000 --- a/marketing/commands/campaign-plan.md +++ /dev/null @@ -1,120 +0,0 @@ ---- -description: Generate a full campaign brief with objectives, channels, content calendar, and success metrics -argument-hint: "" ---- - -# Campaign Plan - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Generate a comprehensive marketing campaign brief with objectives, audience, messaging, channel strategy, content calendar, and success metrics. - -## Trigger - -User runs `/campaign-plan` or asks to plan, design, or build a marketing campaign. - -## Inputs - -Gather the following from the user. If not provided, ask before proceeding: - -1. **Campaign goal** — the primary objective (e.g., drive signups, increase awareness, launch a product, generate leads, re-engage churned users) - -2. **Target audience** — who the campaign is aimed at (demographics, roles, industries, pain points, buying stage) - -3. **Timeline** — campaign duration and any fixed dates (launch date, event date, seasonal deadline) - -4. **Budget range** — approximate budget or budget tier (optional; if not provided, generate a channel-agnostic plan and note where budget allocation would matter) - -5. **Additional context** (optional): - - Product or service being promoted - - Key differentiators or value propositions - - Previous campaign performance or learnings - - Brand guidelines or constraints - - Geographic focus - -## Campaign Brief Structure - -Generate a campaign brief with the following sections: - -### 1. Campaign Overview -- Campaign name suggestion -- One-sentence campaign summary -- Primary objective with a specific, measurable goal -- Secondary objectives (if applicable) - -### 2. 
Target Audience -- Primary audience segment with description -- Secondary audience segment (if applicable) -- Audience pain points and motivations -- Where they spend time (channels, communities, publications) -- Buying stage alignment (awareness, consideration, decision) - -### 3. Key Messages -- Core campaign message (one sentence) -- 3-4 supporting messages tailored to audience pain points -- Message variations by channel (if different tones are needed) -- Proof points or evidence to support each message - -### 4. Channel Strategy -Recommend channels based on audience and goal. For each channel, include: -- Why this channel fits the audience and objective -- Content format recommendations -- Estimated effort level (low, medium, high) -- Budget allocation suggestion (if budget was provided) - -Consider channels from: -- Owned: blog, email, website, social media profiles -- Earned: PR, influencer partnerships, guest posts, community engagement -- Paid: search ads, social ads, display, sponsored content, events - -### 5. Content Calendar -Create a week-by-week (or day-by-day for short campaigns) content calendar: -- What content to produce each week -- Which channel each piece targets -- Key milestones and deadlines -- Dependencies between pieces (e.g., "landing page must be live before paid ads launch") - -Format as a table: - -| Week | Content Piece | Channel | Owner/Notes | Status | -|------|--------------|---------|-------------|--------| - -### 6. Content Pieces Needed -List every content asset required for the campaign: -- Asset name and type (blog post, email, social post, ad creative, landing page, etc.) -- Brief description of what it should contain -- Priority (must-have vs. nice-to-have) -- Suggested timeline for creation - -### 7. 
Success Metrics -Define KPIs aligned to the campaign objective: -- Primary KPI with target number -- Secondary KPIs (3-5) -- How each metric will be tracked -- Reporting cadence recommendation - -If ~~product analytics is connected, reference any available historical performance benchmarks to inform targets. - -### 8. Budget Allocation (if budget provided) -- Breakdown by channel or activity -- Production costs vs. distribution costs -- Contingency recommendation (typically 10-15%) - -### 9. Risks and Mitigations -- 2-3 potential risks (timeline, audience mismatch, channel underperformance) -- Mitigation strategy for each - -### 10. Next Steps -- Immediate action items to kick off the campaign -- Stakeholder approvals needed -- Key decision points - -## Output - -Present the full campaign brief with clear headings and formatting. After the brief, ask: - -"Would you like me to: -- Dive deeper into any section? -- Draft specific content pieces from the calendar? -- Create a competitive analysis to inform the messaging? -- Adjust the plan for a different budget or timeline?" diff --git a/marketing/commands/competitive-brief.md b/marketing/commands/competitive-brief.md deleted file mode 100644 index 7162e59e..00000000 --- a/marketing/commands/competitive-brief.md +++ /dev/null @@ -1,134 +0,0 @@ ---- -description: Research competitors and generate a positioning and messaging comparison -argument-hint: "" ---- - -# Competitive Brief - -> If you see unfamiliar placeholders or need to check which tools are connected, see [CONNECTORS.md](../CONNECTORS.md). - -Research competitors and generate a structured competitive analysis comparing positioning, messaging, content strategy, and market presence. - -## Trigger - -User runs `/competitive-brief` or asks for a competitive analysis, competitor research, or market comparison. - -## Inputs - -Gather the following from the user: - -1. **Competitor name(s)** — one or more competitors to analyze (required) - -2. 
**Your company/product context** (optional but recommended): - - What you sell and to whom - - Your positioning or value proposition - - Key differentiators you want to highlight - -3. **Focus areas** (optional — if not specified, cover all): - - Messaging and positioning - - Product and feature comparison - - Content and thought leadership strategy - - Recent announcements and news - - Pricing and packaging (if publicly available) - - Market presence and audience - -## Research Process - -For each competitor, research using web search: - -1. **Company website** — homepage messaging, product pages, about page, pricing page -2. **Recent news** — press releases, funding announcements, product launches, partnerships (last 6 months) -3. **Content strategy** — blog topics, resource types, social media presence, webinars, podcasts -4. **Review sites and comparisons** — third-party comparisons, analyst mentions, customer review themes -5. **Job postings** — hiring signals that indicate strategic direction (optional) - -## Competitive Brief Structure - -### 1. Executive Summary -- 2-3 sentence overview of the competitive landscape -- Key takeaway: your biggest opportunity and biggest threat - -### 2. 
Competitor Profiles - -For each competitor: - -#### Company Overview -- What they do (one-sentence positioning) -- Target audience -- Company size/stage indicators (funding, employee count if available) -- Key recent developments - -#### Messaging Analysis -- Primary tagline or headline -- Core value proposition -- Key messaging themes (3-5) -- Tone and voice characterization -- How they describe the problem they solve - -#### Product/Solution Positioning -- How they categorize their product -- Key features they emphasize -- Claimed differentiators -- Pricing approach (if publicly available) - -#### Content Strategy -- Blog frequency and topics -- Content types produced (ebooks, webinars, case studies, tools) -- Social media presence and engagement approach -- Thought leadership themes -- SEO strategy observations (what terms they appear to target) - -#### Strengths -- What they do well -- Where their messaging resonates -- Competitive advantages - -#### Weaknesses -- Gaps in their messaging or positioning -- Areas where they are vulnerable -- Customer complaints or criticism themes (from reviews) - -### 3. Messaging Comparison Matrix - -| Dimension | Your Company | Competitor A | Competitor B | -|-----------|-------------|--------------|--------------| -| Primary tagline | ... | ... | ... | -| Target buyer | ... | ... | ... | -| Key differentiator | ... | ... | ... | -| Tone/voice | ... | ... | ... | -| Core value prop | ... | ... | ... | - -(Include user's company only if they provided their positioning context) - -### 4. Content Gap Analysis -- Topics your competitors cover that you do not (or vice versa) -- Content formats they use that you could adopt -- Keywords or themes they own vs. opportunities they have missed - -### 5. Opportunities -- Positioning gaps you can exploit -- Messaging angles your competitors have not claimed -- Audience segments they are underserving -- Content or channel opportunities - -### 6. 
Threats -- Areas where competitors are strong and you are vulnerable -- Trends that favor their positioning -- Recent moves that could shift the market - -### 7. Recommended Actions -- 3-5 specific, actionable recommendations based on the analysis -- Quick wins (things you can act on this week) -- Strategic moves (longer-term positioning or content investments) - -## Output - -Present the full competitive brief with clear formatting. Note the date of the research so the user knows the freshness of the data. - -After the brief, ask: - -"Would you like me to: -- Create a battlecard for your sales team based on this analysis? -- Draft messaging that exploits the positioning gaps identified? -- Dive deeper into any specific competitor? -- Set up a competitive monitoring plan?" diff --git a/marketing/commands/performance-report.md b/marketing/commands/performance-report.md deleted file mode 100644 index 86d53be7..00000000 --- a/marketing/commands/performance-report.md +++ /dev/null @@ -1,163 +0,0 @@ ---- -description: Build a marketing performance report with key metrics, trends, and optimization recommendations -argument-hint: "