@@ -232,6 +232,7 @@ llm.sync.anthropic.api-key=
#RAG
codecrow.rag.api.url=http://host.docker.internal:8001
codecrow.rag.api.enabled=true
codecrow.rag.api.secret=change-me-to-a-random-secret
# RAG API timeouts (in seconds)
codecrow.rag.api.timeout.connect=30
codecrow.rag.api.timeout.read=120
11 changes: 11 additions & 0 deletions deployment/config/mcp-client/.env.sample
@@ -2,6 +2,17 @@ AI_CLIENT_PORT=8000
RAG_ENABLED=true
RAG_API_URL=http://host.docker.internal:8001

# === Service-to-Service Auth ===
# Shared secret for authenticating requests between internal services.
# Must match the SERVICE_SECRET configured on rag-pipeline.
# Leave empty to disable auth (dev mode only).
# IMPORTANT: Avoid $ { } characters in the secret — they can cause dotenv parsing issues.
SERVICE_SECRET=change-me-to-a-random-secret

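The wire format for this shared secret is not part of the diff; as a minimal sketch, assuming the secret travels in a request header (the name "X-Service-Secret" and all helper names below are illustrative assumptions), the two sides could look like this in Java:

import java.net.URI;
import java.net.http.HttpRequest;
import java.nio.charset.StandardCharsets;
import java.security.MessageDigest;

// Sketch only. The header name "X-Service-Secret" and these helpers are
// assumptions; the services' actual wire format is not shown in this diff.
final class ServiceAuthSketch {
    private static final String SECRET = System.getenv("SERVICE_SECRET");

    // Caller side (mcp-client): attach the shared secret to outbound requests.
    static HttpRequest signed(String url, String body) {
        HttpRequest.Builder b = HttpRequest.newBuilder(URI.create(url))
                .POST(HttpRequest.BodyPublishers.ofString(body));
        if (SECRET != null && !SECRET.isEmpty()) {
            b.header("X-Service-Secret", SECRET);
        }
        return b.build();
    }

    // Receiver side (rag-pipeline): constant-time comparison avoids timing leaks.
    static boolean verify(String presented) {
        if (SECRET == null || SECRET.isEmpty()) {
            return true; // auth disabled, dev mode only
        }
        return presented != null && MessageDigest.isEqual(
                SECRET.getBytes(StandardCharsets.UTF_8),
                presented.getBytes(StandardCharsets.UTF_8));
    }
}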
# === Concurrency ===
# Max parallel review requests handled simultaneously (default: 4)
MAX_CONCURRENT_REVIEWS=4

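A cap like MAX_CONCURRENT_REVIEWS is commonly enforced with a counting semaphore; the sketch below is an illustrative Java analogue (mcp-client's own implementation is not shown in this diff, and all names here are assumptions):

import java.util.concurrent.Callable;
import java.util.concurrent.Semaphore;

// Sketch only; mcp-client is not shown in this diff, so these names are assumptions.
final class ReviewGate {
    private final Semaphore slots;

    ReviewGate(int maxConcurrentReviews) {
        this.slots = new Semaphore(maxConcurrentReviews); // e.g. MAX_CONCURRENT_REVIEWS=4
    }

    <T> T withSlot(Callable<T> review) throws Exception {
        slots.acquire(); // blocks once all slots are taken
        try {
            return review.call();
        } finally {
            slots.release();
        }
    }
}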
# API access for Platform MCP (internal network only)
CODECROW_API_URL=http://codecrow-web-application:8081

30 changes: 30 additions & 0 deletions deployment/config/rag-pipeline/.env.sample
@@ -1,7 +1,37 @@
# === Service-to-Service Auth ===
# Shared secret for authenticating incoming requests from mcp-client.
# Must match the SERVICE_SECRET configured on mcp-client.
# Leave empty to disable auth (dev mode only).
# IMPORTANT: Avoid $ { } characters in the secret — they can cause dotenv parsing issues.
SERVICE_SECRET=change-me-to-a-random-secret

# === Path Traversal Guard ===
# Root directory that repo_path arguments are allowed under.
# The rag-pipeline will reject any index/query request whose resolved
# path escapes this directory. Default: /tmp
ALLOWED_REPO_ROOT=/tmp

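The guard itself lives in rag-pipeline and is not part of this diff; below is a minimal Java sketch of the resolve-then-check pattern the comment describes, with all names assumed:

import java.nio.file.Path;

// Sketch only; the real guard lives in rag-pipeline and is not in this diff.
final class RepoPathGuard {
    private final Path allowedRoot;

    RepoPathGuard(String allowedRepoRoot) {
        this.allowedRoot = Path.of(allowedRepoRoot).toAbsolutePath().normalize();
    }

    // normalize() collapses ".." segments before the containment check;
    // resolving symlinks as well would additionally need toRealPath().
    boolean isAllowed(String repoPath) {
        Path resolved = Path.of(repoPath).toAbsolutePath().normalize();
        return resolved.startsWith(allowedRoot);
    }
}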
#QDRANT configuration
QDRANT_URL=http://qdrant:6333
QDRANT_COLLECTION_PREFIX=codecrow

# ollama/openrouter
EMBEDDING_PROVIDER=openrouter
OLLAMA_BASE_URL=http://localhost:11434
OLLAMA_EMBEDDING_MODEL=qwen3-embedding:0.6b

# Ollama Performance Tuning
# Batch size for embedding requests (higher = better throughput, more memory)
OLLAMA_BATCH_SIZE=100
# Request timeout in seconds (increase for slow CPU)
OLLAMA_TIMEOUT=120

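The batch size trades throughput against memory because each request carries OLLAMA_BATCH_SIZE texts at once; an illustrative Java sketch of the partitioning step (the pipeline's real batching code is not in this diff):

import java.util.ArrayList;
import java.util.List;

// Sketch only; the pipeline's real batching code is not part of this diff.
final class EmbeddingBatcher {
    // Split texts into OLLAMA_BATCH_SIZE-sized slices, one request per slice.
    static List<List<String>> toBatches(List<String> texts, int batchSize) {
        List<List<String>> batches = new ArrayList<>();
        for (int i = 0; i < texts.size(); i += batchSize) {
            batches.add(texts.subList(i, Math.min(i + batchSize, texts.size())));
        }
        return batches;
    }
}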
# CPU Threading Optimization (set based on your CPU cores)
# Recommended: physical_cores - 1 (leave 1 core for system)
OMP_NUM_THREADS=6
MKL_NUM_THREADS=6
OPENBLAS_NUM_THREADS=6

# OpenRouter Configuration
# Get your API key from https://openrouter.ai/
OPENROUTER_API_KEY=sk-or-v1-your-api-key-here
17 changes: 17 additions & 0 deletions deployment/config/web-frontend/.env.sample
@@ -5,6 +5,23 @@ SERVER_PORT=8080

VITE_BLOG_URL=http://localhost:8083

# ============================================================================
# Feature Flags (Cloud-specific features - disabled by default for OSS)
# ============================================================================
# Set to "true" to enable cloud features. Leave empty or "false" to disable.
#
# VITE_FEATURE_BILLING - Enables billing page, subscription management, payment methods
VITE_FEATURE_BILLING=false
#
# VITE_FEATURE_CLOUD_PLANS - Enables cloud subscription plans (Pro, Pro+, Enterprise)
VITE_FEATURE_CLOUD_PLANS=false
#
# VITE_FEATURE_USAGE_ANALYTICS - Enables usage tracking and quota management
VITE_FEATURE_USAGE_ANALYTICS=false
#
# VITE_FEATURE_ENTERPRISE - Enables SSO, SAML, and advanced team management
VITE_FEATURE_ENTERPRISE=false


# New Relic Browser Monitoring (Optional - leave empty to disable)
# Get these values from: https://one.newrelic.com -> Browser -> Add data -> Browser monitoring
@@ -23,6 +23,7 @@
exports org.rostilos.codecrow.analysisengine.aiclient;
exports org.rostilos.codecrow.analysisengine.config;
exports org.rostilos.codecrow.analysisengine.dto.request.ai;
exports org.rostilos.codecrow.analysisengine.dto.request.ai.enrichment;
exports org.rostilos.codecrow.analysisengine.dto.request.processor;
exports org.rostilos.codecrow.analysisengine.dto.request.validation;
exports org.rostilos.codecrow.analysisengine.exception;
@@ -1,6 +1,7 @@
package org.rostilos.codecrow.analysisengine.dto.request.ai;

Check failure on line 1 in java-ecosystem/libs/analysis-engine/src/main/java/org/rostilos/codecrow/analysisengine/dto/request/ai/AiAnalysisRequestImpl.java

CodeCrow-Local / CodeCrow Analysis: HIGH severity issue

The deduplication logic in 'withAllPrAnalysesData' fails to merge resolved status when the input list 'allPrAnalyses' is sorted DESC (newest first), as specified in the Javadoc. In DESC order, the first issue seen for a fingerprint is the newest. Subsequent (older) issues will have a lower 'currentVersion' than the 'existingVersion' in the map, causing the 'if (currentVersion > existingVersion)' block to be skipped. Consequently, if an older version was resolved but the newest is open, the re...

Suggested fix: Sort the input list in ascending order (oldest first) before processing, or adjust the logic to handle DESC order by checking if the older (current) issue is resolved when the existing (newer) one is not.
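A minimal Java sketch of the first suggested remedy, sorting oldest-first before the merge loop; PrAnalysisData and analysisVersion() are hypothetical stand-ins, since the real types are not visible in this diff:

import java.util.ArrayList;
import java.util.Comparator;
import java.util.List;

// Sketch only: PrAnalysisData and analysisVersion() are hypothetical stand-ins
// for types that are not visible in this diff.
final class DedupOrderFix {
    record PrAnalysisData(long analysisVersion) {}

    static List<PrAnalysisData> oldestFirst(List<PrAnalysisData> allPrAnalyses) {
        List<PrAnalysisData> ascending = new ArrayList<>(allPrAnalyses);
        ascending.sort(Comparator.comparingLong(PrAnalysisData::analysisVersion));
        // Feeding this into the existing fingerprint/merge loop makes the
        // 'currentVersion > existingVersion' branch fire for genuinely newer
        // issues, so the newest resolved status wins.
        return ascending;
    }
}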

import com.fasterxml.jackson.annotation.JsonProperty;
import org.rostilos.codecrow.analysisengine.dto.request.ai.enrichment.PrEnrichmentDataDto;
import org.rostilos.codecrow.core.model.ai.AIConnection;
import org.rostilos.codecrow.core.model.ai.AIProviderKey;
import org.rostilos.codecrow.core.model.codeanalysis.AnalysisMode;
@@ -44,6 +45,9 @@
protected final String deltaDiff;
protected final String previousCommitHash;
protected final String currentCommitHash;

// File enrichment data (full file contents + dependency graph)
protected final PrEnrichmentDataDto enrichmentData;

protected AiAnalysisRequestImpl(Builder<?> builder) {
this.projectId = builder.projectId;
@@ -74,6 +78,8 @@
this.deltaDiff = builder.deltaDiff;
this.previousCommitHash = builder.previousCommitHash;
this.currentCommitHash = builder.currentCommitHash;
// File enrichment data
this.enrichmentData = builder.enrichmentData;
}

public Long getProjectId() {
@@ -181,6 +187,10 @@
return currentCommitHash;
}

public PrEnrichmentDataDto getEnrichmentData() {
return enrichmentData;
}


public static Builder<?> builder() {
return new Builder<>();
@@ -216,6 +226,8 @@
private String deltaDiff;
private String previousCommitHash;
private String currentCommitHash;
// File enrichment data
private PrEnrichmentDataDto enrichmentData;

protected Builder() {
}
@@ -349,7 +361,7 @@
private String computeIssueFingerprint(AiRequestPreviousIssueDTO issue) {
String file = issue.file() != null ? issue.file() : "";
// Normalize line to nearest multiple of 3 for tolerance
int lineGroup = issue.line() != null ? (issue.line() / 3) : 0;

Check warning on line 364 in java-ecosystem/libs/analysis-engine/src/main/java/org/rostilos/codecrow/analysisengine/dto/request/ai/AiAnalysisRequestImpl.java

CodeCrow-Local / CodeCrow Analysis: MEDIUM severity issue

The line grouping logic 'issue.line() / 3' does not implement the '±3 tolerance' mentioned in the Javadoc. This creates fixed buckets (e.g., lines 1-2 in bucket 0, lines 3-5 in bucket 1). Adjacent lines like 2 and 3 will fall into different buckets and fail to match, while lines 3 and 5 will match despite being further apart. This makes issue tracking across commits jittery.

Suggested fix: Consider using a larger bucket or a more flexible matching algorithm if true tolerance is required. At minimum, update the Javadoc to reflect that it uses fixed buckets of size 3.
String severity = issue.severity() != null ? issue.severity() : "";
String reasonPrefix = issue.reason() != null
? issue.reason().substring(0, Math.min(50, issue.reason().length())).toLowerCase().trim()
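To make the MEDIUM finding above concrete, the sketch below contrasts the fixed-bucket behavior with a direct ±3 comparison; note that true tolerance matching cannot be encoded in a hash-style fingerprint (proximity is not transitive), which may be why fixed buckets were chosen:

// Sketch only, illustrating the finding rather than proposing a drop-in patch.
final class LineMatchSketch {
    // Current behavior: fixed buckets of 3. Lines 2 and 3 land in different
    // buckets (0 and 1), while lines 3 and 5 share bucket 1.
    static int bucketOf(int line) {
        return line / 3;
    }

    // A true +/-3 tolerance would compare candidate pairs directly, but this
    // cannot be folded into a hash-style fingerprint.
    static boolean withinTolerance(int lineA, int lineB) {
        return Math.abs(lineA - lineB) <= 3;
    }
}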
@@ -461,6 +473,11 @@
return self();
}

public T withEnrichmentData(PrEnrichmentDataDto enrichmentData) {
this.enrichmentData = enrichmentData;
return self();
}

public AiAnalysisRequestImpl build() {
return new AiAnalysisRequestImpl(this);
}
java-ecosystem/libs/analysis-engine/src/main/java/org/rostilos/codecrow/analysisengine/dto/request/ai/enrichment/FileContentDto.java
@@ -0,0 +1,46 @@
package org.rostilos.codecrow.analysisengine.dto.request.ai.enrichment;

/**
* DTO representing the content of a single file retrieved from VCS.
* Used for file enrichment during PR analysis to provide full file context.
*/
public record FileContentDto(
String path,
String content,
long sizeBytes,
boolean skipped,
String skipReason
) {
/**
* Create a successful file content result.
*/
public static FileContentDto of(String path, String content) {
return new FileContentDto(
path,
content,
content != null ? content.getBytes().length : 0,

Check warning on line 21 in java-ecosystem/libs/analysis-engine/src/main/java/org/rostilos/codecrow/analysisengine/dto/request/ai/enrichment/FileContentDto.java

CodeCrow-Local / CodeCrow Analysis: MEDIUM severity issue

String.getBytes() uses the platform's default charset. This can cause inconsistent file size reporting if the analysis engine runs on systems with different default encodings (e.g., UTF-16 vs UTF-8). Since VCS content is typically UTF-8, it should be explicitly specified.

Suggested fix: Specify StandardCharsets.UTF_8 when calling getBytes().
false,
null
);
}

/**
* Create a skipped file result (e.g., file too large, binary, or fetch failed).
*/
public static FileContentDto skipped(String path, String reason) {
return new FileContentDto(path, null, 0, true, reason);
}

/**
* Create a skipped file result due to size limit.
*/
public static FileContentDto skippedDueToSize(String path, long actualSize, long maxSize) {
return new FileContentDto(
path,
null,
actualSize,
true,
String.format("File size %d bytes exceeds limit %d bytes", actualSize, maxSize)
);
}
}
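The suggested fix for the charset finding above is a one-line change, sketched here as a standalone helper for clarity:

import java.nio.charset.StandardCharsets;

// Sketch of the suggested fix: size the content with an explicit charset so
// the reported sizeBytes does not depend on the JVM's platform default.
final class Utf8Size {
    static long sizeBytes(String content) {
        return content != null ? content.getBytes(StandardCharsets.UTF_8).length : 0;
    }
}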
java-ecosystem/libs/analysis-engine/src/main/java/org/rostilos/codecrow/analysisengine/dto/request/ai/enrichment/FileRelationshipDto.java
@@ -0,0 +1,90 @@
package org.rostilos.codecrow.analysisengine.dto.request.ai.enrichment;

/**
* DTO representing a relationship between two files in the PR.
* Used for building the dependency graph for intelligent batching.
*/
public record FileRelationshipDto(
String sourceFile,
String targetFile,
RelationshipType relationshipType,
String matchedOn,
int strength
) {
/**
* Types of relationships between files.
*/
public enum RelationshipType {
IMPORTS,
EXTENDS,
IMPLEMENTS,
CALLS,
SAME_PACKAGE,
REFERENCES
}

/**
* Create an import relationship.
*/
public static FileRelationshipDto imports(String sourceFile, String targetFile, String importStatement) {
return new FileRelationshipDto(
sourceFile,
targetFile,
RelationshipType.IMPORTS,
importStatement,
10 // High strength for direct imports
);
}

/**
* Create an extends relationship.
*/
public static FileRelationshipDto extendsClass(String sourceFile, String targetFile, String className) {
return new FileRelationshipDto(
sourceFile,
targetFile,
RelationshipType.EXTENDS,
className,
15 // Highest strength for inheritance
);
}

/**
* Create an implements relationship.
*/
public static FileRelationshipDto implementsInterface(String sourceFile, String targetFile, String interfaceName) {
return new FileRelationshipDto(
sourceFile,
targetFile,
RelationshipType.IMPLEMENTS,
interfaceName,
15 // Highest strength for interface implementation
);
}

/**
* Create a calls relationship.
*/
public static FileRelationshipDto calls(String sourceFile, String targetFile, String methodName) {
return new FileRelationshipDto(
sourceFile,
targetFile,
RelationshipType.CALLS,
methodName,
8 // Medium-high strength for method calls
);
}

/**
* Create a same-package relationship.
*/
public static FileRelationshipDto samePackage(String sourceFile, String targetFile, String packageName) {
return new FileRelationshipDto(
sourceFile,
targetFile,
RelationshipType.SAME_PACKAGE,
packageName,
3 // Low strength for implicit package relationship
);
}
}
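The strength weights suggest an aggregation step when batching; the helper below is an illustrative guess at such usage, since the batcher consuming these edges is not part of this diff:

import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;
import org.rostilos.codecrow.analysisengine.dto.request.ai.enrichment.FileRelationshipDto;

// Sketch only; the batcher consuming these weights is not part of this diff.
final class RelationshipWeights {
    // Sum edge strengths per source file so strongly coupled files can be
    // co-located in one review batch, e.g. extendsClass (15) + imports (10)
    // from the same source file yields a weight of 25.
    static Map<String, Integer> strengthBySource(List<FileRelationshipDto> edges) {
        return edges.stream().collect(Collectors.groupingBy(
                FileRelationshipDto::sourceFile,
                Collectors.summingInt(FileRelationshipDto::strength)));
    }
}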
java-ecosystem/libs/analysis-engine/src/main/java/org/rostilos/codecrow/analysisengine/dto/request/ai/enrichment/ParsedFileMetadataDto.java
@@ -0,0 +1,70 @@
package org.rostilos.codecrow.analysisengine.dto.request.ai.enrichment;

import com.fasterxml.jackson.annotation.JsonIgnoreProperties;
import com.fasterxml.jackson.annotation.JsonProperty;

import java.util.List;

/**
* DTO representing parsed AST metadata for a single file.
* Mirrors the response from RAG pipeline's /parse endpoint.
*/
@JsonIgnoreProperties(ignoreUnknown = true)
public record ParsedFileMetadataDto(
@JsonProperty("path") String path,
@JsonProperty("language") String language,
@JsonProperty("imports") List<String> imports,
@JsonProperty("extends") List<String> extendsClasses,
@JsonProperty("implements") List<String> implementsInterfaces,
@JsonProperty("semantic_names") List<String> semanticNames,
@JsonProperty("parent_class") String parentClass,
@JsonProperty("namespace") String namespace,
@JsonProperty("calls") List<String> calls,
@JsonProperty("error") String error
) {
/**
* Create a metadata result with only imports and extends (minimal parsing).
*/
public static ParsedFileMetadataDto minimal(String path, List<String> imports, List<String> extendsClasses) {
return new ParsedFileMetadataDto(
path,
null,
imports,
extendsClasses,
List.of(),
List.of(),
null,
null,
List.of(),
null
);
}

/**
* Create an error result for a file that couldn't be parsed.
*/
public static ParsedFileMetadataDto error(String path, String errorMessage) {
return new ParsedFileMetadataDto(
path,
null,
List.of(),
List.of(),
List.of(),
List.of(),
null,
null,
List.of(),
errorMessage
);
}

/**
* Check if this metadata has any relationships to extract.
*/
public boolean hasRelationships() {
return (imports != null && !imports.isEmpty()) ||
(extendsClasses != null && !extendsClasses.isEmpty()) ||
(implementsInterfaces != null && !implementsInterfaces.isEmpty()) ||
(calls != null && !calls.isEmpty());
}
}
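Since the record mirrors the /parse response, a round trip through Jackson shows how the annotations behave; a minimal sketch (jackson-databind 2.12+ is assumed for built-in record support, and the sample JSON is invented):

import com.fasterxml.jackson.databind.ObjectMapper;
import org.rostilos.codecrow.analysisengine.dto.request.ai.enrichment.ParsedFileMetadataDto;

// Sketch only; the sample JSON is invented.
final class ParseResponseDemo {
    public static void main(String[] args) throws Exception {
        String json = """
                {"path":"src/Foo.java","language":"java",
                 "imports":["java.util.List"],"extends":["Base"],
                 "unknown_field":42}""";
        // @JsonIgnoreProperties(ignoreUnknown = true) lets unknown_field pass;
        // absent list fields deserialize to null, which hasRelationships() handles.
        ParsedFileMetadataDto dto = new ObjectMapper()
                .readValue(json, ParsedFileMetadataDto.class);
        System.out.println(dto.imports()); // [java.util.List]
    }
}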