README.md (1 addition, 1 deletion)
@@ -246,7 +246,7 @@ If you find this repository useful in your work, please cite:
 author = {Cory Cornelius and Marius Arvinte and Sebastian Szyller and Weilin Xu and Nageen Himayat},
 title = {{LLMart}: {L}arge {L}anguage {M}odel adversarial robutness toolbox},
 url = {http://github.com/IntelLabs/LLMart},
-version = {2026.01},
+version = {2026.01.1},
 year = {2026},
 }
 ```
examples/basic/requirements.txt (1 addition, 1 deletion)
@@ -1,2 +1,2 @@
-jupyterlab[docs-screenshots]==4.3.5
+jupyterlab[docs-screenshots]==4.4.8
 fire==0.7.0
examples/fact_checking/claim.py (1 addition, 1 deletion)
@@ -52,7 +52,7 @@ def attack(suffix=6, n_swaps=1024, n_tokens=1, num_steps=1000, per_device_bs=64)
 
     # Attack pipeline
     adv_pipe = pipeline(
-        "adv-text-generation",
+        "adv-text-generation", # type: ignore[reportArgumentType]
         model=pipe.model.requires_grad_(False),
         tokenizer=pipe.tokenizer,
         attack=AttackPrompt(suffix=suffix),
examples/fact_checking/document.py (1 addition, 1 deletion)
@@ -59,7 +59,7 @@ def attack(
 
     # Attack pipeline
     adv_pipe = pipeline(
-        "adv-text-generation",
+        "adv-text-generation", # type: ignore[reportArgumentType]
         model=pipe.model.requires_grad_(False),
         tokenizer=pipe.tokenizer,
         attack=AttackPrompt(
examples/llmguard/whitebox.py (1 addition, 1 deletion)
@@ -42,7 +42,7 @@ def attack(
 
     # We're running a suffix attack with 2 adversarial tokens
    adv_classifier = pipeline(
-        task="adv-text-classification",
+        task="adv-text-classification", # type: ignore[reportArgumentType]
         model=classifier.model,
         tokenizer=classifier.tokenizer,
         attack=AttackPrompt(suffix=suffix_length, default_token=suffix_init),
examples/random_strings/whitebox.py (1 addition, 1 deletion)
@@ -71,7 +71,7 @@ def attack(
 ) -> tuple[bool, tuple[str, str]]:
     torch.manual_seed(seed)
     adv_generator = pipeline(
-        task="adv-text-generation",
+        task="adv-text-generation", # type: ignore[reportArgumentType]
         model=generator.model,
         tokenizer=generator.tokenizer,
         attack=AttackPrompt(prefix=num_tokens, default_token=" @", prefix_pad_right=""),
examples/unlearning/whitebox.py (1 addition, 1 deletion)
@@ -77,7 +77,7 @@ def attack(
 ) -> tuple[bool, tuple[str, str]]:
     torch.manual_seed(seed)
     adv_generator = pipeline(
-        task="adv-text-generation",
+        task="adv-text-generation", # type: ignore[reportArgumentType]
         model=generator.model,
         tokenizer=generator.tokenizer,
         attack=AttackPrompt(prefix=prefix_len, default_token=" @"),
examples/vlm/main.py (1 addition, 1 deletion)
@@ -184,7 +184,7 @@ def main(
     )
 
     adv_pipe = pipeline( # pyright: ignore [reportArgumentType]
-        task="adv-image-text-to-loss",
+        task="adv-image-text-to-loss", # type: ignore[reportArgumentType]
         model=pipe.model,
         # do not reuse the processor, because we will add the TaggedTokenizer in the processor.
         processor=AutoProcessor.from_pretrained(
pyproject.toml (7 additions, 7 deletions)
@@ -1,6 +1,6 @@
 [project]
 name = "llmart"
-version = "2026.01"
+version = "2026.01.1"
 description = "LLMart"
 authors = []
 readme = "README.md"
@@ -14,19 +14,19 @@ core = [
     "hydra-core==1.3.2",
     "hydra-colorlog==1.2.0",
     "accelerate==1.6.0",
-    "transformers[sentencepiece,torch-vision]==4.52.4",
+    "transformers[sentencepiece,torch-vision]==4.53.0",
     "tensorboard==2.18.0",
     "datasets==3.1.0",
 ]
 
 gpu = [
-    "torch==2.7.0",
+    "torch==2.8.0",
     "llmart[core]",
 ]
 
 xpu = [
-    "torch==2.7.0+xpu",
-    "pytorch-triton-xpu==3.3.0",
+    "torch==2.8.0+xpu",
+    "pytorch-triton-xpu==3.4.0",
     "llmart[core]",
 ]
 
@@ -38,8 +38,8 @@ dev = [
 ]
 
 [build-system]
-requires = ["hatchling"]
-build-backend = "hatchling.build"
+requires = ["uv_build>=0.9.28,<0.10.0"]
+build-backend = "uv_build"
 
 [tool.basedpyright]
 venvPath = "."
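A note on the [build-system] table above: uv_build defaults to a src layout and derives the module name from the project name, which already matches the src/llmart package seen in the src/llmart/attack.py hunk below, so the diff needs no further backend configuration. A minimal sketch of what those defaults amount to when written out explicitly; the [tool.uv.build-backend] keys are taken from uv's documented defaults and are not part of this change:

[build-system]
requires = ["uv_build>=0.9.28,<0.10.0"]
build-backend = "uv_build"

# Explicit spelling of uv_build's defaults (hypothetical here; the change relies on them implicitly).
# The backend packages the module found at <module-root>/<module-name>, i.e. src/llmart.
[tool.uv.build-backend]
module-name = "llmart"
module-root = "src"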
src/llmart/attack.py (1 addition, 1 deletion)
@@ -128,7 +128,7 @@ def run_attack(cfg: config.LLMartConf) -> dict:
 
     # Load demo models
     pipe = pipeline(
-        task=cfg.model.task,
+        task=cfg.model.task, # type: ignore[reportArgumentType]
         model=cfg.model.name,
         revision=cfg.model.revision,
         device=cfg.model.device,
test/test_pipelines_text_generation.py (1 addition, 1 deletion)
@@ -14,7 +14,7 @@
 @pytest.fixture
 def pipe():
     pipe = pipeline(
-        "adv-text-generation",
+        "adv-text-generation", # type: ignore[reportArgumentType]
         model="hf-internal-testing/tiny-gpt2-with-chatml-template",
         attack=AttackPrompt(suffix=20),
         model_kwargs=dict(),
uv.lock (376 additions, 277 deletions; large diff not rendered)