"KeyError" when running example code #33

@chricke

Description

I tried to run the demo code from https://huggingface.co/KRLabsOrg/lettucedect-610m-eurobert-de-v1, but I get a `KeyError` when loading the model:

```python
from lettucedetect.models.inference import HallucinationDetector

detector = HallucinationDetector(
    method="transformer",
    model_path="KRLabsOrg/lettucedect-610m-eurobert-de-v1",
    lang="de",
    trust_remote_code=True,
)
```
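
For what it's worth, the error does not seem specific to lettucedetect: per the traceback below, `TransformerDetector` ultimately calls `AutoModelForTokenClassification.from_pretrained` (transformer.py:33), so presumably this minimal snippet reproduces the same failure with transformers alone:

```python
# Presumed minimal reproduction, bypassing lettucedetect entirely:
# load the checkpoint through the same transformers call the detector
# makes internally (see transformer.py:33 in the traceback below).
from transformers import AutoModelForTokenClassification

model = AutoModelForTokenClassification.from_pretrained(
    "KRLabsOrg/lettucedect-610m-eurobert-de-v1",
    trust_remote_code=True,
)
```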

After the model and its files have downloaded, the following error is raised every time I try to load the model:

```
---------------------------------------------------------------------------
KeyError                                  Traceback (most recent call last)
Cell In[5], line 1
----> 1 detector = HallucinationDetector(
      2     method="transformer",
      3     model_path="KRLabsOrg/lettucedect-610m-eurobert-de-v1",
      4     lang="de",
      5     trust_remote_code=True
      6 )

File ~/miniconda3/envs/hallucination_detection_312/lib/python3.12/site-packages/lettucedetect/models/inference.py:20, in HallucinationDetector.__init__(self, method, **kwargs)
     19 def __init__(self, method: str = "transformer", **kwargs):
---> 20     self.detector = make_detector(method, **kwargs)

File ~/miniconda3/envs/hallucination_detection_312/lib/python3.12/site-packages/lettucedetect/detectors/factory.py:21, in make_detector(method, **kwargs)
     18 if method == "transformer":
     19     from lettucedetect.detectors.transformer import TransformerDetector
---> 21     return TransformerDetector(**kwargs)
     22 elif method == "llm":
     23     from lettucedetect.detectors.llm import LLMDetector

File ~/miniconda3/envs/hallucination_detection_312/lib/python3.12/site-packages/lettucedetect/detectors/transformer.py:33, in TransformerDetector.__init__(self, model_path, max_length, device, lang, **tok_kwargs)
     31 self.lang, self.max_length = lang, max_length
     32 self.tokenizer = AutoTokenizer.from_pretrained(model_path, **tok_kwargs)
---> 33 self.model = AutoModelForTokenClassification.from_pretrained(model_path, **tok_kwargs)
     34 self.device = device or (
     35     torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
     36 )
     37 self.model.to(self.device).eval()

File ~/miniconda3/envs/hallucination_detection_312/lib/python3.12/site-packages/transformers/models/auto/auto_factory.py:367, in _BaseAutoModelClass.from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs)
    365     model_class.register_for_auto_class(auto_class=cls)
    366     model_class = add_generation_mixin_to_remote_model(model_class)
--> 367     return model_class.from_pretrained(
    368         pretrained_model_name_or_path, *model_args, config=config, **hub_kwargs, **kwargs
    369     )
    370 elif type(config) in cls._model_mapping:
    371     model_class = _get_model_class(config, cls._model_mapping)

File ~/miniconda3/envs/hallucination_detection_312/lib/python3.12/site-packages/transformers/modeling_utils.py:4021, in PreTrainedModel.from_pretrained(cls, pretrained_model_name_or_path, config, cache_dir, ignore_mismatched_sizes, force_download, local_files_only, token, revision, use_safetensors, weights_only, *model_args, **kwargs)
   4018 config = copy.deepcopy(config)  # We do not want to modify the config inplace in from_pretrained.
   4019 with ContextManagers(model_init_context):
   4020     # Let's make sure we don't run the init function of buffer modules
-> 4021     model = cls(config, *model_args, **model_kwargs)
   4023 if hf_quantizer is not None:  # replace module with quantized modules (does not touch weights)
   4024     hf_quantizer.preprocess_model(
   4025         model=model,
   4026         dtype=dtype,
   (...)
   4029         use_kernels=use_kernels,
   4030     )

File ~/.cache/huggingface/modules/transformers_modules/EuroBERT/EuroBERT_hyphen_610m/d9af784ed20db6c2096e335ec6a67dd4a219924c/modeling_eurobert.py:892, in EuroBertForTokenClassification.__init__(self, config)
    890 super().__init__(config)
    891 self.num_labels = config.num_labels
--> 892 self.model = EuroBertModel(config)
    894 self.classifier = nn.Linear(config.hidden_size, config.num_labels)
    895 self.post_init()

File ~/.cache/huggingface/modules/transformers_modules/EuroBERT/EuroBERT_hyphen_610m/d9af784ed20db6c2096e335ec6a67dd4a219924c/modeling_eurobert.py:484, in EuroBertModel.__init__(self, config)
    480 self.layers = nn.ModuleList(
    481     [EuroBertDecoderLayer(config, layer_idx) for layer_idx in range(config.num_hidden_layers)]
    482 )
    483 self.norm = EuroBertRMSNorm(config.hidden_size, eps=config.rms_norm_eps)
--> 484 self.rotary_emb = EuroBertRotaryEmbedding(config=config)
    485 self.gradient_checkpointing = False
    486 self.mask_converter = AttentionMaskConverter(is_causal=False)

File ~/.cache/huggingface/modules/transformers_modules/EuroBERT/EuroBERT_hyphen_610m/d9af784ed20db6c2096e335ec6a67dd4a219924c/modeling_eurobert.py:268, in EuroBertRotaryEmbedding.__init__(self, config, device)
    265 self.original_max_seq_len = config.max_position_embeddings
    267 self.config = config
--> 268 self.rope_init_fn = ROPE_INIT_FUNCTIONS[self.rope_type]
    270 inv_freq, self.attention_scaling = self.rope_init_fn(self.config, device)
    271 self.register_buffer("inv_freq", inv_freq, persistent=False)

KeyError: 'default'
```
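
The failing lookup is `ROPE_INIT_FUNCTIONS[self.rope_type]` inside the remote EuroBERT modeling code, so this looks to me like a version mismatch between that remote code and my installed transformers. A quick diagnostic sketch, assuming the remote code takes the mapping from `transformers.modeling_rope_utils`:

```python
# Diagnostic: list the RoPE initializer keys my transformers build registers.
# The traceback implies 'default' is not among them in this environment.
from transformers.modeling_rope_utils import ROPE_INIT_FUNCTIONS

print(sorted(ROPE_INIT_FUNCTIONS.keys()))
```

If `'default'` is indeed missing, pinning transformers to an older release would presumably work around this, but I haven't confirmed which version is compatible.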
