diff --git a/src/twinkle/model/multi_lora.py b/src/twinkle/model/multi_lora.py
index df60e9f4..d8be4832 100644
--- a/src/twinkle/model/multi_lora.py
+++ b/src/twinkle/model/multi_lora.py
@@ -478,12 +478,15 @@ def _loader(self):
 
     def save_lora_converter(self, name, parameter, adapter_name):
         _lora = self.find_lora(adapter_name)
-        pattern = re.compile(rf'\.lora_\w+\.{adapter_name}\.')
-        pattern_no_adapter = re.compile(r'\.lora_\w+\.weight')
-        if (pattern.search(name) or pattern_no_adapter.search(name)) and self.match_target_modules(
+        # Skip weights belonging to OTHER adapters
+        if re.search(r'\.lora_\w+\.\w+\.', name) and not re.search(rf'\.lora_\w+\.{adapter_name}\.', name):
+            return None
+        if re.search(rf'\.lora_\w+\.({adapter_name}|weight)', name) and self.match_target_modules(
                 name, _lora.tenant_config.target_modules):
             _param = torch_util.to_local_tensor(parameter)
-            if 'embedding_A' in name:
+            if _param is None:
+                pass
+            elif 'embedding_A' in name:
                 _param = _param[:, :_lora.tenant_config.r]
             elif 'embedding_B' in name:
                 _param = _param[:_lora.tenant_config.r, :]
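
For reference, a minimal standalone sketch of the new name-filtering behavior. The helper name and the example parameter names are hypothetical; only the two regexes are taken from the patch:

import re

def _belongs_to_adapter(name, adapter_name):
    # Hypothetical helper mirroring the patched filter in save_lora_converter.
    # Note: adapter_name is interpolated unescaped, as in the patch, so it is
    # assumed to contain no regex metacharacters.
    scoped_to_some_adapter = re.search(r'\.lora_\w+\.\w+\.', name)
    scoped_to_this_adapter = re.search(rf'\.lora_\w+\.{adapter_name}\.', name)
    if scoped_to_some_adapter and not scoped_to_this_adapter:
        return False  # weight belongs to another tenant's adapter
    # Accept this adapter's weights as well as unscoped '...lora_X.weight' names.
    return bool(re.search(rf'\.lora_\w+\.({adapter_name}|weight)', name))

for n in ['model.layers.0.q_proj.lora_A.default.weight',  # this adapter
          'model.layers.0.q_proj.lora_B.other.weight',    # another adapter
          'model.layers.0.q_proj.lora_A.weight']:         # unscoped
    print(n, '->', _belongs_to_adapter(n, 'default'))
# prints True, False, True: only the other tenant's weight is skipped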