Skip to content

Commit 21b3f7e

Browse files
Fix multi lora saving (#148)
1 parent dca29d4 commit 21b3f7e

File tree

1 file changed

+7
-4
lines changed

1 file changed

+7
-4
lines changed

src/twinkle/model/multi_lora.py

Lines changed: 7 additions & 4 deletions
Original file line number | Diff line number | Diff line change
@@ -478,12 +478,15 @@ def _loader(self):
478 478

479 479
def save_lora_converter(self, name, parameter, adapter_name):
480 480
_lora = self.find_lora(adapter_name)
481-
pattern = re.compile(rf'\.lora_\w+\.{adapter_name}\.')
482-
pattern_no_adapter = re.compile(r'\.lora_\w+\.weight')
483-
if (pattern.search(name) or pattern_no_adapter.search(name)) and self.match_target_modules(
481+
# Skip weights belonging to OTHER adapters
482+
if re.search(r'\.lora_\w+\.\w+\.', name) and not re.search(rf'\.lora_\w+\.{adapter_name}\.', name):
483+
return None
484+
if re.search(rf'\.lora_\w+\.({adapter_name}|weight)', name) and self.match_target_modules(
484 485
name, _lora.tenant_config.target_modules):
485486
_param = torch_util.to_local_tensor(parameter)
486-
if 'embedding_A' in name:
487+
if _param is None:
488+
pass
489+
elif 'embedding_A' in name:
487 490
_param = _param[:, :_lora.tenant_config.r]
488 491
elif 'embedding_B' in name:
489 492
_param = _param[:_lora.tenant_config.r, :]

0 commit comments

Comments (0)