From e61ff00fae5d74d9e66752f68891c15e6bd8a29d Mon Sep 17 00:00:00 2001 From: Zhimeng Date: Sun, 12 Oct 2025 17:15:43 +1100 Subject: [PATCH 1/9] add lora parameter webUI --- .DS_Store | Bin 0 -> 8196 bytes src/llamafactory/webui/components/train.py | 3 +++ src/llamafactory/webui/locales.py | 22 +++++++++++++++++++++ 3 files changed, 25 insertions(+) create mode 100644 .DS_Store diff --git a/.DS_Store b/.DS_Store new file mode 100644 index 0000000000000000000000000000000000000000..a2998f56e1df3548862af16146988bcc62da6368 GIT binary patch literal 8196 zcmeHM&ubGw6n@htO+dkbqV=}uNl-C(6O^>Xi&$u_dbRmcnqtC+G%Y>uKOiVxELafq z5Af!-H&N`-g9pL0AidU`-`kmN-pgjAUPRiNF!Ppq@6G$(x6_?{TOu;^o6R!OED;sa zS(Z*<7}HqKJ=Lb%%wFlmm6WHo3iv1o&nE*XTUSy8So7JFAU(!=2px&_pR2C zo&nFmv1EXs4*@#Mij$F*@uLGnTmnGmFda8sm-qrQaI)fLWF<6c#=1gUSK~?yW8EtlV&dpnKZwoARIK!)XTUR%W`Jw=99^X~TBp9M-#;wglDbY< zt=8LN9dpG`&zAPyExe!8`sPG^w zvQ8%)BFVN=cxaFD%utOs@LQ!mZ74Lk&pV@fiDm!Ny#W@rhD4HOqj2OI2JS}_NcvP$ z7+#hxN}Y^<=ED6!{KQwr9;a7vcdv2W8N>~#L65;ePsOJ4{-|EYU%8&dS8GTNSvCp> zMU10}_}gHhLtSd)THgyg_&&}?;yL>@!IN4`Vwz;9aM?o!AGq*iTs5kLsT%db*ds7j zLvPWl=IrE8;QXzR#C>=t+}^Q&SAXC)+~t4cr}Cmj=DUDB-#{+w;CpoEo5GE>g~Ysj z>cL>|jjSV?&bCr`OqcOY15b|{ip|qk-V8M|{Lb$|`fp-yUZpvGflTEUC3583{2b1X1V^y9?Save`NFbiFpP*1E`gA)#d6um~gdqiW9E21N0SiZfqA> y8M|PJ%W=r~O$jpjhas*5n6gjB$;e8~p#Ad?0kRJ9=Re-}>^p})|Ksg=Y=&QAZLKf> literal 0 HcmV?d00001 diff --git a/src/llamafactory/webui/components/train.py b/src/llamafactory/webui/components/train.py index 8b7aa6e946..fc62add0b2 100644 --- a/src/llamafactory/webui/components/train.py +++ b/src/llamafactory/webui/components/train.py @@ -179,6 +179,7 @@ def create_train_tab(engine: "Engine") -> dict[str, "Component"]: use_pissa = gr.Checkbox() lora_target = gr.Textbox(scale=2) additional_target = gr.Textbox(scale=2) + lora_parameters = gr.Textbox(scale=2) input_elems.update( { @@ -192,6 +193,7 @@ def create_train_tab(engine: "Engine") -> dict[str, "Component"]: use_pissa, lora_target, additional_target, + lora_parameters, } ) elem_dict.update( @@ -207,6 +209,7 @@ def create_train_tab(engine: "Engine") -> dict[str, "Component"]: use_pissa=use_pissa, lora_target=lora_target, additional_target=additional_target, + lora_parameters=lora_parameters, ) ) diff --git a/src/llamafactory/webui/locales.py b/src/llamafactory/webui/locales.py index 7051b30e80..73f377120b 100644 --- a/src/llamafactory/webui/locales.py +++ b/src/llamafactory/webui/locales.py @@ -1323,6 +1323,28 @@ "info": "LoRA 層以外の学習可能なモジュールの名前。複数のモジュールを区切るにはカンマを使用します。", }, }, + "lora_parameters": { + "en": { + "label": "LoRA parameters (optional)", + "info": "Name(s) of parameters to apply LoRA. Use commas to separate multiple parameters.", + }, + "ru": { + "label": "Параметры LoRA (необязательно)", + "info": "Имя(ена) параметров для применения LoRA. Используйте запятые для разделения нескольких параметров.", + }, + "zh": { + "label": "LoRA 参数(可选)", + "info": "要应用 LoRA 的参数名称。使用逗号分隔多个参数。", + }, + "ko": { + "label": "LoRA 매개변수 (선택 사항)", + "info": "LoRA를 적용할 매개변수의 이름입니다. 
여러 매개변수를 구분하려면 쉼표를 사용하십시오.", + }, + "ja": { + "label": "LoRA パラメータ (オプション)", + "info": "LoRA を適用するパラメータの名前。複数のパラメータを区切るにはカンマを使用します。", + }, + }, "rlhf_tab": { "en": { "label": "RLHF configurations", From ce14b487d9f7a6d7f55e4a6b32146c8ae7dbcf80 Mon Sep 17 00:00:00 2001 From: zhangziheng Date: Sun, 12 Oct 2025 19:55:21 +1100 Subject: [PATCH 2/9] change the finetuning args of lora --- src/llamafactory/hparams/finetuning_args.py | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/src/llamafactory/hparams/finetuning_args.py b/src/llamafactory/hparams/finetuning_args.py index 3130f86e42..4c9286ad9b 100644 --- a/src/llamafactory/hparams/finetuning_args.py +++ b/src/llamafactory/hparams/finetuning_args.py @@ -88,6 +88,16 @@ class LoraArguments: ) }, ) + lora_target_parameters: Optional[str] = field( + default=None, + metadata={ + "help": ( + "Name(s) of nn.Parameters to apply LoRA directly. " + "Use commas to separate multiple parameters. " + "Useful for MoE models with expert parameters." + ) + }, + ) loraplus_lr_ratio: Optional[float] = field( default=None, metadata={"help": "LoRA plus learning rate ratio (lr_B / lr_A)."}, @@ -524,6 +534,7 @@ def split_arg(arg): self.freeze_extra_modules: Optional[list[str]] = split_arg(self.freeze_extra_modules) self.lora_alpha: int = self.lora_alpha or self.lora_rank * 2 self.lora_target: list[str] = split_arg(self.lora_target) + self.lora_target_parameters: Optional[list[str]] = split_arg(self.lora_target_parameters) self.oft_target: list[str] = split_arg(self.oft_target) self.additional_target: Optional[list[str]] = split_arg(self.additional_target) self.galore_target: list[str] = split_arg(self.galore_target) From d202ae91e8f636f5e50eeaa13bfefee4d71b03fd Mon Sep 17 00:00:00 2001 From: zhangziheng Date: Sun, 12 Oct 2025 19:55:50 +1100 Subject: [PATCH 3/9] change the adapter of lora --- src/llamafactory/model/adapter.py | 1 + src/llamafactory/webui/runner.py | 1 + 2 files changed, 2 insertions(+) diff --git a/src/llamafactory/model/adapter.py b/src/llamafactory/model/adapter.py index d9522d39dd..9839619242 100644 --- a/src/llamafactory/model/adapter.py +++ b/src/llamafactory/model/adapter.py @@ -235,6 +235,7 @@ def _setup_lora_tuning( "use_rslora": finetuning_args.use_rslora, "use_dora": finetuning_args.use_dora, "modules_to_save": finetuning_args.additional_target, + "target_parameters": finetuning_args.lora_parameters, } elif finetuning_args.finetuning_type == "oft": peft_kwargs = { diff --git a/src/llamafactory/webui/runner.py b/src/llamafactory/webui/runner.py index 0a6fc7c9aa..0d48acdd23 100644 --- a/src/llamafactory/webui/runner.py +++ b/src/llamafactory/webui/runner.py @@ -212,6 +212,7 @@ def _parse_train_args(self, data: dict["Component", Any]) -> dict[str, Any]: args["pissa_convert"] = get("train.use_pissa") args["lora_target"] = get("train.lora_target") or "all" args["additional_target"] = get("train.additional_target") or None + args["lora_parameters"] = get("train.lora_parameters") or None if args["use_llama_pro"]: args["freeze_trainable_layers"] = get("train.freeze_trainable_layers") From 96f15f05c034d7187c6110165b35af236f5d4135 Mon Sep 17 00:00:00 2001 From: zhangziheng Date: Sun, 19 Oct 2025 17:11:05 +1100 Subject: [PATCH 4/9] change of adapters --- src/llamafactory/hparams/finetuning_args.py | 4 ++-- src/llamafactory/model/adapter.py | 10 ++++++++-- 2 files changed, 10 insertions(+), 4 deletions(-) diff --git a/src/llamafactory/hparams/finetuning_args.py b/src/llamafactory/hparams/finetuning_args.py index 4c9286ad9b..fba48486c6 
100644 --- a/src/llamafactory/hparams/finetuning_args.py +++ b/src/llamafactory/hparams/finetuning_args.py @@ -88,7 +88,7 @@ class LoraArguments: ) }, ) - lora_target_parameters: Optional[str] = field( + lora_parameters: Optional[str] = field( default=None, metadata={ "help": ( @@ -534,7 +534,7 @@ def split_arg(arg): self.freeze_extra_modules: Optional[list[str]] = split_arg(self.freeze_extra_modules) self.lora_alpha: int = self.lora_alpha or self.lora_rank * 2 self.lora_target: list[str] = split_arg(self.lora_target) - self.lora_target_parameters: Optional[list[str]] = split_arg(self.lora_target_parameters) + self.lora_parameters: Optional[list[str]] = split_arg(self.lora_parameters) self.oft_target: list[str] = split_arg(self.oft_target) self.additional_target: Optional[list[str]] = split_arg(self.additional_target) self.galore_target: list[str] = split_arg(self.galore_target) diff --git a/src/llamafactory/model/adapter.py b/src/llamafactory/model/adapter.py index 9839619242..8c8bc9753e 100644 --- a/src/llamafactory/model/adapter.py +++ b/src/llamafactory/model/adapter.py @@ -198,8 +198,14 @@ def _setup_lora_tuning( logger.info_rank0("Loaded adapter(s): {}".format(",".join(model_args.adapter_name_or_path))) if is_trainable and adapter_to_resume is None: # create new lora weights while training + target_modules = [] + target_parameters = [] if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all": - target_modules = find_all_linear_modules(model, finetuning_args.freeze_vision_tower) + if finetuning_args.lora_parameters: # if specified the parameters to be adapted, use them + print("Using specified LoRA parameters: ", finetuning_args.lora_parameters) + target_parameters = finetuning_args.lora_parameters + else: + target_modules = find_all_linear_modules(model, finetuning_args.freeze_vision_tower) else: target_modules = finetuning_args.lora_target @@ -235,7 +241,7 @@ def _setup_lora_tuning( "use_rslora": finetuning_args.use_rslora, "use_dora": finetuning_args.use_dora, "modules_to_save": finetuning_args.additional_target, - "target_parameters": finetuning_args.lora_parameters, + "target_parameters": target_parameters, } elif finetuning_args.finetuning_type == "oft": peft_kwargs = { From 8342bf8e89ff6f5ed77f442834c8d4db74eee748 Mon Sep 17 00:00:00 2001 From: zhangziheng Date: Thu, 23 Oct 2025 18:08:01 +1100 Subject: [PATCH 5/9] lora_parameters_tests --- src/llamafactory/train/test_utils.py | 48 +++++++++++++++++++--------- tests/model/test_lora.py | 11 +++++-- 2 files changed, 41 insertions(+), 18 deletions(-) diff --git a/src/llamafactory/train/test_utils.py b/src/llamafactory/train/test_utils.py index 6e4c4ffc28..f0013ce9b1 100644 --- a/src/llamafactory/train/test_utils.py +++ b/src/llamafactory/train/test_utils.py @@ -43,22 +43,40 @@ def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module", diff_k assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5) is True -def check_lora_model(model: "LoraModel") -> tuple[set[str], set[str]]: - linear_modules, extra_modules = set(), set() - for name, param in model.named_parameters(): - if any(module in name for module in ["lora_A", "lora_B"]): - linear_modules.add(name.split(".lora_", maxsplit=1)[0].split(".")[-1]) - assert param.requires_grad is True - assert param.dtype == torch.float32 - elif "modules_to_save" in name: - extra_modules.add(name.split(".modules_to_save", maxsplit=1)[0].split(".")[-1]) - assert param.requires_grad is True - assert param.dtype == torch.float32 - 
else: - assert param.requires_grad is False - assert param.dtype == torch.float16 +# def check_lora_model(model: "LoraModel") -> tuple[set[str], set[str]]: +# linear_modules, extra_modules = set(), set() +# for name, param in model.named_parameters(): +# if any(module in name for module in ["lora_A", "lora_B"]): +# linear_modules.add(name.split(".lora_", maxsplit=1)[0].split(".")[-1]) +# assert param.requires_grad is True +# assert param.dtype == torch.float32 +# elif "modules_to_save" in name: +# extra_modules.add(name.split(".modules_to_save", maxsplit=1)[0].split(".")[-1]) +# assert param.requires_grad is True +# assert param.dtype == torch.float32 +# else: +# assert param.requires_grad is False +# assert param.dtype == torch.float16 + +# return linear_modules, extra_modules + +def check_lora_model(model: "LoraModel") -> tuple[list[str], list[str], list[str]]: + lora_param_names = [] + injected_param_names = [] + frozen_param_names = [] - return linear_modules, extra_modules + for name, param in model.named_parameters(): + if "lora_A" in name or "lora_B" in name: + lora_param_names.append(name) + base_name = name.split(".lora_", maxsplit=1)[0] + injected_param_names.append(base_name) + elif not param.requires_grad: + frozen_param_names.append(name) + elif hasattr(param, "_is_lora_injected") and getattr(param, "_is_lora_injected"): + injected_param_names.append(name) + + injected_param_names = list(dict.fromkeys(injected_param_names)) + return lora_param_names, injected_param_names, frozen_param_names def load_train_model(add_valuehead: bool = False, **kwargs) -> "PreTrainedModel": diff --git a/tests/model/test_lora.py b/tests/model/test_lora.py index 3d394c33dc..6011e7cf3a 100644 --- a/tests/model/test_lora.py +++ b/tests/model/test_lora.py @@ -63,19 +63,19 @@ def fix_valuehead_cpu_loading(): def test_lora_train_qv_modules(): model = load_train_model(lora_target="q_proj,v_proj", **TRAIN_ARGS) - linear_modules, _ = check_lora_model(model) + linear_modules, _, _ = check_lora_model(model) assert linear_modules == {"q_proj", "v_proj"} def test_lora_train_all_modules(): model = load_train_model(lora_target="all", **TRAIN_ARGS) - linear_modules, _ = check_lora_model(model) + linear_modules, _, _ = check_lora_model(model) assert linear_modules == {"q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"} def test_lora_train_extra_modules(): model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS) - _, extra_modules = check_lora_model(model) + _, extra_modules, _ = check_lora_model(model) assert extra_modules == {"embed_tokens", "lm_head"} @@ -91,6 +91,11 @@ def test_lora_train_new_adapters(): compare_model( model, ref_model, diff_keys=["q_proj", "k_proj", "v_proj", "o_proj", "up_proj", "gate_proj", "down_proj"] ) + +def test_lora_parameters(): + model = load_train_model(lora_parameters="q_proj.weight, k_proj.weight", **TRAIN_ARGS) + _, _, injected_parameters = check_lora_model(model) + assert injected_parameters == {"q_proj.weight", "k_proj.weight"} @pytest.mark.usefixtures("fix_valuehead_cpu_loading") From 282026b648ebb1f1f328e96253c58520acb5c80e Mon Sep 17 00:00:00 2001 From: zhangziheng Date: Thu, 23 Oct 2025 18:08:10 +1100 Subject: [PATCH 6/9] aaa --- ...45\220\215\347\273\230\345\233\276.drawio" | 88 +++++++++++++++++++ 1 file changed, 88 insertions(+) create mode 100644 "\346\234\252\345\221\275\345\220\215\347\273\230\345\233\276.drawio" diff --git "a/\346\234\252\345\221\275\345\220\215\347\273\230\345\233\276.drawio" 
"b/\346\234\252\345\221\275\345\220\215\347\273\230\345\233\276.drawio" new file mode 100644 index 0000000000..e19dafb73e --- /dev/null +++ "b/\346\234\252\345\221\275\345\220\215\347\273\230\345\233\276.drawio" @@ -0,0 +1,88 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + From 770bf8ed22bee4dd2293edf3b8e68687cf89e653 Mon Sep 17 00:00:00 2001 From: zhangziheng Date: Sat, 25 Oct 2025 15:11:36 +1100 Subject: [PATCH 7/9] changed the implementation of func check_lora_model --- src/llamafactory/model/adapter.py | 2 +- src/llamafactory/train/test_utils.py | 53 +++++------ tests/model/test_lora.py | 4 +- ...45\220\215\347\273\230\345\233\276.drawio" | 88 ------------------- 4 files changed, 23 insertions(+), 124 deletions(-) delete mode 100644 "\346\234\252\345\221\275\345\220\215\347\273\230\345\233\276.drawio" diff --git a/src/llamafactory/model/adapter.py b/src/llamafactory/model/adapter.py index 8c8bc9753e..10c4f9707f 100644 --- a/src/llamafactory/model/adapter.py +++ b/src/llamafactory/model/adapter.py @@ -202,7 +202,7 @@ def _setup_lora_tuning( target_parameters = [] if len(finetuning_args.lora_target) == 1 and finetuning_args.lora_target[0] == "all": if finetuning_args.lora_parameters: # if specified the parameters to be adapted, use them - print("Using specified LoRA parameters: ", finetuning_args.lora_parameters) + logger.info_rank0("Using specified LoRA parameters: {}", finetuning_args.lora_parameters) target_parameters = finetuning_args.lora_parameters else: target_modules = find_all_linear_modules(model, finetuning_args.freeze_vision_tower) diff --git a/src/llamafactory/train/test_utils.py b/src/llamafactory/train/test_utils.py index f0013ce9b1..94cc19b27b 100644 --- a/src/llamafactory/train/test_utils.py +++ b/src/llamafactory/train/test_utils.py @@ -43,41 +43,28 @@ def compare_model(model_a: "torch.nn.Module", model_b: "torch.nn.Module", diff_k assert torch.allclose(state_dict_a[name], state_dict_b[name], rtol=1e-4, atol=1e-5) is True -# def check_lora_model(model: "LoraModel") -> tuple[set[str], set[str]]: -# linear_modules, extra_modules = set(), set() -# for name, param in model.named_parameters(): -# if any(module in name for module in ["lora_A", "lora_B"]): -# linear_modules.add(name.split(".lora_", maxsplit=1)[0].split(".")[-1]) -# assert param.requires_grad is True -# assert param.dtype == torch.float32 -# elif "modules_to_save" in name: -# extra_modules.add(name.split(".modules_to_save", maxsplit=1)[0].split(".")[-1]) -# assert param.requires_grad is True -# assert param.dtype == torch.float32 -# else: -# assert param.requires_grad is False -# assert param.dtype == torch.float16 - -# return linear_modules, extra_modules - -def check_lora_model(model: "LoraModel") -> tuple[list[str], list[str], list[str]]: - lora_param_names = [] - injected_param_names = [] - frozen_param_names = [] - +def check_lora_model(model: "LoraModel") -> tuple[set[str], set[str]]: + linear_modules, linear_parameters, extra_modules = set(), set(), set() for name, param in model.named_parameters(): - if "lora_A" in name or "lora_B" in name: - lora_param_names.append(name) - base_name = name.split(".lora_", maxsplit=1)[0] - injected_param_names.append(base_name) - elif not param.requires_grad: - frozen_param_names.append(name) - elif hasattr(param, "_is_lora_injected") and getattr(param, "_is_lora_injected"): - injected_param_names.append(name) - - injected_param_names = 
list(dict.fromkeys(injected_param_names)) - return lora_param_names, injected_param_names, frozen_param_names + if any(module in name for module in ["lora_A", "lora_B"]): + linear_modules.add(name.split(".lora_", maxsplit=1)[0].split(".")[-1]) + parts = name.split(".") + for i, part in enumerate(parts): + if "lora_" in part: + short_name = parts[i - 1] + "." + parts[-1] + linear_parameters.add(short_name) + break + assert param.requires_grad is True + assert param.dtype == torch.float32 + elif "modules_to_save" in name: + extra_modules.add(name.split(".modules_to_save", maxsplit=1)[0].split(".")[-1]) + assert param.requires_grad is True + assert param.dtype == torch.float32 + else: + assert param.requires_grad is False + assert param.dtype == torch.float16 + return linear_modules, linear_parameters, extra_modules def load_train_model(add_valuehead: bool = False, **kwargs) -> "PreTrainedModel": model_args, _, _, finetuning_args, _ = get_train_args(kwargs) diff --git a/tests/model/test_lora.py b/tests/model/test_lora.py index 6011e7cf3a..73dbc06322 100644 --- a/tests/model/test_lora.py +++ b/tests/model/test_lora.py @@ -75,7 +75,7 @@ def test_lora_train_all_modules(): def test_lora_train_extra_modules(): model = load_train_model(additional_target="embed_tokens,lm_head", **TRAIN_ARGS) - _, extra_modules, _ = check_lora_model(model) + _, _, extra_modules = check_lora_model(model) assert extra_modules == {"embed_tokens", "lm_head"} @@ -94,7 +94,7 @@ def test_lora_train_new_adapters(): def test_lora_parameters(): model = load_train_model(lora_parameters="q_proj.weight, k_proj.weight", **TRAIN_ARGS) - _, _, injected_parameters = check_lora_model(model) + _, injected_parameters, _ = check_lora_model(model) assert injected_parameters == {"q_proj.weight", "k_proj.weight"} diff --git "a/\346\234\252\345\221\275\345\220\215\347\273\230\345\233\276.drawio" "b/\346\234\252\345\221\275\345\220\215\347\273\230\345\233\276.drawio" deleted file mode 100644 index e19dafb73e..0000000000 --- "a/\346\234\252\345\221\275\345\220\215\347\273\230\345\233\276.drawio" +++ /dev/null @@ -1,88 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - From 03ba68413618de50d2e4753ee197ea6ccb4fd32c Mon Sep 17 00:00:00 2001 From: zhangziheng Date: Sat, 25 Oct 2025 21:54:39 +1100 Subject: [PATCH 8/9] conflict lora parameters test --- tests/model/test_lora.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/tests/model/test_lora.py b/tests/model/test_lora.py index 73dbc06322..59c5f88ceb 100644 --- a/tests/model/test_lora.py +++ b/tests/model/test_lora.py @@ -96,6 +96,12 @@ def test_lora_parameters(): model = load_train_model(lora_parameters="q_proj.weight, k_proj.weight", **TRAIN_ARGS) _, injected_parameters, _ = check_lora_model(model) assert injected_parameters == {"q_proj.weight", "k_proj.weight"} + +def test_lora_target_and_parameters_conflicts(): + model = load_train_model(lora_parameters="q_proj.weight",lora_target="q_proj,v_proj", **TRAIN_ARGS) + linear_modules, injected_parameters, _ = check_lora_model(model) + assert injected_parameters == {"q_proj.weight", "v_proj.weight"} + assert linear_modules == {"q_proj", "v_proj"} @pytest.mark.usefixtures("fix_valuehead_cpu_loading") From 18418ba6345d2a831c31cea57ffe85af1a652938 Mon Sep 17 00:00:00 2001 From: Ziheng-Zhang-AUS Date: Sun, 26 Oct 2025 20:08:47 +1100 Subject: [PATCH 9/9] Delete .DS_Store --- .DS_Store | Bin 8196 -> 0 bytes 1 file 
changed, 0 insertions(+), 0 deletions(-) delete mode 100644 .DS_Store diff --git a/.DS_Store b/.DS_Store deleted file mode 100644 index a2998f56e1df3548862af16146988bcc62da6368..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8196 zcmeHM&ubGw6n@htO+dkbqV=}uNl-C(6O^>Xi&$u_dbRmcnqtC+G%Y>uKOiVxELafq z5Af!-H&N`-g9pL0AidU`-`kmN-pgjAUPRiNF!Ppq@6G$(x6_?{TOu;^o6R!OED;sa zS(Z*<7}HqKJ=Lb%%wFlmm6WHo3iv1o&nE*XTUSy8So7JFAU(!=2px&_pR2C zo&nFmv1EXs4*@#Mij$F*@uLGnTmnGmFda8sm-qrQaI)fLWF<6c#=1gUSK~?yW8EtlV&dpnKZwoARIK!)XTUR%W`Jw=99^X~TBp9M-#;wglDbY< zt=8LN9dpG`&zAPyExe!8`sPG^w zvQ8%)BFVN=cxaFD%utOs@LQ!mZ74Lk&pV@fiDm!Ny#W@rhD4HOqj2OI2JS}_NcvP$ z7+#hxN}Y^<=ED6!{KQwr9;a7vcdv2W8N>~#L65;ePsOJ4{-|EYU%8&dS8GTNSvCp> zMU10}_}gHhLtSd)THgyg_&&}?;yL>@!IN4`Vwz;9aM?o!AGq*iTs5kLsT%db*ds7j zLvPWl=IrE8;QXzR#C>=t+}^Q&SAXC)+~t4cr}Cmj=DUDB-#{+w;CpoEo5GE>g~Ysj z>cL>|jjSV?&bCr`OqcOY15b|{ip|qk-V8M|{Lb$|`fp-yUZpvGflTEUC3583{2b1X1V^y9?Save`NFbiFpP*1E`gA)#d6um~gdqiW9E21N0SiZfqA> y8M|PJ%W=r~O$jpjhas*5n6gjB$;e8~p#Ad?0kRJ9=Re-}>^p})|Ksg=Y=&QAZLKf>
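
Reviewer notes (editorial addition, not part of any patch above):

Target resolution: the branch added to _setup_lora_tuning() in patch 4 only consults lora_parameters when lora_target is left at "all"; an explicit module list always wins and target_parameters then stays empty, which is the behavior exercised by test_lora_target_and_parameters_conflicts in patch 8. Below is a minimal standalone sketch of that precedence, with a hard-coded placeholder list standing in for find_all_linear_modules(); it is an illustration of the resolution order, not code from the patches.

    from typing import Optional

    def resolve_lora_targets(
        lora_target: list[str], lora_parameters: Optional[list[str]]
    ) -> tuple[list[str], list[str]]:
        """Mirror of the resolution order implemented in _setup_lora_tuning()."""
        target_modules: list[str] = []
        target_parameters: list[str] = []
        if len(lora_target) == 1 and lora_target[0] == "all":
            if lora_parameters:  # parameters explicitly listed: adapt nn.Parameters only
                target_parameters = lora_parameters
            else:  # otherwise fall back to all linear modules (find_all_linear_modules in the real code)
                target_modules = ["q_proj", "k_proj", "v_proj", "o_proj"]  # placeholder result
        else:  # an explicit lora_target list takes precedence; lora_parameters is not forwarded
            target_modules = lora_target
        return target_modules, target_parameters

    assert resolve_lora_targets(["all"], ["experts.gate_up_proj"]) == ([], ["experts.gate_up_proj"])
    assert resolve_lora_targets(["q_proj", "v_proj"], ["q_proj.weight"]) == (["q_proj", "v_proj"], [])

Keeping the two lists mutually exclusive in the "all" branch presumably avoids injecting both a module adapter and a parameter adapter into the same weight.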
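Relation to PEFT: downstream of that resolution, the list reaches peft.LoraConfig through the "target_parameters" entry added to peft_kwargs in patches 3 and 4, so the feature needs a PEFT release recent enough to accept that keyword. A rough sketch of the equivalent direct PEFT call follows; the checkpoint id and parameter paths are placeholders, and the real paths depend on the MoE architecture being tuned.

    from peft import LoraConfig, get_peft_model
    from transformers import AutoModelForCausalLM

    model = AutoModelForCausalLM.from_pretrained("org/moe-model")  # placeholder model id

    lora_config = LoraConfig(
        r=8,
        lora_alpha=16,
        lora_dropout=0.0,
        # Adapt raw nn.Parameters (e.g. fused expert weights) instead of nn.Linear modules;
        # this is roughly what lora_parameters: experts.gate_up_proj,experts.down_proj resolves to.
        target_parameters=["experts.gate_up_proj", "experts.down_proj"],  # illustrative names
    )
    model = get_peft_model(model, lora_config)
    model.print_trainable_parameters()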