diff --git a/notebooks/en/fine_tuning_vlm_trl.ipynb b/notebooks/en/fine_tuning_vlm_trl.ipynb
index 29b5851d..a072b773 100644
--- a/notebooks/en/fine_tuning_vlm_trl.ipynb
+++ b/notebooks/en/fine_tuning_vlm_trl.ipynb
@@ -607,7 +607,6 @@
     "    if 'model' in globals(): del globals()['model']\n",
     "    if 'processor' in globals(): del globals()['processor']\n",
     "    if 'trainer' in globals(): del globals()['trainer']\n",
-    "    if 'peft_model' in globals(): del globals()['peft_model']\n",
     "    if 'bnb_config' in globals(): del globals()['bnb_config']\n",
     "    time.sleep(2)\n",
     "\n",
@@ -738,17 +737,9 @@
     "id": "ITmkRHWCKYjf",
     "outputId": "49440aaf-89a4-4810-ad40-eafe4582bab3"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "trainable params: 2,523,136 || all params: 8,293,898,752 || trainable%: 0.0304\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "from peft import LoraConfig, get_peft_model\n",
+    "from peft import LoraConfig\n",
     "\n",
     "# Configure LoRA\n",
     "peft_config = LoraConfig(\n",
@@ -758,13 +749,7 @@
     "    bias=\"none\",\n",
     "    target_modules=[\"q_proj\", \"v_proj\"],\n",
     "    task_type=\"CAUSAL_LM\",\n",
-    ")\n",
-    "\n",
-    "# Apply PEFT model adaptation\n",
-    "peft_model = get_peft_model(model, peft_config)\n",
-    "\n",
-    "# Print trainable parameters\n",
-    "peft_model.print_trainable_parameters()"
+    ")"
    ]
   },
   {
diff --git a/notebooks/zh-CN/fine_tuning_vlm_trl.ipynb b/notebooks/zh-CN/fine_tuning_vlm_trl.ipynb
index a9527746..9f4b2629 100644
--- a/notebooks/zh-CN/fine_tuning_vlm_trl.ipynb
+++ b/notebooks/zh-CN/fine_tuning_vlm_trl.ipynb
@@ -1120,7 +1120,6 @@
     "    if 'model' in globals(): del globals()['model']\n",
     "    if 'processor' in globals(): del globals()['processor']\n",
     "    if 'trainer' in globals(): del globals()['trainer']\n",
-    "    if 'peft_model' in globals(): del globals()['peft_model']\n",
     "    if 'bnb_config' in globals(): del globals()['bnb_config']\n",
     "    time.sleep(2)\n",
     "\n",
@@ -1243,17 +1242,9 @@
     "id": "ITmkRHWCKYjf",
     "outputId": "3ca824c9-4aca-4d5b-e942-7a1705939e08"
    },
-   "outputs": [
-    {
-     "name": "stdout",
-     "output_type": "stream",
-     "text": [
-      "trainable params: 2,523,136 || all params: 8,293,898,752 || trainable%: 0.0304\n"
-     ]
-    }
-   ],
+   "outputs": [],
    "source": [
-    "from peft import LoraConfig, get_peft_model\n",
+    "from peft import LoraConfig\n",
    "\n",
     "# Configure LoRA\n",
     "peft_config = LoraConfig(\n",
@@ -1263,13 +1254,7 @@
     "    bias=\"none\",\n",
     "    target_modules=[\"q_proj\", \"v_proj\"],\n",
     "    task_type=\"CAUSAL_LM\",\n",
-    ")\n",
-    "\n",
-    "# Apply PEFT model adaptation\n",
-    "peft_model = get_peft_model(model, peft_config)\n",
-    "\n",
-    "# Print trainable parameters\n",
-    "peft_model.print_trainable_parameters()"
+    ")"
    ]
   },
   {
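The manual `get_peft_model()` call these hunks remove is redundant when the `LoraConfig` is handed straight to TRL's `SFTTrainer`, which accepts a `peft_config` argument and wraps the base model itself. A minimal sketch of that pattern follows; `model`, `training_args`, and `train_dataset` are placeholder names for the notebook's objects, and the numeric LoRA hyperparameters are assumptions, since the hunks above truncate them:

```python
# Sketch: pass peft_config to SFTTrainer instead of calling get_peft_model()
# by hand. `model`, `training_args`, and `train_dataset` stand in for the
# notebook's objects; the numeric hyperparameters below are assumed values,
# not taken from the diff.
from peft import LoraConfig
from trl import SFTTrainer

peft_config = LoraConfig(
    lora_alpha=16,        # assumed value, not shown in the diff
    lora_dropout=0.05,    # assumed value, not shown in the diff
    r=8,                  # assumed value, not shown in the diff
    bias="none",
    target_modules=["q_proj", "v_proj"],
    task_type="CAUSAL_LM",
)

trainer = SFTTrainer(
    model=model,                  # the base model, not a PeftModel
    args=training_args,
    train_dataset=train_dataset,
    peft_config=peft_config,      # SFTTrainer applies the LoRA adapter itself
)

# The wrapped model is a PeftModel, so the trainable-parameter summary the
# deleted output cell used to print is still available:
trainer.model.print_trainable_parameters()
```

With this pattern there is no separate `peft_model` global to clean up, which is what the `clear_memory`-cell hunk in each notebook reflects.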