Commit 0f0c3bc (verified) by codeShare · Parent: 6eeb20b

Upload T5_encoding_test.ipynb

Files changed (1): T5_encoding_test.ipynb (+1 -1)
T5_encoding_test.ipynb CHANGED
@@ -1 +1 @@
- Removed (old version of the notebook, one JSON line): Colab provenance timestamp 1753506570273, authorship_tag "ABX9TyP1U1pJP0UrPl9WWyhvWuOQ". Its first four cells are identical to the new version shown below; beyond those it carried three extra cells, which this commit drops.

Removed cell 5 (clone a captioning repo and install its requirements):

#!git clone https://github.com/Tans37/Multimodal-Image-Captioning.git
%cd /content/Multimodal-Image-Captioning
!pip install -r requirements.txt

Removed cell 6 (download the Tensor Art post list):

# @markdown Import list of Tensor Art posts as a 150 MB .parquet file (SFW only)
!pip install -U datasets
from datasets import load_dataset
ds = load_dataset("bigdata-pw/tensorart")
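Cell 6 eagerly downloads the full ~150 MB parquet before anything can be browsed. A lighter sketch, assuming the streamed records expose the same 'generationData' and 'url' fields as the eager load, is to iterate the split lazily with the datasets streaming mode:

# Sketch: stream the dataset instead of downloading the whole file up front.
# Assumes the same record layout ('generationData', 'url') as the eager load.
from itertools import islice
from datasets import load_dataset

ds_stream = load_dataset("bigdata-pw/tensorart", split="train", streaming=True)

# Peek at the first few records without materializing the parquet file
for record in islice(ds_stream, 3):
    print(record.get("generationData"), record.get("url"))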
Removed cell 7 (browse images; the loop stops at the first post that has a prompt):

# @markdown Browse images on Tensor Art (SFW only)
# Example: access items in the dataset (adjust based on dataset structure)
from IPython.display import Image, display

start_at_K = 48  # @param {type:'slider',min:0,max:270}
start_at = 50    # @param {type:'slider',min:0,max:999}
travel = 100
START_AT = 1000 * start_at_K + start_at

for index in range(START_AT, START_AT + travel):
    item = ds['train'][index]['generationData']
    if not item:
        continue
    # Scan the nested generationData dict for a 'prompt' field
    prompt = ''
    for key in item:
        if item[key]:
            subitem = item[key]
            for subkey in subitem:
                if subkey == 'prompt':
                    prompt = subitem[subkey]
    if prompt != '':
        print(f'at index = {index}: {prompt}')
        try:
            display(Image(url=ds['train'][index]['url']))
        except Exception:
            print('No displayable image URL for this post.')
        break
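The two-level scan above is defensive because generationData values can be None. A flatter helper with the same logic (find_prompt is a hypothetical name, not part of the committed notebook):

# Sketch: extract the first non-empty 'prompt' from a generationData record.
# Mirrors the nested loop above; find_prompt is a hypothetical helper name.
def find_prompt(gen_data):
    if not gen_data:
        return ''
    for value in gen_data.values():
        if isinstance(value, dict) and value.get('prompt'):
            return value['prompt']
    return ''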
Stored output of the removed browse cell:

at index = 48050: A sun-drenched garden gazebo provides the picturesque backdrop for this whimsical scene. A playful woman with bunny ears and an hourglass figure sits at a wooden table, her purple locks adorned with various fake animal ears that dangle like ornaments. Her parted bangs frame her face with delicate precision, drawing attention to her bright smile. Wearing a vibrant yellow off-the-shoulder shirt and a black miniskirt, she exudes carefree charm. A cup with a drinking straw held casually in one hand adds a touch of effortless elegance, while her earrings glint in the warm daylight.

(Displayed image: https://image.tensorartassets.com/posts/images/619665233061022599/13dea14e-d350-4b81-9865-ca6626fcd199.png)
 
+ Added (new version of the notebook, one JSON line): the Colab provenance gains a second entry (timestamp 1753784751931), the authorship_tag becomes "ABX9TyPrayIBQD6IWp9FEuabFuiO", and the notebook now contains only the four cells below.

Cell 1 (install dependencies):

!pip install transformers sentencepiece  # sentencepiece is required by the slow T5Tokenizer

Cell 2 (encode with the T5 encoder only):

# @markdown Use the T5 encoder only
# Step 2: Import necessary libraries
from transformers import T5Tokenizer, T5Model
import torch

# Step 3: Load the T5 tokenizer and model
# You can use 't5-small', 't5-base', 't5-large', etc.; 't5-small' is lighter for Colab
tokenizer = T5Tokenizer.from_pretrained("t5-base")
model = T5Model.from_pretrained("t5-base")

# Step 4: Define the input string
input_string = "Studies have shown that owning a dog is good for you"  # @param {type:'string'}

# Step 5: Tokenize the input string to get token IDs
input_ids = tokenizer(input_string, return_tensors="pt").input_ids
print("Token IDs:", input_ids)

# Step 6: (Optional) Get hidden-state embeddings
# Ensure the model is in evaluation mode
model.eval()

# Forward pass to get encoder outputs
with torch.no_grad():
    outputs = model.encoder(input_ids=input_ids)
    encoder_hidden_states = outputs.last_hidden_state

# Print the shape of the hidden states
print("Encoder Hidden States Shape:", encoder_hidden_states.shape)
# Shape is [batch_size, sequence_length, hidden_size], e.g. [1, num_tokens, 768] for t5-base

# Step 7: (Optional) Decode token IDs back to text for verification
decoded_text = tokenizer.decode(input_ids[0], skip_special_tokens=True)
print("Decoded Text:", decoded_text)
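Since this cell only ever touches the encoder, transformers' T5EncoderModel is worth knowing about: it loads the encoder stack without the decoder weights. A minimal sketch under the same checkpoint (this class is not used in the committed notebook):

# Sketch: encoder-only loading; skips the decoder half of t5-base.
from transformers import T5Tokenizer, T5EncoderModel
import torch

tok = T5Tokenizer.from_pretrained("t5-base")
enc = T5EncoderModel.from_pretrained("t5-base")
enc.eval()

ids = tok("Studies have shown that owning a dog is good for you",
          return_tensors="pt").input_ids
with torch.no_grad():
    states = enc(input_ids=ids).last_hidden_state  # [1, num_tokens, 768]
print(states.shape)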
Cell 3 (full encoder-decoder translation):

# @markdown 🇫🇷 Translate using the T5 model <br>
# @markdown Note: NOT a FLUX feature, since FLUX only uses the T5 encoder!

# Step 2: Import necessary libraries
from transformers import T5Tokenizer, T5ForConditionalGeneration
import torch  # needed for torch.no_grad() when this cell runs standalone

# Step 3: Load the T5 tokenizer and model
# Use 't5-base' for balance, 't5-small' for speed, or 't5-large' for better quality
tokenizer = T5Tokenizer.from_pretrained("t5-base")
model = T5ForConditionalGeneration.from_pretrained("t5-base")

# Step 4: Define the input string with the instruction
# Note: T5's canonical training prefix is "translate English to French: ..."
input_string = "translate to French: The sun is shining today."  # @param {type:'string'}

# Step 5: Tokenize the input string
input_ids = tokenizer(input_string, return_tensors="pt").input_ids

# Step 6: Generate the output
model.eval()
with torch.no_grad():
    outputs = model.generate(input_ids, max_length=50)
    translated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

# Step 7: Print the result
print("Translated Text:", translated_text)

Cell 4 (compare similarity of two prompts):

# @markdown ⚖️ Compare Similarity

# Step 1: Install required libraries
!pip install transformers torch

# Step 2: Import necessary libraries
from transformers import T5Tokenizer, T5Model
import torch
import torch.nn.functional as F

# Step 3: Load T5 tokenizer and model
tokenizer = T5Tokenizer.from_pretrained("t5-base")
model = T5Model.from_pretrained("t5-base")

# Step 4: Define input strings
text1 = "a photo The sun is shining today"  # @param {type:'string'}
text2 = "anime screencap The sun is shining today "  # @param {type:'string'}

# Step 5: Tokenize the input strings
inputs1 = tokenizer(text1, return_tensors="pt", padding=True, truncation=True)
inputs2 = tokenizer(text2, return_tensors="pt", padding=True, truncation=True)

# Step 6: Get T5 encoder hidden states
model.eval()
with torch.no_grad():
    # Get encoder outputs for both inputs
    outputs1 = model.encoder(input_ids=inputs1.input_ids)
    outputs2 = model.encoder(input_ids=inputs2.input_ids)

    # Last hidden states: [batch_size, sequence_length, hidden_size]
    hidden_states1 = outputs1.last_hidden_state
    hidden_states2 = outputs2.last_hidden_state

# Step 7: Aggregate hidden states (mean pooling)
# Average across the sequence dimension to get one vector per input
embedding1 = hidden_states1.mean(dim=1)  # Shape: [1, hidden_size]
embedding2 = hidden_states2.mean(dim=1)  # Shape: [1, hidden_size]

# Step 8: Compute cosine similarity
cosine_sim = F.cosine_similarity(embedding1, embedding2, dim=1)
print("Cosine Similarity:", cosine_sim.item())

# Step 9: (Optional) Print token IDs for reference
print("Token IDs for text1:", inputs1.input_ids)
print("Token IDs for text2:", inputs2.input_ids)
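One caveat on Step 7: plain .mean(dim=1) averages every position, so if the two prompts were tokenized together in one padded batch, pad tokens would dilute the embeddings. A hedged sketch of mask-aware pooling (reusing the tokenizer and model loaded above; the masking logic is an addition, not part of the committed notebook):

# Sketch: batch both prompts and mean-pool only over real (non-pad) tokens.
import torch
import torch.nn.functional as F

texts = ["a photo The sun is shining today",
         "anime screencap The sun is shining today"]
batch = tokenizer(texts, return_tensors="pt", padding=True, truncation=True)

model.eval()
with torch.no_grad():
    hidden = model.encoder(input_ids=batch.input_ids,
                           attention_mask=batch.attention_mask).last_hidden_state

mask = batch.attention_mask.unsqueeze(-1)   # [batch, seq_len, 1]
summed = (hidden * mask).sum(dim=1)         # [batch, hidden_size]
counts = mask.sum(dim=1).clamp(min=1)       # [batch, 1] real-token counts
embeddings = summed / counts

cos = F.cosine_similarity(embeddings[0:1], embeddings[1:2], dim=1)
print("Masked Cosine Similarity:", cos.item())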