import random

# Item / merchant / service names used to label receipt lines.
# NOTE(review): the raw list contains a few duplicates ("Wine", "Water", "Gas");
# generate_random_bill() de-duplicates before sampling so a bill never repeats
# an item merely because the vocabulary does.
vocabulary = [
    "Apple", "Banana", "Coffee", "Dumpling", "Eggs", "Fries", "Garlic", "Ham",
    "Ice cream", "Juice", "Ketchup", "Lemon", "Milk", "Noodles", "Orange",
    "Pasta", "Quinoa", "Rice", "Salad", "Tea", "Udon", "Vinegar", "Water", "Yogurt",
    "Bread", "Cheese", "Donuts", "Espresso", "Fish", "Grapes", "Honey",
    "Jam", "Kiwi", "Lobster", "Mango", "Nuts", "Oatmeal", "Pizza", "Ramen",
    "Soda", "Tuna", "Vanilla", "Wine", "Zucchini", "Steak", "Burger", "Chicken",
    "Pork", "Beef", "Lamb", "Tofu", "Avocado", "Tomato", "Potato", "Carrot",
    "Broccoli", "Cauliflower", "Spinach", "Lettuce", "Cucumber", "Onion",
    "Bottled water", "Sparkling water", "Green tea", "Black tea", "Beer", "Wine",
    "Whiskey", "Vodka", "Rum", "Gin", "Tequila", "Cocktail", "Smoothie", "Milkshake",
    "Shampoo", "Conditioner", "Soap", "Toothpaste", "Toothbrush", "Floss", "Mouthwash",
    "Detergent", "Fabric softener", "Bleach", "Disinfectant", "Sponge", "Brush",
    "Toilet paper", "Paper towel", "Tissues", "Napkins", "Trash bags", "Vacuum cleaner",
    "Mop", "Broom", "Dustpan", "Duster", "Wipes", "Air freshener", "Candle",
    "Light bulb", "Batteries", "Extension cord", "Plug adapter", "Hanger",
    "Laundry basket", "Iron", "Ironing board", "Scissors", "Tape", "Glue",
    "Nail clipper", "Razor", "Shaving cream", "Deodorant", "Perfume", "Cologne",
    "Lotion", "Sunscreen", "Insect repellent", "Band-aids", "Cotton swabs",
    "Notebook", "Journal", "Planner", "Calendar", "Pen", "Pencil", "Marker",
    "Highlighter", "Eraser", "Ruler", "Stapler", "Staples", "Paper clips",
    "Binder", "Folder", "Envelope", "Sticky notes", "Index cards", "Tape dispenser",
    "Calculator", "Laptop", "Tablet", "E-reader", "Charger", "USB drive",
    "Memory card", "External hard drive", "Mouse", "Keyboard", "Monitor",
    "Headphones", "Speakers", "Webcam", "Microphone", "Printer", "Scanner",
    "Ink cartridge", "Toner", "Paper", "Cardstock", "Laminating sheets",
    "T-shirt", "Shirt", "Blouse", "Sweater", "Jacket", "Coat", "Jeans",
    "Pants", "Shorts", "Skirt", "Dress", "Suit", "Tie", "Socks", "Underwear",
    "Bra", "Pajamas", "Bathrobe", "Slippers", "Shoes", "Boots", "Sandals",
    "Sneakers", "Hat", "Cap", "Beanie", "Scarf", "Gloves", "Mittens",
    "Sunglasses", "Glasses", "Watch", "Wallet", "Purse", "Backpack", "Tote bag",
    "Luggage", "Umbrella", "Belt", "Jewelry", "Necklace", "Bracelet", "Ring",
    "Starbucks", "McDonald's", "Burger King", "KFC", "Subway", "Pizza Hut",
    "Domino's", "Walmart", "Target", "Costco", "Kroger", "Safeway", "Trader Joe's",
    "Whole Foods", "CVS", "Walgreens", "Home Depot", "Lowe's", "Best Buy",
    "Apple Store", "Microsoft Store", "Amazon", "eBay", "Etsy", "Netflix",
    "Spotify", "Uber", "Lyft", "Airbnb", "Nike", "Adidas", "Puma", "Reebok",
    "H&M", "Zara", "Gap", "Old Navy", "IKEA", "Wayfair", "7-Eleven", "FedEx",
    "UPS", "USPS", "Internet", "Phone service", "Cable TV",
    "Streaming", "Electricity", "Gas",
    "Water", "Sewage", "Trash collection", "Rent", "Mortgage", "Insurance",
    "Car payment", "Gas", "Parking", "Toll", "Bus fare", "Train ticket",
    "Plane ticket", "Hotel", "Gym membership", "Haircut", "Manicure", "Pedicure",
    "Massage", "Therapy", "Doctor visit", "Dentist", "Veterinarian", "Tuition",
    "Tutoring", "Course fee", "Subscription", "Donation", "Tip", "Tax",
    "Cleaning service", "Lawn care", "Snow removal", "Plumber", "Electrician",
    "Repair service", "Installation fee", "Delivery fee", "Shipping"
]


def generate_random_price(op_type=None):
    """Generate one price entry for a receipt line.

    Parameters
    ----------
    op_type : str or None
        One of "simple", "divide", "minus", "multiply"; a random one is
        chosen when None.

    Returns
    -------
    (float, str)
        The numeric value of the line and its textual rendering — either a
        plain price or an arithmetic expression such as "12.00/3=4.00".

    Raises
    ------
    ValueError
        If op_type is not a supported operation (previously the function
        fell through and returned None, which crashed at the call site
        during tuple unpacking).
    """
    if op_type is None:
        op_type = random.choice(["simple", "divide", "minus", "multiply"])

    if op_type == "simple":
        # Plain price: 1.5-50 rounded to 0-2 decimal places.
        price = round(random.uniform(1.5, 50), random.randint(0, 2))
        return price, f"{price:.2f}" if price % 1 != 0 else f"{int(price)}"

    if op_type == "divide":
        # A price split by 2-4 (e.g. shared bill).
        original = round(random.uniform(8, 100), random.randint(0, 2))
        divisor = random.randint(2, 4)
        result = original / divisor
        return result, f"{original:.2f}/{divisor}={result:.2f}"

    if op_type == "minus":
        # A price with a discount of at most one third of the original.
        original = round(random.uniform(10, 80), random.randint(0, 2))
        discount = round(random.uniform(1, original / 3), random.randint(0, 2))
        result = original - discount
        return result, f"{original:.2f}-{discount:.2f}={result:.2f}"

    if op_type == "multiply":
        # A price with a tax / service-charge multiplier (5%-25%).
        base = round(random.uniform(8, 50), random.randint(0, 2))
        multiplier = round(random.uniform(1.05, 1.25), 2)
        result = base * multiplier
        return result, f"{base:.2f}*{multiplier:.2f}={result:.2f}"

    raise ValueError(f"unknown op_type: {op_type!r}")


def format_item_line(item, price_text):
    """Concatenate item and price text in random order.

    No separator is inserted on purpose — the glued form ("3.50Apple" /
    "Apple3.50") appears to be the intended output format of this generator.
    """
    if random.random() < 0.5:
        return f"{price_text}{item}"
    return f"{item}{price_text}"


def generate_random_bill(num_items=1000, include_division=True):
    """Build one synthetic receipt of `num_items` lines.

    Parameters
    ----------
    num_items : int
        Number of item lines to generate.
    include_division : bool
        When True, guarantee at least one "a/b=c" line (forced on the last
        item if none occurred naturally).

    Returns
    -------
    str
        The bill text: one line per item, then a blank line and the
        "Total Number" / "Total Items" footer.
    """
    bill_items = []
    total = 0.0

    # De-duplicate the vocabulary (it contains a few repeated entries), then
    # draw a shuffled pool of candidate names so items repeat only once the
    # pool is exhausted.
    unique_vocabulary = list(dict.fromkeys(vocabulary))
    available_items = random.sample(
        unique_vocabulary, min(len(unique_vocabulary), num_items * 2)
    )

    for idx in range(num_items):
        item = available_items.pop() if available_items else random.choice(unique_vocabulary)

        # Weighted choice of price style: mostly plain prices.
        op_weights = {"simple": 0.55, "divide": 0.25, "minus": 0.1, "multiply": 0.1}
        op_type = random.choices(
            list(op_weights.keys()),
            weights=list(op_weights.values()),
            k=1,
        )[0]

        # Force a division on the final line if none was generated yet.
        if include_division and idx == num_items - 1 and not any(
            "/" in entry[1] for entry in bill_items
        ):
            op_type = "divide"

        value, price_text = generate_random_price(op_type)
        bill_items.append((item, price_text, value))
        total += value

    # One "<item><price>" line per item; footer preceded by a blank line.
    bill_text = "".join(
        format_item_line(name, price_text) + "\n"
        for name, price_text, _value in bill_items
    )
    bill_text += f"\nTotal Number = {total:.2f}"
    bill_text += f"\nTotal Items = {len(bill_items)}"

    return bill_text


def main():
    """Generate a single ~1000-line bill and save it to 'random_bill.txt'."""
    num_items = random.randint(1000, 1001)
    bill = generate_random_bill(num_items)

    with open("random_bill.txt", "w", encoding="utf-8") as f:
        f.write(bill)
    print("\nBill has been saved to 'random_bill.txt'")
import os

def save_bill_to_file(bill_text, num_items, count, directory="/content/bills/", filename=None):
    """Persist one bill to <directory>/<filename> and return its full path.

    Parameters
    ----------
    bill_text : str
        Full text of the bill.
    num_items, count : int or None
        Used to build the default filename "bill_<num_items>_<count>.txt";
        at least one must be non-None when `filename` is not given.
    directory : str
        Target folder, created on demand.
    filename : str or None
        Explicit filename overriding the default.

    Bug fix: the existence check previously tested the bare `filename`
    (i.e. looked in the process CWD, almost never true) and that branch
    returned the bare name instead of a path — so existing bills were
    silently overwritten. Both now use the joined `filepath`.
    """
    os.makedirs(directory, exist_ok=True)

    if num_items is None and count is None:
        raise ValueError("Either num_items or count must be provided")

    filename = filename or f"bill_{num_items}_{count}.txt"
    filepath = os.path.join(directory, filename)

    # Keep an already-generated bill instead of overwriting it.
    if os.path.isfile(filepath):
        return filepath

    with open(filepath, "w", encoding="utf-8") as f:
        f.write(bill_text)

    return filepath


def generate_multiple_bills(count=100, num_items=1000, directory="/content/bills/"):
    """Generate `count` bills of roughly `num_items` lines each and save them
    under <directory><num_items>/.

    Returns a list of {"index": i, "filepath": path} dicts, one per bill.
    Relies on generate_random_bill() defined in the previous cell.
    """
    bills_info = []

    for i in range(count):
        # Jitter the bill size by +/-50 lines; the filename keeps the
        # nominal `num_items` so files group by configuration.
        items = random.randint(num_items - 50, num_items + 50)

        bill_text = generate_random_bill(items)
        filename = f"bill_{num_items}_{i+1}.txt"
        filepath = save_bill_to_file(
            bill_text, num_items, i + 1, directory + str(num_items) + '/', filename
        )

        bills_info.append({
            "index": i + 1,
            "filepath": filepath,
        })

    return bills_info


if __name__ == "__main__":  # same effect as the bare call in the notebook cell
    generate_multiple_bills(count=100, num_items=1000, directory="/content/bills/")


import numpy as np
import pandas as pd
import random
from datetime import datetime, timedelta
# NOTE(review): `import datetime` (module form) and `import matplotlib.pyplot`
# from the original cells were used nowhere in this notebook and were removed.


def generate_random_start_time():
    """Return a random timestamp between 2020-01-01 and 2025-12-31."""
    start_date = datetime(2020, 1, 1)
    end_date = datetime(2025, 12, 31)
    delta = end_date - start_date
    random_days = random.randint(0, delta.days)
    random_seconds = random.randint(0, 86400 - 1)  # any second within the day
    return start_date + timedelta(days=random_days, seconds=random_seconds)


def generate_heart_rate_data(num_points):
    """Simulate `num_points` heart-rate samples.

    Each sample is 'rest' (80%), 'exercise' (10%) or 'recovery' (10%), with
    the heart rate drawn from a state-specific normal distribution and
    truncated to int.

    Returns (heart_rates, states) as parallel lists of int and str.
    """
    states = []
    heart_rates = []

    for _ in range(num_points):
        state = random.choices(['rest', 'exercise', 'recovery'], weights=[0.8, 0.1, 0.1])[0]
        if state == 'rest':
            hr = np.random.normal(70, 3)
        elif state == 'exercise':
            hr = np.random.normal(140, 8)
        else:  # recovery
            hr = np.random.normal(90, 5)

        states.append(state)
        heart_rates.append(int(hr))

    return heart_rates, states


def generate_vital_log(count=50, num_points=100, directory=None):
    """Generate one vital log (timestamp / heart_rate / state at 10-minute
    intervals) and write it as both CSV and JSON into `directory`.

    Parameters
    ----------
    count : int
        Zero-based file index; output files are named
        heartrate_<num_points>_<count+1>.{csv,json}.
    num_points : int
        Number of samples.
    directory : str
        Target folder (required).

    Raises
    ------
    ValueError
        When directory is None — previously os.makedirs(None) raised an
        opaque TypeError.
    """
    if directory is None:
        raise ValueError("directory must be provided")

    interval_minutes = 10
    start_time = generate_random_start_time()
    timestamps = [start_time + timedelta(minutes=i * interval_minutes) for i in range(num_points)]

    heart_rates, states = generate_heart_rate_data(num_points)

    vital_log = pd.DataFrame({
        'timestamp': timestamps,
        'heart_rate': heart_rates,
        'state': states,
    })

    # Render timestamps as strings for the JSON copy only; the CSV keeps
    # pandas' default datetime formatting.
    vital_log_serialized = vital_log.copy()
    vital_log_serialized['timestamp'] = vital_log_serialized['timestamp'].dt.strftime('%Y-%m-%d %H:%M:%S')
    json_data = vital_log_serialized.to_json(orient='records', indent=2)

    os.makedirs(directory, exist_ok=True)
    csv_path = os.path.join(directory, f"heartrate_{num_points}_{count+1}.csv")
    vital_log.to_csv(csv_path, index=False)

    json_path = os.path.join(directory, f"heartrate_{num_points}_{count+1}.json")
    with open(json_path, 'w') as f:
        f.write(json_data)


if __name__ == "__main__":  # driver, as executed by the notebook cell
    num_points = 1000
    count = 50
    directory = "/content/drive/MyDrive/sheets_vital_log/"
    for i in range(count):
        generate_vital_log(count=i, num_points=num_points, directory=directory + str(num_points) + '/')
import json
import random

counts = 50  # number of log files per configuration

# Phrasings for the TRUE heart-rate value embedded in each line.
heart_rate_variations = [
    lambda hr: f"The true heart rate is {hr}.",
    lambda hr: f"The heart rate = {hr}.",
    lambda hr: f"HR: {hr} (beats per minute).",
    lambda hr: f"Current heart rate: {hr} BPM.",
    lambda hr: f"Heart rate measured at {hr}.",
    lambda hr: f"{hr} — that's the heart rate!",
    lambda hr: f"Your heart is beating at {hr} BPM.",
    lambda hr: f"Pulse rate detected: {hr}.",
    lambda hr: f"Heart rate reading: {hr} beats per minute.",
    lambda hr: f"{hr} BPM — steady and normal.",
    lambda hr: f"The monitor shows a heart rate of {hr}.",
    lambda hr: f"Heart rate (measured): {hr}.",
    lambda hr: f"BPM = {hr} (heart rate).",
    lambda hr: f"Your current pulse is {hr} beats per minute.",
    lambda hr: f"Heart rate recorded as {hr} BPM.",
    lambda hr: f"{hr} — the magic number for your heart rate!",
    lambda hr: f"HR measurement result: {hr}.",
    lambda hr: f"Beats per minute: {hr}.",
    lambda hr: f"The heart is ticking at {hr} BPM.",
    lambda hr: f"Heart rate analysis: {hr} beats per minute."
]

# Phrasings for generic key/value fields (timestamp, state).
variations = [
    lambda key, value: f"{key}:{value}",
    lambda key, value: f"{key}={value}",
    lambda key, value: f"{key} is {value}",
]

# Phrasings for the distractor (fake) heart-rate value.
fake_heart_rate_variations = [
    lambda hr: f"The fake heart rate is {hr}.",
    lambda hr: f"Fake HR: {hr} bpm.",
]


def format_heart_rate_lines(records):
    """Render vital-log records as natural-language lines.

    Each record (a dict with 'timestamp', 'heart_rate', 'state' keys, as
    produced by generate_vital_log's JSON output) becomes one line that mixes
    the true heart rate with a random fake one in random order.

    Returns a list of formatted lines, one per record.
    """
    lines = []
    for rec in records:
        hr_text = random.choice(heart_rate_variations)(rec["heart_rate"])

        fake_hr = random.randint(50, 200)  # distractor value
        fake_hr_text = random.choice(fake_heart_rate_variations)(fake_hr)

        time_text = random.choice(variations)("timestamp", rec["timestamp"])
        state_text = random.choice(variations)("state", rec["state"])

        # Randomize whether the fake or the true reading comes first.
        if random.random() < 0.5:
            lines.append(f"{time_text} {fake_hr_text} {hr_text} {state_text}")
        else:
            lines.append(f"{time_text} {hr_text} {fake_hr_text} {state_text}")
    return lines


if __name__ == "__main__":
    # NOTE(review): the generation cell above was run only with num_points=1000;
    # the 100/200/500 inputs must have been generated separately or this loop
    # will fail on missing files — TODO confirm.
    for num_items in [100, 200, 500, 1000]:
        for j in range(counts):
            prompt_path = f'/content/drive/MyDrive/sheets_vital_log/{num_items}/heartrate_{num_items}_{j+1}.json'

            with open(prompt_path, 'r') as infile:
                data = json.load(infile)

            lines = format_heart_rate_lines(data)

            output_file = f'/content/drive/MyDrive/sheets_vital_log/{num_items}/heartrate_{num_items}_{j+1}.txt'
            with open(output_file, 'w') as outfile:
                outfile.write("\n".join(lines))


import pandas as pd
import random
import os


def generate_transcript(count=50, num_students=100, directory="/content/drive/MyDrive/sheets_transcript/"):
    """Generate one fake student transcript (name + 5 subject scores) and
    write it as both CSV and JSON into `directory`.

    count : zero-based file index; files are named
        transcript_<num_students>_<count+1>.{csv,json}.
    num_students : number of rows.
    """
    # Local import: third-party dependency, installed on demand by the driver
    # below; keeping it function-local lets the rest of the cell import cleanly.
    from faker import Faker

    fake = Faker()
    names = [fake.name() for _ in range(num_students)]

    subjects = ['Math', 'Chemistry', 'Biology', 'Physics', 'Geography']

    data = {'Name': names}
    for subject in subjects:
        data[subject] = [random.randint(0, 100) for _ in range(num_students)]

    df = pd.DataFrame(data)
    json_data = df.to_json(orient='records')

    os.makedirs(directory, exist_ok=True)
    csv_path = os.path.join(directory, f"transcript_{num_students}_{count+1}.csv")
    df.to_csv(csv_path, index=False)

    json_path = os.path.join(directory, f"transcript_{num_students}_{count+1}.json")
    with open(json_path, 'w') as f:
        f.write(json_data)


if __name__ == "__main__":
    # Ensure faker is available (replaces the `!pip install faker` shell magic
    # with valid, idempotent Python that also works outside IPython).
    import importlib.util
    if importlib.util.find_spec("faker") is None:
        import subprocess
        import sys
        subprocess.check_call([sys.executable, "-m", "pip", "install", "faker"])

    count = 50
    num_students = 1000
    directory = "/content/drive/MyDrive/sheets_transcript/"
    for i in range(count):
        generate_transcript(count=i, num_students=num_students, directory=directory + str(num_students) + '/')


import json
import random  # FIX: this cell previously relied on `random` imported by an earlier cell (hidden kernel state)

counts = 50

# Redefined here (identically) so this cell runs standalone.
variations = [
    lambda key, value: f"{key}:{value}",
    lambda key, value: f"{key}={value}",
    lambda key, value: f"{key} is {value}",
]


def format_transcript_row(record):
    """Render one transcript record (dict of column -> value) as a single
    line, each field phrased with a randomly chosen key/value variation."""
    parts = [random.choice(variations)(key=key, value=value) for key, value in record.items()]
    return " ".join(parts)


if __name__ == "__main__":
    for num_items in [100, 200, 500, 1000]:
        for j in range(counts):
            prompt_path = f'/content/drive/MyDrive/sheets_transcript/{num_items}/transcript_{num_items}_{j+1}.json'

            with open(prompt_path, 'r') as infile:
                data = json.load(infile)

            lines = [format_transcript_row(record) for record in data]

            output_file = f'/content/drive/MyDrive/sheets_transcript/{num_items}/transcript_{num_items}_{j+1}.txt'
            with open(output_file, 'w') as outfile:
                outfile.write("\n".join(lines))