ShivamChattar committed
Commit 59899e2 · verified · 1 Parent(s): d2906fd

Upload 2 files
Files changed (3)
  1. .gitattributes +1 -0
  2. arakoo.ipynb +215 -0
  3. orca dataset (2).pdf +3 -0
.gitattributes CHANGED
@@ -53,3 +53,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.jpg filter=lfs diff=lfs merge=lfs -text
  *.jpeg filter=lfs diff=lfs merge=lfs -text
  *.webp filter=lfs diff=lfs merge=lfs -text
+ orca[[:space:]]dataset[[:space:]](2).pdf filter=lfs diff=lfs merge=lfs -text
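(For context: the `[[:space:]]` sequences are how Git LFS escapes spaces in a tracked filename, so this added line is what a command like `git lfs track "orca dataset (2).pdf"` appends to .gitattributes. It routes the PDF through the LFS clean/smudge filters instead of storing the binary directly in Git history.)
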
arakoo.ipynb ADDED
@@ -0,0 +1,215 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stdout",
+ "output_type": "stream",
+ "text": [
+ "Collecting spacy\n",
+ " Using cached spacy-3.7.4-cp311-cp311-win_amd64.whl (12.1 MB)\n",
+ "Requirement already satisfied: spacy-legacy<3.1.0,>=3.0.11 in c:\\python311\\lib\\site-packages (from spacy) (3.0.12)\n",
+ "Requirement already satisfied: spacy-loggers<2.0.0,>=1.0.0 in c:\\python311\\lib\\site-packages (from spacy) (1.0.5)\n",
+ "Requirement already satisfied: murmurhash<1.1.0,>=0.28.0 in c:\\python311\\lib\\site-packages (from spacy) (1.0.10)\n",
+ "Requirement already satisfied: cymem<2.1.0,>=2.0.2 in c:\\python311\\lib\\site-packages (from spacy) (2.0.8)\n",
+ "Requirement already satisfied: preshed<3.1.0,>=3.0.2 in c:\\python311\\lib\\site-packages (from spacy) (3.0.9)\n",
+ "Collecting thinc<8.3.0,>=8.2.2\n",
+ " Using cached thinc-8.2.3-cp311-cp311-win_amd64.whl (1.5 MB)\n",
+ "Requirement already satisfied: wasabi<1.2.0,>=0.9.1 in c:\\python311\\lib\\site-packages (from spacy) (1.1.2)\n",
+ "Requirement already satisfied: srsly<3.0.0,>=2.4.3 in c:\\python311\\lib\\site-packages (from spacy) (2.4.8)\n",
+ "Requirement already satisfied: catalogue<2.1.0,>=2.0.6 in c:\\python311\\lib\\site-packages (from spacy) (2.0.10)\n",
+ "Requirement already satisfied: weasel<0.4.0,>=0.1.0 in c:\\python311\\lib\\site-packages (from spacy) (0.3.4)\n",
+ "Requirement already satisfied: typer<0.10.0,>=0.3.0 in c:\\python311\\lib\\site-packages (from spacy) (0.9.0)\n",
+ "Requirement already satisfied: smart-open<7.0.0,>=5.2.1 in c:\\python311\\lib\\site-packages (from spacy) (6.4.0)\n",
+ "Requirement already satisfied: tqdm<5.0.0,>=4.38.0 in c:\\python311\\lib\\site-packages (from spacy) (4.65.0)\n",
+ "Requirement already satisfied: requests<3.0.0,>=2.13.0 in c:\\python311\\lib\\site-packages (from spacy) (2.28.2)\n",
+ "Requirement already satisfied: pydantic!=1.8,!=1.8.1,<3.0.0,>=1.7.4 in c:\\python311\\lib\\site-packages (from spacy) (1.10.14)\n",
+ "Requirement already satisfied: jinja2 in c:\\python311\\lib\\site-packages (from spacy) (3.1.3)\n",
+ "Requirement already satisfied: setuptools in c:\\python311\\lib\\site-packages (from spacy) (65.5.0)\n",
+ "Requirement already satisfied: packaging>=20.0 in c:\\users\\shivam\\appdata\\roaming\\python\\python311\\site-packages (from spacy) (23.0)\n",
+ "Requirement already satisfied: langcodes<4.0.0,>=3.2.0 in c:\\python311\\lib\\site-packages (from spacy) (3.3.0)\n",
+ "Requirement already satisfied: numpy>=1.19.0 in c:\\python311\\lib\\site-packages (from spacy) (1.24.2)\n",
+ "Requirement already satisfied: typing-extensions>=4.2.0 in c:\\python311\\lib\\site-packages (from pydantic!=1.8,!=1.8.1,<3.0.0,>=1.7.4->spacy) (4.9.0)\n",
+ "Requirement already satisfied: charset-normalizer<4,>=2 in c:\\python311\\lib\\site-packages (from requests<3.0.0,>=2.13.0->spacy) (3.1.0)\n",
+ "Requirement already satisfied: idna<4,>=2.5 in c:\\python311\\lib\\site-packages (from requests<3.0.0,>=2.13.0->spacy) (3.4)\n",
+ "Requirement already satisfied: urllib3<1.27,>=1.21.1 in c:\\python311\\lib\\site-packages (from requests<3.0.0,>=2.13.0->spacy) (1.26.15)\n",
+ "Requirement already satisfied: certifi>=2017.4.17 in c:\\python311\\lib\\site-packages (from requests<3.0.0,>=2.13.0->spacy) (2022.12.7)\n",
+ "Requirement already satisfied: blis<0.8.0,>=0.7.8 in c:\\python311\\lib\\site-packages (from thinc<8.3.0,>=8.2.2->spacy) (0.7.11)\n",
+ "Requirement already satisfied: confection<1.0.0,>=0.0.1 in c:\\python311\\lib\\site-packages (from thinc<8.3.0,>=8.2.2->spacy) (0.1.4)\n",
+ "Requirement already satisfied: colorama in c:\\users\\shivam\\appdata\\roaming\\python\\python311\\site-packages (from tqdm<5.0.0,>=4.38.0->spacy) (0.4.6)\n",
+ "Requirement already satisfied: click<9.0.0,>=7.1.1 in c:\\python311\\lib\\site-packages (from typer<0.10.0,>=0.3.0->spacy) (8.1.7)\n",
+ "Requirement already satisfied: cloudpathlib<0.17.0,>=0.7.0 in c:\\python311\\lib\\site-packages (from weasel<0.4.0,>=0.1.0->spacy) (0.16.0)\n",
+ "Requirement already satisfied: MarkupSafe>=2.0 in c:\\python311\\lib\\site-packages (from jinja2->spacy) (2.1.3)\n",
+ "Installing collected packages: thinc, spacy\n"
+ ]
+ },
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ " WARNING: Failed to write executable - trying to use .deleteme logic\n",
+ "ERROR: Could not install packages due to an OSError: [WinError 2] The system cannot find the file specified: 'C:\\\\Python311\\\\Scripts\\\\spacy.exe' -> 'C:\\\\Python311\\\\Scripts\\\\spacy.exe.deleteme'\n",
+ "\n",
+ "\n",
+ "[notice] A new release of pip available: 22.3 -> 24.0\n",
+ "[notice] To update, run: python.exe -m pip install --upgrade pip\n"
+ ]
+ }
+ ],
+ "source": [
+ "!pip install spacy\n",
+ "!python -m spacy download en_core_web_sm"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "Importing required Libraries "
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 10,
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "OSError",
+ "evalue": "[E050] Can't find model 'https://mlabonne.github.io/blog/notes/Large%20Language%20Models/orca.html'. It doesn't seem to be a Python package or a valid path to a data directory.",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mOSError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[1;32mIn[10], line 4\u001b[0m\n\u001b[0;32m 1\u001b[0m \u001b[38;5;28;01mimport\u001b[39;00m \u001b[38;5;21;01mspacy\u001b[39;00m\n\u001b[0;32m 3\u001b[0m \u001b[38;5;66;03m# Load spaCy model\u001b[39;00m\n\u001b[1;32m----> 4\u001b[0m nlp \u001b[38;5;241m=\u001b[39m \u001b[43mspacy\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mhttps://mlabonne.github.io/blog/notes/Large\u001b[39;49m\u001b[38;5;132;43;01m%20La\u001b[39;49;00m\u001b[38;5;124;43mnguage\u001b[39;49m\u001b[38;5;124;43m%\u001b[39;49m\u001b[38;5;124;43m20Models/orca.html\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m 6\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mfilter_short_instructions\u001b[39m(dataset):\n\u001b[0;32m 7\u001b[0m filtered_dataset \u001b[38;5;241m=\u001b[39m []\n",
+ "File \u001b[1;32mc:\\Python311\\Lib\\site-packages\\spacy\\__init__.py:51\u001b[0m, in \u001b[0;36mload\u001b[1;34m(name, vocab, disable, enable, exclude, config)\u001b[0m\n\u001b[0;32m 27\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mload\u001b[39m(\n\u001b[0;32m 28\u001b[0m name: Union[\u001b[38;5;28mstr\u001b[39m, Path],\n\u001b[0;32m 29\u001b[0m \u001b[38;5;241m*\u001b[39m,\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 34\u001b[0m config: Union[Dict[\u001b[38;5;28mstr\u001b[39m, Any], Config] \u001b[38;5;241m=\u001b[39m util\u001b[38;5;241m.\u001b[39mSimpleFrozenDict(),\n\u001b[0;32m 35\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m Language:\n\u001b[0;32m 36\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Load a spaCy model from an installed package or a local path.\u001b[39;00m\n\u001b[0;32m 37\u001b[0m \n\u001b[0;32m 38\u001b[0m \u001b[38;5;124;03m name (str): Package name or model path.\u001b[39;00m\n\u001b[1;32m (...)\u001b[0m\n\u001b[0;32m 49\u001b[0m \u001b[38;5;124;03m RETURNS (Language): The loaded nlp object.\u001b[39;00m\n\u001b[0;32m 50\u001b[0m \u001b[38;5;124;03m \"\"\"\u001b[39;00m\n\u001b[1;32m---> 51\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mutil\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mload_model\u001b[49m\u001b[43m(\u001b[49m\n\u001b[0;32m 52\u001b[0m \u001b[43m \u001b[49m\u001b[43mname\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 53\u001b[0m \u001b[43m \u001b[49m\u001b[43mvocab\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mvocab\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 54\u001b[0m \u001b[43m \u001b[49m\u001b[43mdisable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mdisable\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 55\u001b[0m \u001b[43m \u001b[49m\u001b[43menable\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43menable\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 56\u001b[0m \u001b[43m \u001b[49m\u001b[43mexclude\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mexclude\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 57\u001b[0m \u001b[43m \u001b[49m\u001b[43mconfig\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mconfig\u001b[49m\u001b[43m,\u001b[49m\n\u001b[0;32m 58\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[1;32mc:\\Python311\\Lib\\site-packages\\spacy\\util.py:472\u001b[0m, in \u001b[0;36mload_model\u001b[1;34m(name, vocab, disable, enable, exclude, config)\u001b[0m\n\u001b[0;32m 470\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m name \u001b[38;5;129;01min\u001b[39;00m OLD_MODEL_SHORTCUTS:\n\u001b[0;32m 471\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mIOError\u001b[39;00m(Errors\u001b[38;5;241m.\u001b[39mE941\u001b[38;5;241m.\u001b[39mformat(name\u001b[38;5;241m=\u001b[39mname, full\u001b[38;5;241m=\u001b[39mOLD_MODEL_SHORTCUTS[name])) \u001b[38;5;66;03m# type: ignore[index]\u001b[39;00m\n\u001b[1;32m--> 472\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mIOError\u001b[39;00m(Errors\u001b[38;5;241m.\u001b[39mE050\u001b[38;5;241m.\u001b[39mformat(name\u001b[38;5;241m=\u001b[39mname))\n",
+ "\u001b[1;31mOSError\u001b[0m: [E050] Can't find model 'https://mlabonne.github.io/blog/notes/Large%20Language%20Models/orca.html'. It doesn't seem to be a Python package or a valid path to a data directory."
+ ]
+ }
+ ],
+ "source": [
+ "import spacy\n",
+ "\n",
+ "# Load spaCy model\n",
+ "nlp = spacy.load(\"https://mlabonne.github.io/blog/notes/Large%20Language%20Models/orca.html\")\n",
+ "\n",
+ "def filter_short_instructions(dataset):\n",
+ "    filtered_dataset = []\n",
+ "    for instruction in dataset:\n",
+ "        doc = nlp(instruction)\n",
+ "        if len(doc) >= 100:\n",
+ "            filtered_dataset.append(instruction)\n",
+ "    return filtered_dataset\n",
+ "\n",
+ "# Replace 'your_dataset' with the actual variable containing the Orca dataset\n",
+ "your_dataset = [...]  # Load your dataset here\n",
+ "\n",
+ "filtered_dataset = filter_short_instructions(your_dataset)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 14,
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "ValueError",
+ "evalue": "empty vocabulary; perhaps the documents only contain stop words",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
+ "\u001b[1;31mValueError\u001b[0m Traceback (most recent call last)",
+ "Cell \u001b[1;32mIn[14], line 50\u001b[0m\n\u001b[0;32m 47\u001b[0m filtered_dataset \u001b[38;5;241m=\u001b[39m filter_short_instructions(orca_dataset)\n\u001b[0;32m 49\u001b[0m \u001b[38;5;66;03m# Step 3: Deduplicate dataset using cosine similarity\u001b[39;00m\n\u001b[1;32m---> 50\u001b[0m deduplicated_dataset \u001b[38;5;241m=\u001b[39m \u001b[43mdeduplicate_dataset\u001b[49m\u001b[43m(\u001b[49m\u001b[43mfiltered_dataset\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mthreshold\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.95\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m 52\u001b[0m \u001b[38;5;66;03m# Print the results or further process as needed\u001b[39;00m\n\u001b[0;32m 53\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mOriginal Dataset Length:\u001b[39m\u001b[38;5;124m\"\u001b[39m, \u001b[38;5;28mlen\u001b[39m(orca_dataset))\n",
+ "Cell \u001b[1;32mIn[14], line 23\u001b[0m, in \u001b[0;36mdeduplicate_dataset\u001b[1;34m(dataset, threshold)\u001b[0m\n\u001b[0;32m 21\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mdeduplicate_dataset\u001b[39m(dataset, threshold\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.95\u001b[39m):\n\u001b[0;32m 22\u001b[0m tfidf_vectorizer \u001b[38;5;241m=\u001b[39m TfidfVectorizer()\n\u001b[1;32m---> 23\u001b[0m tfidf_matrix \u001b[38;5;241m=\u001b[39m \u001b[43mtfidf_vectorizer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit_transform\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdataset\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 25\u001b[0m \u001b[38;5;66;03m# Calculate cosine similarity\u001b[39;00m\n\u001b[0;32m 26\u001b[0m cosine_sim \u001b[38;5;241m=\u001b[39m cosine_similarity(tfidf_matrix, tfidf_matrix)\n",
+ "File \u001b[1;32mc:\\Python311\\Lib\\site-packages\\sklearn\\feature_extraction\\text.py:2138\u001b[0m, in \u001b[0;36mTfidfVectorizer.fit_transform\u001b[1;34m(self, raw_documents, y)\u001b[0m\n\u001b[0;32m 2131\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_check_params()\n\u001b[0;32m 2132\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_tfidf \u001b[38;5;241m=\u001b[39m TfidfTransformer(\n\u001b[0;32m 2133\u001b[0m norm\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnorm,\n\u001b[0;32m 2134\u001b[0m use_idf\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39muse_idf,\n\u001b[0;32m 2135\u001b[0m smooth_idf\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msmooth_idf,\n\u001b[0;32m 2136\u001b[0m sublinear_tf\u001b[38;5;241m=\u001b[39m\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msublinear_tf,\n\u001b[0;32m 2137\u001b[0m )\n\u001b[1;32m-> 2138\u001b[0m X \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfit_transform\u001b[49m\u001b[43m(\u001b[49m\u001b[43mraw_documents\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 2139\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39m_tfidf\u001b[38;5;241m.\u001b[39mfit(X)\n\u001b[0;32m 2140\u001b[0m \u001b[38;5;66;03m# X is already a transformed view of raw_documents so\u001b[39;00m\n\u001b[0;32m 2141\u001b[0m \u001b[38;5;66;03m# we set copy to False\u001b[39;00m\n",
+ "File \u001b[1;32mc:\\Python311\\Lib\\site-packages\\sklearn\\base.py:1351\u001b[0m, in \u001b[0;36m_fit_context.<locals>.decorator.<locals>.wrapper\u001b[1;34m(estimator, *args, **kwargs)\u001b[0m\n\u001b[0;32m 1344\u001b[0m estimator\u001b[38;5;241m.\u001b[39m_validate_params()\n\u001b[0;32m 1346\u001b[0m \u001b[38;5;28;01mwith\u001b[39;00m config_context(\n\u001b[0;32m 1347\u001b[0m skip_parameter_validation\u001b[38;5;241m=\u001b[39m(\n\u001b[0;32m 1348\u001b[0m prefer_skip_nested_validation \u001b[38;5;129;01mor\u001b[39;00m global_skip_validation\n\u001b[0;32m 1349\u001b[0m )\n\u001b[0;32m 1350\u001b[0m ):\n\u001b[1;32m-> 1351\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfit_method\u001b[49m\u001b[43m(\u001b[49m\u001b[43mestimator\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+ "File \u001b[1;32mc:\\Python311\\Lib\\site-packages\\sklearn\\feature_extraction\\text.py:1389\u001b[0m, in \u001b[0;36mCountVectorizer.fit_transform\u001b[1;34m(self, raw_documents, y)\u001b[0m\n\u001b[0;32m 1381\u001b[0m warnings\u001b[38;5;241m.\u001b[39mwarn(\n\u001b[0;32m 1382\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mUpper case characters found in\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1383\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m vocabulary while \u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mlowercase\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1384\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m is True. These entries will not\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1385\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124m be matched with any documents\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1386\u001b[0m )\n\u001b[0;32m 1387\u001b[0m \u001b[38;5;28;01mbreak\u001b[39;00m\n\u001b[1;32m-> 1389\u001b[0m vocabulary, X \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43m_count_vocab\u001b[49m\u001b[43m(\u001b[49m\u001b[43mraw_documents\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfixed_vocabulary_\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m 1391\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mbinary:\n\u001b[0;32m 1392\u001b[0m X\u001b[38;5;241m.\u001b[39mdata\u001b[38;5;241m.\u001b[39mfill(\u001b[38;5;241m1\u001b[39m)\n",
+ "File \u001b[1;32mc:\\Python311\\Lib\\site-packages\\sklearn\\feature_extraction\\text.py:1295\u001b[0m, in \u001b[0;36mCountVectorizer._count_vocab\u001b[1;34m(self, raw_documents, fixed_vocab)\u001b[0m\n\u001b[0;32m 1293\u001b[0m vocabulary \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mdict\u001b[39m(vocabulary)\n\u001b[0;32m 1294\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m vocabulary:\n\u001b[1;32m-> 1295\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[0;32m 1296\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mempty vocabulary; perhaps the documents only contain stop words\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[0;32m 1297\u001b[0m )\n\u001b[0;32m 1299\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m indptr[\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m] \u001b[38;5;241m>\u001b[39m np\u001b[38;5;241m.\u001b[39miinfo(np\u001b[38;5;241m.\u001b[39mint32)\u001b[38;5;241m.\u001b[39mmax: \u001b[38;5;66;03m# = 2**31 - 1\u001b[39;00m\n\u001b[0;32m 1300\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m _IS_32BIT:\n",
+ "\u001b[1;31mValueError\u001b[0m: empty vocabulary; perhaps the documents only contain stop words"
+ ]
+ }
+ ],
+ "source": [
+ "import requests\n",
+ "from bs4 import BeautifulSoup\n",
+ "import spacy\n",
+ "from sklearn.feature_extraction.text import TfidfVectorizer\n",
+ "from sklearn.metrics.pairwise import cosine_similarity\n",
+ "\n",
+ "# Function to fetch the Orca dataset from the given URL\n",
+ "def fetch_orca_dataset(orca_url):\n",
+ "    response = requests.get(orca_url)\n",
+ "    soup = BeautifulSoup(response.text, 'html.parser')\n",
+ "    dataset = [p.text.strip() for p in soup.find_all('p')]  # Assuming paragraphs contain the instructions\n",
+ "    return dataset\n",
+ "\n",
+ "# Function to filter short instructions (less than 100 tokens)\n",
+ "def filter_short_instructions(dataset):\n",
+ "    nlp = spacy.load(\"en_core_web_sm\")\n",
+ "    filtered_dataset = [instruction for instruction in dataset if len(nlp(instruction)) >= 100]\n",
+ "    return filtered_dataset\n",
+ "\n",
+ "# Function to deduplicate dataset using cosine similarity\n",
+ "def deduplicate_dataset(dataset, threshold=0.95):\n",
+ "    tfidf_vectorizer = TfidfVectorizer()\n",
+ "    tfidf_matrix = tfidf_vectorizer.fit_transform(dataset)\n",
+ "\n",
+ "    # Calculate cosine similarity\n",
+ "    cosine_sim = cosine_similarity(tfidf_matrix, tfidf_matrix)\n",
+ "\n",
+ "    # Identify duplicate indices\n",
+ "    duplicates = set()\n",
+ "    for i in range(len(cosine_sim)):\n",
+ "        for j in range(i+1, len(cosine_sim)):\n",
+ "            if cosine_sim[i, j] > threshold:\n",
+ "                duplicates.add(j)\n",
+ "\n",
+ "    # Remove duplicate instructions\n",
+ "    deduplicated_dataset = [instruction for i, instruction in enumerate(dataset) if i not in duplicates]\n",
+ "\n",
+ "    return deduplicated_dataset\n",
+ "\n",
+ "# Replace 'orca_url' with the actual URL containing the Orca dataset\n",
+ "orca_url = 'https://mlabonne.github.io/blog/notes/Large%20Language%20Models/orca.html'\n",
+ "\n",
+ "# Step 1: Fetch Orca dataset\n",
+ "orca_dataset = fetch_orca_dataset(orca_url)\n",
+ "\n",
+ "# Step 2: Filter short instructions\n",
+ "filtered_dataset = filter_short_instructions(orca_dataset)\n",
+ "\n",
+ "# Step 3: Deduplicate dataset using cosine similarity\n",
+ "deduplicated_dataset = deduplicate_dataset(filtered_dataset, threshold=0.95)\n",
+ "\n",
+ "# Print the results or further process as needed\n",
+ "print(\"Original Dataset Length:\", len(orca_dataset))\n",
+ "print(\"Filtered Dataset Length:\", len(filtered_dataset))\n",
+ "print(\"Deduplicated Dataset Length:\", len(deduplicated_dataset))\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.11.0"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
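
A few notes on the errors recorded in this notebook. The pip failure in the first cell ([WinError 2] while renaming spacy.exe to spacy.exe.deleteme) is a Windows file-locking symptom: pip could not replace the spacy.exe launcher, typically because a running process or an antivirus scan held it open; re-running the install from a fresh shell usually clears it.

The OSError in the second cell comes from passing a web URL to spacy.load(), which accepts only an installed pipeline package name or a local path. A minimal corrected sketch of that cell, assuming the en_core_web_sm model downloaded by the first cell and a hypothetical your_dataset list of instruction strings:

    import spacy

    # spacy.load() takes an installed pipeline name or a local path, never a URL
    nlp = spacy.load("en_core_web_sm")

    def filter_short_instructions(dataset):
        # keep only instructions that are at least 100 spaCy tokens long
        return [text for text in dataset if len(nlp(text)) >= 100]

    # hypothetical placeholder: replace with the real Orca instruction strings
    your_dataset = ["example instruction " * 60]
    filtered_dataset = filter_short_instructions(your_dataset)

The ValueError in the third cell ("empty vocabulary") is what scikit-learn's TfidfVectorizer raises when the corpus it is fitted on is empty or reduces to nothing after tokenization; here the scraped blog page evidently yielded no paragraphs that survived the 100-token filter. A defensive sketch of the deduplication step, assuming dataset is a possibly-empty list of strings:

    from sklearn.feature_extraction.text import TfidfVectorizer
    from sklearn.metrics.pairwise import cosine_similarity

    def deduplicate_dataset(dataset, threshold=0.95):
        # guard first: TfidfVectorizer raises "empty vocabulary" on an empty corpus
        if not dataset:
            return []
        tfidf_matrix = TfidfVectorizer().fit_transform(dataset)
        cosine_sim = cosine_similarity(tfidf_matrix)
        # drop every later instruction that is near-identical to an earlier one
        duplicates = {j
                      for i in range(len(dataset))
                      for j in range(i + 1, len(dataset))
                      if cosine_sim[i, j] > threshold}
        return [text for i, text in enumerate(dataset) if i not in duplicates]
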
orca dataset (2).pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:fc5b0e2e069ae56183b94fd02f437ff33893a1698c37238d7e1871979180953d
+ size 1053849
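
These three lines are the Git LFS pointer committed in place of the PDF itself; the roughly 1 MB file lives in LFS storage and is addressed by the SHA-256 digest of its contents. A minimal sketch of how that oid is derived, assuming the PDF is available locally:

    import hashlib

    # the LFS oid is simply the SHA-256 of the file's raw bytes
    with open("orca dataset (2).pdf", "rb") as f:
        digest = hashlib.sha256(f.read()).hexdigest()

    print(f"oid sha256:{digest}")  # should reproduce the oid in the pointer above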