Upload folder using huggingface_hub
- .craft/experiment_config.ini +2 -0
- .craft/measure.py +16 -0
- AGENTS.md +0 -0
- Taskfile.yml +9 -1
- guidetour/.ipynb_checkpoints/Untitled-checkpoint.ipynb +79 -0
- guidetour/.ipynb_checkpoints/testspider-checkpoint.py +19 -0
- guidetour/Untitled.ipynb +79 -0
- guidetour/testspider.py +19 -0
- procedures/crawl_and_scrap_all_polish_law/Taskfile.yml +0 -0
- procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/__init__.py +0 -0
- procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/items.py +12 -0
- procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/middlewares.py +100 -0
- procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/pipelines.py +13 -0
- procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/settings.py +87 -0
- procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/spiders/__init__.py +4 -0
- procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/spiders/kontnikLinkowaty.py +23 -0
- procedures/crawl_and_scrap_all_polish_law/scrapy.cfg +11 -0
- pylock.toml +0 -0
- pyproject.toml +5 -0
- reqirements.txt +0 -0
- resources/links_from_dotgov_sites/.omnis.toml +0 -0
- resources/markdowned_documents/.omnis.toml +0 -0
- resources/orginal_data_checksum_bank/.omnis.toml +0 -0
- uv.lock +0 -0
.craft/experiment_config.ini
ADDED
@@ -0,0 +1,2 @@
+[now_ptr]
+UNIXUSAT = 1765151507283
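For orientation: UNIXUSAT looks like a JavaScript Date.now() value, i.e. Unix epoch milliseconds (the guidetour notebook below produces timestamps exactly that way). A minimal sanity check in Python, assuming that interpretation, would be:

    # Convert the stored millisecond timestamp to a UTC datetime.
    from datetime import datetime, timezone

    UNIXUSAT = 1765151507283  # value from experiment_config.ini
    print(datetime.fromtimestamp(UNIXUSAT / 1000, tz=timezone.utc))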
.craft/measure.py
ADDED
@@ -0,0 +1,16 @@
+import configparser
+import pathlib
+
+config = configparser.ConfigParser()
+config.read(
+    pathlib.Path(__file__).parent / 'experiment_config.ini'
+)
+print(
+    # for future ACK: this int is not bound by the same limits as a classic C integer
+    int(
+        config.get(
+            'now_ptr',
+            'UNIXUSAT',
+        )
+    )
+)
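Run from the repository root, the script should simply print the stored timestamp, e.g.:

    uv run python .craft/measure.py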
AGENTS.md
ADDED
File without changes
Taskfile.yml
CHANGED
@@ -6,7 +6,15 @@ vars:
   GREETING: Hello, World!
 
 tasks:
-
+  sexyslap: # experiment with time measurement; for when someone notices that this namespace is silly: my now ptr is UNIXUSAT=1765151507283
     cmds:
+      - uv sync
+      - uv lock
+      - uv export --format requirements-txt > ./reqirements.txt
+      - uv export --format pylock.toml > ./pylock.toml
       - uv run python ./procedures/push-that-juicy-alluring-datasets-to-huggingface-cloud.py
+  default:
+    cmds:
+      - uv run python -m notebook
+      # - uv run python ./procedures/push-that-juicy-alluring-datasets-to-huggingface-cloud.py
     silent: true
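Assuming the go-task runner is installed, the two tasks above would be invoked like so:

    task sexyslap   # re-pin the lockfile exports, then push datasets to the Hub
    task            # default task: launch Jupyter Notebook via uv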
guidetour/.ipynb_checkpoints/Untitled-checkpoint.ipynb
ADDED
@@ -0,0 +1,79 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "0a1873d1-ae68-4bf1-80c0-49ef21008c3b",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<Popen: returncode: 0 args: ['node', '-e', '\"console.log(Date.now())\"']>"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import subprocess\n",
+    "\n",
+    "x = subprocess.Popen(['node', '-e', '\"console.log(Date.now())\"'])\n",
+    "x.wait()\n",
+    "x"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "746d146b-df64-4f5f-ad81-57a0164e1e07",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<Popen: returncode: None args: ['node', '-e', '\"console.log(Date.now())\"']>"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "x."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9095532d-84d6-464d-8978-3b161ac90ee4",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
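One detail worth noting in the first cell: because Popen receives an argv list (no shell), the embedded double quotes make node evaluate the string literal "console.log(Date.now())" rather than the call itself, so nothing is printed even though the exit code is 0. A Python-native sketch that avoids shelling out to node entirely:

    # Unix epoch milliseconds, the same unit Date.now() returns.
    import time

    print(int(time.time() * 1000))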
guidetour/.ipynb_checkpoints/testspider-checkpoint.py
ADDED
@@ -0,0 +1,19 @@
+import scrapy
+
+
+class QuotesSpider(scrapy.Spider):
+    name = "quotes"
+    start_urls = [
+        "https://quotes.toscrape.com/tag/humor/",
+    ]
+
+    def parse(self, response):
+        for quote in response.css("div.quote"):
+            yield {
+                "author": quote.xpath("span/small/text()").get(),
+                "text": quote.css("span.text::text").get(),
+            }
+
+        next_page = response.css('li.next a::attr("href")').get()
+        if next_page is not None:
+            yield response.follow(next_page, self.parse)
guidetour/Untitled.ipynb
ADDED
@@ -0,0 +1,79 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": 11,
+   "id": "0a1873d1-ae68-4bf1-80c0-49ef21008c3b",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<Popen: returncode: 0 args: ['node', '-e', '\"process.exit(Date.now())\"']>"
+      ]
+     },
+     "execution_count": 11,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "import subprocess\n",
+    "\n",
+    "x = subprocess.Popen(['node', '-e', ''])\n",
+    "x.wait()\n",
+    "x"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 6,
+   "id": "746d146b-df64-4f5f-ad81-57a0164e1e07",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<Popen: returncode: None args: ['node', '-e', '\"console.log(Date.now())\"']>"
+      ]
+     },
+     "execution_count": 6,
+     "metadata": {},
+     "output_type": "execute_result"
+    }
+   ],
+   "source": [
+    "x."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "9095532d-84d6-464d-8978-3b161ac90ee4",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.12"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
guidetour/testspider.py
ADDED
@@ -0,0 +1,19 @@
+import scrapy
+
+
+class QuotesSpider(scrapy.Spider):
+    name = "quotes"
+    start_urls = [
+        "https://quotes.toscrape.com/tag/humor/",
+    ]
+
+    def parse(self, response):
+        for quote in response.css("div.quote"):
+            yield {
+                "author": quote.xpath("span/small/text()").get(),
+                "text": quote.css("span.text::text").get(),
+            }
+
+        next_page = response.css('li.next a::attr("href")').get()
+        if next_page is not None:
+            yield response.follow(next_page, self.parse)
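The quotes spider is self-contained, so it can be run without a project, e.g. (output filename is illustrative):

    scrapy runspider guidetour/testspider.py -o quotes.json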
procedures/crawl_and_scrap_all_polish_law/Taskfile.yml
ADDED
File without changes
procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/__init__.py
ADDED
File without changes
procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/items.py
ADDED
@@ -0,0 +1,12 @@
+# Define here the models for your scraped items
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/items.html
+
+import scrapy
+
+
+class CrawlAndScrapAllPolishLawItem(scrapy.Item):
+    # define the fields for your item here like:
+    # name = scrapy.Field()
+    pass
procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/middlewares.py
ADDED
@@ -0,0 +1,100 @@
+# Define here the models for your spider middleware
+#
+# See documentation in:
+# https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+from scrapy import signals
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+
+
+class CrawlAndScrapAllPolishLawSpiderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the spider middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_spider_input(self, response, spider):
+        # Called for each response that goes through the spider
+        # middleware and into the spider.
+
+        # Should return None or raise an exception.
+        return None
+
+    def process_spider_output(self, response, result, spider):
+        # Called with the results returned from the Spider, after
+        # it has processed the response.
+
+        # Must return an iterable of Request, or item objects.
+        for i in result:
+            yield i
+
+    def process_spider_exception(self, response, exception, spider):
+        # Called when a spider or process_spider_input() method
+        # (from other spider middleware) raises an exception.
+
+        # Should return either None or an iterable of Request or item objects.
+        pass
+
+    async def process_start(self, start):
+        # Called with an async iterator over the spider start() method or the
+        # matching method of an earlier spider middleware.
+        async for item_or_request in start:
+            yield item_or_request
+
+    def spider_opened(self, spider):
+        spider.logger.info("Spider opened: %s" % spider.name)
+
+
+class CrawlAndScrapAllPolishLawDownloaderMiddleware:
+    # Not all methods need to be defined. If a method is not defined,
+    # scrapy acts as if the downloader middleware does not modify the
+    # passed objects.
+
+    @classmethod
+    def from_crawler(cls, crawler):
+        # This method is used by Scrapy to create your spiders.
+        s = cls()
+        crawler.signals.connect(s.spider_opened, signal=signals.spider_opened)
+        return s
+
+    def process_request(self, request, spider):
+        # Called for each request that goes through the downloader
+        # middleware.
+
+        # Must either:
+        # - return None: continue processing this request
+        # - or return a Response object
+        # - or return a Request object
+        # - or raise IgnoreRequest: process_exception() methods of
+        #   installed downloader middleware will be called
+        return None
+
+    def process_response(self, request, response, spider):
+        # Called with the response returned from the downloader.
+
+        # Must either:
+        # - return a Response object
+        # - return a Request object
+        # - or raise IgnoreRequest
+        return response
+
+    def process_exception(self, request, exception, spider):
+        # Called when a download handler or a process_request()
+        # (from other downloader middleware) raises an exception.
+
+        # Must either:
+        # - return None: continue processing this exception
+        # - return a Response object: stops process_exception() chain
+        # - return a Request object: stops process_exception() chain
+        pass
+
+    def spider_opened(self, spider):
+        spider.logger.info("Spider opened: %s" % spider.name)
procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/pipelines.py
ADDED
@@ -0,0 +1,13 @@
+# Define your item pipelines here
+#
+# Don't forget to add your pipeline to the ITEM_PIPELINES setting
+# See: https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+
+
+# useful for handling different item types with a single interface
+from itemadapter import ItemAdapter
+
+
+class CrawlAndScrapAllPolishLawPipeline:
+    def process_item(self, item, spider):
+        return item
procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/settings.py
ADDED
@@ -0,0 +1,87 @@
+# Scrapy settings for crawl_and_scrap_all_polish_law project
+#
+# For simplicity, this file contains only settings considered important or
+# commonly used. You can find more settings consulting the documentation:
+#
+#     https://docs.scrapy.org/en/latest/topics/settings.html
+#     https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#     https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+
+BOT_NAME = "crawl_and_scrap_all_polish_law"
+
+SPIDER_MODULES = ["crawl_and_scrap_all_polish_law.spiders"]
+NEWSPIDER_MODULE = "crawl_and_scrap_all_polish_law.spiders"
+
+ADDONS = {}
+
+
+# Crawl responsibly by identifying yourself (and your website) on the user-agent
+#USER_AGENT = "crawl_and_scrap_all_polish_law (+http://www.yourdomain.com)"
+
+# Obey robots.txt rules
+ROBOTSTXT_OBEY = True
+
+# Concurrency and throttling settings
+#CONCURRENT_REQUESTS = 16
+CONCURRENT_REQUESTS_PER_DOMAIN = 1
+DOWNLOAD_DELAY = 1
+
+# Disable cookies (enabled by default)
+#COOKIES_ENABLED = False
+
+# Disable Telnet Console (enabled by default)
+#TELNETCONSOLE_ENABLED = False
+
+# Override the default request headers:
+#DEFAULT_REQUEST_HEADERS = {
+#    "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8",
+#    "Accept-Language": "en",
+#}
+
+# Enable or disable spider middlewares
+# See https://docs.scrapy.org/en/latest/topics/spider-middleware.html
+#SPIDER_MIDDLEWARES = {
+#    "crawl_and_scrap_all_polish_law.middlewares.CrawlAndScrapAllPolishLawSpiderMiddleware": 543,
+#}
+
+# Enable or disable downloader middlewares
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html
+#DOWNLOADER_MIDDLEWARES = {
+#    "crawl_and_scrap_all_polish_law.middlewares.CrawlAndScrapAllPolishLawDownloaderMiddleware": 543,
+#}
+
+# Enable or disable extensions
+# See https://docs.scrapy.org/en/latest/topics/extensions.html
+#EXTENSIONS = {
+#    "scrapy.extensions.telnet.TelnetConsole": None,
+#}
+
+# Configure item pipelines
+# See https://docs.scrapy.org/en/latest/topics/item-pipeline.html
+#ITEM_PIPELINES = {
+#    "crawl_and_scrap_all_polish_law.pipelines.CrawlAndScrapAllPolishLawPipeline": 300,
+#}
+
+# Enable and configure the AutoThrottle extension (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/autothrottle.html
+#AUTOTHROTTLE_ENABLED = True
+# The initial download delay
+#AUTOTHROTTLE_START_DELAY = 5
+# The maximum download delay to be set in case of high latencies
+#AUTOTHROTTLE_MAX_DELAY = 60
+# The average number of requests Scrapy should be sending in parallel to
+# each remote server
+#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0
+# Enable showing throttling stats for every response received:
+#AUTOTHROTTLE_DEBUG = False
+
+# Enable and configure HTTP caching (disabled by default)
+# See https://docs.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings
+#HTTPCACHE_ENABLED = True
+#HTTPCACHE_EXPIRATION_SECS = 0
+#HTTPCACHE_DIR = "httpcache"
+#HTTPCACHE_IGNORE_HTTP_CODES = []
+#HTTPCACHE_STORAGE = "scrapy.extensions.httpcache.FilesystemCacheStorage"
+
+# Set settings whose default value is deprecated to a future-proof value
+FEED_EXPORT_ENCODING = "utf-8"
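The two uncommented throttling settings pin the crawl to one request per domain with a one-second delay, a deliberately polite rate for scraping government sites. If server latency varies a lot, the AutoThrottle block commented out above is the usual alternative; a sketch (the numbers here are illustrative, not from the commit):

    # Let Scrapy adapt the delay to observed latency instead of a fixed 1 s.
    AUTOTHROTTLE_ENABLED = True
    AUTOTHROTTLE_START_DELAY = 1
    AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0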
procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/spiders/__init__.py
ADDED
@@ -0,0 +1,4 @@
+# This package will contain the spiders of your Scrapy project
+#
+# Please refer to the documentation for information on how to create and manage
+# your spiders.
procedures/crawl_and_scrap_all_polish_law/crawl_and_scrap_all_polish_law/spiders/kontnikLinkowaty.py
ADDED
@@ -0,0 +1,23 @@
+import scrapy
+
+
+class GrabAllLinksFromGovSites(scrapy.Spider):
+    name = "books"
+    allowed_domains = ["toscrape.com"]
+    url: str = "https://books.toscrape.com/catalogue/category/books/fantasy_19/index.html"
+
+    async def start(self):
+        yield scrapy.Request(self.url, callback=self.parse_listpage)
+
+    async def parse_listpage(self, response):
+        product_urls = response.css("article.product_pod h3 a::attr(href)").getall()
+        for url in product_urls:
+            yield response.follow(url, callback=self.parse_book)
+
+        next_page_url = response.css("li.next a::attr(href)").get()
+        if next_page_url:
+            yield response.follow(next_page_url, callback=self.parse_listpage)
+
+    async def parse_book(self, response):
+        yield {
+            "name": response.css("h1::text").get(),
+            "price": response.css("p.price_color::text").get(),
+            "url": response.url
+        }
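Note that the committed file begins at the class definition, so the import scrapy line above was added to make the spider loadable. Despite the gov-sites class name, it still targets the books.toscrape.com sandbox. From the project directory it would be launched by its registered name (output filename is illustrative):

    cd procedures/crawl_and_scrap_all_polish_law
    scrapy crawl books -o books.json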
procedures/crawl_and_scrap_all_polish_law/scrapy.cfg
ADDED
@@ -0,0 +1,11 @@
+# Automatically created by: scrapy startproject
+#
+# For more information about the [deploy] section see:
+# https://scrapyd.readthedocs.io/en/latest/deploy.html
+
+[settings]
+default = crawl_and_scrap_all_polish_law.settings
+
+[deploy]
+#url = http://localhost:6800/
+project = crawl_and_scrap_all_polish_law
pylock.toml
ADDED
The diff for this file is too large to render.
pyproject.toml
CHANGED
@@ -5,5 +5,10 @@ description = "Add your description here"
 readme = "README.md"
 requires-python = ">=3.12"
 dependencies = [
+    "chromadb>=1.3.5",
     "huggingface-hub>=1.2.1",
+    "notebook>=7.5.0",
+    "pytoml>=0.1.21",
+    "pyyaml>=6.0.3",
+    "scrapy>=2.13.4",
 ]
reqirements.txt
ADDED
The diff for this file is too large to render.
resources/links_from_dotgov_sites/.omnis.toml
ADDED
File without changes
resources/markdowned_documents/.omnis.toml
ADDED
File without changes
resources/orginal_data_checksum_bank/.omnis.toml
ADDED
File without changes
uv.lock
CHANGED
The diff for this file is too large to render.