repo_name stringclasses 400 values | branch_name stringclasses 4 values | file_content stringlengths 16 72.5k | language stringclasses 1 value | num_lines int64 1 1.66k | avg_line_length float64 6 85 | max_line_length int64 9 949 | path stringlengths 5 103 | alphanum_fraction float64 0.29 0.89 | alpha_fraction float64 0.27 0.89 |
|---|---|---|---|---|---|---|---|---|---|
mrpal39/ev_code | refs/heads/master | from datetime import datetime
from scrapy.spiders import SitemapSpider
class FilteredSitemapSpider(SitemapSpider):
name = 'filtered_sitemap_spider'
allowed_domains = ['example.com']
sitemap_urls = ['http://example.com/sitemap.xml']
def sitemap_filter(self, entries):
for entry in entries:
date_time = datetime.strptime(entry['lastmod'], '%Y-%m-%d')
if date_time.year >= 2005:
yield entry | Python | 13 | 33.923077 | 71 | /scrap/tutorial/scrap/spiders/SitemapSpider.py | 0.653422 | 0.644592 |
mrpal39/ev_code | refs/heads/master | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.8/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import include, url
from django.contrib import admin
from mysite.views import custom_login, custom_register
from django.contrib.auth.views import logout
import scrapyproject.urls as projecturls
# URL routes: admin site, custom auth views, and the scrapyproject app.
urlpatterns = [
    url(r'^admin/', include(admin.site.urls)),
    url(r'^accounts/login/$', custom_login, name='login'),
    url(r'^accounts/register/$', custom_register, name='registration_register'),
    # After logout, redirect to the project list page.
    url(r'^accounts/logout/$', logout, {'next_page': '/project'}, name='logout'),
    url(r'^project/', include(projecturls)),
]
| Python | 27 | 40.037037 | 81 | /Web-UI/mysite/urls.py | 0.704874 | 0.698556 |
mrpal39/ev_code | refs/heads/master | from django.contrib.sitemaps import Sitemap
from . models import Post
class PostSitemap(Sitemap):
    """Sitemap entry source for published blog posts.

    Subclasses django.contrib.sitemaps.Sitemap; `changefreq` and
    `priority` hint crawlers that post pages change weekly and are
    highly relevant within the site (the maximum priority is 1.0).
    """

    changefreq = 'weekly'
    priority = 0.9

    def items(self):
        """Return the queryset of posts included in the sitemap."""
        return Post.published.all()

    def lastmod(self, obj):
        """Return the last-modified timestamp for one post."""
        return obj.updated
| Python | 17 | 29.235294 | 103 | /awssam/fullfeblog/blog/sitemaps.py | 0.699809 | 0.694073 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
import re
import json
import scrapy
import copy
from articles.items import PmArticlesItem
from articles.utils.common import date_convert
class PmSpiderSpider(scrapy.Spider):
    """Spider for the woshipm.com article stream API (pages 1-9)."""

    name = 'pm_spider'
    allowed_domains = ['woshipm.com']
    base_url = 'http://www.woshipm.com/__api/v1/stream-list/page/{}'

    def start_requests(self):
        """Yield one request per paginated API endpoint."""
        for page_no in range(1, 10):
            yield scrapy.Request(url=self.base_url.format(page_no),
                                 callback=self.parse)

    def parse(self, response):
        """Parse the JSON stream list and follow each article's detail page."""
        item = PmArticlesItem()
        payload_doc = json.loads(response.text)
        if not payload_doc:
            return
        for entry in payload_doc.get('payload'):
            item["title"] = entry.get("title", '')
            item["create_date"] = date_convert(entry.get("date", ''))
            item["url"] = entry.get("permalink", '')
            item["view"] = entry.get("view", '')
            # The category field carries HTML; extract the tag text from it.
            item["tag"] = re.search(r'tag">(.*?)<', entry.get("category", '')).group(1)
            item["url_id"] = entry.get('id', '')
            # Deep-copy the meta payload so every detail request carries its
            # own snapshot of the (reused) item.
            yield scrapy.Request(url=item["url"], callback=self.parse_detail,
                                 meta=copy.deepcopy({'item': item}))

    def parse_detail(self, response):
        """Fill in the article body text and emit the finished item."""
        item = response.meta['item']
        fragments = response.xpath("//div[@class='grap']//text()").re(r'\S+')
        item["content"] = ''.join(fragments)
        yield item
| Python | 44 | 37.795456 | 117 | /eswork/articles/articles/spiders/pm_spider.py | 0.546838 | 0.542155 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
import scrapy
from properties.items import PropertiesItem
class BasicSpider(scrapy.Spider):
    """Basic property spider: loads a PropertiesItem from a listing page.

    Bug fixes versus the original:
    - the loader ``l`` was never created (NameError on every parse);
    - Python-2-only names ``unicode`` and ``urlparse`` replaced with
      ``str`` and ``urllib.parse.urljoin``;
    - dead commented-out variants removed.
    """

    name = 'basic'
    allowed_domains = ['web']
    start_urls = (
        'https://djangopackages.org',
    )

    def parse(self, response):
        """Populate a PropertiesItem from the page via an ItemLoader."""
        # Local imports keep this fix self-contained within the block.
        from urllib.parse import urljoin
        from scrapy.loader import ItemLoader
        from scrapy.loader.processors import MapCompose, Join

        l = ItemLoader(item=PropertiesItem(), response=response)
        l.add_xpath('title', '//*[@itemprop="name"][1]/text()',
                    MapCompose(str.strip, str.title))
        # Strip thousands separators before converting the price to float.
        l.add_xpath('price', './/*[@itemprop="price"][1]/text()',
                    MapCompose(lambda i: i.replace(',', ''), float),
                    re='[,.0-9]+')
        l.add_xpath('description', '//*[@itemprop="description"]'
                    '[1]/text()', MapCompose(str.strip), Join())
        l.add_xpath('address',
                    '//*[@itemtype="http://schema.org/Place"][1]/text()',
                    MapCompose(str.strip))
        # Resolve relative image URLs against the page URL.
        l.add_xpath('image_urls', '//*[@itemprop="image"][1]/@src',
                    MapCompose(lambda i: urljoin(response.url, i)))
        return l.load_item()
| Python | 71 | 39.985916 | 100 | /scrap/properties/properties/spiders/basic.py | 0.5029 | 0.489253 |
mrpal39/ev_code | refs/heads/master | # from core.models import Item
from django.shortcuts import render
# from django.views.generic import ListView,DetailView
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Post
from django.views.generic import (
ListView,
DetailView,
# CreateView,
# UpdateView,
# DeleteView
)
from django.core.mail import send_mail
from .forms import EmailPostForm
from core.models import Comment
from .forms import EmailPostForm, CommentForm , SearchForm
from taggit.models import Tag
from django.db.models import Count
from django.contrib.postgres.search import SearchVector #Building a search view veter
def post_search(request):
    """Full-text search over published posts (title and body)."""
    form = SearchForm()
    query = None
    results = []
    if 'query' in request.GET:
        form = SearchForm(request.GET)
        if form.is_valid():
            query = form.cleaned_data['query']
            # Postgres full-text search across both fields.
            results = Post.published.annotate(
                search=SearchVector('title', 'body'),
            ).filter(search=query)
    context = {
        'form': form,
        'query': query,
        'results': results,
    }
    return render(request, 'search.html', context)
def post_share(request, post_id):
    """Share a published post by email using EmailPostForm."""
    post = get_object_or_404(Post, id=post_id, status='published')
    sent = False
    if request.method != 'POST':
        form = EmailPostForm()
    else:
        # Form was submitted; validate and send the recommendation email.
        form = EmailPostForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            post_url = request.build_absolute_uri(post.get_absolute_url())
            subject = f"{cd['name']} recommends you read "f"{post.title}"
            message = f"Read {post.title} at {post_url}\n\n" f"{cd['name']}\'s comments: {cd['comments']}"
            send_mail(subject, message, 'rp9545416@gmail.com', [cd['to']])
            sent = True
    return render(request, 'share.html',
                  {'post': post, 'form': form, 'sent': sent})
class PostDetailView(DetailView):
    # Generic detail view for one Post; uses Django's default template
    # name and context conventions.
    model = Post
class PostListView(ListView):
    # Class-based counterpart of post_list: published posts, 2 per page,
    # exposed to the template as `posts`.
    queryset=Post.published.all()
    context_object_name='posts'
    paginate_by=2
    template_name='list.html'
def post_list(request, tag_slug=None):
    """List published posts, optionally filtered by tag, two per page."""
    object_list = Post.published.all()
    tag = None
    if tag_slug:
        tag = get_object_or_404(Tag, slug=tag_slug)
        object_list = object_list.filter(tags__in=[tag])

    paginator = Paginator(object_list, 2)  # 2 posts per page
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        posts = paginator.page(1)  # non-integer page -> first page
    except EmptyPage:
        # Page number beyond the range -> deliver the last page.
        posts = paginator.page(paginator.num_pages)

    context = {'posts': posts, 'page': page, 'tag': tag}
    return render(request, 'list.html', context)
def post_detail(request, year, month, day, post):
    """Display one published post with its active comments, a comment
    form, and up to four similar posts ranked by shared tags.

    Bug fixes: the original called ``comment_form.save(comment=False)``
    (a TypeError; the ModelForm keyword is ``commit``) and evaluated
    ``new_comment.post`` without assigning it, so the comment was never
    linked to the post.
    """
    post = get_object_or_404(Post, slug=post,
                             status='published',
                             publish__year=year,
                             publish__month=month,
                             publish__day=day)
    comments = post.comments.filter(active=True)
    new_comment = None
    # Similar posts: share at least one tag, exclude this post, rank by
    # tag-overlap count then recency.
    post_tags_ids = post.tags.values_list('id', flat=True)
    similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
    similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
    if request.method == 'POST':
        # A comment was submitted.
        comment_form = CommentForm(data=request.POST)
        if comment_form.is_valid():
            # Build the Comment without saving, attach it to this post,
            # then persist it.
            new_comment = comment_form.save(commit=False)
            new_comment.post = post
            new_comment.save()
    else:
        comment_form = CommentForm()
    return render(request,
                  'blog/post_detail.html',
                  {'post': post,
                   'comments': comments,
                   'new_comment': new_comment,
                   'comment_form': comment_form,
                   'similar_posts': similar_posts})
def home(request):
    # Landing page; renders the shared base template directly.
    return render(request, 'base.html')
def about(request):
    # Static "about" page.
    return render(request, 'about.html')
# def product(request):
# return render (request ,'product.html' )
# class ItemdDetailView(DetailView):
# model=Item
# template_name="product.html"
# def checkout(request):
# return render (request ,'checkout.html')
| Python | 164 | 28.323172 | 103 | /awssam/fullfeblog/blog/views.py | 0.590559 | 0.587024 |
mrpal39/ev_code | refs/heads/master | import scrapy
class PySpider(scrapy.Spider):
name = 'quots'
# start_urls = [
def start_requests(self):
urls=['https://pypi.org/']
for url in urls:
yield scrapy.Request(url=url, callback=self.parse)
# return super().start_requests()()
def parse(self, response):
    """Save the response body to ``pyp-<host>.html`` and log the filename.

    Bug fixes: ``split("/")[-0]`` is just ``[0]``, which selected the URL
    scheme ('https:') instead of the host segment; the log message also
    omitted the filename, and an unused xpath call was removed.
    """
    # 'https://pypi.org/'.split('/') -> ['https:', '', 'pypi.org', ''];
    # index -2 is the host segment.
    page = response.url.split("/")[-2]
    filename = f'pyp-{page}.html'
    with open(filename, 'wb') as f:
        f.write(response.body)
    self.log(f'saved file {filename}')
# return super().parse(response) | Python | 30 | 20 | 65 | /scrap/tutorial/scrap/spiders/spider.py | 0.562401 | 0.559242 |
mrpal39/ev_code | refs/heads/master |
from django.shortcuts import render, get_object_or_404
from django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin
from django.contrib.auth.models import User
from django.views.generic import (
ListView,
DetailView,
CreateView,
UpdateView,
DeleteView
)
from .models import Post, Products,MyModel,feeds
def home(request):
context={
'posts':Post.objects.all()
}
return render (request,'blog/home.html',context)
class PostListView(ListView):
model = Post
template_name ='blog/home.html' # <app>/<model>_<viewtype>.html
context_object_name ='posts'
ordering = ['-date_posted']
paginate_by = 5
class UserPostListView(ListView):
model = Post
template_name = 'blog/user_posts.html' # <app>/<model>_<viewtype>.html
context_object_name = 'posts'
paginate_by = 5
def get_queryset(self):
user = get_object_or_404(User, username=self.kwargs.get('username'))
return Post.objects.filter(author=user).order_by('-date_posted')
class PostDetailView(DetailView):
model=Post
template_name = 'blog/post_detail.html'
class PostCreateView(LoginRequiredMixin, CreateView):
    # Authenticated users create posts; the author is stamped from the
    # request user in form_valid.
    model = Post
    fields = ['title', 'content','description']
    template_name = 'blog/post_form.html' # <app>/<model>_<viewtype>.html

    def form_valid(self, form):
        # Set the logged-in user as the post author before saving.
        form.instance.author = self.request.user
        return super().form_valid(form)
class PostUpdateView(LoginRequiredMixin,UserPassesTestMixin,UpdateView):
model=Post
fields=['title','content','description']
template_name='blog/post_form.html'
def form_valid(self, form):
form.instance.author=self.request.user
return super().form_valid(form)
def test_func(self):
post =self.get_object()
if self.request.user==post.author:
return True
return False
class PostDeleteView(LoginRequiredMixin,UserPassesTestMixin,DeleteView):
model=Post
success_url='/'
template_name = 'blog/post_confirm_delete.html'
def test_func(self):
post =self.get_object()
if self.request.user==post.author:
return True
return False
def index(request):
fore=Products.objects.all()
feed=feeds.objects.all()
context={
'fore':fore,
'feed':feed
}
return render(request, 'index.html',context)
def about(request):
return render(request, 'about.html')
def product(request):
form =productForm(request.POST)
if form.is_valid():
form.save()
form =productForm()
context={
'form':form
}
return render(request, 'product.html',context)
def contact(request):
feed=feeds.objects.all()
return render(request, "contact.html",{'feed':feed}) | Python | 137 | 18.467154 | 78 | /awssam/ideablog/core/views.py | 0.691789 | 0.688789 |
mrpal39/ev_code | refs/heads/master | from fpdf import FPDF
from PIL import Image
import you
import os
pdf = FPDF ()
imagelist = [] # Contains the list of all images to be converted to PDF.
# --------------- USER INPUT -------------------- #
folder = "/home/rudi/Documents/Pictures/1.png" # Folder containing all the images.
name = "pdf" # Name of the output PDF file.
# ------------- ADD ALL THE IMAGES IN A LIST ------------- #
for dirpath , dirnames , filenames in os . walk ( folder ):
for filename in [ f for f in filenames if f . endswith ( ".jpg" )]:
full_path = os . path . join ( dirpath , filename )
imagelist . append ( full_path )
imagelist . sort () # Sort the images by name.
for i in range ( 0 , len ( imagelist )):
print ( imagelist [ i ])
# --------------- ROTATE ANY LANDSCAPE MODE IMAGE IF PRESENT ----------------- #
for i in range ( 0 , len ( imagelist )):
im1 = Image . open ( imagelist [ i ]) # Open the image.
width , height = im1 . size # Get the width and height of that image.
if width > height :
im2 = im1 . transpose ( Image . ROTATE_270 ) # If width > height, rotate the image.
os . remove ( imagelist [ i ]) # Delete the previous image.
im2 . save ( imagelist [ i ]) # Save the rotated image.
# im.save
print ( " \n Found " + str ( len ( imagelist )) + " image files. Converting to PDF.... \n " )
# -------------- CONVERT TO PDF ------------ #
for image in imagelist :
pdf . add_page ()
pdf . image ( image , 0 , 0 , 210 , 297 ) # 210 and 297 are the dimensions of an A4 size sheet.
pdf . output ( folder + name , "F" ) # Save the PDF.
print ( "PDF generated successfully!" ) | Python | 48 | 44.020832 | 137 | /myapi/devfile/gitapi/jp.py | 0.441462 | 0.429431 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
from pymongo import MongoClient
from scrapy import log
import traceback
from scrapy.exceptions import DropItem
class SingleMongodbPipeline(object):
MONGODB_SERVER = "101.200.46.191"
MONGODB_PORT = 27017
MONGODB_DB = "zufang_fs"
def __init__(self):
    # Initialize the MongoDB connection (translated from the original
    # Chinese comment); on failure, print the traceback and continue
    # (self.db is then left unset).
    try:
        client = MongoClient(self.MONGODB_SERVER, self.MONGODB_PORT)
        self.db = client[self.MONGODB_DB]
    except Exception as e:
        traceback.print_exc()
@classmethod
def from_crawler(cls, crawler):
    # Scrapy factory hook: pull connection details from crawler settings,
    # falling back to the class-level defaults when the Single* settings
    # are absent. Note: this mutates class attributes, not the instance.
    cls.MONGODB_SERVER = crawler.settings.get('SingleMONGODB_SERVER', '101.200.46.191')
    cls.MONGODB_PORT = crawler.settings.getint('SingleMONGODB_PORT', 27017)
    cls.MONGODB_DB = crawler.settings.get('SingleMONGODB_DB', 'zufang_fs')
    pipe = cls()
    pipe.crawler = crawler
    return pipe
def process_item(self, item, spider):
if item['pub_time'] == 0:
raise DropItem("Duplicate item found: %s" % item)
if item['method'] == 0:
raise DropItem("Duplicate item found: %s" % item)
if item['community']==0:
raise DropItem("Duplicate item found: %s" % item)
if item['money']==0:
raise DropItem("Duplicate item found: %s" % item)
if item['area'] == 0:
raise DropItem("Duplicate item found: %s" % item)
if item['city'] == 0:
raise DropItem("Duplicate item found: %s" % item)
# if item['phone'] == 0:
# raise DropItem("Duplicate item found: %s" % item)
# if item['img1'] == 0:
# raise DropItem("Duplicate item found: %s" % item)
# if item['img2'] == 0:
# raise DropItem("Duplicate item found: %s" % item)
zufang_detail = {
'title': item.get('title'),
'money': item.get('money'),
'method': item.get('method'),
'area': item.get('area', ''),
'community': item.get('community', ''),
'targeturl': item.get('targeturl'),
'pub_time': item.get('pub_time', ''),
'city':item.get('city',''),
'phone':item.get('phone',''),
'img1':item.get('img1',''),
'img2':item.get('img2',''),
}
result = self.db['zufang_detail'].insert(zufang_detail)
print '[success] the '+item['targeturl']+'wrote to MongoDB database'
return item | Python | 63 | 37.84127 | 89 | /tc_zufang/tc_zufang-slave/tc_zufang/mongodb_pipeline.py | 0.551104 | 0.533115 |
mrpal39/ev_code | refs/heads/master | from django.db import models
from django.contrib.auth.models import User
class Project(models.Model):
    """A scraping project owned by a user; stores the generated link
    generator / scraper code and their scrapy settings as text."""
    project_name = models.CharField(max_length=50)
    # Consistency fix: the other ForeignKeys in this file pass on_delete
    # explicitly (required from Django 2.0); CASCADE matches Django 1.x's
    # implicit default, so behavior is unchanged.
    user = models.ForeignKey(User, on_delete=models.CASCADE)
    link_generator = models.TextField(blank=True)
    scraper_function = models.TextField(blank=True)
    settings_scraper = models.TextField(blank=True)
    settings_link_generator = models.TextField(blank=True)

    def __str__(self):
        return "%s by %s" % (self.project_name, self.user.username)
class Item(models.Model):
    # A scraped-item definition belonging to a Project.
    item_name = models.CharField(max_length=50)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)

    def __str__(self):
        return self.item_name
class Field(models.Model):
    # One field of a scraped Item.
    field_name = models.CharField(max_length=50)
    item = models.ForeignKey(Item, on_delete=models.CASCADE)

    def __str__(self):
        return self.field_name
class Pipeline(models.Model):
    # A scrapy item pipeline attached to a Project; `pipeline_order`
    # controls execution order, `pipeline_function` holds the code.
    pipeline_name = models.CharField(max_length=50)
    pipeline_order = models.IntegerField()
    pipeline_function = models.TextField(blank=True)
    project = models.ForeignKey(Project, on_delete=models.CASCADE)

    def __str__(self):
        return self.pipeline_name
class LinkgenDeploy(models.Model):
    # Record of one link-generator deployment attempt for a Project.
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    success = models.BooleanField(blank=False)
    date = models.DateTimeField(auto_now_add=True)
    version = models.IntegerField(blank=False, default=0)
class ScrapersDeploy(models.Model):
    # Record of one scraper deployment; `success` stores free-form text
    # (per-scraper results), unlike LinkgenDeploy's boolean flag.
    project = models.ForeignKey(Project, on_delete=models.CASCADE)
    success = models.TextField(blank=True)
    date = models.DateTimeField(auto_now_add=True)
    version = models.IntegerField(blank=False, default=0)
class Dataset(models.Model):
user = models.ForeignKey(User)
database = models.CharField(max_length=50) | Python | 59 | 29.864407 | 67 | /Web-UI/scrapyproject/models.py | 0.715934 | 0.709341 |
mrpal39/ev_code | refs/heads/master |
from haystack import indexes
from django . conf import settings
from .models import Article ,Category ,Tag
class ArticleIndex ( indexes . SearchIndex , indexes . Indexable ):
text = indexes . CharField ( document = True , use_template = True )
def get_model ( self ):
return Article
def index_queryset ( self , using = None ):
return self . get_model (). objects . filter ( status = 'p' ) | Python | 13 | 33.46154 | 76 | /myapi/fullfeblog/blog/search_indexes.py | 0.623608 | 0.623608 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import redis
import scrapy
import datetime
from scrapy.loader.processors import MapCompose
from articles.model.es_types import ArticleType
from elasticsearch_dsl.connections import connections
es = connections.create_connection(ArticleType._doc_type.using)
redis_cli = redis.StrictRedis()
def gen_suggests(index, info_tuple):
    # Build a completion-suggest payload from (text, weight) pairs
    # (translated from the original Chinese comment).
    # NOTE(review): `used_words` is never updated inside the loop, so the
    # subtraction never removes anything and tokens are NOT de-duplicated
    # across entries -- confirm whether that is intended.
    used_words = set()
    suggests = []
    for text, weight in info_tuple:
        if text:
            # Tokenize via the ES analyze API with the ik_max_word
            # analyzer (translated comment); keep lowercased tokens
            # longer than one character.
            words = es.indices.analyze(index=index, analyzer="ik_max_word", params={'filter': ["lowercase"]}, body=text)
            anylyzed_words = set([r["token"] for r in words["tokens"] if len(r["token"]) > 1])
            new_words = anylyzed_words - used_words
        else:
            new_words = set()
        if new_words:
            suggests.append({"input": list(new_words), "weight": weight})
    return suggests
class PmArticlesItem(scrapy.Item):
    # Scraped article fields; mirrors the ArticleType ES document mapping.
    title = scrapy.Field()
    create_date = scrapy.Field()
    url = scrapy.Field()
    content = scrapy.Field()
    view = scrapy.Field()
    tag = scrapy.Field()
    url_id = scrapy.Field()

    def save_to_es(self):
        # Persist this item into Elasticsearch and bump the redis counter.
        article = ArticleType()
        article.title = self['title']
        article.create_date = self["create_date"]
        article.content = self["content"]
        article.url = self["url"]
        article.view = self["view"]
        article.tag = self["tag"]
        article.meta.id = self["url_id"]
        # Search suggestions: title weighted 10, tag weighted 7.
        article.suggest = gen_suggests(ArticleType._doc_type.index, ((article.title, 10), (article.tag, 7)))
        article.save()
        redis_cli.incr("pm_count")  # crawled-article counter in redis (translated comment)
        return
| Python | 65 | 27.861538 | 120 | /eswork/articles/articles/items.py | 0.630261 | 0.627597 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
import redis
redis_cli = redis.StrictRedis()
redis_cli.incr("pm_count")
| Python | 6 | 15.333333 | 31 | /eswork/lcvsearch/test.py | 0.646465 | 0.636364 |
mrpal39/ev_code | refs/heads/master |
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
SECRET_KEY = 'f!7k7a9k10)fbx7#@y@u9u@v3%b)f%h6xxnxf71(21z1uj^#+e'
DEBUG = True
ALLOWED_HOSTS = []
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'users',
# 'oauth2_provider',
# 'oauth2_provider',
'corsheaders',
'django.contrib.sites.apps.SitesConfig',
'django.contrib.humanize.apps.HumanizeConfig',
'django_nyt.apps.DjangoNytConfig',
'mptt',
'sekizai',
'sorl.thumbnail',
'wiki.apps.WikiConfig',
'wiki.plugins.attachments.apps.AttachmentsConfig',
'wiki.plugins.notifications.apps.NotificationsConfig',
'wiki.plugins.images.apps.ImagesConfig',
'wiki.plugins.macros.apps.MacrosConfig',
]
# AUTHENTICATION_BACKENDS = (
# 'oauth2_provider.backends.OAuth2Backend',
# # Uncomment following if you want to access the admin
# #'django.contrib.auth.backends.ModelBackend'
# )
MIDDLEWARE = [
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'oauth2_provider.middleware.OAuth2TokenMiddleware',
'corsheaders.middleware.CorsMiddleware',
'django.middleware.security.SecurityMiddleware',
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'iam.urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': [],
'APP_DIRS': True,
'OPTIONS': {
'context_processors': [
'django.contrib.auth.context_processors.auth',
'django.template.context_processors.debug',
'django.template.context_processors.i18n',
'django.template.context_processors.media',
'django.template.context_processors.request',
'django.template.context_processors.static',
'django.template.context_processors.tz',
'django.contrib.messages.context_processors.messages',
"sekizai.context_processors.sekizai",
],
},
},
]
WSGI_APPLICATION = 'iam.wsgi.application'
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
}
}
AUTH_PASSWORD_VALIDATORS = [
{
'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
},
{
'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
},
]
# Internationalization
# https://docs.djangoproject.com/en/2.2/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
SITE_ID = 1
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/2.2/howto/static-files/
STATIC_URL = '/static/'
AUTH_USER_MODEL='users.User'
LOGIN_URL='/admin/login/'
CORS_ORIGIN_ALLOW_ALL = True
WIKI_ACCOUNT_HANDLING = True
WIKI_ACCOUNT_SIGNUP_ALLOWED = True
# export ID =vW1RcAl7Mb0d5gyHNQIAcH110lWoOW2BmWJIero8
# export SECRET=DZFpuNjRdt5xUEzxXovAp40bU3lQvoMvF3awEStn61RXWE0Ses4RgzHWKJKTvUCHfRkhcBi3ebsEfSjfEO96vo2Sh6pZlxJ6f7KcUbhvqMMPoVxRwv4vfdWEoWMGPeIO
# # | Python | 129 | 26.496124 | 91 | /awssam/iam/iam/settings.py | 0.664694 | 0.658777 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
try:
import pika
except ImportError:
raise ImportError("Please install pika before running scrapy-rabbitmq.")
RABBITMQ_CONNECTION_TYPE = 'blocking'
RABBITMQ_CONNECTION_PARAMETERS = {'host': 'localhost'}
def from_settings(settings, spider_name):
    """Open a RabbitMQ channel for the ``<spider_name>:requests`` queue.

    Connection details come from the scrapy settings; the durable queue
    is declared before the channel is returned.
    """
    conn_type = settings.get('RABBITMQ_CONNECTION_TYPE',
                             RABBITMQ_CONNECTION_TYPE)
    queue_name = "%s:requests" % spider_name
    host = settings.get('RABBITMQ_HOST')
    port = settings.get('RABBITMQ_PORT')
    username = settings.get('RABBITMQ_USERNAME')
    password = settings.get('RABBITMQ_PASSWORD')

    credentials = pika.PlainCredentials(username, password)
    params = pika.ConnectionParameters(host=host,
                                       port=port,
                                       virtual_host='/',
                                       credentials=credentials,
                                       connection_attempts=5,
                                       retry_delay=3)
    # Map the configured connection style onto the pika adapter class.
    factories = {
        'blocking': pika.BlockingConnection,
        'libev': pika.LibevConnection,
        'select': pika.SelectConnection,
        'tornado': pika.TornadoConnection,
        'twisted': pika.TwistedConnection,
    }
    connection = factories[conn_type](params)

    channel = connection.channel()
    channel.queue_declare(queue=queue_name, durable=True)
    return channel
def close(channel):
    # Close the given AMQP channel.
    channel.close()
| Python | 47 | 30.510639 | 77 | /Web-UI/scrapyproject/scrapy_packages/rabbitmq/connection.py | 0.655638 | 0.653612 |
mrpal39/ev_code | refs/heads/master | from django.conf.urls import url
from . import views
urlpatterns = [
url('api/', views.apiurl, name='index'),
] | Python | 7 | 15.857142 | 44 | /march19/devfile/api/urls.py | 0.675214 | 0.675214 |
mrpal39/ev_code | refs/heads/master | from django.http.response import HttpResponse
from requests_oauthlib import OAuth2Session
import json
import requests_oauthlib
from django.HttpResponse import request
import requests
from django.shortcuts import redirect, session,
# payload={'key1':'search?q=','key2':['form','&api_key=306cf1684a42e4be5ec0a1c60362c2ef']}
# client_id = '&api_key=306cf1684a42e4be5ec0a1c60362c2ef'
client_id = "<your client key>"
client_secret = "<your client secret>"
authorization_base_url = 'https://github.com/login/oauth/authorize'
token_url = 'https://github.com/login/oauth/access_token'
@app.route("/login")
def login():
github = OAuth2Session(client_id)
authorization_url, state = github.authorization_url(authorization_base_url)
# State is used to prevent CSRF, keep this for later.
session['oauth_state'] = state
return redirect(authorization_url)
@app.route("/callback")
def callback():
github = OAuth2Session(client_id, state=session['oauth_state'])
token = github.fetch_token(token_url, client_secret=client_secret,
authorization_response=request.url)
return json(github.get('https://api.github.com/user').json()) | Python | 38 | 30.236841 | 91 | /myapi/devfile/core/api.py | 0.72437 | 0.688235 |
mrpal39/ev_code | refs/heads/master | from django import template
from ..models import Post
from django.utils.safestring import mark_safe
import markdown
from django.db.models import Count
register = template.Library()
@register.filter(name='markdown')
def markdown_fromat(text):
    # Template filter |markdown: render markdown source to HTML marked
    # safe for output. NOTE(review): the function name has a typo
    # ("fromat"); harmless, since templates use the registered name.
    return mark_safe(markdown.markdown(text))
@register.simple_tag
def total_posts():
    # {% total_posts %}: number of published posts.
    return Post.published.count()
@register.inclusion_tag('latest_posts.html')
def show_latest_posts(count=3):
    # Inclusion tag: render the `count` most recently published posts.
    latest_posts = Post.published.order_by('-publish')[:count]
    return {'latest_posts': latest_posts}
@register.simple_tag
# In the preceding template tag, you build a QuerySet using the annotate() function
# to aggregate the total number of comments for each post. You use the Count
# aggregation function to store the number of comments in the computed field total_
# comments for each Post object. You order the QuerySet by the computed field in
# descending order. You also provide an optional count variable to limit the total
def get_most_commented_posts(count=2):
return Post.published.annotate(
total_comments=Count('comments')
).order_by('-total_comments')[:count] | Python | 33 | 33.666668 | 83 | /myapi/fullfeblog/blog/templatetags/blog_tags.py | 0.755906 | 0.754156 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds an integer `version` field
    # (default 0) to both LinkgenDeploy and ScrapersDeploy.

    dependencies = [
        ('scrapyproject', '0008_scrapersdeploy'),
    ]

    operations = [
        migrations.AddField(
            model_name='linkgendeploy',
            name='version',
            field=models.IntegerField(default=0),
        ),
        migrations.AddField(
            model_name='scrapersdeploy',
            name='version',
            field=models.IntegerField(default=0),
        ),
    ]
| Python | 24 | 22.666666 | 49 | /Web-UI/scrapyproject/migrations/0009_auto_20170215_0657.py | 0.573944 | 0.56162 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
import redis
def inserintotc(str, type):
    """Push a start URL onto the redis 'start_urls' list when type == 1.

    Bug fix: the original used a Python-2 print statement (a syntax
    error on Python 3); the parenthesized call below behaves the same on
    both. The bare ``except:`` was narrowed to ``Exception``.
    NOTE(review): the parameters shadow the builtins ``str``/``type``;
    kept for interface compatibility.
    """
    try:
        r = redis.Redis(host='127.0.0.1', port=6379, db=0)
    except Exception:
        print('连接redis失败')
    else:
        if type == 1:
            r.lpush('start_urls', str)
def inserintota(str,type):
try:
r = redis.Redis(host='127.0.0.1', port=6379, db=0)
except:
print '连接redis失败'
else:
if type == 2:
r.lpush('tczufang_tc:requests', str) | Python | 18 | 24.277779 | 58 | /tc_zufang/tc_zufang/tc_zufang/utils/InsertRedis.py | 0.528634 | 0.473568 |
mrpal39/ev_code | refs/heads/master | from django.apps import AppConfig
class CorescrapConfig(AppConfig):
    # App registry entry for the `corescrap` Django app.
    name = 'corescrap'
| Python | 5 | 17.6 | 33 | /awssam/myscrapyproject/dev/corescrap/apps.py | 0.763441 | 0.763441 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# https://doc.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item ,Field
from scrapy.loader import ItemLoader
from scrapy.loader.processors import TakeFirst, MapCompose, Join
class DemoLoader(ItemLoader):
    """ItemLoader for DemoItem with per-field input/output processors.

    Bug fix: ``unicode`` does not exist on Python 3; the equivalent
    ``str`` methods are used instead.
    """
    default_output_processor = TakeFirst()
    # Title: title-case each extracted fragment, then join them.
    title_in = MapCompose(str.title)
    title_out = Join()
    # Size: strip surrounding whitespace from each fragment.
    size_in = MapCompose(str.strip)
class DemoItem(scrapy.Item):
    """Scraped product: title, link, and description fields."""

    product_title = scrapy.Field()
    product_link = scrapy.Field()
    product_description = scrapy.Field()
| Python | 29 | 24.275862 | 66 | /scrap/tuto/tuto/items.py | 0.709413 | 0.708049 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
from scrapy_redis.spiders import RedisSpider
from scrapy.selector import Selector
from tc_zufang.utils.result_parse import list_first_item
from scrapy.http import Request
from tc_zufang.items import TcZufangItem
import re
defaultencoding = 'utf-8'
'''
58同城的爬虫
'''
#继承自RedisSpider,则start_urls可以从redis读取
#继承自BaseSpider,则start_urls需要写出来
class TczufangSpider(RedisSpider):
    """58.com (58同城) rental-listing detail spider; URLs are fed from Redis."""
    name='tczufang'
    redis_key = 'tczufang_tc:requests'
    # Parse the pages downloaded for the queued URLs. Parsing serves two
    # purposes in this project:
    # 1. extract the next-page URL and hand it to the scheduler as the
    #    spider's next request;
    # 2. extract detail-page URLs and parse each detail page further.
    # This callback parses a detail page.
    def parse(self, response):
        tczufangItem=TcZufangItem()
        response_url = re.findall('^http\:\/\/\w+\.58\.com', response.url)
        response_selector = Selector(response)
        # Field extraction can be debugged interactively with `scrapy shell`.
        # Listing title
        raw_title=list_first_item(response_selector.xpath(u'//div[contains(@class,"house-title")]/h1[contains(@class,"c_333 f20")]/text()').extract())
        if raw_title:
            tczufangItem['title'] =raw_title.encode('utf8')
        # Publish time of the listing (post-processed via regex below)
        raw_time=list_first_item(response_selector.xpath(u'//div[contains(@class,"house-title")]/p[contains(@class,"house-update-info c_888 f12")]/text()').extract())
        try:
            tczufangItem['pub_time'] =re.findall(r'\d+\-\d+\-\d+\s+\d+\:\d+\:\d+',raw_time)[0]
        except:
            tczufangItem['pub_time']=0
        # Monthly rent
        tczufangItem['money']=list_first_item(response_selector.xpath(u'//div[contains(@class,"house-pay-way f16")]/span[contains(@class,"c_ff552e")]/b[contains(@class,"f36")]/text()').extract())
        # Lease type (whole flat / shared, etc.)
        raw_method=list_first_item(response_selector.xpath(u'//ul[contains(@class,"f14")]/li[1]/span[2]/text()').extract())
        try:
            tczufangItem['method'] =raw_method.encode('utf8')
        except:
            tczufangItem['method']=0
        # District / area (two path segments joined with '-')
        try:
            area=response_selector.xpath(u'//ul[contains(@class,"f14")]/li/span/a[contains(@class,"c_333")]/text()').extract()[1]
        except:
            area=''
        if area:
            area=area
        try:
            area2=response_selector.xpath(u'//ul[contains(@class,"f14")]/li/span/a[contains(@class,"c_333")]/text()').extract()[2]
        except:
            area2=''
        raw_area=area+"-"+area2
        if raw_area:
            raw_area=raw_area.encode('utf8')
        tczufangItem['area'] =raw_area if raw_area else None
        # Residential community name
        try:
            raw_community = response_selector.xpath(u'//ul[contains(@class,"f14")]/li/span/a[contains(@class,"c_333")]/text()').extract()[0]
            if raw_community:
                raw_community=raw_community.encode('utf8')
            tczufangItem['community']=raw_community if raw_community else None
        except:
            tczufangItem['community']=0
        # Detail-page URL
        tczufangItem['targeturl']=response.url
        # City, taken from the subdomain (e.g. http://bj.58.com -> 'bj')
        tczufangItem['city']=response.url.split("//")[1].split('.')[0]
        # Contact phone number
        try:
            tczufangItem['phone']=response_selector.xpath(u'//div[contains(@class,"house-fraud-tip")]/span[1]/em[contains(@class,"phone-num")]/text()').extract()[0]
        except:
            tczufangItem['phone']=0
        # First photo (lazy-loaded via @data-src)
        try:
            tczufangItem['img1'] = response_selector.xpath(u'//ul[contains(@class,"pic-list-wrap pa")]/li[1]/@data-src').extract()[0]
        except:
            tczufangItem['img1'] = 0
        # Second photo
        try:
            tczufangItem['img2'] = response_selector.xpath(u'//ul[contains(@class,"pic-list-wrap pa")]/li[2]/@data-src').extract()[0]
        except:
            tczufangItem['img2'] = 0
        yield tczufangItem | Python | 87 | 41.873562 | 195 | /tc_zufang/tc_zufang-slave/tc_zufang/spiders/tczufang_detail_spider.py | 0.600965 | 0.581121
mrpal39/ev_code | refs/heads/master | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy.item import Item,Field
class PropertiesItem():
title=Field()
price=Field()
description=Field()
address = Field()
image_urls = Field()
#imagescalculaitons
images = Field()
locations = Field()
#housekeeping
url=Field()
project = Field()
spider=Field()
server = Field()
date=Field()
| Python | 26 | 18.038462 | 53 | /cte/properties/properties/items.py | 0.655242 | 0.655242 |
mrpal39/ev_code | refs/heads/master | # from core.models import Item
from django.shortcuts import render
# from django.views.generic import ListView,DetailView
from django.shortcuts import render, get_object_or_404
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from .models import Post
from django.views.generic import (
ListView,
DetailView,
# CreateView,
# UpdateView,
# DeleteView
)
from django.core.mail import send_mail
from .forms import EmailPostForm
from core.models import Comment
from .forms import EmailPostForm, CommentForm , SearchForm
from taggit.models import Tag
from django.db.models import Count
from django.contrib.postgres.search import SearchVector #Building a search view veter
import requests
def post_api(request):
    """Query the libraries.io search API and render the parsed JSON.

    NOTE(review): this view looks broken -- ``form`` (an unbound SearchForm
    instance) is interpolated straight into the query string instead of the
    user's search term; presumably it should read ``request.GET['query']``.
    Confirm against the 'search.html' template, which receives the parsed
    JSON under the context key 'form'.
    """
    form= SearchForm()
    query=None
    results=[]
    # NOTE(review): API key committed to source control -- rotate it and load
    # it from settings/environment instead.
    api_key='306cf1684a42e4be5ec0a1c60362c2ef'
    url=("https://libraries.io/api/search?q={}&api_key={}".format(form,api_key))
    response = requests.get(url)
    response_dict = response.json()
    # if 'query' in request.GET:
    #     response_dict=SearchForm(request.GET)
    #     if response_dict.is_valid():
    #         query=form.cleaned_data['query']
    #         results=Post.published.annotate(
    #                search =SearchVector('title','body'),
    #            ).filter(search=query)
    return render(request,'search.html',{
         'form':response_dict,
        #  'query':query,
        #  'results':results
     })
def post_search(request):
    """Full-text search over published posts (title + body)."""
    search_form = SearchForm()
    term = None
    matches = []
    if 'query' in request.GET:
        search_form = SearchForm(request.GET)
        if search_form.is_valid():
            term = search_form.cleaned_data['query']
            vector = SearchVector('title', 'body')
            matches = Post.published.annotate(search=vector).filter(search=term)
    context = {'form': search_form, 'query': term, 'results': matches}
    return render(request, 'api.html', context)
def post_share(request, post_id):
    """Email a recommendation for a published post via EmailPostForm."""
    # Only published posts can be shared; 404 otherwise.
    post = get_object_or_404(Post, id=post_id, status='published')
    sent = False
    if request.method != 'POST':
        form = EmailPostForm()
    else:
        form = EmailPostForm(request.POST)
        if form.is_valid():
            cd = form.cleaned_data
            # Build an absolute link so the email works outside the site.
            post_url = request.build_absolute_uri(post.get_absolute_url())
            subject = f"{cd['name']} recommends you read "f"{post.title}"
            message = f"Read {post.title} at {post_url}\n\n" f"{cd['name']}\'s comments: {cd['comments']}"
            send_mail(subject, message, 'rp9545416@gmail.com',[cd['to']])
            sent = True
    context = {'post': post, 'form': form, 'sent': sent}
    return render(request, 'share.html', context)
class PostDetailView(DetailView):
    """Detail view for a Post: bumps the view counter and wires comments."""
    model = Post
    pk_url_kwarg = 'article_id'
    context_object_name = "article"

    def get_object(self, queryset=None):
        # Fetch the article and record one view before rendering.
        obj = super(PostDetailView, self).get_object()
        obj.viewed()
        self.object = obj
        return obj

    def get_context_data(self, **kwargs):
        articleid = int(self.kwargs[self.pk_url_kwarg])
        comment_form = CommentForm()
        user = self.request.user
        # Hide the email/name inputs for authenticated users and prefill
        # them from the account instead.
        if user.is_authenticated and not user.is_anonymous and user.email and user.username:
            comment_form.fields.update({
                'email': forms.CharField(widget=forms.HiddenInput()),
                'name': forms.CharField(widget=forms.HiddenInput()),
            })
            comment_form.fields["email"].initial = user.email
            comment_form.fields["name"].initial = user.username
        article_comments = self.object.comment_list()
        kwargs['form'] = comment_form
        kwargs['article_comments'] = article_comments
        kwargs['comment_count'] = len(
            article_comments) if article_comments else 0
        kwargs['next_article'] = self.object.next_article
        kwargs['prev_article'] = self.object.prev_article
        # BUG FIX: the original called super(ArticleDetailView, ...), a name
        # that does not exist in this module and would raise NameError.
        return super(PostDetailView, self).get_context_data(**kwargs)
class PostListView(ListView):
    """Paginated list of published posts (2 per page)."""
    queryset = Post.published.all()
    context_object_name = 'posts'
    paginate_by = 2
    template_name = 'list.html'
    page_type = ''
    page_kwarg = 'page'

    def get_view_cache_key(self):
        # BUG FIX: the original evaluated `self.request.get['pages']`, which
        # is neither the GET mapping nor the right key and raises TypeError.
        # Key the per-view cache on the requested page number instead.
        return 'postlist_page_{}'.format(self.page_number)

    @property
    def page_number(self):
        # Page number from the URL kwargs, then the query string, else 1.
        page_kwarg = self.page_kwarg
        page = self.kwargs.get(
            page_kwarg) or self.request.GET.get(page_kwarg) or 1
        return page

    def get_queryset_cache_key(self):
        # Subclasses define how the queryset cache is keyed.
        raise NotImplementedError()

    def get_queryset_data(self):
        # Subclasses supply the actual queryset data.
        raise NotImplementedError()
def post_list(request, tag_slug=None):
    """List published posts, optionally filtered by tag, two per page."""
    queryset = Post.published.all()
    tag = None
    if tag_slug:
        tag = get_object_or_404(Tag, slug=tag_slug)
        queryset = queryset.filter(tags__in=[tag])
    paginator = Paginator(queryset, 2)  # 2 posts per page
    page = request.GET.get('page')
    try:
        posts = paginator.page(page)
    except PageNotAnInteger:
        # Non-numeric page: fall back to the first page.
        posts = paginator.page(1)
    except EmptyPage:
        # Out-of-range page: deliver the last page of results.
        posts = paginator.page(paginator.num_pages)
    context = {'posts': posts, 'page': page, 'tag': tag}
    return render(request, 'list.html', context)
def post_detail(request, year, month, day, post):
    """Display one published post with its comments and similar posts."""
    post = get_object_or_404(Post, slug=post,
                             status='published',
                             publish__year=year,
                             publish__month=month,
                             publish__day=day)
    comments = post.comments.filter(active=True)
    new_comment = None
    # Similar posts: share the most tags with this one, newest first.
    post_tags_ids = post.tags.values_list('id', flat=True)
    similar_posts = Post.published.filter(tags__in=post_tags_ids).exclude(id=post.id)
    similar_posts = similar_posts.annotate(same_tags=Count('tags')).order_by('-same_tags', '-publish')[:4]
    if request.method == 'POST':
        # A comment was posted.
        comment_form = CommentForm(data=request.POST)
        if comment_form.is_valid():
            # BUG FIX: the original called save(comment=False) -- the keyword
            # is `commit` -- and `new_comment.post` was a no-op attribute
            # access instead of attaching the comment to this post.
            new_comment = comment_form.save(commit=False)
            new_comment.post = post
            new_comment.save()
    else:
        comment_form = CommentForm()
    return render(request,
                  'blog/post_detail.html',
                  {'post': post,
                   'comments': comments,
                   'new_comment': new_comment,
                   'comment_form': comment_form,
                   'similar_posts': similar_posts})
def home(request):
    """Render the site landing page."""
    return render(request, 'base.html')
def about(request):
    """Render the static 'about' page."""
    return render(request, 'about.html')
# def product(request):
#     return render (request ,'product.html' )
# class ItemdDetailView(DetailView):
#     model=Item
#     template_name="product.html"
# def checkout(request):
#     return render (request ,'checkout.html')
| Python | 272 | 28.94853 | 103 | /myapi/fullfeblog/blog/views.py | 0.587528 | 0.582863 |
mrpal39/ev_code | refs/heads/master |
import scrapy
class FirstScrapyItem(scrapy.Item):
    """Item holding one scraped entry (title, url, desc).

    BUG FIX: the original instantiated an undefined ``DmozItem`` inside the
    class body and assigned fields onto that instance; fields must be
    declared as class attributes for scrapy's Item metaclass to register
    them.
    """
    title = scrapy.Field()
    url = scrapy.Field()
    desc = scrapy.Field()
| Python | 11 | 20.181818 | 48 | /scrap/first_scrapy/first_scrapy/items.py | 0.606695 | 0.606695 |
mrpal39/ev_code | refs/heads/master |
import hashlib
import datetime
def date_convert(value):
    """Parse a 'YYYY/MM/DD' string into a date; fall back to today on error.

    The failed parse is printed (matching the original's behavior) and the
    current date is returned instead of raising.
    """
    fmt = "%Y/%m/%d"
    try:
        return datetime.datetime.strptime(value, fmt).date()
    except Exception as exc:
        print(exc)
        return datetime.datetime.now().date()
def get_md5(url):
    """Return the hex MD5 digest of *url*; str input is UTF-8 encoded first."""
    data = url.encode("utf-8") if isinstance(url, str) else url
    digest = hashlib.md5()
    digest.update(data)
    return digest.hexdigest()
if __name__ == '__main__':
    # Ad-hoc smoke test for the helpers above.
    print(date_convert('2020/02/28'))
    print(get_md5('http://www.woshipm.com/it/3443027.html'))
| Python | 27 | 19.962963 | 74 | /eswork/articles/articles/utils/common.py | 0.59612 | 0.560847 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
# Python 2 scratch script: prints the unicode literal (the escapes decode to
# the city name 丰泰城).
res=u'\u4e30\u6cf0\u57ce'
# rr=res.encode('gbk')
print res | Python | 4 | 19.75 | 25 | /tc_zufang/django_web/django_web/test.py | 0.621951 | 0.52439
mrpal39/ev_code | refs/heads/master | from django.shortcuts import render
from urllib.request import urlopen
from django.shortcuts import render
from django.views import View
import requests
# class apiurl(View):
def apiurl(request):
    """Fetch the GitHub API root document and render it via index.html.

    BUG FIX: the original called the ``requests`` module itself as a
    function (TypeError) and then chained ``url.requests.json()``; the
    correct flow is requests.get(...) followed by response.json().
    """
    response = requests.get('https://api.github.com/')
    data = response.json()
    context = {
        'data': data
    }
    return render(request, 'index.html', context)
| Python | 17 | 20.882353 | 48 | /march19/devfile/api/views.py | 0.692513 | 0.692513 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
import smtplib
from email.mime.text import MIMEText
from email.header import Header
def sendMessage_warning():
    """Email a 'crawler slave banned' warning through the 163.com SMTP relay.

    NOTE(review): account credentials are hard-coded below -- move them to
    configuration and rotate the password.
    """
    server = smtplib.SMTP('smtp.163.com', 25)
    server.login('seven_2016@163.com', 'ssy102009')
    # Body/subject are fixed Chinese strings: "crawler slave banned, please unban".
    msg = MIMEText('爬虫slave被封警告!请求解封!', 'plain', 'utf-8')
    msg['From'] = 'seven_2016@163.com <seven_2016@163.com>'
    msg['Subject'] = Header(u'爬虫被封禁警告!', 'utf8').encode()
    msg['To'] = u'seven <751401459@qq.com>'
    server.sendmail('seven_2016@163.com', ['751401459@qq.com'], msg.as_string())
| Python | 12 | 38.333332 | 60 | /tc_zufang/tc_zufang-slave/tc_zufang/utils/message.py | 0.620763 | 0.591102 |
mrpal39/ev_code | refs/heads/master | # Define here the models for your scraped items
#
# See documentation in:
# https://docs.scrapy.org/en/latest/topics/items.html
import scrapy
from scrapy import Item, Field
# define the fields for your item here like:
#
class SainsburysItem(Item):
    """One Sainsbury's product page.

    BUG FIX: the original declared two classes both named SainsburysItem,
    the second silently shadowing the first; they are merged here so the
    `name` field survives alongside the detailed fields.
    """
    name = Field()
    url = Field()
    product_name = Field()
    product_image = Field()
    price_per_unit = Field()
    unit = Field()
    rating = Field()
    product_reviews = Field()
    item_code = Field()
    nutritions = Field()
    product_origin = Field()


class FlatSainsburysItem(Item):
    """Flattened variant with nutrition values promoted to top-level fields."""
    url = Field()
    product_name = Field()
    product_image = Field()
    price_per_unit = Field()
    unit = Field()
    rating = Field()
    product_reviews = Field()
    item_code = Field()
    product_origin = Field()
    energy = Field()
    energy_kj = Field()
    kcal = Field()
    fibre_g = Field()
    carbohydrates_g = Field()
    of_which_sugars = Field()
| Python | 45 | 22.133333 | 53 | /cte/projectfile/projectfile/items.py | 0.589817 | 0.589817 |
mrpal39/ev_code | refs/heads/master | from . settings import *
# Development overrides on top of the base settings (imported above).
DEBUG = True
for template_engine in TEMPLATES:
    template_engine["OPTIONS"]["debug"] = True
# Print outgoing mail to the console instead of sending it.
EMAIL_BACKEND = "django.core.mail.backends.console.EmailBackend"
try:
    # Enable django-debug-toolbar only when it is installed.
    import debug_toolbar  # @UnusedImport
    MIDDLEWARE = list(MIDDLEWARE) + [
        "debug_toolbar.middleware.DebugToolbarMiddleware",
    ]
    INSTALLED_APPS = list(INSTALLED_APPS) + ["debug_toolbar"]
    INTERNAL_IPS = ("127.0.0.1",)
    DEBUG_TOOLBAR_CONFIG = {"INTERCEPT_REDIRECTS": False}
except ImportError:
    pass | Python | 22 | 22.818182 | 64 | /awssam/wikidj/wikidj/dev.py | 0.688336 | 0.676864
mrpal39/ev_code | refs/heads/master | import logging
import scrapy
logger = logging.getLogger('mycustomlogger')
class MySpider(scrapy.Spider):
    """Minimal spider demonstrating logging through a custom named logger."""
    name = 'myspider1'
    start_urls = ['https://scrapinghub.com']
    def parse(self, response):
        logger.info('Parse function called on %s', response.url) | Python | 12 | 21.75 | 64 | /scrap/tutorial/scrap/spiders/reactor.py | 0.698529 | 0.694853
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-02-24 08:54
from __future__ import unicode_literals
from django.db import migrations, models
import open_news.models
class Migration(migrations.Migration):
    """Add the Document model (an uploaded file) to the open_news app."""
    dependencies = [
        ('open_news', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='Document',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('file', models.FileField(upload_to=open_news.models.upload_location)),
            ],
        ),
    ]
| Python | 23 | 26.173914 | 114 | /scrap/example_project/open_news/migrations/0002_document.py | 0.5952 | 0.56 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
from django.shortcuts import render
from . models import ItemInfo
from django.core.paginator import Paginator
from mongoengine import connect
connect("zufang_fs",host='127.0.0.1')
# Create your views here.
def document(request):
    """Paginated table of all rental records (15 rows per page)."""
    per_page = 15
    records = ItemInfo.objects
    paginator = Paginator(records, per_page)
    current = request.GET.get('page', 1)
    page_obj = paginator.page(current)
    city_list = records.distinct("city")
    context = {
        'itemInfo': page_obj,
        'counts': records.count,
        'cities': city_list,
        'citycount': len(city_list),
    }
    return render(request, 'document.html', context)
def binzhuantu():
    ## Pie chart: per-city share (rounded percent) of all rental records.
    citys = []
    zufang_info = ItemInfo.objects
    sums = float(zufang_info.count())
    cities = zufang_info.distinct("city")
    for city in cities:
        length = float(len(zufang_info(city=city)))
        ocu = round(float(length / sums * 100))
        # Each entry is [city name, percentage].
        item = [city.encode('raw_unicode_escape'), ocu]
        citys.append(item)
    return citys
def chart(request):
    ## Pie chart data (see binzhuantu above).
    citys=binzhuantu()
    # # Bar chart (disabled): count of March-2017 listings per city.
    # zufang_info = ItemInfo.objects
    # res = zufang_info.all()
    # cities = zufang_info.distinct("city")
    # cc = []
    # time = []
    # counts = []
    # for re in res:
    #     if re.pub_time != None:
    #         if re.pub_time > '2017-03-01':
    #             if re.pub_time < '2017-04-01':
    #                 time.append(re.city)
    # for city in cities:
    #     count = time.count(city)
    #     counts.append(count)
    #     item = city.encode('utf8')
    #     cc.append(item)
    context ={
        # 'count': counts,
        # 'citys': cc,
        'cities':citys,
    }
    return render(request,'chart.html',context)
def cloud(request):
    """Word-cloud page fed with every distinct community name."""
    communities = ItemInfo.objects.distinct('community')
    context = {
        'count': len(communities),
        'wenzi': communities,
    }
    return render(request, 'test.html', context)
def test(request):
    """Word-cloud page limited to the first 500 distinct community names."""
    zufang_info = ItemInfo.objects
    rr=[]
    res = zufang_info.distinct('community')
    i=0
    # NOTE(review): this manual loop is just `rr = res[:500]` and will raise
    # IndexError if fewer than 500 communities exist -- consider slicing.
    while i<500:
        item=res[i]
        rr.append(item)
        i=i+1
    length = len(res)
    context = {
        'count': length,
        'wenzi': rr
    }
    return render(request,'test.html',context) | Python | 86 | 27.023256 | 55 | /tc_zufang/django_web/datashow/views.py | 0.583728 | 0.568661
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: context_processors.py
Description :
Author : JHao
date: 2017/4/14
-------------------------------------------------
Change Activity:
2017/4/14:
-------------------------------------------------
"""
__author__ = 'JHao'
import importlib
from django_blog import blogroll
from blog.models import Category, Article, Tag, Comment
def sidebar(request):
    """Context processor supplying the blog sidebar data to every template."""
    category_list = Category.objects.all()
    # All article categories
    blog_top = Article.objects.all().values("id", "title", "view").order_by('-view')[0:6]
    # Top articles by view count
    tag_list = Tag.objects.all()
    # All tags
    comment = Comment.objects.all().order_by('-create_time')[0:6]
    # Latest comments
    importlib.reload(blogroll)
    # Blogroll links (reloaded so edits to blogroll.py show up live)
    return {
        'category_list': category_list,
        'blog_top': blog_top,
        'tag_list': tag_list,
        'comment_list': comment,
        'blogroll': blogroll.sites
    }
if __name__ == '__main__':
pass
| Python | 47 | 21.106382 | 89 | /awssam/django-blog/src/blog/context_processors.py | 0.481232 | 0.462945 |
mrpal39/ev_code | refs/heads/master | import requests
import json
url='https://www.scraping-bot.io/rawHtmlPage.html'
username = 'yourUsername'
apiKey = 'yourApiKey'
apiUrl = "http://api.scraping-bot.io/scrape/raw-html"
payload = json.dumps({"url":url})
headers = {
'Content-Type': "application/json"
}
response = requests.request("POST", apiUrl, data=payload, auth=(username,apiKey), headers=headers)
print(response.text)
import requests
import json
url='https://www.scraping-bot.io/rawHtmlPage.html'
username = 'yourUsername'
apiKey = 'yourApiKey'
apiEndPoint = "http://api.scraping-bot.io/scrape/raw-html"
options = {
"useChrome": False,#set to True if you want to use headless chrome for javascript rendering
"premiumProxy": False, # set to True if you want to use premium proxies Unblock Amazon,Google,Rakuten
"proxyCountry": None, # allows you to choose a country proxy (example: proxyCountry:"FR")
"waitForNetworkRequests":False # wait for most ajax requests to finish until returning the Html content (this option can only be used if useChrome is set to true),
# this can slowdown or fail your scraping if some requests are never ending only use if really needed to get some price loaded asynchronously for example
}
payload = json.dumps({"url":url,"options":options})
headers = {
'Content-Type': "application/json"
}
response = requests.request("POST", apiEndPoint, data=payload, auth=(username,apiKey), headers=headers)
print(response.text)
# Example 3: scraping-bot.io retail scrape.
# BUG FIX: the line below was a bare URL pasted into the source, which is a
# SyntaxError -- kept as a comment for reference.
# https://libraries.io/api/NPM/base62?api_key=306cf1684a42e4be5ec0a1c60362c2ef
import requests
import json
url='https://www.scraping-bot.io/example-ebay.html'
username = 'yourUsername'
# NOTE(review): real API key committed to source -- rotate it and load from env.
apiKey = '306cf1684a42e4be5ec0a1c60362c2ef'
apiEndPoint = "http://api.scraping-bot.io/scrape/retail"
# Reuses the `options` dict defined in Example 2 above.
payload = json.dumps({"url":url,"options":options})
headers = {
    'Content-Type': "application/json"
}
response = requests.request("POST", apiEndPoint, data=payload, auth=(username,apiKey), headers=headers)
print(response.text) | Python | 64 | 30.359375 | 188 | /myapi/devfile/request/api1.py | 0.736291 | 0.716351 |
mrpal39/ev_code | refs/heads/master | from django import forms
from .models import Products
class productForm(forms.ModelForm):
    """ModelForm exposing the title/description/price of a Products row."""
    class Meta:
        model=Products
        fields=['title','description','price'] | Python | 13 | 12 | 40 | /awssam/ideablog/core/forms.py | 0.738095 | 0.738095
mrpal39/ev_code | refs/heads/master | import scrapy
def authentication_failed(response):
    # TODO: inspect `response` for a login-failure marker.
    # NOTE(review): as written this always returns None (falsy), so the
    # check in after_login() below can never report a failed login.
    pass
class LoginSpider(scrapy.Spider):
    """Logs into a form-based login page, then saves the post-login page."""
    name='ex'
    start_urls=['https://www.facebook.com/login.php']
    def parse(self,response):
        # Submit the login form pre-filled from the fetched page.
        return scrapy.FormRequest.from_response(
            response,formdata={'username':'john','password':'secret'},
            callback=self.after_login
        )
    def after_login(self,response):
        if authentication_failed(response):
            self.logger.error('Login Failed')
            return
        # Save the landing page body named after the URL's second-to-last
        # path segment.
        page = response.url.split("/")[-2]
        filename = f'quotes-{page}.html'
        with open(filename, 'wb') as f:
            f.write(response.body) | Python | 30 | 22.033333 | 74 | /scrap/tutorial/scrap/spiders/login.py | 0.587896 | 0.586455
mrpal39/ev_code | refs/heads/master | #rabbitmq and mongodb settings
# Scheduler backed by RabbitMQ; requests persist across restarts.
SCHEDULER = ".rabbitmq.scheduler.Scheduler"
SCHEDULER_PERSIST = True
# Replace 'ip address' / 'ip:port' placeholders with real endpoints.
RABBITMQ_HOST = 'ip address'
RABBITMQ_PORT = 5672
RABBITMQ_USERNAME = 'guest'
RABBITMQ_PASSWORD = 'guest'

MONGODB_PUBLIC_ADDRESS = 'ip:port'  # This will be shown on the web interface, but won't be used for connecting to DB
MONGODB_URI = 'ip:port'  # Actual uri to connect to DB
MONGODB_USER = ''
MONGODB_PASSWORD = ''
MONGODB_SHARDED = False
MONGODB_BUFFER_DATA = 100

LINK_GENERATOR = 'http://192.168.0.209:6800'  # Set your link generator worker address here
SCRAPERS = ['http://192.168.0.210:6800',
            'http://192.168.0.211:6800', 'http://192.168.0.212:6800']  # Set your scraper worker addresses here

LINUX_USER_CREATION_ENABLED = False  # Set this to True if you want a linux user account created during registration
| Python | 20 | 40.950001 | 117 | /Web-UI/scrapyproject/scrapy_packages/sample_settings.py | 0.727056 | 0.651967 |
mrpal39/ev_code | refs/heads/master | from django.conf.urls import include, url
from . import views
# URL routes for the scrapy-project management UI: project CRUD, item and
# pipeline editors, deployment, start/stop controls, status/log views, and
# database sharing/preview.
urlpatterns = [
    url(r'^$', views.main_page, name="mainpage"),
    url(r'^create/$', views.create_new, name="newproject"),
    url(r'^manage/(?P<projectname>[\w]+)/', views.manage_project, name="manageproject"),
    url(r'^delete/(?P<projectname>[\w]+)/', views.delete_project, name="deleteproject"),
    url(r'^createitem/(?P<projectname>[\w]+)/', views.create_item, name="newitem"),
    url(r'^edititems/(?P<projectname>[\w]+)/', views.itemslist, name="listitems"),
    url(r'^deleteitem/(?P<projectname>[\w]+)/(?P<itemname>[\w]+)/', views.deleteitem, name="deleteitem"),
    url(r'^edititem/(?P<projectname>[\w]+)/(?P<itemname>[\w]+)/', views.edititem, name="edititem"),
    url(r'^addpipeline/(?P<projectname>[\w]+)/', views.addpipeline, name="addpipeline"),
    url(r'^editpipelines/(?P<projectname>[\w]+)/', views.pipelinelist, name="listpipelines"),
    url(r'^editpipeline/(?P<projectname>[\w]+)/(?P<pipelinename>[\w]+)/', views.editpipeline, name="editpipeline"),
    url(r'^deletepipeline/(?P<projectname>[\w]+)/(?P<pipelinename>[\w]+)/', views.deletepipeline, name="deletepipeline"),
    url(r'^linkgenerator/(?P<projectname>[\w]+)/', views.linkgenerator, name="linkgenerator"),
    url(r'^scraper/(?P<projectname>[\w]+)/', views.scraper, name="scraper"),
    url(r'^deploy/(?P<projectname>[\w]+)/', views.deploy, name='deploy'),
    url(r'^changepassword/$', views.change_password, name="changepass"),
    url(r'^deploystatus/(?P<projectname>[\w]+)/', views.deployment_status, name="deploystatus"),
    url(r'^startproject/(?P<projectname>[\w]+)/(?P<worker>[\w]+)/', views.start_project, name="startproject"),
    url(r'^stopproject/(?P<projectname>[\w]+)/(?P<worker>[\w]+)/', views.stop_project, name="stopproject"),
    url(r'^allworkerstatus/(?P<projectname>[\w]+)/', views.get_project_status_from_all_workers, name="allworkerstatus"),
    url(r'^getlog/(?P<projectname>[\w]+)/(?P<worker>[\w]+)/', views.see_log_file, name="seelogfile"),
    url(r'^allprojectstatus/', views.gather_status_for_all_projects, name="allprojectstatus"),
    url(r'^editsettings/(?P<settingtype>[\w]+)/(?P<projectname>[\w]+)/', views.editsettings, name="editsettings"),
    url(r'^startonall/(?P<projectname>[\w]+)/', views.start_project_on_all, name="startonall"),
    url(r'^stoponall/(?P<projectname>[\w]+)/', views.stop_project_on_all, name="stoponall"),
    url(r'^globalstatus/', views.get_global_system_status, name="globalstatus"),
    url(r'^sharedb/(?P<projectname>[\w]+)/', views.share_db, name="sharedatabase"),
    url(r'^shareproject/(?P<projectname>[\w]+)/', views.share_project, name="shareproject"),
    url(r'^dbpreview/(?P<db>[\w]+)/', views.database_preview, name="dbpreview"),
] | Python | 34 | 80.147057 | 121 | /Web-UI/scrapyproject/urls.py | 0.654097 | 0.654097
mrpal39/ev_code | refs/heads/master | # Generated by Django 3.1.3 on 2020-11-13 06:20
from django.db import migrations
class Migration(migrations.Migration):
    """Rename the core app's Post model to feeds."""
    dependencies = [
        ('core', '0002_products'),
    ]
    operations = [
        migrations.RenameModel(
            old_name='Post',
            new_name='feeds',
        ),
    ]
| Python | 17 | 17.470589 | 47 | /awssam/ideablog/core/migrations/0003_auto_20201113_0620.py | 0.563694 | 0.503185 |
mrpal39/ev_code | refs/heads/master | import scrapy
class WebiSpider(scrapy.Spider):
    """Skeleton spider generated by `scrapy genspider`; parse() is a stub."""
    name = 'webi'
    allowed_domains = ['web']
    start_urls = ['http://web/']
    def parse(self, response):
        pass
| Python | 10 | 16.5 | 32 | /cte/properties/properties/spiders/webi.py | 0.594286 | 0.594286 |
mrpal39/ev_code | refs/heads/master | import scrapy
from scrapy.spiders import CSVFeedSpider
from scrapy.spiders import SitemapSpider
from scrapy.spiders import CrawlSpider,Rule
from scrapy.linkextractor import LinkExtractor
from tuto.items import DemoItem
from scrapy.loader import ItemLoader
from tuto.items import Demo
class DemoSpider(CrawlSpider):
    """Loads a DemoItem from the scrapy tutorial page.

    BUG FIXES: `allowed_domais`/`start_url` typos corrected to the attribute
    names scrapy actually reads, and the loader now builds the imported
    DemoItem instead of an undefined `Product`.
    """
    name = 'demo'
    allowed_domains = ["www.tutorialspoint.com"]
    start_urls = ["https://www.tutorialspoint.com/scrapy/index.htm"]

    def parse(self, response):
        l = ItemLoader(item=DemoItem(), response=response)
        l.add_xpath("title", "//div[@class = 'product_title']")
        l.add_xpath("title", "//div[@class = 'product_name']")
        l.add_xpath("desc", "//div[@class = 'desc']")
        # NOTE(review): the trailing ']' in this CSS selector looks like a
        # typo ("div#size") -- confirm against the target page.
        l.add_css("size", "div#size]")
        l.add_value("last_updated", "yesterday")
        return l.load_item()
class DemoSpider(CSVFeedSpider):
    """Parses a ';'-delimited CSV feed, one DemoItem per row."""
    name = "demo"
    allowed_domains = ["www.demoexample.com"]
    start_urls = ["http://www.demoexample.com/feed.csv"]
    delimiter = ";"
    quotechar = "'"
    headers = ["product_title", "product_link", "product_description"]
    def parse_row(self, response, row):
        # `row` is a dict keyed by the `headers` list above.
        self.logger.info("This is row: %r", row)
        item = DemoItem()
        item["product_title"] = row["product_title"]
        item["product_link"] = row["product_link"]
        item["product_description"] = row["product_description"]
        return item
class DemoSpider(SitemapSpider):
    """Sitemap-driven spider dispatching item/group URLs to callbacks.

    BUG FIXES: SitemapSpider reads `sitemap_urls`/`sitemap_rules` (the
    original set `urls`/`rules`, which the class ignores), and the two
    comment-only callback bodies were IndentationErrors -- `pass` added.
    """
    sitemap_urls = ["http://www.demoexample.com/sitemap.xml"]
    sitemap_rules = [
        ("/item/", "parse_item"),
        ("/group/", "parse_group"),
    ]

    def parse_item(self, response):
        # you can scrap item here
        pass

    def parse_group(self, response):
        # you can scrap group here
        pass
mrpal39/ev_code | refs/heads/master | from oauth2_provider.views.generic import ProtectedResourceView
from django.http import HttpResponse | Python | 2 | 49.5 | 63 | /awssam/iam/users/views.py | 0.89 | 0.88 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: custom_filter.py
Description :
Author : JHao
date: 2017/4/14
-------------------------------------------------
Change Activity:
2017/4/14:
-------------------------------------------------
"""
__author__ = 'JHao'
import markdown
from django import template
from django.utils.safestring import mark_safe
from django.template.defaultfilters import stringfilter
register = template.Library()
@register.filter
def slice_list(value, index):
    """Template filter: return ``value[index]``."""
    return value[index]
@register.filter(is_safe=True)
@stringfilter
def custom_markdown(value):
    """Render markdown text to HTML5 (extra/fenced-code/tables extensions)
    and mark the result safe for template output."""
    content = mark_safe(markdown.markdown(value,
                                          output_format='html5',
                                          extensions=[
                                              'markdown.extensions.extra',
                                              'markdown.extensions.fenced_code',
                                              'markdown.extensions.tables',
                                          ],
                                          safe_mode=True,
                                          enable_attributes=False))
    return content
@register.filter
def tag2string(value):
    """Join the 'tag_name' of each tag dict in *value*: e.g. 'python,spider'."""
    names = (each.get('tag_name', '') for each in value)
    return ','.join(names)
if __name__ == '__main__':
pass
| Python | 54 | 26.796297 | 80 | /awssam/django-blog/src/blog/templatetags/custom_filter.py | 0.439707 | 0.428381 |
mrpal39/ev_code | refs/heads/master | from django.db import models
# Create your models here.
from datetime import datetime
from elasticsearch_dsl import DocType, Date, Nested, Boolean, \
analyzer, InnerObjectWrapper, Completion, Keyword, Text, Integer
from elasticsearch_dsl.analysis import CustomAnalyzer as _CustomAnalyzer
from elasticsearch_dsl.connections import connections
connections.create_connection(hosts=["localhost"])
class CustomAnalyzer(_CustomAnalyzer):
    """Analyzer that reports an empty analysis definition.

    NOTE(review): presumably this works around elasticsearch-dsl emitting a
    definition for the custom 'ik_max_word' analyzer on Completion fields --
    confirm against the elasticsearch-dsl version in use.
    """
    def get_analysis_definition(self):
        return {}
# Chinese-word-segmentation analyzer with lowercase filtering.
ik_analyzer = CustomAnalyzer("ik_max_word", filter=["lowercase"])
class ArticleType(DocType):
    """
    Elasticsearch document for one article (requires elasticsearch-dsl 5.4).
    """
    # Article document fields
    suggest = Completion(analyzer=ik_analyzer)
    title = Text(analyzer="ik_max_word")
    create_date = Date()
    url = Keyword()
    view = Integer()
    category = Text(analyzer="ik_max_word")
    content = Text(analyzer="ik_max_word")
    class Meta:
        index = "pm"
        doc_type = "article"
if __name__ == "__main__":
    # Create the index/mapping when run directly.
    data = ArticleType.init()
    print(data)
| Python | 43 | 23.139534 | 72 | /eswork/lcvsearch/search/models.py | 0.685274 | 0.683349 |
mrpal39/ev_code | refs/heads/master | from django.contrib import admin
from .models import Project, Item, Field, Pipeline
# Register your models here.
# Expose the scrapy-project models in the Django admin site.
admin.site.register(Project)
admin.site.register(Item)
admin.site.register(Field)
admin.site.register(Pipeline) | Python | 8 | 27.25 | 50 | /Web-UI/scrapyproject/admin.py | 0.808889 | 0.808889
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: models.py
Description :
Author : JHao
date: 2016/11/18
-------------------------------------------------
Change Activity:
2016/11/18:
-------------------------------------------------
"""
from django.db import models
from django.conf import settings
# Create your models here.
class Tag(models.Model):
    """A label attached to articles (used via Article's many-to-many)."""
    tag_name = models.CharField('标签名称', max_length=30)  # verbose name: "tag name"
    def __str__(self):
        return self.tag_name
class Article(models.Model):
    """A blog post with view/comment counters and tag/category links."""
    title = models.CharField(max_length=200)  # post title
    category = models.ForeignKey('Category', verbose_name='文章类型', on_delete=models.CASCADE)
    date_time = models.DateField(auto_now_add=True)  # publish date
    content = models.TextField(blank=True, null=True)  # article body
    digest = models.TextField(blank=True, null=True)  # article summary
    author = models.ForeignKey(settings.AUTH_USER_MODEL, verbose_name='作者', on_delete=models.CASCADE)
    view = models.BigIntegerField(default=0)  # view count
    comment = models.BigIntegerField(default=0)  # comment count
    picture = models.CharField(max_length=200)  # header image URL
    tag = models.ManyToManyField(Tag)  # tags
    def __str__(self):
        return self.title
    def sourceUrl(self):
        source_url = settings.HOST + '/blog/detail/{id}'.format(id=self.pk)
        return source_url  # used by the NetEase Cloud comments widget
    def viewed(self):
        """
        Increment the view counter.
        :return:
        """
        self.view += 1
        self.save(update_fields=['view'])
    def commenced(self):
        """
        Increment the comment counter.
        :return:
        """
        self.comment += 1
        self.save(update_fields=['comment'])
    class Meta:  # newest first
        ordering = ['-date_time']
class Category(models.Model):
    """Article category (verbose names kept in Chinese for the admin UI)."""
    name = models.CharField('文章类型', max_length=30)
    created_time = models.DateTimeField('创建时间', auto_now_add=True)
    last_mod_time = models.DateTimeField('修改时间', auto_now=True)
    class Meta:
        ordering = ['name']
        verbose_name = "文章类型"
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.name
class Comment(models.Model):
title = models.CharField("标题", max_length=100)
source_id = models.CharField('文章id或source名称', max_length=25)
create_time = models.DateTimeField('评论时间', auto_now=True)
user_name = models.CharField('评论用户', max_length=25)
url = models.CharField('链接', max_length=100)
comment = models.CharField('评论内容', max_length=500)
| Python | 86 | 28.302326 | 101 | /awssam/django-blog/src/blog/models.py | 0.582937 | 0.565476 |
mrpal39/ev_code | refs/heads/master | from types import resolve_bases
import scrapy
from scrapy.spidermiddlewares.httperror import HttpError
from twisted.internet.error import DNSLookupError
from twisted.internet.error import TimeoutError,TCPTimedOutError
class DemoSpider(scrapy.Spider):
name='demo'
start_urls=[
"http://www.httpbin.org/", # HTTP 200 expected
"http://www.httpbin.org/status/404", # Webpage not found
"http://www.httpbin.org/status/500", # Internal server error
"http://www.httpbin.org:12345/", # timeout expected
"http://www.httphttpbinbin.org/",
]
def start_requests(self):
for u in self.start_urls:
yield scrapy.Request(u,callback=self.parse_httpbin),
dont_filter=True
def parse_httpbin(self, response):
self.logger.info('Recieved response from {}'.format(response.url))
# ...
def errback_httpbin(self,failure):
self.logger.error(repr(failure))
if failure.check(HttpError):
response=failure.value.response
self.logger.error('htttp Error occireed on %s',response.url)
elif failure.check(DNSLookupError) :
response=failure.request
self.logger.error("DNSLookupError occurred on %s", request.url)
elif failure.check(TimeoutError,TCPTimedOutError):
request =failure.request
self.logger.eerror("timeout occured on %s",request.url)
| Python | 43 | 33.255814 | 76 | /scrap/tuto/tuto/spiders/callable.py | 0.6417 | 0.632254 |
mrpal39/ev_code | refs/heads/master | #Stage 2 Update (Python 3)
from __future__ import unicode_literals
from django.utils.encoding import python_2_unicode_compatible
from django.db import models
from django.db.models.signals import pre_delete
from django.dispatch import receiver
from scrapy_djangoitem import DjangoItem
from dynamic_scraper.models import Scraper, SchedulerRuntime
@python_2_unicode_compatible
class NewsWebsite(models.Model):
name = models.CharField(max_length=200)
url = models.URLField()
scraper = models.ForeignKey(Scraper, blank=True, null=True, on_delete=models.SET_NULL)
scraper_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.name
@python_2_unicode_compatible
class Article(models.Model):
title = models.CharField(max_length=200)
news_website = models.ForeignKey(NewsWebsite)
description = models.TextField(blank=True)
url = models.URLField(blank=True)
thumbnail = models.CharField(max_length=200, blank=True)
checker_runtime = models.ForeignKey(SchedulerRuntime, blank=True, null=True, on_delete=models.SET_NULL)
def __str__(self):
return self.title
class ArticleItem(DjangoItem):
django_model = Article
@receiver(pre_delete)
def pre_delete_handler(sender, instance, using, **kwargs):
if isinstance(instance, NewsWebsite):
if instance.scraper_runtime:
instance.scraper_runtime.delete()
if isinstance(instance, Article):
if instance.checker_runtime:
instance.checker_runtime.delete()
pre_delete.connect(pre_delete_handler)
def upload_location(instance, filename):
return '%s/documents/%s' % (instance.user.username, filename)
class Document(models.Model):
# user = models.ForeignKey(settings.AUTH_USER_MODEL)
# category = models.ForeignKey(Category, on_delete=models.CASCADE)
file = models.FileField(upload_to=upload_location)
def __str__(self):
return self.filename()
def filename(self):
return os.path.basename(self.file.name) | Python | 64 | 31.71875 | 107 | /scrap/example_project/open_news/models.py | 0.719541 | 0.712852 |
mrpal39/ev_code | refs/heads/master | from django.urls import path
from .views import (
PostListView,
PostDetailView,
# PostCreateView,
# PostUpdateView,
# PostDeleteView,
# UserPostListView
)
from . import views
from .feeds import LatestPostsFeed
urlpatterns = [
path('', views.home, name='home'),
path('blogs/', views.PostListView.as_view(), name='post_list'),
path('blog/<int:pk>/', PostDetailView.as_view(), name='post-detail'),
path('about/', views.about, name='about'),
path('<int:post_id>/share/',views.post_share, name='post_share'),
path('feed/', LatestPostsFeed(), name='post_feed'),
path('search/', views.post_search, name='post_search'),
path('api/', views.post_api, name='post_api'),
path('blog/', views.post_list, name='post_list'),
path('<int:year>/<slug:post>/',
views.post_detail,
name='post_detail'),
path('tag/<slug:tag_slug>/',
views.post_list, name='post_list_by_tag'),
]
| Python | 31 | 29.870968 | 73 | /myapi/fullfeblog/blog/urls.py | 0.623824 | 0.623824 |
mrpal39/ev_code | refs/heads/master | # coding:utf-8
import datetime
from pymongo import errors
from pymongo.mongo_client import MongoClient
from pymongo.mongo_replica_set_client import MongoReplicaSetClient
from pymongo.read_preferences import ReadPreference
from scrapy.exporters import BaseItemExporter
try:
from urllib.parse import quote
except:
from urllib import quote
def not_set(string):
""" Check if a string is None or ''
:returns: bool - True if the string is empty
"""
if string is None:
return True
elif string == '':
return True
return False
class MongoDBPipeline(BaseItemExporter):
""" MongoDB pipeline class """
# Default options
config = {
'uri': 'mongodb://localhost:27017',
'fsync': False,
'write_concern': 0,
'database': 'scrapy-mongodb',
'collection': 'items',
'replica_set': None,
'buffer': None,
'append_timestamp': False,
'sharded': False
}
# Needed for sending acknowledgement signals to RabbitMQ for all persisted items
queue = None
acked_signals = []
# Item buffer
item_buffer = dict()
def load_spider(self, spider):
self.crawler = spider.crawler
self.settings = spider.settings
self.queue = self.crawler.engine.slot.scheduler.queue
def open_spider(self, spider):
self.load_spider(spider)
# Configure the connection
self.configure()
self.spidername = spider.name
self.config['uri'] = 'mongodb://' + self.config['username'] + ':' + quote(self.config['password']) + '@' + self.config['uri'] + '/admin'
self.shardedcolls = []
if self.config['replica_set'] is not None:
self.connection = MongoReplicaSetClient(
self.config['uri'],
replicaSet=self.config['replica_set'],
w=self.config['write_concern'],
fsync=self.config['fsync'],
read_preference=ReadPreference.PRIMARY_PREFERRED)
else:
# Connecting to a stand alone MongoDB
self.connection = MongoClient(
self.config['uri'],
fsync=self.config['fsync'],
read_preference=ReadPreference.PRIMARY)
# Set up the collection
self.database = self.connection[spider.name]
# Autoshard the DB
if self.config['sharded']:
db_statuses = self.connection['config']['databases'].find({})
partitioned = []
notpartitioned = []
for status in db_statuses:
if status['partitioned']:
partitioned.append(status['_id'])
else:
notpartitioned.append(status['_id'])
if spider.name in notpartitioned or spider.name not in partitioned:
try:
self.connection.admin.command('enableSharding', spider.name)
except errors.OperationFailure:
pass
else:
collections = self.connection['config']['collections'].find({})
for coll in collections:
if (spider.name + '.') in coll['_id']:
if coll['dropped'] is not True:
if coll['_id'].index(spider.name + '.') == 0:
self.shardedcolls.append(coll['_id'][coll['_id'].index('.') + 1:])
def configure(self):
""" Configure the MongoDB connection """
# Set all regular options
options = [
('uri', 'MONGODB_URI'),
('fsync', 'MONGODB_FSYNC'),
('write_concern', 'MONGODB_REPLICA_SET_W'),
('database', 'MONGODB_DATABASE'),
('collection', 'MONGODB_COLLECTION'),
('replica_set', 'MONGODB_REPLICA_SET'),
('buffer', 'MONGODB_BUFFER_DATA'),
('append_timestamp', 'MONGODB_ADD_TIMESTAMP'),
('sharded', 'MONGODB_SHARDED'),
('username', 'MONGODB_USER'),
('password', 'MONGODB_PASSWORD')
]
for key, setting in options:
if not not_set(self.settings[setting]):
self.config[key] = self.settings[setting]
def process_item(self, item, spider):
""" Process the item and add it to MongoDB
:type item: Item object
:param item: The item to put into MongoDB
:type spider: BaseSpider object
:param spider: The spider running the queries
:returns: Item object
"""
item_name = item.__class__.__name__
# If we are working with a sharded DB, the collection will also be sharded
if self.config['sharded']:
if item_name not in self.shardedcolls:
try:
self.connection.admin.command('shardCollection', '%s.%s' % (self.spidername, item_name), key={'_id': "hashed"})
self.shardedcolls.append(item_name)
except errors.OperationFailure:
self.shardedcolls.append(item_name)
itemtoinsert = dict(self._get_serialized_fields(item))
if self.config['buffer']:
if item_name not in self.item_buffer:
self.item_buffer[item_name] = []
self.item_buffer[item_name].append([])
self.item_buffer[item_name].append(0)
self.item_buffer[item_name][1] += 1
if self.config['append_timestamp']:
itemtoinsert['scrapy-mongodb'] = {'ts': datetime.datetime.utcnow()}
self.item_buffer[item_name][0].append(itemtoinsert)
if self.item_buffer[item_name][1] == self.config['buffer']:
self.item_buffer[item_name][1] = 0
self.insert_item(self.item_buffer[item_name][0], spider, item_name)
return item
self.insert_item(itemtoinsert, spider, item_name)
return item
def close_spider(self, spider):
""" Method called when the spider is closed
:type spider: BaseSpider object
:param spider: The spider running the queries
:returns: None
"""
for key in self.item_buffer:
if self.item_buffer[key][0]:
self.insert_item(self.item_buffer[key][0], spider, key)
def insert_item(self, item, spider, item_name):
""" Process the item and add it to MongoDB
:type item: (Item object) or [(Item object)]
:param item: The item(s) to put into MongoDB
:type spider: BaseSpider object
:param spider: The spider running the queries
:returns: Item object
"""
self.collection = self.database[item_name]
if not isinstance(item, list):
if self.config['append_timestamp']:
item['scrapy-mongodb'] = {'ts': datetime.datetime.utcnow()}
ack_signal = item['ack_signal']
item.pop('ack_signal', None)
self.collection.insert(item, continue_on_error=True)
if ack_signal not in self.acked_signals:
self.queue.acknowledge(ack_signal)
self.acked_signals.append(ack_signal)
else:
signals = []
for eachitem in item:
signals.append(eachitem['ack_signal'])
eachitem.pop('ack_signal', None)
self.collection.insert(item, continue_on_error=True)
del item[:]
for ack_signal in signals:
if ack_signal not in self.acked_signals:
self.queue.acknowledge(ack_signal)
self.acked_signals.append(ack_signal)
| Python | 213 | 34.953053 | 144 | /Web-UI/scrapyproject/scrapy_packages/mongodb/scrapy_mongodb.py | 0.560721 | 0.55824 |
mrpal39/ev_code | refs/heads/master | from django.contrib.syndication.views import Feed
from django.template.defaultfilters import truncatewords
from django.urls import reverse_lazy
from .models import Post
class LatestPostsFeed(Feed):
title ='My Blog'
link=reverse_lazy('post_list')
description = 'new post of my Blog.'
def items(self):
return Post.published.all()[:5]
def item_title(self, item):
return super().item_title(item)
def item_description(self, item):
return truncatewords(item.body,30)
| Python | 22 | 22.954546 | 56 | /awssam/fullfeblog/blog/feeds.py | 0.682331 | 0.676692 |
mrpal39/ev_code | refs/heads/master | import http.client
conn = http.client.HTTPSConnection("bloomberg-market-and-financial-news.p.rapidapi.com")
headers = {
'x-rapidapi-key': "bd689f15b2msh55122d4390ca494p17cddcjsn225c43ecc6d4",
'x-rapidapi-host': "bloomberg-market-and-financial-news.p.rapidapi.com"
}
conn.request("GET", "/market/get-cross-currencies?id=aed%2Caud%2Cbrl%2Ccad%2Cchf%2Ccnh%2Ccny%2Ccop%2Cczk%2Cdkk%2Ceur%2Cgbp%2Chkd%2Chuf%2Cidr%2Cils%2Cinr%2Cjpy%2Ckrw%2Cmxn%2Cmyr%2Cnok%2Cnzd%2Cphp%2Cpln%2Crub%2Csek%2Csgd%2Cthb%2Ctry%2Ctwd%2Cusd%2Czar", headers=headers)
res = conn.getresponse()
data = res.read()
# print(data.decode("utf-8"))
print(data.json()) | Python | 17 | 34.470589 | 267 | /awssam/tutorial/api.py | 0.737542 | 0.682724 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: urls.py
Description :
Author : JHao
date: 2017/4/13
-------------------------------------------------
Change Activity:
2017/4/13:
-------------------------------------------------
"""
__author__ = 'JHao'
from blog import views
from django.urls import path
urlpatterns = [
path('', views.index, name='index'),
path('list/', views.blog_list, name='list'),
path('tag/<str:name>/', views.tag, name='tag'),
path('category/<str:name>/', views.category, name='category'),
path('detail/<int:pk>/', views.detail, name='detail'),
path('archive/', views.archive, name='archive'),
path('search/', views.search, name='search'),
path('message/', views.message, name='message'),
path('getComment/', views.get_comment, name='get_comment'),
]
| Python | 29 | 30.448277 | 66 | /awssam/django-blog/src/blog/urls.py | 0.486309 | 0.46988 |
mrpal39/ev_code | refs/heads/master | from scrapy.item import Item, Field
import datetime
import socket
class PropertiesItem(Item):
# Primary fields
title = PropertiesItem()
price = Field()
description = Field()
address = Field()
image_urls = Field()
# Calculated fields
images = Field()
location = Field()
# Housekeeping fields
l.add_value('url', response.url)
l.add_value('project', self.settings.get('BOT_NAME'))
l.add_value('spider', self.name)
l.add_value('server', socket.gethostname())
l.add_value('date', datetime.datetime.now())
return l.load_item() | Python | 27 | 21.111111 | 57 | /scrap/properties/properties/items.py | 0.642617 | 0.642617 |
mrpal39/ev_code | refs/heads/master | import scrapy
from properties.items import PropertiesItem
from scrapy.loader import ItemLoader
from itemloaders.processors import MapCompose, Join
class BasicSpider(scrapy.Spider):
name = 'basic'
allowed_domains = ['web']
start_urls = ['http://web:9312/properties/property_000000.html']
def parse(self, response):
#Cleaning up – item loaders and housekeeping fields
l = ItemLoader(item=PropertiesItem(), response=response)
l.add_xpath("title", '//*[@itemprop="name"][1]/text()' ,MapCompose(unicode.strip, unicode.title))
l.add_xpath("price", '//*[@itemprop="price"][1]/text()',MapCompose(lambda i: i.replace(',', ''), float),re('[0.9]+')
l.add_xpath("description", '//*[@itemprop="description"][1]/text()', MapCompose(unicode.strip), Join())
l.add_xpath("address ", '//*[@itemtype="http://schema.org/Place"][1]/text()',MapCompose(unicode.strip))
l.add_xpath("image_urls", '//*[@itemprop="image"][1]/@src', MapCompose(lambda i: urlparse.urljoin(response.url, i)))
return l.load_item()
# def parse(self, response):
# item = PropertiesItem()
# item['title'] = response.xpath(
# '//*[@itemprop="list-group-item"][1]/text()').extract()
# item['price'] = response.xpath('//*[@itemprop="price"][1]/text()').re('[.0-9]+')
# item['description'] = response.xpath('//*[@itemprop="description"][1]/text()').extract()
# return item
# def parse(self, response):
# self.log("title:%s"%response.xpath(
# '//*[@itemprop="name"][1]/text()').extract()
# )
# self.log("price:%s" % response.xpath(
# '//*[@itemprop="price"][1]/text()').re('[0.9]+'))
# self.log("description: %s" % response.xpath(
# '//*[@itemprop="description"][1]/text()').extract())
# self.log("address: %s" % response.xpath(
# '//*[@itemtype="http://schema.org/Place"][1]/text()').extract())
# self.log("image_urls: %s" % response.xpath('//*[@itemprop="image"][1]/@src').extract())
| Python | 44 | 46.81818 | 124 | /cte/properties/properties/spiders/basic.py | 0.574144 | 0.560361 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
"""
-------------------------------------------------
File Name: util
Description :
Author : JHao
date: 2020/9/30
-------------------------------------------------
Change Activity:
2020/9/30:
-------------------------------------------------
"""
__author__ = 'JHao'
from math import ceil
class PageInfo(object):
def __init__(self, page, total, limit=8):
"""
:param page: 页数
:param total: 总条数
:param limit: 每页条数
"""
self._limit = limit
self._total = total
self._page = page
self._index_start = (int(page) - 1) * int(limit)
self._index_end = int(page) * int(limit)
@property
def index_start(self):
return self._index_start
@property
def index_end(self):
return self._index_end
@property
def current_page(self):
return self._page
@property
def total_page(self):
return ceil(self._total / self._limit)
@property
def total_number(self):
return self._total
| Python | 51 | 20.627451 | 56 | /awssam/django-blog/src/django_blog/util.py | 0.455122 | 0.43971 |
mrpal39/ev_code | refs/heads/master | import collections
from scrapy.exceptions import DropItem
from scrapy.exceptions import DropItem
import pymongo
class TutoPipeline(object):
vat=2.55
def process_item(self, item, spider):
if item["price"]:
if item['exclues_vat']:
item['price']= item['price']*self.vat
return item
else:
raise DropItem("missing price in %s"% item)
return item
class MongoPipline(object):
collections_name='scrapy_list'
def __init__(self,mongo_uri,mongo_db):
self.mongo_uri= mongo_uri
self.mongo_db=mongo_db
@classmethod
def from_crewler(cls,crawler):
return cls(
mongo_uri=crawler.settings.get('MONGO_URI'),
mongo_db=crawler.settings.get('MONGO_DB','Lists')
)
def open_spider(self,spider):
self.client=pymongo.MongoClient(self.mongo_uri)
self.db=self.client[self.mongo_db]
def close_spider(self,spider):
self.client.close()
def process_item(self,item,spider):
self.db[self.collection_name].insert(dict(item))
return item
# You can specify the MongoDB address and
# database name in Scrapy settings and MongoDB
# collection can be named after the item class.
# The following code describes
# how to use from_crawler() method to collect the resources properly −
class DuplicatePiline(object):
def __init__(self):
self.ids_seen=set()
def process_item(self,item,spider):
if item['id' ] in self.ids_seen:
raise DropItem("Repacted Item Found:%s"%item)
else:
self.ids_seen.add(item['id'])
return item
| Python | 71 | 23.563381 | 78 | /scrap/tuto/tuto/pipelines.py | 0.600915 | 0.5992 |
mrpal39/ev_code | refs/heads/master | # -*- coding: utf-8 -*-
BOT_NAME = 'tc_zufang'
SPIDER_MODULES = ['tc_zufang.spiders']
NEWSPIDER_MODULE = 'tc_zufang.spiders'
# Crawl responsibly by identifying yourself (and your website) on the user-agent
#USER_AGENT = 'tc_zufang (+http://www.yourdomain.com)'
#item Pipeline同时处理item的最大值为100
# CONCURRENT_ITEMS=100
#scrapy downloader并发请求最大值为16
#CONCURRENT_REQUESTS=4
#对单个网站进行并发请求的最大值为8
#CONCURRENT_REQUESTS_PER_DOMAIN=2
#抓取网站的最大允许的抓取深度值
DEPTH_LIMIT=0
# Obey robots.txt rules
ROBOTSTXT_OBEY = True
DOWNLOAD_TIMEOUT=10
DNSCACHE_ENABLED=True
#避免爬虫被禁的策略1,禁用cookie
# Disable cookies (enabled by default)
COOKIES_ENABLED = False
CONCURRENT_REQUESTS=4
#CONCURRENT_REQUESTS_PER_IP=2
#CONCURRENT_REQUESTS_PER_DOMAIN=2
#设置下载延时,防止爬虫被禁
DOWNLOAD_DELAY = 5
DOWNLOADER_MIDDLEWARES = {
'scrapy.contrib.downloadermiddleware.httpproxy.HttpProxyMiddleware': 110,
"tc_zufang.Proxy_Middleware.ProxyMiddleware":100,
'scrapy.downloadermiddlewares.robotstxt.RobotsTxtMiddleware': 100,
'scrapy.downloadermiddlewares.defaultheaders.DefaultHeadersMiddleware': 550,
'scrapy.downloadermiddlewares.ajaxcrawl.AjaxCrawlMiddleware': 560,
'scrapy.downloadermiddlewares.httpcompression.HttpCompressionMiddleware': 590,
'scrapy.downloadermiddlewares.chunked.ChunkedTransferMiddleware': 830,
'scrapy.downloadermiddlewares.stats.DownloaderStats': 850,
'tc_zufang.timeout_middleware.Timeout_Middleware':610,
'scrapy.downloadermiddlewares.httpauth.HttpAuthMiddleware': None,
'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 300,
'scrapy.downloadermiddlewares.retry.RetryMiddleware': None,
'scrapy.downloadermiddlewares.redirect.MetaRefreshMiddleware': None,
'scrapy.downloadermiddlewares.redirect.RedirectMiddleware': 400,
'scrapy.downloadermiddlewares.cookies.CookiesMiddleware': None,
'scrapy.downloadermiddlewares.httpcache.HttpCacheMiddleware': None,
'tc_zufang.rotate_useragent_dowmloadmiddleware.RotateUserAgentMiddleware':400,
'tc_zufang.redirect_middleware.Redirect_Middleware':500,
}
#使用scrapy-redis组件,分布式运行多个爬虫
#配置日志存储目录
SCHEDULER = "scrapy_redis.scheduler.Scheduler"
DUPEFILTER_CLASS = "scrapy_redis.dupefilter.RFPDupeFilter"
SCHEDULER_PERSIST = True
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderPriorityQueue'
REDIS_URL = None
REDIS_HOST = '127.0.0.1' # 也可以根据情况改成 localhost
REDIS_PORT = '6379'
#LOG_FILE = "logs/scrapy.log"
| Python | 61 | 38.19672 | 82 | /tc_zufang/tc_zufang/tc_zufang/settings.py | 0.795151 | 0.766304 |
mrpal39/ev_code | refs/heads/master | from django.urls import path,include
from blog import views
urlpatterns = [
# path('', views.index, name='base'),
path('', views.list, name='list'),
# path('home/', views.home, name='home'),
# path('search/', views.Search, name='home_search'),
# path('', views.home, name='home'),
]
| Python | 13 | 23 | 56 | /myapi/devfile/blog/urls.py | 0.592949 | 0.592949 |
dspinellis/PPS-monitor | refs/heads/master | #!/usr/bin/env python3
#
# Copyright 2018-2022 Diomidis Spinellis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PPS/H-Bus monitoring program
"""
import argparse
import os
from itertools import count
import RPi.GPIO as GPIO
from serial import Serial
from struct import unpack
import sys
from time import time
BAUD = 4800
# Netdata update interval. This is the time actually taken to refresh an
# entire record
update_every = 20
def get_raw_telegram(ser):
    """Receive a telegram sequence, terminated by more than one char time"""
    received = []
    while True:
        chunk = ser.read()
        if not chunk:
            # Read timed out: an inter-telegram gap. Flush whatever has
            # accumulated; with nothing buffered, keep waiting.
            if received:
                return received
            continue
        received.append(unpack('B', chunk)[0])
        # A lone leading 0x17 byte is reported immediately on its own
        if received == [0x17]:
            return received
def crc(t):
    """Return the checksum byte for the telegram bytes *t*.

    The protocol uses an additive two's-complement checksum: the sum of
    all telegram bytes, including the checksum byte itself, must be 0
    modulo 256.
    """
    total = sum(t) & 0xff
    # Two's complement of the low byte. The previous expression
    # (0xff - total + 1) evaluated to 256 when the payload sum was a
    # multiple of 256, a value that can never equal a stored checksum
    # byte (0..255), so such valid telegrams were always rejected.
    # (Also renamed the accumulator, which shadowed the builtin sum().)
    return (0x100 - total) & 0xff
def get_telegram(ser):
    """ Return a full verified telegram"""
    while True:
        telegram = get_raw_telegram(ser)
        n = len(telegram)
        if n == 9:
            payload, checksum = telegram[:-1], telegram[-1]
            if crc(payload) == checksum:
                return payload
            sys.stderr.write("CRC error in received telegram\n")
        elif n != 1:
            # Single-byte sequences (e.g. the 0x17 marker) are silently skipped
            sys.stderr.write("Invalid telegram length %d\n" % n)
def get_temp(t):
    """Return the temperature associated with a telegram as a string"""
    # Bytes 6 and 7 form a big-endian value in 1/64 degree units
    raw = (t[6] << 8) + t[7]
    return '%.1f' % (raw / 64.)
def get_raw_temp(t):
    """Return the temperature associated with a telegram as an integer
    multiplied by 64"""
    high, low = t[6], t[7]
    return (high << 8) + low
def format_telegram(t):
    """Format the passed telegram"""
    # Hex dump of every byte, followed by the decoded temperature field
    parts = ['%02x ' % v for v in t]
    parts.append('(T=%s)' % get_temp(t))
    return ''.join(parts)
def valid_temp(t):
    """Return true if the telegram's temperature is valid"""
    # 0x80 0x01 in the payload marks a missing/invalid reading
    return (t[6], t[7]) != (0x80, 0x01)
def decode_telegram(t):
    """Decode the passed telegram into a message and its formatted and
    raw value.
    The values are None if the telegram is unknown"""
    # Human-readable names for the room unit's operating-mode byte (t[7])
    room_unit_mode = ['timed', 'manual', 'off']
    # t[1] carries the message type; temperature payloads live in t[6]/t[7]
    # and are decoded by get_temp()/get_raw_temp() (1/64 degree units).
    if t[1] == 0x08:
        return ('Set present room temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x09:
        return ('Set absent room temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x0b:
        # DHW: domestic hot water
        return ('Set DHW temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x19:
        return ('Set room temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x28:
        return ('Actual room temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x29:
        return ('Outside temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x2c and valid_temp(t):
        # Flow/boiler readings are reported only when the payload is not the
        # 0x8001 "invalid reading" sentinel; an invalid reading falls through
        # to the unknown case at the bottom.
        return ('Actual flow temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x2b:
        return ('Actual DHW temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x2e and valid_temp(t):
        return ('Actual boiler temp', get_temp(t), get_raw_temp(t))
    elif t[1] == 0x48:
        # t[7] == 0: the room unit is in charge; otherwise the controller
        return ('Authority', ('remote' if t[7] == 0 else 'controller'), t[7])
    elif t[1] == 0x49:
        return ('Mode', room_unit_mode[t[7]], t[7])
    elif t[1] == 0x4c:
        return ('Present', ('true' if t[7] else 'false'), t[7])
    elif t[1] == 0x7c:
        return ('Remaining absence days', t[7], t[7])
    else:
        return (None, None, None)
def decode_peer(t):
    """ Return the peer by its name, and True if the peer is known"""
    # The sender's bus address is the telegram's first byte
    address = t[0]
    if address == 0xfd:
        return ('Room unit:', True)
    if address == 0x1d:
        return ('Controller:', True)
    # Unknown peer: report its raw address in hex
    return ('0x%02x:' % address, False)
def print_csv(out, d):
    """Output the elements of the passed CSV record in a consistent order"""
    # Leading field is the Unix timestamp; the rest follow in key order
    fields = [str(int(time()))]
    fields.extend(d[key] for key in sorted(d))
    out.write(','.join(fields))
    out.write("\n")
def print_csv_header(out, d):
    """Output the header of the passed CSV record in a consistent order"""
    # Column order must match print_csv: timestamp first, then sorted keys
    out.write(','.join(['time'] + sorted(d)))
    out.write("\n")
def monitor(port, nmessage, show_unknown, show_raw, out, csv_output,
            header_output, netdata_output):
    """Monitor PPS traffic

    :param port: Serial device to read telegrams from
    :param nmessage: Number of telegrams to process; falsy runs forever
    :param show_unknown: Also report telegrams that cannot be decoded
    :param show_raw: Also echo each known telegram in raw hex form
    :param out: Stream receiving the text or CSV output
    :param csv_output: Emit CSV records instead of free text
    :param header_output: Emit a CSV header before the first record
    :param netdata_output: Emit netdata chart values on standard output
    """
    global update_every
    CSV_ELEMENTS = 11 # Number of elements per CSV record
    NBITS = 10 # * bits plus start and stop
    CPS = BAUD / NBITS
    # Timeout if nothing received for ten characters
    TIMEOUT = 1. / CPS * 10
    # Setup 3.3V on pin 12, as required by the circuit board
    GPIO.setmode(GPIO.BOARD)
    GPIO.setup(12, GPIO.OUT, initial=GPIO.HIGH)
    with Serial(port, BAUD, timeout=TIMEOUT) as ser:
        csv_record = {}
        raw_record = {}
        last_run = dt_since_last_run = 0
        for i in range(int(nmessage)) if nmessage else count():
            t = get_telegram(ser)
            known = True
            # A telegram is "known" only if both its message type and its
            # sending peer could be decoded
            (message, value, raw) = decode_telegram(t)
            if not value:
                known = False
            (peer, known_peer) = decode_peer(t)
            if not known_peer:
                known = False
            if known:
                if csv_output:
                    csv_record[message] = value
                    raw_record[message] = raw
                    # A complete record has been assembled; emit it
                    if len(csv_record) == CSV_ELEMENTS:
                        if header_output:
                            print_csv_header(out, csv_record)
                            header_output = False
                        print_csv(out, csv_record)
                        csv_record = {}
                else:
                    out.write("%-11s %s: %s\n" % (peer, message, value))
                if show_raw:
                    out.write("%-11s %s\n" % (peer, format_telegram(t)))
                if netdata_output:
                    raw_record[message] = raw
                    # Gather telegrams until update_every has lapsed
                    # https://github.com/firehol/netdata/wiki/External-Plugins
                    now = time()
                    if last_run > 0:
                        dt_since_last_run = now - last_run
                    if len(raw_record) == CSV_ELEMENTS and (last_run == 0 or
                            dt_since_last_run >= update_every):
                        netdata_set_values(raw_record, dt_since_last_run)
                        raw_record = {}
                        last_run = now
            elif show_unknown:
                out.write("%-11s %s\n" % (peer, format_telegram(t)))
    GPIO.cleanup()
def netdata_set_values(r, dt):
    """Output the values of a completed record"""
    # Netdata expects the elapsed interval in integer microseconds
    dt = int(dt * 1e6)

    def emit(chart, dimensions):
        """Emit one BEGIN/SET.../END group for the named Heating chart."""
        print('BEGIN Heating.%s %d' % (chart, dt))
        for dimension, key in dimensions:
            print('SET %s = %d' % (dimension, r[key]))
        print('END')

    emit('ambient', [('t_room_set', 'Set room temp'),
                     ('t_room_actual', 'Actual room temp'),
                     ('t_outside', 'Outside temp')])
    emit('dhw', [('t_dhw_set', 'Set DHW temp'),
                 ('t_dhw_actual', 'Actual DHW temp')])
    # Flow and boiler readings are optional; skip the chart when absent
    if 'Actual flow temp' in r:
        emit('flow', [('t_heating', 'Actual flow temp')])
    if 'Actual boiler temp' in r:
        emit('boiler', [('t_boiler', 'Actual boiler temp')])
    emit('set_point', [('t_present', 'Set present room temp'),
                       ('t_absent', 'Set absent room temp')])
    emit('present', [('present', 'Present')])
    emit('mode', [('mode', 'Mode')])
    emit('authority', [('authority', 'Authority')])
    sys.stdout.flush()
def netdata_configure():
    """Configure the supported Netdata charts"""
    # Chart and dimension definitions in the netdata external-plugin
    # protocol; written verbatim to standard output.
    chart_definitions = """
CHART Heating.ambient 'Ambient T' 'Ambient temperature' 'Celsius' Temperatures Heating line 110
DIMENSION t_room_set 'Set room temperature' absolute 1 64
DIMENSION t_room_actual 'Actual room temperature' absolute 1 64
DIMENSION t_outside 'Outside temperature' absolute 1 64
CHART Heating.dhw 'Domestic hot water T' 'DHW temperature' 'Celsius' Temperatures Heating line 120
DIMENSION t_dhw_set 'Set DHW temperature' absolute 1 64
DIMENSION t_dhw_actual 'Actual DHW temperature' absolute 1 64
CHART Heating.flow 'Heating water T' 'Heating temperature' 'Celsius' Temperatures Heating line 130
DIMENSION t_heating 'Heating temperature' absolute 1 64
CHART Heating.boiler 'Boiler T' 'Boiler temperature' 'Celsius' Temperatures Heating line 135
DIMENSION t_boiler 'Heating temperature' absolute 1 64
CHART Heating.set_point 'Set temperatures' 'Set temperatures' 'Celsius' Temperatures Heating line 140
DIMENSION t_present 'Present room temperature' absolute 1 64
DIMENSION t_absent 'Absent room temperature' absolute 1 64
CHART Heating.present 'Present' 'Present' 'False/True' Control Heating line 150
DIMENSION present 'Present' absolute
CHART Heating.authority 'Authority' 'Authority' 'Remote/Controller' Control Heating line 160
DIMENSION authority 'Authority' absolute
CHART Heating.mode 'Mode' 'Mode' 'Timed/Manual/Off' Control Heating line 170
DIMENSION mode 'Mode' 'Mode' 'Timed/Manual/Off'
"""
    sys.stdout.write(chart_definitions)
def main():
    """Program entry point: parse command-line arguments and monitor."""
    global update_every
    # Remove any Netdata-supplied update_every argument
    # (netdata passes the collection interval as the first positional
    # argument when it launches an external plugin)
    if 'NETDATA_UPDATE_EVERY' in os.environ:
        update_every = int(sys.argv[1])
        del sys.argv[1]
    parser = argparse.ArgumentParser(
        description='PPS monitoring program')
    parser.add_argument('-c', '--csv',
                        help='Output CSV records',
                        action='store_true')
    parser.add_argument('-H', '--header',
                        help='Print CSV header',
                        action='store_true')
    parser.add_argument('-n', '--nmessage',
                        help='Number of messages to process (default: infinite)')
    parser.add_argument('-N', '--netdata',
                        help='Act as a netdata external plugin',
                        action='store_true')
    parser.add_argument('-o', '--output',
                        help='Specify CSV output file (default: stdout)')
    parser.add_argument('-p', '--port',
                        help='Serial port to access (default: /dev/serial0)',
                        default='/dev/serial0')
    parser.add_argument('-r', '--raw',
                        help='Show telegrams also in raw format',
                        action='store_true')
    parser.add_argument('-u', '--unknown',
                        help='Show unknown telegrams',
                        action='store_true')
    args = parser.parse_args()
    # CSV/text output is appended to the named file, or written to stdout
    if args.output:
        out = open(args.output, 'a')
    else:
        out = sys.stdout
    if args.netdata:
        netdata_configure()
    monitor(args.port, args.nmessage, args.unknown, args.raw, out, args.csv,
            args.header, args.netdata)
if __name__ == "__main__":
    main()
| Python | 334 | 33.655689 | 101 | /ppsmon.py | 0.575205 | 0.558445 |
Yuliashka/Snake-Game | refs/heads/main |
from turtle import Turtle
import random
# We want this Food class to inherit from the Turtle class, so it has all the
# capabilities of a turtle, plus the food-specific behaviour we add below.
class Food(Turtle):
    """A small blue circle marking where the snake's food currently is."""

    def __init__(self):
        # Let Turtle set up the underlying drawable object first.
        super().__init__()
        self.shape("circle")
        self.penup()
        # A default turtle is 20x20 px; halving both axes gives a 10x10 dot.
        self.shapesize(stretch_len=0.5, stretch_wid=0.5)
        self.color("blue")
        self.speed("fastest")
        # Drop the food at a random spot straight away.
        self.refresh()

    def refresh(self):
        """Jump to a random position inside the 600x600 play area."""
        # Stay 20 px clear of the walls: both coordinates in [-280, 280].
        self.goto(random.randint(-280, 280), random.randint(-280, 280))
# All these methods run as soon as a new Food object is created.
# The Food object itself is instantiated in main.py.
| Python | 32 | 36.78125 | 103 | /food.py | 0.636145 | 0.605622 |
Yuliashka/Snake-Game | refs/heads/main |
from turtle import Turtle
STARTING_POSITIONS = [(0, 0), (-20, 0), (-40, 0)]
MOVE_DISTANCE = 20
UP = 90
DOWN = 270
RIGHT = 0
LEFT = 180
class Snake:
    """A snake made of white square turtle segments that moves as a chain."""

    def __init__(self):
        """Build the starting three-segment body and remember its head."""
        self.segments = []
        self.create_snake()
        self.head = self.segments[0]

    def create_snake(self):
        """Lay down one segment per starting coordinate."""
        for position in STARTING_POSITIONS:
            self.add_segment(position)

    def add_segment(self, position):
        """Append one white square segment at the given (x, y) position."""
        segment = Turtle("square")
        segment.color("white")
        segment.penup()
        segment.goto(position)
        self.segments.append(segment)

    def extend(self):
        """Grow by one segment, stacked on top of the current tail piece."""
        self.add_segment(self.segments[-1].position())

    def move(self):
        """Advance one step: each segment takes its predecessor's spot, then
        the head walks forward by one segment width."""
        # Walk the body back-to-front so every segment copies a position that
        # has not been overwritten yet this tick.
        for index in range(len(self.segments) - 1, 0, -1):
            leader = self.segments[index - 1]
            self.segments[index].goto(leader.xcor(), leader.ycor())
        self.head.forward(MOVE_DISTANCE)

    # The four steering methods refuse a 180-degree reversal, because the
    # snake cannot double back onto its own body.
    def up(self):
        if self.head.heading() != DOWN:
            self.head.setheading(UP)

    def down(self):
        if self.head.heading() != UP:
            self.head.setheading(DOWN)

    def left(self):
        if self.head.heading() != RIGHT:
            self.head.setheading(LEFT)

    def right(self):
        if self.head.heading() != LEFT:
            self.head.setheading(RIGHT)
| Python | 69 | 30.31884 | 118 | /snake.py | 0.594558 | 0.582516 |
Yuliashka/Snake-Game | refs/heads/main |
from turtle import Screen
import time
from snake import Snake
from food import Food
from scoreboard import Score
# ---- screen setup --------------------------------------------------------
screen = Screen()
screen.setup(width=600, height=600)
screen.bgcolor("black")
screen.title("My Snake Game")
# Disable automatic animation; the loop below redraws manually once per tick.
screen.tracer(0)
# ---- game objects --------------------------------------------------------
snake = Snake()
food = Food()
score = Score()
# ---- keyboard control ----------------------------------------------------
# The direction handlers live on the Snake class and refuse 180-degree turns.
screen.listen()
screen.onkey(key="Up", fun=snake.up)
screen.onkey(key="Down", fun=snake.down)
screen.onkey(key="Left", fun=snake.left)
screen.onkey(key="Right", fun=snake.right)
# ---- main loop: one tick roughly every 0.1 s -----------------------------
game_is_on = True
while game_is_on:
    # Refresh the (tracer-disabled) screen, pause, then step the snake.
    screen.update()
    time.sleep(0.1)
    snake.move()
    # Food collision: anything within 15 px of the head counts as eaten —
    # respawn the food, grow the snake and bump the score.
    if snake.head.distance(food) < 15:
        food.refresh()
        snake.extend()
        print("nom nom nom")
        score.increase_score()
    # Tail collision: compare the head against every body segment; the slice
    # skips index 0, which is the head itself.
    for segment in snake.segments[1:]:
        if snake.head.distance(segment) < 10:
            game_is_on = False
            score.game_over()
    # Wall collision on the 600x600 board (280 leaves room for the sprite).
    if snake.head.xcor() >280 or snake.head.xcor() < -280 or snake.head.ycor() > 280 or snake.head.ycor() < -280:
        score.game_over()
        game_is_on = False
screen.exitonclick()
Yuliashka/Snake-Game | refs/heads/main |
from turtle import Turtle
# Shared text style for all on-screen messages.
# NOTE(review): "ALIGMENT" is a typo for "ALIGNMENT"; kept as-is because the
# name is part of this module's public surface.
ALIGMENT = "center"
FONT = ("Arial", 18, "normal")
class Score(Turtle):
    """Invisible turtle that draws the current score and the game-over banner."""

    def __init__(self):
        """Park the pen at the top of the screen and draw the initial score."""
        super().__init__()
        self.score = 0  # points collected so far
        self.color("white")
        self.penup()
        self.goto(0, 270)
        self.hideturtle()
        # FIX: the original wrote the score text twice here (an inline write
        # followed by update_score()); a single draw is enough.
        self.update_score()

    def update_score(self):
        """Draw the current score at the pen's position."""
        # FIX: use the module-level style constants instead of repeating
        # their literal values inline.
        self.write(f"Current score: {self.score}", align=ALIGMENT, font=FONT)

    def game_over(self):
        """Print the GAME OVER banner in the middle of the screen."""
        self.goto(0, 0)
        self.write("GAME OVER", align=ALIGMENT, font=FONT)

    def increase_score(self):
        """Add one point and redraw the score."""
        self.score += 1
        # Erase the stale text before writing the new value.
        self.clear()
        self.update_score()
| Python | 28 | 27.428572 | 97 | /scoreboard.py | 0.545783 | 0.528916 |
marcin-mulawa/Water-Sort-Puzzle-Bot | refs/heads/main | import numpy as np
import cv2
import imutils
picture = 'puzzle.jpg'
def load_transform_img(picture):
    """Detect the liquid tubes in a phone screenshot and read their colours.

    Pipeline: mask the board area, edge-detect and fill the tube outlines,
    then for every tube contour sample four small windows (one per liquid
    layer) and keep each window's peak grayscale value.

    Returns ``(puzzlelist, count)`` where ``puzzlelist`` holds one list of
    layer colour values per tube (reversed so the bottom layer comes first)
    and ``count`` is the number of tube contours found.  Each tube crop is
    also written to ``temp/box<N>.jpg`` for debugging.
    """
    image = cv2.imread(picture)
    image = imutils.resize(image, height=800)
    org = image.copy()  # untouched copy, used for colour sampling below
    # Keep only the rectangle that contains the tubes (phone-layout crop).
    mask = np.zeros(image.shape[:2], dtype = "uint8")
    cv2.rectangle(mask, (15, 150), (440, 700), 255, -1)
    image = cv2.bitwise_and(image, image, mask = mask)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Edge detection, then fill the outlines so every tube becomes one blob.
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    edged = cv2.Canny(blurred, 140, 230)
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    print(len(cnts))
    cv2.fillPoly(edged, pts =cnts, color=(255,255,255))
    # Re-run Canny + findContours on the filled image to get one clean outer
    # contour per tube.
    fedged = cv2.Canny(edged, 140, 230)
    (cnts, _) = cv2.findContours(fedged.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    boxes = fedged.copy()
    image = cv2.bitwise_and(org, org, mask = edged)
    puzzlelist = []
    for (i, c) in enumerate(cnts):
        (x, y, w, h) = cv2.boundingRect(c)
        print("Box #{}".format(i + 1))
        box = org[y:y + h, x:x + w]
        cv2.imwrite(f'temp/box{i+1}.jpg',box)
        gray = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
        # Mask of four 22x15 px sample windows spaced 40 px apart down the
        # tube — one window per liquid layer.
        # NOTE(review): the inner loops reuse `i`, shadowing the contour
        # index; harmless because enumerate re-binds it each iteration, but
        # worth renaming.
        mask = np.zeros(gray.shape[:2], dtype = "uint8")
        y1,y2 = 35, 50
        for i in range(4):
            cv2.rectangle(mask, (15, y1), (37, y2), 255, -1)
            y1,y2 = y1+40, y2+40
        masked = cv2.bitwise_and(gray, gray, mask = mask)
        y1,y2 = 35, 50
        temp = []
        for i in range(4):
            value = masked[y1:y2,15:37]
            # The brightest pixel in the window stands in for the layer's
            # colour; values below 45 are treated as empty (background).
            max_val = max(value.flatten())
            if max_val >= 45:
                temp.append(max_val)
            y1,y2 = y1+40, y2+40
        puzzlelist.append(temp[::-1])
    return puzzlelist[::-1] , len(cnts)
| Python | 78 | 28.948717 | 90 | /loading_phone.py | 0.559503 | 0.494007 |
marcin-mulawa/Water-Sort-Puzzle-Bot | refs/heads/main | import numpy as np
import cv2
import imutils
picture = 'puzzle.jpg'
def load_transform_img(picture):
    """Detect the liquid tubes in a desktop (BlueStacks) screenshot.

    Same pipeline as the phone variant, but with crops tuned for the PC
    window layout, and it additionally records the on-screen centre of each
    tube's bounding box so the bot can click it later.

    Returns ``(puzzlelist, boxes_positon, count)``: per-tube layer colour
    values (bottom layer first), per-tube centre coordinates, and the number
    of tube contours found.  Each tube crop is written to ``temp/box<N>.jpg``.
    """
    image = cv2.imread(picture)
    org = image.copy()  # untouched copy, used for colour sampling below
    # Keep only the rectangle that contains the tubes (PC-layout crop).
    mask = np.zeros(image.shape[:2], dtype = "uint8")
    cv2.rectangle(mask, (680, 260), (1160, 910), 255, -1)
    image = cv2.bitwise_and(image, image, mask = mask)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # Edge detection, then fill the outlines so every tube becomes one blob.
    blurred = cv2.GaussianBlur(image, (5, 5), 0)
    edged = cv2.Canny(blurred, 140, 230)
    (cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    cv2.fillPoly(edged, pts =cnts, color=(255,255,255))
    # Re-run Canny + findContours on the filled image to get one clean outer
    # contour per tube.
    fedged = cv2.Canny(edged, 140, 230)
    (cnts, _) = cv2.findContours(fedged.copy(), cv2.RETR_EXTERNAL,
        cv2.CHAIN_APPROX_SIMPLE)
    image = cv2.bitwise_and(org, org, mask = edged)
    puzzlelist = []
    boxes_positon = []
    for (i, c) in enumerate(cnts):
        (x, y, w, h) = cv2.boundingRect(c)
        box = org[y:y + h, x:x + w]
        # Centre of the bounding box in screen coordinates (click target).
        boxes_positon.append( ( (x+x+w)/2, (y+y+h)/2 ) )
        cv2.imwrite(f'temp/box{i+1}.jpg',box)
        gray = cv2.cvtColor(box, cv2.COLOR_BGR2GRAY)
        # Mask of four 22x15 px sample windows spaced 45 px apart down the
        # tube — one window per liquid layer.
        # NOTE(review): the inner loops reuse `i`, shadowing the contour
        # index; harmless because enumerate re-binds it each iteration.
        mask = np.zeros(gray.shape[:2], dtype = "uint8")
        y1,y2 = 45, 60
        for i in range(4):
            cv2.rectangle(mask, (15, y1), (37, y2), 255, -1)
            y1,y2 = y1+45, y2+45
        masked = cv2.bitwise_and(gray, gray, mask = mask)
        y1,y2 = 45, 60
        temp = []
        for i in range(4):
            value = masked[y1:y2,15:37]
            # The brightest pixel in the window stands in for the layer's
            # colour; values below 45 are treated as empty (background).
            max_val = max(value.flatten())
            if max_val >= 45:
                temp.append(max_val)
            y1,y2 = y1+45, y2+45
        puzzlelist.append(temp[::-1])
    print(f'Pozycja początkowa: {puzzlelist[::-1]}\n')
    print(f'Pozycje boksow: {boxes_positon[::-1]}\n')
    return puzzlelist[::-1], boxes_positon[::-1], len(cnts)
if __name__ == '__main__':
    # Smoke test: parse a saved screenshot and show the detected colour grid.
    # BUG FIX: the middle unpack target was "boxes_positon[::-1]" — a slice
    # assignment into an undefined name, which raised NameError at runtime.
    # Bind the returned list to a plain name instead.
    answer, boxes_positon, boxes = load_transform_img('level/screen.jpg')
    print(answer)
| Python | 88 | 29.897728 | 90 | /loading_pc.py | 0.5605 | 0.500919 |
marcin-mulawa/Water-Sort-Puzzle-Bot | refs/heads/main | import pyautogui as pya
import solver
import time
import glob
import os
import numpy as np
import cv2
import shutil
# Drive the Water Sort Puzzle app inside BlueStacks: capture the board,
# solve each level offline with the random-search agent, then replay the
# solution by clicking the tubes.
path = os.getcwd()
path1 = path + r'/temp'
path2 = path + r'/level'

# Start each run with a clean scratch directory; `level/` keeps screenshots.
try:
    shutil.rmtree(path1)
except OSError:
    # First run or directory already gone — nothing to clean up.
    pass
# FIX: replace try/os.mkdir/except-pass with the idiomatic exist_ok form.
os.makedirs('temp', exist_ok=True)
os.makedirs('level', exist_ok=True)

# Bring BlueStacks to the foreground and maximise it.
bluestacks = pya.locateCenterOnScreen('static/bluestacks.jpg', confidence=.9)
print(bluestacks)
pya.click(bluestacks)
time.sleep(3)
full = pya.locateCenterOnScreen('static/full.jpg', confidence=.8)
pya.click(full)
time.sleep(15)

# Open the game from the "my games" tab when it is visible.
mojeGry = pya.locateCenterOnScreen('static/mojegry.jpg', confidence=.8)
print(mojeGry)
if mojeGry:
    pya.click(mojeGry)
    time.sleep(2)
game = pya.locateCenterOnScreen('static/watersort.jpg', confidence=.5)
print(game)
if game:
    pya.click(game)
    time.sleep(6)

record = pya.locateCenterOnScreen('static/record.jpg', confidence=.8)
for m in range(4):
    pya.click(record)
    time.sleep(4.5)
    for k in range(10):
        # Capture the current board and solve it offline.
        screenshoot = pya.screenshot()
        screenshoot = cv2.cvtColor(np.array(screenshoot), cv2.COLOR_RGB2BGR)
        cv2.imwrite("level/screen.jpg", screenshoot)
        # BUG FIX: solver.game_loop(agent, picture) takes the agent first;
        # the original call omitted it and raised TypeError.
        moves, boxes_position = solver.game_loop(solver.random_agent, "level/screen.jpg")
        print(f'Steps to solve level: {len(moves)}')
        print(moves)
        # Replay the solution: click the source tube, then the destination.
        for i, j in moves:
            pya.click(boxes_position[i])
            time.sleep(0.3)
            pya.click(boxes_position[j])
        pya.sleep(2.5)
        next_level = pya.locateCenterOnScreen('static/next.jpg', confidence=.7)
        pya.click(next_level)
        time.sleep(3)
        # Dismiss up to two pop-ups that may appear between levels.
        x_location = pya.locateCenterOnScreen('static/x.jpg', confidence=.7)
        if x_location:
            pya.click(x_location)
            time.sleep(2)
        x_location = pya.locateCenterOnScreen('static/x.jpg', confidence=.7)
        if x_location:
            pya.click(x_location)
            time.sleep(2)
        pya.click(record)
        time.sleep(2)
| Python | 77 | 24.38961 | 79 | /auto_puzzle.py | 0.642603 | 0.625318 |
marcin-mulawa/Water-Sort-Puzzle-Bot | refs/heads/main | from collections import deque
import random
import copy
import sys
import loading_pc
import os
def move(new_list, from_, to):
    """Pour the top run of same-coloured units from pile ``from_`` onto ``to``.

    Grayscale values within 3 of each other count as the same colour.  The
    list of piles is mutated in place and also returned for convenience.
    """
    top = new_list[from_].pop()
    for _ in range(4):
        source = new_list[from_]
        # Keep pouring while the next unit matches the colour and the
        # destination still has room for the final append below.
        if source and abs(int(top) - int(source[-1])) < 3 and len(new_list[to]) < 3:
            top = source.pop()
            new_list[to].append(top)
    # The last value popped always lands on the destination pile.
    new_list[to].append(top)
    return new_list
def possible_moves(table, boxes):
    """Return every legal (source, destination) pour for the current table."""
    legal = []
    for src in range(boxes):
        for dst in range(boxes):
            if src == dst:
                continue
            source, dest = table[src], table[dst]
            # Need something to pour and room to receive it.
            if not source or len(dest) >= 4:
                continue
            # Destination must be empty or colour-compatible with the source
            # top (grayscale values within 3 count as the same colour).
            if dest and abs(int(source[-1]) - int(dest[-1])) >= 3:
                continue
            # Pouring a finished pile (four identical units) is pointless.
            if len(source) == 4 and len(set(source)) == 1:
                continue
            # Moving a uniform pile onto an empty slot just shuffles it.
            if len(set(source)) == 1 and not dest:
                continue
            legal.append((src, dst))
    return legal
def check_win(table):
    """Return True when the table is solved.

    Solved means every pile is single-coloured and at most two piles are not
    full (those are the spare empty tubes).  Prints the table on a win.
    """
    partial = [pile for pile in table if len(pile) < 4]
    if len(partial) > 2:
        return False
    if any(len(set(pile)) > 1 for pile in table):
        return False
    print(table)
    return True
def game_loop(agent, picture):
    """Parse the board from the screenshot ``picture`` and let ``agent`` solve it.

    Returns the agent's winning move list and the on-screen centre of every
    detected tube (for clicking).
    """
    table, boxes_position, boxes = loading_pc.load_transform_img(picture)
    print(len(boxes_position))
    answer = agent(table, boxes)
    return answer, boxes_position
def random_agent(table, boxes):
    """Random-rollout solver: replay random legal moves from the start
    position until one rollout reaches a winning table, then return that
    rollout's move list.  Loops indefinitely until a solution is found.
    """
    # k bounds the rollout length (boxes * k moves) and grows every 1000
    # failed rollouts so harder boards get a longer move budget.
    k=5
    # l counts rollouts; printed as a crude progress indicator.
    l=0
    while True:
        print(l)
        # Each rollout works on a deep copy so `table` is never mutated.
        table_copy = copy.deepcopy(table)
        if l%1000 == 0:
            k+=1
        correct_moves = []
        for i in range(boxes*k):
            pmove = possible_moves(table_copy, boxes)
            if len(pmove) == 0:
                # No legal moves left: either this rollout won, or it is
                # stuck and we abandon it.
                win = check_win(table_copy)
                if win:
                    return correct_moves
                else:
                    break
            x, y = random.choice(pmove)
            table_copy = move(table_copy, x, y)
            correct_moves.append((x,y))
        l+=1
if __name__ == '__main__':
    # Manual run: solve the board captured in level/screen.jpg.
    answer, boxes_position = game_loop(random_agent, 'level/screen.jpg')
    print('answer', answer)
qtngr/HateSpeechClassifier | refs/heads/master | import warnings
import os
import json
import pandas as pd
import numpy as np
import tensorflow as tf
from joblib import dump, load
from pathlib import Path
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import f1_score, classification_report
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import GaussianNB
from tensorflow.keras.preprocessing import text, sequence
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.layers import *
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import backend as K
from keras.callbacks import EarlyStopping
print(f"TensorFlow version: {tf.__version__}")
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
class Configuration():
    """Holds every knob for one experiment run and sets up the accelerator."""

    def __init__(
        self,
        max_length = 150,
        padding = True,
        batch_size = 32,
        epochs = 50,
        learning_rate = 1e-5,
        metrics = None,
        verbose = 1,
        split_size = 0.25,
        accelerator = "TPU",
        myluckynumber = 13,
        first_time = True,
        save_model = True
    ):
        # seed and accelerator
        self.SEED = myluckynumber
        self.ACCELERATOR = accelerator
        # save and load parameters
        self.FIRST_TIME = first_time
        self.SAVE_MODEL = save_model
        # data paths
        self.DATA_PATH = Path('dataset.csv')
        self.EMBEDDING_INDEX_PATH = Path('fr_word.vec')
        # train/validation split ratio
        self.SPLIT_SIZE = split_size
        # model hyperparameters
        self.MAX_LENGTH = max_length
        self.PAD_TO_MAX_LENGTH = padding
        self.BATCH_SIZE = batch_size
        self.EPOCHS = epochs
        self.LEARNING_RATE = learning_rate
        # BUG FIX: the default was the mutable list ["accuracy"], shared
        # between every instance; use a None sentinel and build a fresh list.
        self.METRICS = ["accuracy"] if metrics is None else metrics
        self.VERBOSE = verbose
        # initializing accelerator
        self.initialize_accelerator()

    def initialize_accelerator(self):
        """Connect to a TPU when requested, otherwise fall back to GPU/CPU."""
        # checking TPU first
        if self.ACCELERATOR == "TPU":
            print("Connecting to TPU")
            try:
                tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
                print(f"Running on TPU {tpu.master()}")
            except ValueError:
                print("Could not connect to TPU")
                tpu = None

            if tpu:
                try:
                    print("Initializing TPU")
                    tf.config.experimental_connect_to_cluster(tpu)
                    tf.tpu.experimental.initialize_tpu_system(tpu)
                    self.strategy = tf.distribute.TPUStrategy(tpu)
                    self.tpu = tpu
                    print("TPU initialized")
                # BUG FIX: the original clause was "except _:", which itself
                # raises NameError the moment TPU initialisation fails.
                except Exception:
                    print("Failed to initialize TPU")
            else:
                print("Unable to initialize TPU")
                self.ACCELERATOR = "GPU"

        # default for CPU and GPU
        if self.ACCELERATOR != "TPU":
            print("Using default strategy for CPU and single GPU")
            self.strategy = tf.distribute.get_strategy()

        # checking GPUs
        if self.ACCELERATOR == "GPU":
            print(f"GPUs Available: {len(tf.config.experimental.list_physical_devices('GPU'))}")

        # defining replicas
        self.AUTO = tf.data.experimental.AUTOTUNE
        self.REPLICAS = self.strategy.num_replicas_in_sync
        print(f"REPLICAS: {self.REPLICAS}")
def TFIDF_vectorizer(x_train, x_test, first_time):
    """Vectorize train/test text with TF-IDF over 1- to 4-grams.

    On the first run the vectorizer is fitted on the training split and
    cached to disk; later runs reload the cached one so every experiment
    shares the same vocabulary.
    """
    if first_time:
        print('Building TF-IDF Vectorizer')
        vectorizer = TfidfVectorizer(ngram_range = (1,4))
        vectorizer.fit(x_train)
        dump(vectorizer, 'tfidf_vectorizer.joblib', compress= 3)
    else:
        print('Loading our TF-IDF vectorizer')
        vectorizer = load('tfidf_vectorizer.joblib')
    print('Vectorizing our sequences')
    vectorized_train = vectorizer.transform(x_train)
    vectorized_test = vectorizer.transform(x_test)
    print('Data Vectorized')
    return vectorized_train, vectorized_test
def load_embedding_index(file_path):
    """Parse a word-vector text file into a {word: float32 vector} dict.

    Each line is expected to look like ``word v1 v2 ... vn`` (fastText
    ``.vec`` style).
    NOTE(review): a fastText count/dimension header line would be stored as
    a bogus entry — confirm the input file has none.
    """
    embedding_index = {}
    # BUG FIX: open the file in a context manager so the handle is closed
    # (the original leaked it), and drop the unused enumerate counter.
    with open(file_path) as vec_file:
        for line in vec_file:
            values = line.split()
            embedding_index[values[0]] = np.asarray(values[1:], dtype='float32')
    return embedding_index
def build_embedding_matrix(x_train, x_test, maxlen, first_time, file_path):
    """Tokenize both text splits and assemble the pretrained embedding matrix.

    On the first run the Keras tokenizer is fitted on ``x_train`` and the
    300-dimensional embedding matrix is built from the word-vector file at
    ``file_path``; both are cached with joblib.  Later runs reload the
    cached artifacts so token ids stay stable across experiments.

    Returns ``(seq_x_train, seq_x_test, embedding_matrix, word_index)``.
    """
    # Tokenizer: fit + cache on first run, reload afterwards.
    if first_time :
        tokenizer = text.Tokenizer()
        tokenizer.fit_on_texts(x_train)
        dump(tokenizer, 'tokenizer.joblib', compress= 3)
    else:
        tokenizer = load('tokenizer.joblib')
    # Mapping from word to integer token id.
    word_index = tokenizer.word_index
    # Embedding matrix: row i holds the 300-d vector of the word with id i;
    # words missing from the embedding index keep an all-zero row.
    if first_time:
        print('Loading embedding index')
        embedding_index = load_embedding_index(file_path)
        print('Building our embedding matrix')
        embedding_matrix = np.zeros(
            (len(word_index) + 1, 300))
        for word, i in word_index.items():
            embedding_vector = embedding_index.get(word)
            if embedding_vector is not None:
                embedding_matrix[i] = embedding_vector
        dump(embedding_matrix, 'embedding_matrix.joblib', compress= 3)
    else:
        embedding_matrix = load('embedding_matrix.joblib')
    # Tokenize both splits and pad/truncate every sequence to `maxlen`.
    seq_x_train = sequence.pad_sequences(
        tokenizer.texts_to_sequences(x_train), maxlen=maxlen)
    seq_x_test = sequence.pad_sequences(
        tokenizer.texts_to_sequences(x_test), maxlen=maxlen)
    return seq_x_train, seq_x_test, embedding_matrix, word_index
def build_LogisticRegression(x_train, y_train, save_model, C=110):
    """Fit a logistic-regression classifier and optionally cache it to disk."""
    print('Fitting Logistic Regression')
    # LogisticRegression.fit returns the fitted estimator itself.
    classifier = LogisticRegression(C= C, max_iter=300).fit(x_train, y_train)
    print('Logistic Regression fitted')
    if save_model:
        print('Saving model')
        dump(classifier, 'modelLR.joblib', compress = 3)
    return classifier
def build_RandomFR(x_train, y_train, save_model):
    """Fit a 100-tree random-forest classifier and optionally cache it."""
    print('Fitting our Random Forest')
    forest = RandomForestClassifier(n_estimators =100)
    forest.fit(x_train, y_train)
    print('Random Forest Fitted')
    if save_model:
        print('Saving model')
        dump(forest, 'modelRF.joblib', compress = 3)
    return forest
def build_LSTM(embedding_matrix, word_index, maxlen, learning_rate, metrics, first_time):
    """Build (and optionally reload) the LSTM binary classifier.

    Architecture: frozen 300-d pretrained Embedding -> LSTM(100) -> sigmoid.
    When ``first_time`` is False the weights saved in ``lstm_model.h5`` are
    restored into the freshly built graph.
    """
    input_strings = Input(shape=(maxlen,))
    # The embedding layer is initialised from the pretrained matrix and kept
    # frozen, so only the LSTM and the output head are trained.
    x = Embedding(len(word_index) + 1, 300, input_length=maxlen,
                  weights=[embedding_matrix],
                  trainable=False)(input_strings)
    x = LSTM(100, dropout=0.2, recurrent_dropout=0.2)(x)
    x= Dense(1, activation="sigmoid")(x)
    model = Model(inputs = input_strings, outputs = x)
    opt = Adam(learning_rate = learning_rate)
    loss = tf.keras.losses.BinaryCrossentropy()
    model.compile(optimizer= opt, loss= loss, metrics = metrics)
    if not first_time:
        model.load_weights("lstm_model.h5")
    return model
def get_tf_dataset(X, y, auto, labelled = True, repeat = False, shuffle = False, batch_size = 32):
    """Build a batched, prefetched tf.data pipeline (TPU-friendly).

    When ``labelled`` is False, ``y`` is ignored and the dataset yields
    features only (used at prediction time).
    """
    source = (X, y) if labelled else X
    dataset = tf.data.Dataset.from_tensor_slices(source)
    if repeat:
        dataset = dataset.repeat()
    if shuffle:
        dataset = dataset.shuffle(2048)
    return dataset.batch(batch_size).prefetch(auto)
def run_LogisticRegression(config):
    """Train and evaluate the TF-IDF + logistic-regression baseline.

    Reads the CSV at ``config.DATA_PATH``, splits it, vectorizes both splits
    with the shared TF-IDF vocabulary and prints a classification report on
    the validation split.
    """
    # Reading data
    data = pd.read_csv(config.DATA_PATH)
    # separating sentences and labels
    sentences = data.text.astype(str).values.tolist()
    labels = data.label.astype(float).values.tolist()
    # splitting data into training and validation
    X_train, X_valid, y_train, y_valid = train_test_split(sentences,
                                                          labels,
                                                          test_size = config.SPLIT_SIZE
                                                          )
    # Vectorizing data (fits or reloads the cached vectorizer)
    X_train, X_valid = TFIDF_vectorizer(X_train, X_valid, config.FIRST_TIME)
    # Building model
    model = build_LogisticRegression(X_train, y_train, save_model = config.SAVE_MODEL)
    # predicting outcomes on the validation split
    y_pred = model.predict(X_valid)
    print(classification_report(y_valid, y_pred))
def run_RandomForest(config):
    """Train and evaluate the TF-IDF + random-forest baseline.

    Mirrors :func:`run_LogisticRegression` but fits a random forest instead.
    """
    # Reading data
    data = pd.read_csv(config.DATA_PATH)
    # separating sentences and labels
    sentences = data.text.astype(str).values.tolist()
    labels = data.label.astype(float).values.tolist()
    # splitting data into training and validation
    X_train, X_valid, y_train, y_valid = train_test_split(sentences,
                                                          labels,
                                                          test_size = config.SPLIT_SIZE
                                                          )
    # Vectorizing data (fits or reloads the cached vectorizer)
    X_train, X_valid = TFIDF_vectorizer(X_train, X_valid, config.FIRST_TIME)
    # Building model
    model = build_RandomFR(X_train, y_train, save_model = config.SAVE_MODEL)
    # predicting outcomes on the validation split
    y_pred = model.predict(X_valid)
    print(classification_report(y_valid, y_pred))
def run_lstm_model(config):
    """Train the LSTM classifier end-to-end and print a validation report.

    Reads the CSV at ``config.DATA_PATH``, builds the pretrained embedding
    matrix, trains with checkpoint/early-stopping callbacks, reloads the
    best weights, scores the validation split and (optionally) saves the
    model architecture as JSON.
    """
    # Reading data
    data = pd.read_csv(config.DATA_PATH)
    # separating sentences and labels
    sentences = data.text.astype(str).values.tolist()
    labels = data.label.astype(float).values.tolist()
    # splitting data into training and validation
    X_train, X_valid, y_train, y_valid = train_test_split(sentences,
                                                          labels,
                                                          test_size = config.SPLIT_SIZE
                                                          )
    # Building embedding word to vector:
    seq_x_train, seq_x_test, embedding_matrix, word_index = build_embedding_matrix(
        X_train,
        X_valid,
        config.MAX_LENGTH,
        config.FIRST_TIME,
        config.EMBEDDING_INDEX_PATH)
    # building model (no distribution-strategy scope here: the frozen
    # embedding layer was incompatible with the TPU strategy scope)
    K.clear_session()
    model = build_LSTM(embedding_matrix, word_index, config.MAX_LENGTH, config.LEARNING_RATE, config.METRICS, config.FIRST_TIME)
    print('model builded')
    n_train = len(X_train)
    # saving weights at the best val_accuracy epoch and stopping after 10
    # epochs without improvement
    sv = [tf.keras.callbacks.ModelCheckpoint(
        "lstm_model.h5",
        monitor = "val_accuracy",
        verbose = 1,
        save_best_only = True,
        save_weights_only = True,
        mode = "max",
        save_freq = "epoch"),
        tf.keras.callbacks.EarlyStopping(patience = 10,
                                         verbose= 1,
                                         monitor='val_accuracy')]
    print("\nTraining")
    # Keras expects arrays, not Python lists.
    seq_x_train = np.array(seq_x_train)
    y_train = np.array(y_train)
    seq_x_test = np.array(seq_x_test)
    y_valid = np.array(y_valid)
    # training model
    # FIX: `sv` is already a list of callbacks — pass it directly instead of
    # nesting it inside another list.
    model_history = model.fit(
        x = seq_x_train,
        y = y_train,
        epochs = config.EPOCHS,
        callbacks = sv,
        batch_size = config.BATCH_SIZE,
        validation_data = (seq_x_test, y_valid),
        verbose = config.VERBOSE
    )
    print("\nValidating")
    # scoring validation data with the best checkpoint
    model.load_weights("lstm_model.h5")
    preds_valid = model.predict(seq_x_test, verbose = config.VERBOSE)
    print('Classification report:')
    print(classification_report(y_valid, (preds_valid > 0.5)))
    if config.SAVE_MODEL:
        # BUG FIX: json.dump() needs a file object, not a path string, and
        # model.to_json() already returns serialized JSON text — write it
        # out directly.
        with open('lstm_model.json', 'w') as json_file:
            json_file.write(model.to_json())
| Python | 381 | 32.92651 | 142 | /classifiers.py | 0.590825 | 0.585332 |
qtngr/HateSpeechClassifier | refs/heads/master | ## importing packages
import gc
import os
import random
import transformers
import warnings
import json
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
from pathlib import Path
from sklearn.metrics import accuracy_score, classification_report
from sklearn.model_selection import train_test_split
from tensorflow.keras import Model, Sequential
from tensorflow.keras.layers import Input, Dense, Dropout
from tensorflow.keras.optimizers import Adam
from transformers import AutoTokenizer, TFAutoModel
print(f"TensorFlow version: {tf.__version__}")
print(f"Transformers version: {transformers.__version__}")
warnings.filterwarnings("ignore")
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
## defining configuration
class Configuration_BERT():
    """All configuration for one BERT fine-tuning experiment.

    Loads the tokenizer for ``model_name`` and sets up the accelerator.
    """
    def __init__(
        self,
        model_name,
        max_length = 150,
        padding = True,
        batch_size = 32,
        epochs = 5,
        learning_rate = 1e-5,
        metrics = None,
        verbose = 1,
        split_size = 0.25,
        accelerator = "TPU",
        myluckynumber = 13,
        include_english = False,
        save_model = True
    ):
        # seed and accelerator
        self.SEED = myluckynumber
        self.ACCELERATOR = accelerator
        # save and load parameters
        self.SAVE_MODEL = save_model
        # english data
        self.INCLUDE_ENG = include_english
        # paths
        self.PATH_FR_DATA = Path("dataset.csv")
        self.PATH_ENG_DATA = Path("eng_dataset.csv")
        # splits
        self.SPLIT_SIZE = split_size
        # model configuration
        self.MODEL_NAME = model_name
        self.TOKENIZER = AutoTokenizer.from_pretrained(self.MODEL_NAME)
        # model hyperparameters
        self.MAX_LENGTH = max_length
        self.PAD_TO_MAX_LENGTH = padding
        self.BATCH_SIZE = batch_size
        self.EPOCHS = epochs
        self.LEARNING_RATE = learning_rate
        # BUG FIX: the default was the mutable list ["accuracy"], shared
        # between every instance; use a None sentinel and build a fresh list.
        self.METRICS = ["accuracy"] if metrics is None else metrics
        self.VERBOSE = verbose
        # initializing accelerator
        self.initialize_accelerator()

    def initialize_accelerator(self):
        """Connect to a TPU when requested, otherwise fall back to GPU/CPU."""
        # checking TPU first
        if self.ACCELERATOR == "TPU":
            print("Connecting to TPU")
            try:
                tpu = tf.distribute.cluster_resolver.TPUClusterResolver()
                print(f"Running on TPU {tpu.master()}")
            except ValueError:
                print("Could not connect to TPU")
                tpu = None

            if tpu:
                try:
                    print("Initializing TPU")
                    tf.config.experimental_connect_to_cluster(tpu)
                    tf.tpu.experimental.initialize_tpu_system(tpu)
                    self.strategy = tf.distribute.TPUStrategy(tpu)
                    self.tpu = tpu
                    print("TPU initialized")
                # BUG FIX: the original clause was "except _:", which itself
                # raises NameError the moment TPU initialisation fails.
                except Exception:
                    print("Failed to initialize TPU")
            else:
                print("Unable to initialize TPU")
                self.ACCELERATOR = "GPU"

        # default for CPU and GPU
        if self.ACCELERATOR != "TPU":
            print("Using default strategy for CPU and single GPU")
            self.strategy = tf.distribute.get_strategy()

        # checking GPUs
        if self.ACCELERATOR == "GPU":
            print(f"GPUs Available: {len(tf.config.experimental.list_physical_devices('GPU'))}")

        # defining replicas
        self.AUTO = tf.data.experimental.AUTOTUNE
        self.REPLICAS = self.strategy.num_replicas_in_sync
        print(f"REPLICAS: {self.REPLICAS}")
def encode_text(sequences, tokenizer, max_len, padding):
    """Tokenize a batch of strings into model-ready encodings.

    ``padding`` toggles pad-to-max-length; sequences longer than ``max_len``
    are truncated.
    """
    return tokenizer.batch_encode_plus(
        sequences,
        pad_to_max_length = padding,
        truncation=True,
        max_length = max_len
    )
def get_tf_dataset(X, y, auto, labelled = True, repeat = False, shuffle = False, batch_size = 32):
    """Build a batched, prefetched tf.data pipeline over the encoded input ids.

    When ``labelled`` is False, ``y`` is ignored and the dataset yields the
    input ids only (used at prediction time).
    """
    features = X["input_ids"]
    source = (features, y) if labelled else features
    dataset = tf.data.Dataset.from_tensor_slices(source)
    if repeat:
        dataset = dataset.repeat()
    if shuffle:
        dataset = dataset.shuffle(2048)
    return dataset.batch(batch_size).prefetch(auto)
## building model
def build_model(model_name, max_len, learning_rate, metrics):
    """Build the transformer classifier: pretrained encoder + dense head.

    The first-token embedding of the encoder's last hidden state feeds a
    512-unit ReLU layer with dropout, then a single sigmoid unit for binary
    classification.
    """
    # encoded input token ids
    input_ids = Input(shape = (max_len,), dtype = tf.int32, name = "input_ids")
    # pretrained transformer; output [0] is the last hidden state
    transformer_model = TFAutoModel.from_pretrained(model_name)
    transformer_embeddings = transformer_model(input_ids)[0]
    # classification head on the first token's embedding
    output_values = Dense(512, activation = "relu")(transformer_embeddings[:, 0, :])
    output_values = Dropout(0.5)(output_values)
    output_values = Dense(1, activation='sigmoid')(output_values)
    # assembling and compiling the model
    model = Model(inputs = input_ids, outputs = output_values)
    opt = Adam(learning_rate = learning_rate)
    loss = tf.keras.losses.BinaryCrossentropy()
    metrics = metrics  # NOTE(review): no-op self-assignment; safe to delete
    model.compile(optimizer = opt, loss = loss, metrics = metrics)
    return model
def run_model(config):
    """Fine-tune the transformer classifier and print a validation report.

    Reads the French dataset (optionally prepending English data to the
    training split), tokenizes both splits, trains with checkpoint and
    early-stopping callbacks, reloads the best weights and scores the
    validation split.
    """
    ## reading data
    fr_df = pd.read_csv(config.PATH_FR_DATA)
    # separating sentences and labels
    sentences = fr_df.text.astype(str).values.tolist()
    labels = fr_df.label.astype(float).values.tolist()
    # splitting data into training and validation
    X_train, X_valid, y_train, y_valid = train_test_split(sentences,
                                                          labels,
                                                          test_size = config.SPLIT_SIZE
                                                          )
    # Optionally augment only the training split with the English dataset
    # (validation stays French-only).
    if config.INCLUDE_ENG:
        eng_df = pd.read_csv(config.PATH_ENG_DATA)
        X_train = eng_df.text.astype(str).tolist() + X_train
        y_train = eng_df.labels.astype(float).values.tolist() + y_train
    # initializing TPU
    # NOTE(review): `config.tpu` is only set when TPU init succeeded in the
    # constructor — confirm it exists on this code path.
    if config.ACCELERATOR == "TPU":
        if config.tpu:
            config.initialize_accelerator()
    # building model inside the distribution-strategy scope
    K.clear_session()
    with config.strategy.scope():
        model = build_model(config.MODEL_NAME, config.MAX_LENGTH, config.LEARNING_RATE, config.METRICS)
    print("\nTokenizing")
    # encoding text data using the pretrained tokenizer
    X_train_encoded = encode_text(X_train, tokenizer = config.TOKENIZER, max_len = config.MAX_LENGTH, padding = config.PAD_TO_MAX_LENGTH)
    X_valid_encoded = encode_text(X_valid, tokenizer = config.TOKENIZER, max_len = config.MAX_LENGTH, padding = config.PAD_TO_MAX_LENGTH)
    # creating TF Datasets; batch size scales with the number of replicas
    ds_train = get_tf_dataset(X_train_encoded, y_train, config.AUTO, repeat = True, shuffle = True, batch_size = config.BATCH_SIZE * config.REPLICAS)
    ds_valid = get_tf_dataset(X_valid_encoded, y_valid, config.AUTO, batch_size = config.BATCH_SIZE * config.REPLICAS * 4)
    n_train = len(X_train)
    # save weights at the best val_accuracy epoch; stop after 10 stale epochs
    sv = [tf.keras.callbacks.ModelCheckpoint(
        "model.h5",
        monitor = "val_accuracy",
        verbose = 1,
        save_best_only = True,
        save_weights_only = True,
        mode = "max",
        save_freq = "epoch"),
        tf.keras.callbacks.EarlyStopping(patience = 10,
                                         verbose= 1,
                                         monitor='val_accuracy')]
    print("\nTraining")
    # training model
    # NOTE(review): `sv` is already a list, so callbacks=[sv] nests it;
    # recent tf.keras flattens nested callback lists — verify on the pinned
    # TF version, or pass `callbacks=sv`.
    model_history = model.fit(
        ds_train,
        epochs = config.EPOCHS,
        callbacks = [sv],
        steps_per_epoch = n_train / config.BATCH_SIZE // config.REPLICAS,
        validation_data = ds_valid,
        verbose = config.VERBOSE
    )
    print("\nValidating")
    # scoring validation data with the best checkpoint
    model.load_weights("model.h5")
    ds_valid = get_tf_dataset(X_valid_encoded, -1, config.AUTO, labelled = False, batch_size = config.BATCH_SIZE * config.REPLICAS * 4)
    preds_valid = model.predict(ds_valid, verbose = config.VERBOSE)
    print('Classification report:')
    print(classification_report(y_valid, (preds_valid > 0.5)))
| Python | 267 | 30.940075 | 149 | /BERT_classifiers.py | 0.603893 | 0.598734 |
akshayjh/spacyr | refs/heads/master | # from __future__ import unicode_literals
# Initialise the spaCy pipeline.  `spacy` and `lang` are injected into this
# snippet's namespace by the R wrapper (reticulate) before it runs — they
# are deliberately not defined here.
nlp = spacy.load(lang)
| Python | 3 | 21.333334 | 42 | /inst/python/initialize_spacyPython.py | 0.701493 | 0.701493 |
PointMeAtTheDawn/warmachine-images | refs/heads/master | """This converts a cardbundle.pdf (downloaded from Privateer Press) into
Tabletop Simulator deck Saved Objects."""
import os
import argparse
import json
import threading
from shutil import copyfile
import PIL.ImageOps
from PIL import Image
import cloudinary.uploader
import cloudinary.api
from pdf2image import convert_from_path
def parse_images(fronts, backs, raw_page):
    """Chop a page from the Privateer Press PDF into its card images.

    Appends the four card fronts (top row) to ``fronts`` and the four
    matching backs (bottom row) to ``backs``, left to right.
    """
    # Crop boxes (left, upper, right, lower) for a page rendered at 200 DPI.
    # Boxes for 400/150 DPI renders existed historically and can be derived
    # by rescaling these coordinates.
    front_boxes = [
        (94, 151, 592, 852),
        (597, 151, 1095, 852),
        (1099, 151, 1598, 852),
        (1602, 151, 2101, 852),
    ]
    back_boxes = [
        (94, 855, 592, 1553),
        (597, 855, 1095, 1553),
        (1099, 855, 1598, 1553),
        (1602, 855, 2101, 1553),
    ]
    for box in front_boxes:
        fronts.append(raw_page.crop(box))
    for box in back_boxes:
        backs.append(raw_page.crop(box))
def load_config():
    """Load layout settings and Cloudinary credentials from config.json.

    Side effect: configures the global ``cloudinary`` client with the
    credentials found in the file.

    Returns:
        (width, height, saved_objects_folder): cards per row and per column
        of a deck template image, and the TTS Saved Objects folder path.
    """
    with open('config.json') as json_file:
        data = json.load(json_file)
        cloudinary.config(
            cloud_name=data["cloud_name"],
            api_key=data["api_key"],
            api_secret=data["api_secret"]
        )
        return data["width"], data["height"], data["saved_objects_folder"]
def image_upload(name, links):
    """Upload a compiled TTS-compatible deck template image into Cloudinary.

    Args:
        name: local path of the image; the file is deleted after upload.
        links: dict updated in place, mapping image name -> hosted URL.
    """
    res = cloudinary.uploader.upload(name)
    links[name] = res["url"]
    # The local copy is no longer needed once Cloudinary hosts it.
    os.remove(name)
    print(links[name])
def package_pages(cards_width, cards_height, fronts, backs, page_count, links):
    """Stitch together card images into a TTS-compatible deck template image

    Lays out ``cards_width x cards_height`` cards per 4096x4096 sheet, one
    sheet of fronts and one of backs per deck, then uploads both via
    ``image_upload`` on two concurrent threads.

    Args:
        cards_width: number of card columns per template sheet.
        cards_height: number of card rows per template sheet.
        fronts: list of PIL card-front images.
        backs: parallel list of PIL card-back images.
        page_count: number of template sheets to produce.
        links: dict filled in by image_upload with image name -> URL.
    """
    pixel_width = 4096//cards_width
    pixel_height = 4096//cards_height
    for i in range(page_count):
        fronts_image = Image.new("RGB", (4096, 4096))
        backs_image = Image.new("RGB", (4096, 4096))
        for j in range(cards_width * cards_height):
            # The last sheet may be only partially filled.
            if len(fronts) <= i * cards_width * cards_height + j:
                continue
            front = fronts[i * cards_width * cards_height + j].resize(
                (pixel_width, pixel_height), Image.BICUBIC)
            # Backs are rotated 180 degrees (presumably what TTS expects for
            # the back sheet -- confirm against a working deck).
            back = backs[i * cards_width * cards_height + j].resize(
                (pixel_width, pixel_height), Image.BICUBIC).rotate(180)
            fronts_image.paste(front, (j % cards_width * pixel_width,
                                       (j // cards_width) * pixel_height))
            backs_image.paste(back, (j % cards_width * pixel_width,
                                     (j // cards_width) * pixel_height))
        fronts_image.save(f"f-{i}.jpg")
        backs_image.save(f"b-{i}.jpg")
        # Upload the front and back sheets in parallel, then wait for both.
        t_1 = threading.Thread(
            target=image_upload, args=(f"f-{i}.jpg", links)
        )
        t_1.start()
        t_2 = threading.Thread(
            target=image_upload, args=(f"b-{i}.jpg", links)
        )
        t_2.start()
        t_1.join()
        t_2.join()
def write_deck(deck_json, args, saved_objects_folder, links, num):
    """Craft the JSON for your final TTS deck Saved Object

    Fills the placeholder tokens in the template JSON with this deck's name
    and uploaded image URLs, writes ``<name><num>.json`` into the Saved
    Objects folder, and copies a thumbnail .png next to it.

    Args:
        deck_json: template JSON text containing the placeholder tokens.
        args: parsed CLI arguments; ``args.name`` prefixes the deck name.
        saved_objects_folder: destination folder for the Saved Object.
        links: dict of uploaded image name -> URL (from image_upload).
        num: index of this deck, appended to the deck name.
    """
    name = args.name + str(num)
    deck_json = deck_json.replace("DeckName", name)
    deck_json = deck_json.replace("FrontImageURL", links[f"f-{num}.jpg"])
    deck_json = deck_json.replace("BackImageURL", links[f"b-{num}.jpg"])
    # Each TTS object needs distinct GUIDs; derive them from the deck name.
    deck_json = deck_json.replace("ReplaceGUID", f"{name}C")
    deck_json = deck_json.replace("ReplaceGUID2", f"{name}D")
    with open(saved_objects_folder + name + ".json", "w") as deck:
        deck.write(deck_json)
    copyfile("warmahordes.png", saved_objects_folder + name + ".png")
def parse_arguments():
    """Parse command-line options.

    Returns an ``argparse.Namespace`` with a single attribute ``name``
    (``None`` when the ``-name`` flag was not supplied).
    """
    arg_parser = argparse.ArgumentParser(
        description="Convert Privateer Press card pdfs to Tabletop Simulator saved deck objects.")
    arg_parser.add_argument(
        "-name",
        type=str,
        help="your deck name - possibly the faction you are converting")
    return arg_parser.parse_args()
def convert():
    """This converts a cardbundle.pdf (downloaded from Privateer Press) into
    Tabletop Simulator deck Saved Objects.

    Pipeline: rasterise the PDF, crop out card images, drop blank cards,
    stitch/upload TTS template sheets, then write the Saved Object JSONs.
    """
    args = parse_arguments()
    width, height, saved_objects_folder = load_config()
    if args.name is None:
        args.name = "Warmachine"
    print("Naming decks: " + args.name + "X")
    # Strip out the card images from the Privateer Press pdfs.
    card_fronts = []
    card_backs = []
    infile = "cardbundle.pdf"
    # 200 DPI must match the crop boxes hard-coded in parse_images.
    pages = convert_from_path(infile, 200, output_folder="pdf_parts")
    for page in pages:
        parse_images(card_fronts, card_backs, page)
    print("Parsing cardbundle.pdf complete.")
    # But we don't want the blank white cards.
    # I'd rather do a .filter, but I'm concerned a stray pixel would put them outta sync.
    filtered_fronts = []
    filtered_backs = []
    for i, card in enumerate(card_fronts):
        # An all-white front inverts to all-black, whose getbbox() is None.
        if PIL.ImageOps.invert(card).getbbox():
            filtered_fronts.append(card)
            filtered_backs.append(card_backs[i])
    print("Stripping out blank cards complete.")
    # Collate the cards into the image format Tabletop Simulator requires.
    links = {}
    # NOTE(review): deck_count is computed from the unfiltered card count,
    # not the filtered one -- confirm this over-allocation is intentional.
    deck_count = len(card_fronts) // (width*height) + 1
    package_pages(width, height, filtered_fronts, filtered_backs, deck_count, links)
    print("Packaging cards into TTS deck template images and uploading to Cloudinary complete.")
    # And let's shove em all in your Saved Objects folder :)
    deck_json = ""
    with open("decktemplate.json", "r") as deck_template:
        deck_json = deck_template.read()
    for i in range(deck_count):
        write_deck(deck_json, args, saved_objects_folder, links, i)
    print("Writing deck jsons into Saved Object folder complete.")
if __name__ == "__main__":
    convert()
| Python | 167 | 38.880241 | 97 | /convert.py | 0.628829 | 0.571471 |
jimrhoskins/dotconfig | refs/heads/master | import os
def vcs_status():
    """Powerline segment: return "X" when the cwd's repository is dirty.

    Returns None (segment hidden) when the cwd is not inside a repository
    or the repository reports a clean status.
    """
    # Imported lazily so powerline only pays the cost when this segment renders.
    from powerline.lib.vcs import guess
    repo = guess(os.path.abspath(os.getcwd()))
    if repo and repo.status():
        return "X"
    else:
        return None
| Python | 9 | 19 | 44 | /powerline/lib/powerext/segments.py | 0.666667 | 0.666667 |
thfabian/molec | refs/heads/master | #!usr/bin/env python3
# _
# _ __ ___ ___ | | ___ ___
# | '_ ` _ \ / _ \| |/ _ \/ __|
# | | | | | | (_) | | __/ (__
# |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
#
# Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
# Michel Breyer (mbreyer@student.ethz.ch)
# Florian Frei (flofrei@student.ethz.ch)
# Fabian Thuring (thfabian@student.ethz.ch)
#
# This file is distributed under the MIT Open Source License.
# See LICENSE.txt for details.
from pymolec import *
import numpy as np
import json
import sys
#------------------------------------------------------------------------------
# Integrator kernels to benchmark against each other.
integrators = ['lf', 'lf2', 'lf4', 'lf8', 'lf_avx']

# Particle counts: 12 points log-spaced between 10^2 and 10^5.
N = np.logspace(2, 5, 12, base=10).astype(np.int32)
steps = np.array([25])
rho = 1.0
rc = 2.5

#------------------------------------------------------------------------------

# Output JSON file for the collected timings (required CLI argument).
filename = sys.argv[1]

results = {}

for integrator in integrators:
    p = pymolec(N=N, rho=rho, steps=steps, force='q_g_avx', integrator=integrator)
    output = p.run()

    # N and rho are identical across runs, so overwriting them is harmless.
    results['N'] = output['N'].tolist()
    results['rho'] = output['rho'].tolist()
    results[integrator] = output['integrator'].tolist()

print('Saving performance data to ' + filename)
with open(filename, 'w') as outfile:
    json.dump(results, outfile, indent=4)
| Python | 49 | 25.32653 | 82 | /python/integrators.py | 0.457364 | 0.43876 |
thfabian/molec | refs/heads/master | #!usr/bin/env python3
# _
# _ __ ___ ___ | | ___ ___
# | '_ ` _ \ / _ \| |/ _ \/ __|
# | | | | | | (_) | | __/ (__
# |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
#
# Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
# Michel Breyer (mbreyer@student.ethz.ch)
# Florian Frei (flofrei@student.ethz.ch)
# Fabian Thuring (thfabian@student.ethz.ch)
#
# This file is distributed under the MIT Open Source License.
# See LICENSE.txt for details.
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import sys
import json
# seaborn formatting
sns.set_context("notebook", font_scale=1.1)
sns.set_style("darkgrid")
sns.set_palette('deep')
# Color palette shared by the molec plotting scripts (seaborn "deep").
deep = ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"]

# The results file produced by the benchmark scripts is a required argument.
try:
    filename = sys.argv[1]
except IndexError as ie:
    print('usage: plot results.txt')
    sys.exit(1)

# load results from json object
with open(filename, 'r') as infile:
    results = json.load(infile)

N = np.array(results['N'])
rho = np.array(results['rho'])

# Remove the metadata keys so only per-kernel timing series remain.
del results['N']
del results['rho']

#----- plot runtime ------
fig = plt.figure()
ax = fig.add_subplot(1,1,1);

# Speedup of every kernel relative to the reference implementation:
# 'cell_ref' for force benchmarks, 'lf' for integrator benchmarks.
for k in sorted(results):
    if 'cell_ref' in results:
        ax.semilogx(N, np.array(results['cell_ref']) / np.array(results[k]), 'o-', label=k)
    elif 'lf' in results:
        ax.semilogx(N, np.array(results['lf']) / np.array(results[k]), 'o-', label=k)

ax.set_xlabel('Number of particles $N$')
ax.set_ylabel('Runtime Speedup',
              rotation=0,
              horizontalalignment = 'left')
ax.yaxis.set_label_coords(-0.055, 1.05)

ax.set_xlim([np.min(N)*0.9, np.max(N)*1.1])
ax.set_ylim([0.0, 1.2 * ax.get_ylim()[1]])

ax.legend(loc='upper right')

plt.savefig(filename[:filename.rfind('.')]+'-runtime.pdf')

#----- plot performance -----
# Analytic flop-count models per kernel as functions of N and density rho.
flops = dict()
flops['cell_ref'] = lambda N, rho : 301 * N * rho * 2.5**3
flops['q'] = lambda N, rho : 301 * N * rho * 2.5**3
flops['q_g'] = lambda N, rho : 180 * N * rho * 2.5**3
flops['q_g_avx'] = lambda N, rho : N * (205 * rho * 2.5**3 + 24)
flops['lf'] = lambda N, rho : 9 * N
flops['lf2'] = lambda N, rho : 9 * N
flops['lf4'] = lambda N, rho : 9 * N
flops['lf8'] = lambda N, rho : 9 * N
flops['lf_avx'] = lambda N, rho : 9 * N

fig = plt.figure()
ax = fig.add_subplot(1,1,1);

# Performance = model flops divided by measured cycles.
for k in sorted(results):
    ax.semilogx(N, flops[k](N,rho) / np.array(results[k]), 'o-', label=k)

ax.set_xlabel('Number of particles $N$')
ax.set_ylabel('Performance [Flops/Cycles]',
              rotation=0,
              horizontalalignment = 'left')
ax.yaxis.set_label_coords(-0.055, 1.05)

ax.set_xlim([np.min(N)*0.9, np.max(N)*1.1])
ax.set_ylim([-0.1, 1.4 * ax.get_ylim()[1]])

ax.legend(loc='upper right')

plt.savefig(filename[:filename.rfind('.')]+'-performance.pdf')
| Python | 99 | 27.141415 | 91 | /python/plot.py | 0.559943 | 0.521536 |
thfabian/molec | refs/heads/master | #!usr/bin/env python3
# _
# _ __ ___ ___ | | ___ ___
# | '_ ` _ \ / _ \| |/ _ \/ __|
# | | | | | | (_) | | __/ (__
# |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
#
# Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
# Michel Breyer (mbreyer@student.ethz.ch)
# Florian Frei (flofrei@student.ethz.ch)
# Fabian Thuring (thfabian@student.ethz.ch)
#
# This file is distributed under the MIT Open Source License.
# See LICENSE.txt for details.
from pymolec import *
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import os.path
# seaborn formatting
sns.set_context("notebook", font_scale=1.1)
sns.set_style("darkgrid")
sns.set_palette('deep')
# Color palette shared by the molec plotting scripts (seaborn "deep").
deep = ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"]
def measure_performance():
    """Benchmark the 'q' force kernel over a grid of N and density values.

    Returns:
        (performances, N, rhos): flops/cycle with rows stored in *reversed*
        rho order (row 0 = largest rho), plus the sampled axes.

    Results are cached in performances-grid-forces-density.npy; delete the
    file to force a re-run.
    """
    forces = ['q'];

    N = np.logspace(4,7,8).astype(np.int32)
    steps = np.array([100, 100, 90, 80, 65, 50, 35, 20])

    rhos = np.array([0.5, 1., 2., 4., 6.,8.,10.])
    rc = 2.5

    if os.path.isfile("performances-grid-forces-density.npy"):
        print("Loading data from <performances-grid-forces-density.npy")
        performances = np.load("performances-grid-forces-density.npy")
        return performances, N, rhos
    else:
        performances = np.zeros((len(rhos), len(N)))

        for rho_idx, rho in enumerate(rhos):
            # Analytic flop-count model for the 'q' force kernel.
            flops = N * rc**3 * rho * (18 * np.pi + 283.5)
            p = pymolec(N=N, rho=rho, force=forces, steps=steps, integrator='lf8', periodic='c4')
            output = p.run()
            perf = flops / output['force']
            # Fill rows bottom-up so the heatmap's y-axis matches rhos[::-1].
            performances[len(rhos)-1-rho_idx, :] = perf

        print("Saving performance data to <performances-grid-forces-density.npy>")
        np.save("performances-grid-forces-density", performances)

        return performances, N, rhos
def plot_performance(performances, N, rhos):
    """Render the N x rho performance grid as a heatmap; save forces-grid.pdf.

    Args:
        performances: 2D array from measure_performance (rows in reversed
            rho order).
        N: particle counts (heatmap columns).
        rhos: particle densities (heatmap rows).
    """
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1);

    # Generate a custom diverging colormap
    cmap = sns.diverging_palette(10, 133, n = 256, as_cmap=True)

    # Color range rounded to multiples of 0.2 flops/cycle.
    ax = sns.heatmap(performances, linewidths=1,
                     yticklabels=rhos[::-1], xticklabels=N,
                     vmax=0.2*np.round(np.max(np.max(performances))*5),
                     vmin=0.2*np.round(np.min(np.min(performances))*5),
                     cmap=cmap, annot=False
                     )

    # Nudge and shrink the colorbar so it lines up with the heatmap rows.
    cax = plt.gcf().axes[-1]
    pos_old = cax.get_position()
    pos_new = [pos_old.x0 - 0.01, pos_old.y0 + 0, pos_old.width, pos_old.height*((len(rhos)-1)*1./len(rhos))]
    cax.set_position(pos_new)
    cax.tick_params(labelleft=False, labelright=True)
    cax.set_yticklabels(['Low', '', '', '', 'High'])

    ax.text(len(N)+0.35, len(rhos), 'Performance\n[flops/cycle]', ha='left', va='top')

    rho_labels_short = ['%.2f' % a for a in rhos]
    ax.set_yticklabels(rho_labels_short)

    # Column labels rendered as powers of ten.
    N_labels_short = ['10$^{%1.2f}$' % a for a in np.array(np.log10(N))]
    ax.set_xticklabels(N_labels_short)

    ax.set_xlabel('Number of particles $N$')
    ax.set_ylabel('Particle density',
                  rotation=0, horizontalalignment = 'left')
    ax.yaxis.set_label_coords(0., 1.01)

    plt.yticks(rotation=0)

    filename = 'forces-grid.pdf'
    print("saving '%s'" % filename )
    plt.savefig(filename)
if __name__ == '__main__':
    # Measure (or load cached) performance data, then render the heatmap.
    perf, N, rhos = measure_performance()
    plot_performance(perf, N, rhos)
| Python | 108 | 30.379629 | 110 | /python/forces-grid.py | 0.558867 | 0.523753 |
thfabian/molec | refs/heads/master | #!usr/bin/env python3
# _
# _ __ ___ ___ | | ___ ___
# | '_ ` _ \ / _ \| |/ _ \/ __|
# | | | | | | (_) | | __/ (__
# |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
#
# Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
# Michel Breyer (mbreyer@student.ethz.ch)
# Florian Frei (flofrei@student.ethz.ch)
# Fabian Thuring (thfabian@student.ethz.ch)
#
# This file is distributed under the MIT Open Source License.
# See LICENSE.txt for details.
import numpy as np
import time, sys, os, subprocess
class pymolec:
    """Driver that shells out to the ``molec`` binary and collects its timers.

    One simulation is launched per entry in ``N``; the binary's
    tab-separated stdout line is parsed into per-subsystem cycle counts.
    """

    def __init__(self, N=np.array([1000]), rho=1.25, steps=np.array([100]),
                 force="cell_ref", integrator="lf", periodic="ref"):
        """Store the run matrix.

        Args:
            N: array of particle counts, one molec run per entry.
            rho: particle density passed to every run.
            steps: per-run step counts; a scalar, or an array whose length
                does not match ``N`` (its first value is then repeated),
                is broadcast to one entry per run.
            force: force kernel name passed on the CLI.
            integrator: integrator kernel name passed on the CLI.
            periodic: periodic-boundary kernel name passed on the CLI.
        """
        self.N = N
        self.rho = rho

        # Normalize `steps` to one entry per particle count.  Use the
        # builtin `int` as dtype: the `np.int` alias was deprecated in
        # NumPy 1.20 and removed in 1.24.
        if hasattr(steps, "__len__"):
            if len(N) != len(steps):
                # Length mismatch: repeat the first step count for every run.
                self.steps = np.full(len(N), steps[0], dtype=int)
            else:
                self.steps = steps
        else:
            self.steps = np.full(len(N), steps, dtype=int)

        self.force = force
        self.integrator = integrator
        self.periodic = periodic

    def run(self, path=None):
        """
        runs a molec simulation for the given configurations and outputs a
        dictionnary containing N, rho, force, integrator, periodic, simulation

        Args:
            path: location of the molec executable; defaults to
                ../build/molec(.exe) relative to this script.

        Raises:
            IOError: if the molec executable does not exist.
        """
        # Use default path
        if not path:
            script_path = os.path.join(os.path.dirname(os.path.abspath(__file__)))
            if os.name == 'nt':
                path = os.path.join(script_path, '..', 'build', 'molec.exe')
            else:
                path = os.path.join(script_path, '..', 'build', 'molec')

        # Check if molec exists
        if not os.path.exists(path):
            raise IOError("no such file or directory: %s" % path)

        print("Running molec: %s" % path)
        print("rho = {0}, force = {1}, integrator = {2}, periodic = {3}".format(
            self.rho, self.force, self.integrator, self.periodic))

        # One slot per run for every reported counter.
        output = {}
        output['N'] = np.zeros(len(self.N))
        output['rho'] = np.zeros(len(self.N))
        output['force'] = np.zeros(len(self.N))
        output['integrator'] = np.zeros(len(self.N))
        output['periodic'] = np.zeros(len(self.N))
        output['simulation'] = np.zeros(len(self.N))

        for i in range(len(self.N)):
            cmd = [path]
            cmd += ["--N=" + str(self.N[i])]
            cmd += ["--rho=" + str(self.rho)]
            cmd += ["--step=" + str(self.steps[i])]
            cmd += ["--force=" + self.force]
            cmd += ["--integrator=" + self.integrator]
            cmd += ["--periodic=" + self.periodic]
            cmd += ["--verbose=0"]

            # Print status
            start = time.time()
            print(" - N = %9i ..." % self.N[i], end='')
            sys.stdout.flush()

            try:
                # Parse the tab-separated fields molec prints on stdout
                # (field layout assumed from the indices below -- see the
                # molec source for the authoritative format).
                out = subprocess.check_output(cmd).decode(encoding='utf-8').split('\t')
                print(" %20f s" % (time.time() - start))

                output['N'][i] = int(out[0])
                output['rho'][i] = float(out[1])
                output['force'][i] = int(out[3])
                output['integrator'][i] = int(out[5])
                output['periodic'][i] = int(out[7])
                output['simulation'][i] = int(out[9])
            except subprocess.CalledProcessError as e:
                print(e.output)

        return output
def main():
    """Smoke test: run molec with default settings and print the timings."""
    p = pymolec()
    print(p.run())

if __name__ == '__main__':
    main()
| Python | 112 | 30.616072 | 87 | /python/pymolec.py | 0.468512 | 0.459475 |
thfabian/molec | refs/heads/master | #!usr/bin/env python3
# _
# _ __ ___ ___ | | ___ ___
# | '_ ` _ \ / _ \| |/ _ \/ __|
# | | | | | | (_) | | __/ (__
# |_| |_| |_|\___/|_|\___|\___| - Molecular Dynamics Framework
#
# Copyright (C) 2016 Carlo Del Don (deldonc@student.ethz.ch)
# Michel Breyer (mbreyer@student.ethz.ch)
# Florian Frei (flofrei@student.ethz.ch)
# Fabian Thuring (thfabian@student.ethz.ch)
#
# This file is distributed under the MIT Open Source License.
# See LICENSE.txt for details.
from pymolec import *
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
# seaborn formatting
sns.set_context("notebook", font_scale=1.1)
sns.set_style("darkgrid")
sns.set_palette('deep')
# Color palette shared by the molec plotting scripts (seaborn "deep").
deep = ["#4C72B0", "#55A868", "#C44E52", "#8172B2", "#CCB974", "#64B5CD"]
def main():
    """Benchmark the periodic-boundary kernels and plot flops/cycle vs N."""

    # Periodic-boundary implementations to compare.
    periodics = ['ref', 'c4']

    N = np.array([1000, 2000, 3000, 4000, 5000, 6000, 7000, 10000])

    flops = 2 * N # mod plus addition

    fig = plt.figure()
    ax = fig.add_subplot(1,1,1);

    for periodic in periodics:
        p = pymolec(N=N, periodic=periodic )
        output = p.run()

        # Performance = model flops / measured cycles for the periodic pass.
        perf = flops / output['periodic']
        ax.plot(N, perf, 'o-')

    ax.set_xlim([np.min(N)-100, np.max(N)+100])
    ax.set_ylim([0,2])

    ax.set_xlabel('Number of particles')
    ax.set_ylabel('Performance [Flops/Cycle]',
                  rotation=0,
                  horizontalalignment = 'left')
    ax.yaxis.set_label_coords(-0.055, 1.05)

    plt.legend(periodics)

    filename = 'periodic.pdf'
    print("saving '%s'" % filename )
    plt.savefig(filename)

if __name__ == '__main__':
    main()
| Python | 63 | 24.15873 | 73 | /python/periodic.py | 0.51735 | 0.463722 |
anurag3301/Tanmay-Bhat-Auto-Video-Liker | refs/heads/main | from selenium import webdriver
from selenium.common.exceptions import *
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from time import sleep
from getpass import getpass
import tkinter as tk
from tkinter import messagebox
class tanmay_bhat:
    """Selenium bot that logs into YouTube via a Google account and likes
    every video on a given channel."""

    def __init__(self, username, password, channel_addr):
        """Start a Chrome webdriver and remember the credentials.

        Args:
            username: Google account email.
            password: Google account password.
            channel_addr: URL of the YouTube channel whose videos to like.
        """
        try:
            #Check for Chrome webdriver in Windows
            self.bot = webdriver.Chrome('driver/chromedriver.exe')
        except WebDriverException:
            try:
                #Check for Chrome webdriver in Linux
                self.bot = webdriver.Chrome('/usr/bin/chromedriver')
            except WebDriverException:
                print("Please set Chrome Webdriver path above")
                exit()
        self.username = username
        self.password = password
        self.channel_addr = channel_addr

    def login(self):
        """Log into Google through StackOverflow's "Log in with Google"
        button, then open the channel's /videos page.

        NOTE(review): routing the sign-in through stackoverflow.com appears
        intended to reach Google's OAuth page indirectly -- confirm the flow
        and the XPaths still match the live sites before relying on it.
        """
        bot = self.bot
        print("\nStarting Login process!\n")
        bot.get('https://stackoverflow.com/users/signup?ssrc=head&returnurl=%2fusers%2fstory%2fcurrent%27')
        bot.implicitly_wait(10)
        self.bot.find_element_by_xpath('//*[@id="openid-buttons"]/button[1]').click()
        self.bot.find_element_by_xpath('//input[@type="email"]').send_keys(self.username)
        self.bot.find_element_by_xpath('//*[@id="identifierNext"]').click()
        sleep(3)
        self.bot.find_element_by_xpath('//input[@type="password"]').send_keys(self.password)
        self.bot.find_element_by_xpath('//*[@id="passwordNext"]').click()
        # Wait (up to 900 s) until the StackOverflow header appears, i.e.
        # the login completed -- presumably leaving room for manual 2FA.
        WebDriverWait(self.bot, 900).until(EC.presence_of_element_located((By.XPATH, "/html/body/header/div/div[1]/a[2]/span")))
        print("\nLoggedin Successfully!\n")
        sleep(2)
        self.bot.get(self.channel_addr + "/videos")

    def start_liking(self):
        """Scroll the /videos page until fully loaded, then like every video
        that is not already liked."""
        bot = self.bot
        scroll_pause = 2
        last_height = bot.execute_script("return document.documentElement.scrollHeight")
        # Keep scrolling until the document height stops growing, meaning
        # all video thumbnails have been lazy-loaded.
        while True:
            bot.execute_script("window.scrollTo(0, document.documentElement.scrollHeight);")
            sleep(scroll_pause)
            new_height = bot.execute_script("return document.documentElement.scrollHeight")
            if new_height == last_height:
                print("\nScrolling Finished!\n")
                break
            last_height = new_height
            print("\nScrolling")
        all_vids = bot.find_elements_by_id('thumbnail')
        links = [elm.get_attribute('href') for elm in all_vids]
        # Drop the last collected link (presumably not a plain video
        # thumbnail -- confirm against the live page layout).
        links.pop()
        for i in range(len(links)):
            bot.get(links[i])
            like_btn = bot.find_element_by_xpath('//*[@id="top-level-buttons"]/ytd-toggle-button-renderer[1]/a')
            check_liked = bot.find_element_by_xpath('//*[@id="top-level-buttons"]/ytd-toggle-button-renderer[1]')
            # Check if its already liked: the toggle's CSS class encodes the
            # liked/unliked state.
            if check_liked.get_attribute("class") == 'style-scope ytd-menu-renderer force-icon-button style-text':
                like_btn.click()
                print("Liked video! Bot Army Zindabad!!!\n")
                sleep(0.5)
            elif check_liked.get_attribute("class") == 'style-scope ytd-menu-renderer force-icon-button style-default-active':
                print("Video already liked. You are a good Bot Army Member\n")
#************************************************** GUI AREA **********************************************
def start():
    """Start-button callback: validate the entries, then run the bot."""
    if email_entry.get() and password_entry.get() and url_entry.get():
        bot_army = tanmay_bhat(email_entry.get(), password_entry.get(), url_entry.get())
        # Close the GUI before the long-running Selenium session starts.
        root.destroy()
        bot_army.login()
        bot_army.start_liking()
    else:
        messagebox.showinfo('Notice', 'Please fill all the entries to proceed furthur')
def tanmay_url_inject():
    """Tanmay-Bhatt-button callback: pre-fill his channel URL in the entry."""
    url_entry.delete(0, tk.END)
    url_entry.insert(0, "https://www.youtube.com/c/TanmayBhatYouTube")
# Build the fixed-size login window, centered on the screen.
root = tk.Tk()
root.resizable(False, False)
root.geometry('%dx%d+%d+%d' % (760, 330, (root.winfo_screenwidth()/2) - (760/2), (root.winfo_screenheight()/2) - (330/2)))

frame = tk.Frame(root, height=330, width=760)

# Widgets: credentials, channel URL, and the two action buttons.
head_label = tk.Label(frame, text='Youtube Video Liker', font=('verdana', 25))
email_label = tk.Label(frame, text='Email: ', font=('verdana', 15))
password_label = tk.Label(frame, text='Password: ', font=('verdana', 15))
email_entry = tk.Entry(frame, font=('verdana', 15))
password_entry = tk.Entry(frame, font=('verdana', 15), show="*")
url_label = tk.Label(frame, text='Channel\nURL', font=('verdana', 15))
url_entry = tk.Entry(frame, font=('verdana', 15))
tanmay_button = tk.Button(frame, text='Tanmay\nBhatt', font=('verdana', 15), command=tanmay_url_inject)
start_button = tk.Button(frame, text='Start Liking', font=('verdana', 20), command=start)

# Manual placement with absolute coordinates inside the 760x330 frame.
frame.pack()
head_label.place(y=15, relx=0.32)
email_label.place(x=15, y=95, anchor='w')
password_label.place(x=15, y=130, anchor='w')
email_entry.place(x=140, y=78, width=600)
password_entry.place(x=140, y=115, width=600)
url_label.place(x=15, y=190, anchor='w')
url_entry.place(x=140, y=175, width=600)
tanmay_button.place(x=400, y=240)
start_button.place(x=550, y=250)

root.mainloop()
"""
Comment out the GUI area and uncomment the Console Area to use Console controls
********************************************** Console Area *******************************************
print("HI BOT ARMYYYYYYY! How you doing?\nToday is the time to make our PROVIDER (BOT LEADER) proud by liking all his videos!\n\nLet's make hime proud!!\n\n")
print("Enter the link of the channel or just hit [ENTER] key for default Tanmay's Channel")
channel_addr = str(input("Channel Link: "))
username = str(input("\nEnter your YouTube/Google Email ID: "))
password = str(getpass("Enter your password: "))
if not channel_addr:
channel_addr = "https://www.youtube.com/c/TanmayBhatYouTube"
bot_army = tanmay_bhat(username, password, channel_addr)
bot_army.login()
bot_army.start_liking()
print("\n\nALL VIDEOS ARE LIKED!!! YOU CAN NOW OFFICIALLY CALL YOURSELF:\nA PROUD BOT ARMY MEMBERRRRR!!!!!!\n\n\nPress any key to end")
input()
""" | Python | 143 | 42.258739 | 158 | /main.py | 0.616406 | 0.59744 |
hauntshadow/CS3535 | refs/heads/master | """
dir_comp.py
Usage: In the functions following this, the parameters are described as follows:
dir: the directory to search
Program that parses all .mp3 files in the passed in directory,
gets the segment arrays from each .mp3 file and puts them into a
numpy array for later use. Each segment array is in the following format:
[12 values for segment pitch, 12 values for segment timbre, 1 value for loudness
max, 1 value for loudness start, and 1 value for the segment duration]
Author: Chris Smith
Date: 03.27.2015
"""
import matplotlib
matplotlib.use("Agg")
import echonest.remix.audio as audio
import matplotlib.pyplot as plt
import scipy.spatial.distance as distance
import os
import numpy as np
'''
Method that takes a directory, searches that directory, and returns a list of every .mp3 file in it.
'''
def get_mp3_files(dir):
    """Return absolute paths of every .mp3 file under *dir*, recursively.

    Args:
        dir: directory to search (os.walk descends into subdirectories).

    Returns:
        List of real (symlink-resolved) absolute paths to .mp3 files.
    """
    # Renamed locals: the originals shadowed the builtins `list` and `file`.
    mp3_paths = []
    for root, dirs, files in os.walk(dir):
        for fname in files:
            extension = os.path.splitext(fname)[1]
            if extension == ".mp3":
                mp3_paths.append(os.path.realpath(os.path.join(root, fname)))
    return mp3_paths
'''
Method that takes two .mp3 files and compares every segment within song A to
every segment in song B and supplies a histogram that shows
the distances between segments (tuples of segments). Also supplies some data
about the songs that were parsed.
'''
def two_song_comp(fileA, fileB):
    """Compare every segment of song A against every segment of song B.

    Builds a 27-feature vector per segment (12 pitches scaled x10, 12
    timbre values, loudness max, loudness start, duration scaled x100 --
    the scaling happens inside the distance sum below), computes all
    pairwise distances, counts pairs under the threshold, prints summary
    statistics and saves a histogram PNG under Histograms/.

    Args:
        fileA: path to the first .mp3 file.
        fileB: path to the second .mp3 file.
    """
    #Defines the threshold for comparisons
    thres = 45
    nameA = os.path.basename(os.path.splitext(fileA)[0])
    nameB = os.path.basename(os.path.splitext(fileB)[0])
    adj_listA = []
    adj_listB = []
    sim_seg_countA = 0
    sim_seg_countB = 0
    sim_countA = 0
    sim_countB = 0
    audiofileA = audio.AudioAnalysis(fileA)
    audiofileB = audio.AudioAnalysis(fileB)
    segmentsA = audiofileA.segments
    segmentsB = audiofileB.segments
    #Get each segment's array of comparison data for song A
    segsA = np.array(segmentsA.pitches)
    segsA = np.c_[segsA, np.array(segmentsA.timbre)]
    segsA = np.c_[segsA, np.array(segmentsA.loudness_max)]
    segsA = np.c_[segsA, np.array(segmentsA.loudness_begin)]
    segsA = np.c_[segsA, np.ones(len(segsA))]
    #Get each segment's array of comparison data for song B
    segsB = np.array(segmentsB.pitches)
    segsB = np.c_[segsB, np.array(segmentsB.timbre)]
    segsB = np.c_[segsB, np.array(segmentsB.loudness_max)]
    segsB = np.c_[segsB, np.array(segmentsB.loudness_begin)]
    segsB = np.c_[segsB, np.ones(len(segsB))]
    #Finish creating the adjacency list
    for i in segmentsA:
        adj_listA.append([])
    for i in segmentsB:
        adj_listB.append([])
    #Finish getting the comparison data: column 26 holds the duration.
    for i in range(len(segsA)):
        segsA[i][26] = segmentsA[i].duration
    for i in range(len(segsB)):
        segsB[i][26] = segmentsB[i].duration
    #Get the euclidean distance for the pitch vectors, then multiply by 10
    distances = distance.cdist(segsA[:,:12], segsB[:,:12], 'euclidean')
    for i in range(len(distances)):
        for j in range(len(distances[i])):
            distances[i][j] = 10 * distances[i][j]
    #Get the euclidean distance for the timbre vectors, adding it to the
    #pitch distance
    distances = distances + distance.cdist(segsA[:,12:24], segsB[:,12:24], 'euclidean')
    #Get the rest of the distance calculations, adding them to the previous
    #calculations: loudness max, loudness start, and duration (x100).
    for i in range(len(distances)):
        for j in range(len(distances[i])):
            distances[i][j] = distances[i][j] + abs(segsA[i][24] - segsB[j][24])
            distances[i][j] = distances[i][j] + abs(segsA[i][25] - segsB[j][25]) + abs(segsA[i][26] - segsB[j][26]) * 100
    i_point = 0
    j_point = 0
    #Use i_point and j_point for the indices in the 2D distances array
    #NOTE(review): len(distances[i]) below uses the stale loop variable i
    #from the loops above; it works only because every row of the cdist
    #result has the same length.
    for i_point in range(len(distances)):
        for j_point in range(len(distances[i])):
            #Check to see if the distance between segment # i_point and
            #segment # j_point is less than 45
            if abs(distances[i_point][j_point]) <= thres:
                #Add to the adjacency lists if not already there
                if j_point not in adj_listA[i_point]:
                    adj_listA[i_point].append(j_point)
                if i_point not in adj_listB[j_point]:
                    adj_listB[j_point].append(i_point)
            j_point = j_point + 1
        i_point = i_point + 1
        j_point = 0
    #Get the count of the similarities in the adjacency lists
    for i in adj_listA:
        if len(i) > 0:
            sim_countA = sim_countA + len(i);
            sim_seg_countA = sim_seg_countA + 1
    for i in adj_listB:
        if len(i) > 0:
            sim_countB = sim_countB + len(i);
            sim_seg_countB = sim_seg_countB + 1
            #print i, "\n"
    print "Num of segments with at least 1 match in song A: ", sim_seg_countA, " out of", len(segmentsA)
    print "Percentage of segments with at least 1 match in song A: ", (sim_seg_countA / float(len(segmentsA)) * 100), "%"
    print "Num of similar tuples: ", sim_countA, " out of ", len(segmentsA) *len(segmentsB)
    print "Percentage of possible tuples that are similar: ", sim_countA / float(len(segmentsA) * len(segmentsB)) * 100, "%"
    print "Num of segments with at least 1 match in song B: ", sim_seg_countB, " out of", len(segmentsB)
    print "Percentage of segments with at least 1 match in song B: ", (sim_seg_countB / float(len(segmentsB)) * 100), "%"
    #Get the number of bins. Calculated by taking the max range and dividing by 50
    bins = int(np.amax(distances)) / thres
    #Make the histogram with titles and axis labels. Plot the line x=thres for visual comparison.
    plt.hist(distances.ravel(), bins = bins)
    plt.title('Distances between Tuples of Segments' + nameA + nameB)
    plt.xlabel('Distances')
    plt.ylabel('Number of occurrences')
    plt.axvline(thres, color = 'r', linestyle = 'dashed')
    #Make each tick on the x-axis correspond to the end of a bin.
    plt.xticks(range(0, int(np.amax(distances) + 2 * thres), thres))
    #Make each tick on the y-axis correspond to each 25000th number up to the number of possible tuple combos / 2.
    plt.yticks(range(0, (len(segmentsA) * len(segmentsB))/2 + 25000, 25000))
    plt.gcf().savefig('Histograms/' + nameA + 'and' + nameB + '_histogram.png')
    plt.close()
'''
Method that runs the comparison on every pair of .mp3 files in a directory
'''
def dir_comp(dir):
    """Run two_song_comp on every ordered pair of .mp3 files under *dir*,
    skipping pairs whose histogram (in either order) already exists.

    NOTE(review): the nested loops visit len(files)**2 pairs while `total`
    is n*(n+1)/2, so the printed progress counter can exceed `total`.
    """
    files = get_mp3_files(dir)
    count = 0
    total = sum(range(len(files) + 1))
    for f1 in files:
        for f2 in files:
            nameA = os.path.basename(os.path.splitext(f1)[0])
            nameB = os.path.basename(os.path.splitext(f2)[0])
            # Skip the comparison if either A-and-B or B-and-A was done before.
            if not os.path.isfile('Histograms/' + nameA + 'and' + nameB + '_histogram.png') and not os.path.isfile('Histograms/' + nameB + 'and' + nameA + '_histogram.png'):
                two_song_comp(f1, f2)
                print "Comparison completed!"
            count = count + 1
            print count, " out of ", total
    print "Finished."
| Python | 161 | 42.770187 | 173 | /res_mod3/dir_comp.py | 0.642259 | 0.624663 |
hauntshadow/CS3535 | refs/heads/master | import numpy as np
def check(filename):
    """Count clustering disagreements against the saved ground truths.

    Loads cluster labels from *filename* (row 1 of the saved array) and the
    similarity ground truths from Results/groundtruths.npy, then counts how
    many ground-truth-similar segment pairs landed in different clusters.

    Args:
        filename: path to a NumPy file whose second row holds per-segment
            cluster labels.

    Returns:
        (error, total): mismatched pairs and total pairs examined.
        (New return value; callers that ignored the old None still work.)
    """
    clusters = np.load(filename)
    # Row 1 of the saved result holds the per-segment cluster labels.
    clusters = clusters[1]
    truths = np.load("Results/groundtruths.npy")
    error = 0
    total = 0
    for i in range(len(truths)):
        for j in range(len(truths[i])):
            # Segment truths[i][j] was judged similar to segment i; a
            # mismatch means they were assigned different clusters.
            if clusters[truths[i][j]] != clusters[i]:
                error += 1
            total += 1
    # Parenthesized single-argument print is valid Python 2 and 3
    # (the original `print error` statement was Python-2-only syntax).
    print(error)
    print(total)
    return error, total
| Python | 15 | 24.799999 | 53 | /ResultCheck/CheckTruths.py | 0.55814 | 0.54522 |
hauntshadow/CS3535 | refs/heads/master | """
h5_seg_to_array.py
Usage: In the functions following this, the parameters are described as follows:
dir: the directory to search
filename: the filename for saving/loading the results to/from
Program that parses all .h5 files in the passed in directory and subdirectories,
getting the segment arrays from each .h5 file and putting them into a
numpy array for later use. Each segment array is in the following format:
[12 values for segment pitch, 12 values for segment timbre, 1 value for loudness
max, 1 value for loudness start, and 1 value for the segment duration]
This program uses the hdf5_getters, which can be found here:
https://github.com/tbertinmahieux/MSongsDB/blob/master/PythonSrc/hdf5_getters.py
Author: Chris Smith
Date: 02.22.2015
"""
import os
import numpy as np
import hdf5_getters as getters
'''
Method that takes a directory, searches that directory, as well as any
subdirectories, and returns a list of every .h5 file.
'''
def get_h5_files(dir):
    """Return absolute paths of every .h5 file under *dir*, recursively.

    os.walk already descends into all subdirectories, so the original
    explicit recursion over `dirs` was removed: its return value was
    discarded, and it was passed bare subdirectory names rather than
    full paths, so it contributed nothing.

    Args:
        dir: directory to search.

    Returns:
        List of real (symlink-resolved) absolute paths to .h5 files.
    """
    # Renamed locals: the originals shadowed the builtins `list` and `file`.
    h5_paths = []
    for root, dirs, files in os.walk(dir):
        for fname in files:
            extension = os.path.splitext(fname)[1]
            if extension == ".h5":
                h5_paths.append(os.path.realpath(os.path.join(root, fname)))
    return h5_paths
'''
Method that takes a directory, gets every .h5 file in that directory (plus any
subdirectories), and then parses those files. The outcome is a Numpy array
that contains every segment in each file. Each row in the array of arrays
contains pitch, timbre, loudness max, loudness start, and the duration of each
segment.
'''
def h5_files_to_np_array(dir, filename):
    """Parse every .h5 file under *dir* into one big segment array.

    Each output row is [12 pitch values, 12 timbre values, loudness max,
    loudness start, segment duration].  The resulting array is pickled to
    *filename* via ndarray.dump and also returned.
    """
    list = get_h5_files(dir)
    num_done = 0
    seg_array = []
    #Go through every file and get the desired information.
    for file in list:
        song = getters.open_h5_file_read(file)
        seg_append = np.array(getters.get_segments_pitches(song))
        seg_append = np.c_[ seg_append, np.array(getters.get_segments_timbre(song))]
        seg_append = np.c_[seg_append, np.array(getters.get_segments_loudness_max(song))]
        seg_append = np.c_[seg_append, np.array(getters.get_segments_loudness_start(song))]
        # Convert segment start times into durations in place: each entry
        # becomes the gap to the next segment's start.
        start = np.array(getters.get_segments_start(song))
        for i in range(0,len(start)-1):
            #NOTE(review): this condition is always true -- the loop above
            #already stops at len(start)-2 -- so it is redundant.
            if i != (len(start) - 1):
                start[i] = start[i+1] - start[i]
        # Last segment lasts until the end of the track.
        start[len(start) - 1] = getters.get_duration(song) - start[len(start) - 1]
        seg_append = np.c_[seg_append, start]
        #Add the arrays to the bottom of the list
        seg_array.extend(seg_append.tolist())
        song.close()
        num_done = num_done + 1
        #Gives a count for every 500 files completed
        if num_done % 500 == 0:
            print num_done," of ",len(list)
    #Convert the list to a Numpy array
    seg_array = np.array(seg_array)
    #Save the array in a file (ndarray.dump pickles the array).
    seg_array.dump(filename)
    print len(seg_array)," number of segments in the set."
    return seg_array
'''
Method that opens the file with that filename. The file must contain a
Numpy array. This method returns the array.
'''
def open(filename):
    """Load and return a NumPy array previously saved with ndarray.dump().

    NOTE(review): this function shadows the builtin ``open`` for the rest
    of the module; renaming would break existing callers, so the name is
    kept.  ``ndarray.dump`` pickles the array, and NumPy >= 1.17 refuses
    pickled loads by default, so ``allow_pickle=True`` is required here.
    Only load files you trust: unpickling can execute arbitrary code.

    Args:
        filename: path to a file written by ndarray.dump (or np.save).

    Returns:
        The loaded NumPy array.
    """
    data = np.load(filename, allow_pickle=True)
    return data
| Python | 87 | 35.793102 | 91 | /h5_array/h5_seg_to_array.py | 0.672915 | 0.659169 |
hauntshadow/CS3535 | refs/heads/master | """
timing.py
Usage: In the functions following this, the parameters are described as follows:
filename: the file that contains segment data
This file must have been a NumPy array of segment data that was saved. It is loaded through NumPy's load function.
Each segment array is in the following format:
[12 values for segment pitch, 12 values for segment timbre, 1 value for loudness
max, 1 value for loudness start, and 1 value for the segment duration]
Author: Chris Smith
Date: 04.11.2015
"""
import time
import scipy.spatial.distance as distance
import numpy as np
'''
Method that takes a file of segment data (a 2D NumPy array), and compares the first 850 segments to 1000, 10000, 100000, and
1000000 segments. The results are ignored, as this function times the comparisons.
'''
def comp_time(filename):
seg_array = np.load(filename)
song = seg_array[:850:].copy()
t1 = time.time()
distance.cdist(song, seg_array[:1000:],'euclidean')
t2 = time.time()
distance.cdist(song, seg_array[:10000:],'euclidean')
t3 = time.time()
distance.cdist(song, seg_array[:100000:],'euclidean')
t4 = time.time()
distance.cdist(song, seg_array[:1000000:],'euclidean')
t5 = time.time()
print "Time for comparisons between a song and 1000 segments: " + str(t2-t1)
print "Time for comparisons between a song and 10000 segments: " + str(t3-t2)
print "Time for comparisons between a song and 100000 segments: " + str(t4-t3)
print "Time for comparisons between a song and 1000000 segments: " + str(t5-t4)
| Python | 43 | 35.046513 | 124 | /res_mod4/timing.py | 0.715484 | 0.650323 |
hauntshadow/CS3535 | refs/heads/master | import matplotlib
matplotlib.use("Agg")
import numpy as np
import matplotlib.pyplot as plt
import time
from collections import Counter
def truth_generator(filename):
data = np.load(filename)
data.resize(100000, 27)
truths = []
for i in range(len(data)):
truths.append([])
t0 = time.time()
for i in range(0,100000,10000):
a = data[i:i+10000,]
a[:,:12:] *= 10
a[:,26] *= 100
for j in range(i,100000,10000):
b = data[j:j+10000,]
b[:,:12:] *= 10
b[:,26] *= 100
c = seg_distances(a,b)
for k in range(len(c)):
for l in range(len(c)):
if c[k,l] <= 80:
truths[k+i].append(l+j)
print "Done. Onto the next one..."
print time.time() - t0
np.save("Results/groundtruths", truths)
def histo_generator(filename):
    """Plot a histogram of cluster sizes from the labels stored at row 1 of
    the pickled array in *filename*; saved to Results/truthCountHistogram.png.
    """
    data = np.load(filename)
    labels = data[1]
    # Tally how many members each cluster label has.
    counter = Counter()
    for i in labels:
        counter[i] += 1
    # One bin per 50 distinct clusters, with a floor of 5 bins.
    # (The original wrapped len() in np.amax(), which is a no-op on a
    # scalar; `//` keeps the Python 2 integer-division behaviour.)
    if len(counter) // 50 >= 5:
        bins = len(counter) // 50
    else:
        bins = 5
    plt.hist(counter.values(), bins = bins)
    plt.title('Number of members per cluster')
    plt.xlabel('Number of members')
    plt.ylabel('Number of occurrences')
    ticks = range(0, bins)
    #plt.xticks(ticks[0::50])
    plt.gcf().savefig('Results/truthCountHistogram.png')
    plt.close()
def seg_distances(u_, v_=None):
    """Weighted distance between 27-column segment feature rows.

    Sums euclidean distance over columns 0:12 (pitch) and 12:24 (timbre)
    plus cityblock distance over columns 24: (loudness/duration).  With one
    argument, returns the square self-distance matrix with NaN down the
    diagonal; with two, returns the rectangular cross-distance matrix.
    """
    from scipy.spatial.distance import pdist, cdist, squareform
    from numpy import diag, ones
    if v_ is None:
        condensed = (pdist(u_[:, 0:12], 'euclidean')
                     + pdist(u_[:, 12:24], 'euclidean')
                     + pdist(u_[:, 24:], 'cityblock'))
        # NaN on the diagonal so a segment never matches itself.
        return squareform(condensed) + diag(float('NaN') * ones((u_.shape[0],)))
    return (cdist(u_[:, 0:12], v_[:, 0:12], 'euclidean')
            + cdist(u_[:, 12:24], v_[:, 12:24], 'euclidean')
            + cdist(u_[:, 24:], v_[:, 24:], 'cityblock'))
| Python | 64 | 30.0625 | 71 | /ResultCheck/GroundTruthGenerate.py | 0.532696 | 0.480885 |
hauntshadow/CS3535 | refs/heads/master | """
seg_kmeans.py
This code performs K-Means clustering on a dataset passed in as a pickled
NumPy array.
There is a function (seg_kmeans) that performs K-Means on
the dataset not using another class's stuff. There is another function
(KMeans) that performs K-Means on the dataset by using Scikit-Learn's
K-Means class inside of the cluster package.
Both functions have the following parameters:
1. filename: the file that contains the dataset (must be a pickled array)
2. clusters: the number of clusters to generate
3. iter: the max number of iterations to use
This also saves the results to an output in the Results folder.
Author: Chris Smith
Version: 4.19.2015
"""
import matplotlib
matplotlib.use("Agg")
import numpy as np
from numpy import random
import scipy.spatial.distance as distance
from sklearn import metrics
from sklearn import cluster
import matplotlib.pyplot as plt
import time
def classify(x, size, centroids):
    """Return the index of the centroid closest to segment *x* (euclidean),
    considering only the first *size* rows of *centroids*.
    """
    # Vectorized over all candidate centroids at once; the original filled a
    # Python loop into a local named `list`, shadowing the builtin.
    dists = np.sqrt(np.sum((centroids[:size] - x) ** 2, axis=1))
    return np.argmin(dists)
'''
Figures out the cluster member counts and the max distances from the centers in each cluster.
Also, histograms are generated.
'''
def score(centers, centroids):
    """Report cluster sizes and max member-to-centroid distances.

    centers: list of clusters, each a list of 27-feature segment rows.
    centroids: matching sequence of 27-feature cluster centers.
    Saves two histograms under Results/ and prints the raw numbers.
    """
    counts = np.zeros(len(centers))
    maxes = np.zeros(len(centers))
    index = 0
    # NOTE(review): the result of np.asarray is discarded here -- it does
    # not convert `centers` in place, so this line has no effect.
    np.asarray(centers)
    # Number of members in each cluster (index mirrors i).
    for i in range(len(centers)):
        counts[index] = len(centers[index])
        index += 1
    # Farthest member from each cluster's centroid (euclidean over 27 dims).
    for i in range(len(centers)):
        maxes[i] = distance.cdist(centers[i], np.asarray(centroids[i]).reshape((1,27)), 'euclidean').max()
    # One histogram bin per 50 counts, with a floor of 5 bins.
    if np.amax(counts)/50 >= 5:
        bins = np.amax(counts) / 50
    else:
        bins = 5
    plt.hist(counts.ravel(), bins = bins)
    plt.title('Number of members per cluster')
    plt.xlabel('Number of members')
    plt.ylabel('Number of occurrences')
    ticks = range(0, int(np.amax(counts)))
    plt.xticks(ticks[0::50])
    plt.gcf().savefig('Results/countHistogram.png')
    plt.close()
    # Same binning rule for the max-distance histogram.
    if np.amax(maxes)/50 >= 5:
        bins = np.amax(maxes) / 50
    else:
        bins = 5
    plt.hist(maxes.ravel(), bins = bins)
    plt.title('Max distance in cluster')
    plt.xlabel('Max distances')
    plt.ylabel('Number of occurrences')
    ticks = range(0, int(np.amax(maxes)))
    plt.xticks(ticks[0::50])
    plt.gcf().savefig('Results/maxdistHistogram.png')
    plt.close()
    print "Counts of each cluster:"
    print counts
    print "------------------------------"
    print "The max distance from each center to a cluster member:"
    print maxes
    print "------------------------------"
'''
Performs K-Means clustering on a dataset of music segments without using a pre-made function.
Saves the results to a .npy file in the Results folder.
'''
def seg_kmeans(filename, clusters, iter):
    """Hand-rolled K-Means over the first 1M segment rows in *filename*.

    clusters: number of centroids to fit.
    iter: maximum number of passes (parameter shadows the builtin `iter`).
    Saves the per-cluster member lists to Results/clusterdata.npy and
    reports statistics via score().
    """
    #Initialize everything
    data = np.load(filename)
    #Use the first 1 million segments
    data.resize(1000000,27)
    centroids = np.empty((clusters, 27))
    copyroids = np.empty((clusters, 27))
    # Seed each centroid with a randomly chosen data row.
    for i in range(0, clusters):
        sample = random.randint(0, len(data))
        centroids[i] = data[sample]
    #Start the algorithm
    stop = False
    attempt = 1
    numlist = []
    while not stop and attempt <= iter:
        #Initialize the lists
        numlist = []
        for i in range(clusters):
            numlist.append([])
        print "Attempt Number: %d" % attempt
        #Classify stuff
        for row in range(len(data)):
            closest = classify(data[row], clusters, centroids)
            numlist[closest].append(data[row])
            if row % 10000 == 0:
                print row
        #Redo the centroids
        copyroids = centroids.copy()
        for i in range(clusters):
            if len(numlist[i]) > 0:
                centroids[i].put(range(27), np.average(numlist[i], axis=0).astype(np.int32))
        attempt += 1
        # np.any(...) is False exactly when no centroid moved; comparing the
        # boolean to 0 therefore flags convergence (quirky but correct).
        if np.any(centroids-copyroids) == 0:
            stop = True
    score(numlist, centroids)
    np.save("Results/clusterdata.npy", numlist)
'''
Performs the K-Means clustering algorithm that Scikit-Learn's cluster package provides.
Saves the output into a file called clusterdata.npy. This file is located in the Results folder.
'''
def KMeans(filename, clusters, iter):
    """Cluster the first 100000 segment rows with scikit-learn's KMeans.

    iter: maximum iterations per run (shadows the builtin of the same name).
    Saves [cluster_centers_, labels_, inertia_] to Results/clusterdata.npy.
    """
    data = np.load(filename)
    data.resize(100000,27)
    print "Loaded data"
    t0 = time.time()
    # 5 restarts, verbose progress, 5 parallel jobs.
    estimator = cluster.KMeans(n_clusters=clusters, n_init = 5, max_iter=iter, verbose=1, n_jobs=5)
    estimator.fit(data)
    print('%.2fs %i'
          % ((time.time() - t0), estimator.inertia_))
    saveddata = [estimator.cluster_centers_, estimator.labels_, estimator.inertia_]
    np.save("Results/clusterdata.npy", saveddata)
| Python | 144 | 32.159721 | 106 | /res_mod5/seg_kmeans.py | 0.646283 | 0.629319 |
hauntshadow/CS3535 | refs/heads/master | """
Self_compare_dist.py
Usage: This program has a function called self_seg_compare().
This function takes a track id (named as a parameter in the function),
compares every segment to every other segment, and
prints out the following information:
1. The number of segments that have one or more matches
2. The number of possible combinations that match
3. Saves a histogram that describes the combinations
4. Returns the adjacency list for the segments in the song
Takes the segments of a song, compares them using the Infinite Jukebox's
fields and weights, and gives a percentage of segments that have another
segment within 45 of itself. It also saves a histogram of these
distances. The histogram only shows distances <= 800, and up to 600
matches in each bin.
This program uses the weights and ideas on how to compare
segments. The following is a link to access the Infinite Jukebox:
http://labs.echonest.com/Uploader/index.html
Author: Chris Smith
Date: 03.11.2015
"""
import matplotlib
matplotlib.use("Agg")
import echonest.remix.audio as audio
import matplotlib.pyplot as plt
import scipy.spatial.distance as distance
import numpy as np
'''
Method that uses a track id to compare every segment with
every other segment, supplies a histogram that shows
the distances between segments (tuples of segments),
and returns an adjacency list of segments in the song.
'''
def self_seg_compare():
    """Compare every segment of the hard-coded track against every other.

    Distance per pair: 10x euclidean over pitches + euclidean over timbre
    + |loudness_max diff| + |loudness_begin diff| + 100x |duration diff|
    (the Infinite Jukebox weighting).  Pairs within threshold 45 are
    recorded.  Prints match statistics, saves sim_histogram.png, and
    returns the per-segment adjacency list.
    """
    #Defines the threshold for comparisons
    thres = 45
    adj_list = []
    sim_seg_count = 0
    sim_count = 0
    track_id = "TRAWRYX14B7663BAE0"
    audiofile = audio.AudioAnalysis(track_id)
    segments = audiofile.segments
    #Get each segment's array of comparison data
    segs = np.array(segments.pitches)
    segs = np.c_[segs, np.array(segments.timbre)]
    segs = np.c_[segs, np.array(segments.loudness_max)]
    segs = np.c_[segs, np.array(segments.loudness_begin)]
    segs = np.c_[segs, np.ones(len(segs))]
    #Finish creating the adjacency list
    for i in segments:
        adj_list.append([])
    #Finish getting the comparison data
    for i in range(len(segs)):
        segs[i][26] = segments[i].duration
    #Get the euclidean distance for the pitch vectors, then multiply by 10
    distances = distance.cdist(segs[:,:12], segs[:,:12], 'euclidean')
    for i in range(len(distances)):
        for j in range(len(distances)):
            distances[i][j] = 10 * distances[i][j]
    #Get the euclidean distance for the timbre vectors, adding it to the
    #pitch distance
    distances = distances + distance.cdist(segs[:,12:24], segs[:,12:24], 'euclidean')
    #Get the rest of the distance calculations, adding them to the previous
    #calculations.
    for i in range(len(distances)):
        for j in range(len(distances)):
            distances[i][j] = distances[i][j] + abs(segs[i][24] - segs[j][24])
            distances[i][j] = distances[i][j] + abs(segs[i][25] - segs[j][25]) + abs(segs[i][26] - segs[j][26]) * 100
    i_point = 0
    j_point = 0
    #Use i_point and j_point for the indices in the 2D distances array
    for i_point in range(len(distances)):
        for j_point in range(len(distances)):
            if i_point != j_point:
                #Check to see if the distance between segment # i_point and
                #segment # j_point is less than 45
                if abs(distances[i_point][j_point]) <= thres:
                    #Add to the adjacency lists if not already there
                    if j_point not in adj_list[i_point]:
                        adj_list[i_point].append(j_point)
                    if i_point not in adj_list[j_point]:
                        adj_list[j_point].append(i_point)
            j_point = j_point + 1
        i_point = i_point + 1
        j_point = 0
    #Get the count of the similarities in the adjacency lists
    for i in adj_list:
        if len(i) > 0:
            sim_count = sim_count + len(i);
            sim_seg_count = sim_seg_count + 1
            #print i, "\n"
    print "Num of segments with at least 1 match: ", sim_seg_count, " out of", len(segments)
    print "Percentage of segments with at least 1 match: ", (sim_seg_count / float(len(segments)) * 100), "%"
    print "Num of similar tuples: ", sim_count, " out of ", len(segments) ** 2 - len(segments)
    print "Percentage of possible tuples that are similar: ", sim_count / float(len(segments) ** 2 - len(segments)) * 100, "%"
    print "Note:This takes out comparisons between a segment and itself."
    #Get the number of bins. Calculated by taking the max range and dividing by 50
    bins = int(np.amax(distances)) / thres
    #Make the histogram with titles and axis labels. Plot the line x=thres for visual comparison.
    plt.hist(distances.ravel(), bins = bins)
    plt.title('Distances between Tuples of Segments')
    plt.xlabel('Distances')
    plt.ylabel('Number of occurrences')
    plt.axvline(thres, color = 'r', linestyle = 'dashed')
    #Make each tick on the x-axis correspond to the end of a bin.
    plt.xticks(range(0, int(np.amax(distances) + 2 * thres), thres))
    #Make each tick on the y-axis correspond to each 25000th number up to the number of possible tuple combos / 2.
    plt.yticks(range(0, (len(segments) ** 2 - len(segments))/2 + 25000, 25000))
    plt.gcf().savefig('sim_histogram.png')
    return adj_list
| Python | 119 | 43.823528 | 126 | /res_mod2/self_compare_dist.py | 0.661293 | 0.641237 |
hauntshadow/CS3535 | refs/heads/master | import numpy as np
from collections import Counter
def calculate(filename):
data = np.load(filename)
checked = data[1]
countClusters = Counter()
counter = Counter()
for i in checked:
countClusters[i] += 1
for i in countClusters.values():
counter[i] += 1
val = counter.values()
key = counter.keys()
sum = 0
for i in range(len(key)):
sum += val[i] * key[i] ** 2
sum += (len(checked) * len(countClusters.values()))
print sum
fin = sum * (4376.4/4999950000)
print fin
| Python | 21 | 24.952381 | 55 | /ResultCheck/CalcTime.py | 0.594495 | 0.557798 |
hauntshadow/CS3535 | refs/heads/master | #!/usr/bin/env python
# encoding: utf=8
"""
one.py
Digest only the first beat of every bar.
By Ben Lacker, 2009-02-18.
"""
'''
one_segment.py
Author: Chris Smith, 02-05-2015
Changes made to original one.py:
- Changes made to take the first segment out of every beat.
- Does not take the first beat from every bar anymore.
The original code is stored at this address: https://github.com/echonest/remix/blob/master/examples/one/one.py
'''
import echonest.remix.audio as audio
usage = """
Usage:
python one.py <input_filename> <output_filename>
Example:
python one.py EverythingIsOnTheOne.mp3 EverythingIsReallyOnTheOne.mp3
"""
def main(input_filename, output_filename):
    """Keep only the first segment of every beat and re-encode the audio."""
    audiofile = audio.LocalAudioFile(input_filename)
    '''
    This line got the bars of the song in the previous version:
    bars = audiofile.analysis.bars
    Now, this line gets the beats in the song:
    '''
    beats = audiofile.analysis.beats
    collect = audio.AudioQuantumList()
    '''
    This loop got the first beat in each bar and appended them to a list:
    for bar in bars:
        collect.append(bar.children()[0])
    Now, this loop gets the first segment in each beat and appends them to the list:
    '''
    for b in beats:
        collect.append(b.children()[0])
    # Stitch the collected segments back together and write the result.
    out = audio.getpieces(audiofile, collect)
    out.encode(output_filename)
if __name__ == '__main__':
import sys
try:
input_filename = sys.argv[1]
output_filename = sys.argv[2]
except:
print usage
sys.exit(-1)
main(input_filename, output_filename)
| Python | 64 | 23.84375 | 110 | /one_segment/one_segment.py | 0.665409 | 0.650314 |
HoeYeon/Basic_Cnn | refs/heads/master |
# coding: utf-8
# In[1]:
import numpy as np
import tensorflow as tf
import requests
import urllib
from PIL import Image
import os
import matplotlib.pyplot as plt
get_ipython().magic('matplotlib inline')
# In[ ]:
#Get image from url
#a = 1
#with open('Cat_image.txt','r') as f:
# urls = []
# for url in f:
# urls.append(url.strip())
# try:
# with urllib.request.urlopen(url) as url_:
# try:
# with open('temp.jpg', 'wb') as f:
# f.write(url_.read())
# img = Image.open('temp.jpg')
# name = "test{}.jpg".format(a)
# img.save(name)
# a += 1
# except:
# pass
# except:
# pass
#print("done")
#print(a)
# In[ ]:
## resize image to 28x28
#count = range(0,1033)
#for i in count:
# cat1 = Image.open('cat ({}).jpg'.format(i))
# new_image = cat1.resize((28,28))
# new_image.save('cat{}.jpg'.format(i))
#
#print('done')
# In[2]:
# Load 32x32 RGB images from disk into train/validation/test lists.
train = []
validation = []
test = []
##Get cat image##
os.chdir("C:\\Users\\USER\\python studyspace\\Deep learning\\Project\\cat_32")
print(os.getcwd())
#add cat image to train_set --> size 1200
for i in range(1,1201):
    pic = Image.open('cat{}.jpg'.format(i))
    pix = np.array(pic)
    train.append(pix)
#train_set = np.array(train)
#add cat image to validation_set --> size 200
for i in range(1201,1401):
    pic = Image.open('cat{}.jpg'.format(i))
    pix = np.array(pic)
    validation.append(pix)
#validation_set = np.array(validation)
#add cat image to test_set --> size 200
for i in range(1401,1601):
    pic = Image.open('cat{}.jpg'.format(i))
    pix = np.array(pic)
    test.append(pix)
#test_set = np.array(test)
### Get monkey image
os.chdir("C:\\Users\\USER\\python studyspace\\Deep learning\\Project\\monkey_32")
print(os.getcwd())
#add monkey image to train_set --> size 900
for j in range(1,901):
    pic = Image.open('monkey{}.jpg'.format(j))
    pix = np.array(pic)
    train.append(pix)
#print(train)
train_set = np.array(train)
#add monkey image to validation_set --> size 200
for j in range(901,1101):
    pic = Image.open('monkey{}.jpg'.format(j))
    pix = np.array(pic)
    validation.append(pix)
validation_set = np.array(validation)
#add monkey image to test_set --> size 200
for j in range(1101,1301):
    pic = Image.open('monkey{}.jpg'.format(j))
    pix = np.array(pic)
    test.append(pix)
test_set = np.array(test)
os.chdir("C:\\Users\\USER\\python studyspace\\Deep learning\\Project")
# In[3]:
# Expected: (2100, 32, 32, 3), (400, 32, 32, 3), (400, 32, 32, 3)
print(train_set.shape)
print(validation_set.shape)
print(test_set.shape)
# In[4]:
plt.imshow(train_set[0]) # cat image example
# In[5]:
plt.imshow(train_set[1600]) # monkey image example
# In[ ]:
#change into gray image
#train_set[[0],:,:,[2]] =train_set[[0],:,:,[0]]
#train_set[[0],:,:,[1]] = train_set[[0],:,:,[0]]
#plt.imshow(train_set[0])
# In[4]:
# Binary class labels: cats were appended first in each split, monkeys second.
# Set train_labels
train_labels = np.zeros((2100))
train_labels[0:1200] = 0 ## 0 == cat
train_labels[1200:2100] = 1 ## 1 == monkey
# Set validation labels
validation_labels = np.zeros((400))
validation_labels[0:200] = 0 ## 0 == cat
# NOTE(review): the slice below says 200:600 on a length-400 array; NumPy
# clamps it to 200:400, so this works, but 200:400 was probably intended.
validation_labels[200:600] = 1 ## 1 == monkey
#Set Test labels
test_labels = np.zeros((400))
test_labels[0:200] = 0 ## 0 == cat
test_labels[200:400] =1 ## 1 == monkey
#Shuffle dataset & labels
def randomize(dataset, labels):
    """Shuffle *dataset* (4D) and *labels* together with one shared random
    permutation, so each image stays paired with its label.
    """
    order = np.random.permutation(labels.shape[0])
    return dataset[order, :, :, :], labels[order]
# Apply one shared shuffle per split so images stay aligned with labels.
train_set, train_labels = randomize(train_set, train_labels)
validation_set, validation_labels = randomize(validation_set, validation_labels)
test_set, test_labels = randomize(test_set, test_labels)
# In[6]:
num_labels =2
image_size = 32
num_channels = 3
## cause RGB image
## reformat all data set & labels
def reformat(dataset, labels):
    # Cast images to float32 NHWC and one-hot encode the labels
    # (the arange == labels[:,None] broadcast builds the one-hot matrix).
    dataset = dataset.reshape((-1, image_size,image_size,num_channels)).astype(np.float32)
    labels = (np.arange(num_labels) == labels[:,None]).astype(np.float32)
    return dataset, labels
train_set, train_labels = reformat(train_set, train_labels)
validation_set, validation_labels = reformat(validation_set, validation_labels)
test_set, test_labels = reformat(test_set, test_labels)
print('train_set : ',train_set.shape, train_labels.shape)
print('validation_set : ',validation_set.shape, validation_labels.shape)
print('test_set : ',test_set.shape, test_labels.shape)
# In[11]:
def accuracy(predictions, labels):
    """Percentage of rows whose predicted argmax matches the one-hot label."""
    predicted_classes = np.argmax(predictions, 1)
    true_classes = np.argmax(labels, 1)
    correct = np.sum(predicted_classes == true_classes)
    return 100.0 * correct / predictions.shape[0]
# In[9]:
# Hyperparameters for the 2-conv + 2-FC network below.
batch_size = 128
# NOTE(review): learning_rate is never used -- AdamOptimizer below
# hard-codes 0.005.
learning_rate = 0.001
patch_size = 7
depth = 64
num_hidden = 128
graph = tf.Graph()
with graph.as_default():
    # Placeholders are named so Prediction.py can fetch them by name later.
    tf_train_dataset = tf.placeholder(tf.float32,
                                     shape=[None,image_size , image_size,3],name = 'train_dataset')
    tf_train_labels = tf.placeholder(tf.float32, shape=[None, num_labels], name = 'train_label')
    tf_valid_dataset = tf.constant(validation_set)
    tf_test_dataset = tf.constant(test_set)
    ## Setting First Layer
    ## so w_conv1 has 64 filter which is 7x7x3 shape
    W_conv1 = tf.Variable(tf.truncated_normal(
            [patch_size, patch_size, num_channels, depth], stddev=0.1))
    # depth means number of filters
    b_conv1 = tf.Variable(tf.zeros([depth]))
    ##Setting Second Layer
    W_conv2 = tf.Variable(tf.truncated_normal(
            [patch_size, patch_size, depth, depth], stddev = 0.1))
    b_conv2 = tf.Variable(tf.zeros([depth]))
    ## Setting First FC Layer
    # Input is image_size/4 squared because each max-pool halves H and W.
    W_fc1 = tf.Variable(tf.truncated_normal(
            [image_size//4 * image_size // 4 * depth, num_hidden],stddev=0.1))
    b_fc1 = tf.Variable(tf.constant(1.0, shape=[num_hidden]))
    ## Setting Second FC Layer
    W_fc2 = tf.Variable(tf.truncated_normal(
            [num_hidden, num_labels], stddev=0.1))
    b_fc2 = tf.Variable(tf.constant(1.0, shape=[num_labels]))
    def set_model(data):
        # conv -> relu -> 3x3/2 max-pool -> local response norm, twice,
        # then two fully connected layers; returns unscaled logits.
        L_conv1 = tf.nn.conv2d(data, W_conv1, [1,1,1,1], padding='SAME')
        L_conv1 = tf.nn.relu(L_conv1+b_conv1)
        #pooling
        #pooling has no parameters to learn --> fixed function
        L_conv1 = tf.nn.max_pool(L_conv1, ksize=[1,3,3,1],
                                strides=[1,2,2,1], padding='SAME')
        #Normalization
        L_conv1 = tf.nn.lrn(L_conv1, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        #L1 = tf.nn.dropout(L1, keep_prob = 0.7)
        L_conv2 = tf.nn.conv2d(L_conv1,W_conv2, [1,1,1,1], padding='SAME')
        L_conv2 = tf.nn.relu(L_conv2+b_conv2)
        #pooling
        L_conv2 = tf.nn.max_pool(L_conv2, ksize=[1,3,3,1],
                                strides=[1,2,2,1], padding='SAME')
        #Normalization
        L_conv2 = tf.nn.lrn(L_conv2, 4, bias=1.0, alpha=0.001 / 9.0, beta=0.75)
        #L2 = tf.nn.dropout(L2, keep_prob = 0.7)
        shape = L_conv2.get_shape().as_list()
        reshape = tf.reshape(L_conv2, [-1, shape[1] * shape[2] * shape[3]])
        L_fc1 = tf.nn.relu(tf.matmul(reshape, W_fc1)+b_fc1)
        #L3 = tf.nn.dropout(L3, keep_prob = 0.7)
        return tf.matmul(L_fc1, W_fc2) + b_fc2
    logits = set_model(tf_train_dataset)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(labels=tf_train_labels, logits= logits))
    optimizer = tf.train.AdamOptimizer(0.005).minimize(loss)
#    y_pred = tf.nn.softmax(logits, name='y_pred')
    train_prediction = tf.nn.softmax(logits, name='train_pred')
    valid_prediction = tf.nn.softmax(set_model(tf_valid_dataset))
    test_prediction = tf.nn.softmax(set_model(tf_test_dataset))
# In[12]:
# Train with minibatch SGD, reporting every 50 steps, then checkpoint.
num_steps = 1001
with tf.Session(graph=graph) as session:
    saver = tf.train.Saver(tf.global_variables())
    ''' ckpt = tf.train.get_checkpoint_state('./model')
    if ckpt and tf.train.checkpoint_exists(ckpt.model_checkpoint_path):
        saver.restore(session, ckpt.model_checkpoint_path)
    else:
        session.run(tf.global_variables_initializer())'''
    session.run(tf.global_variables_initializer())
    print('Initialized')
    for step in range(num_steps):
        # Cycle through the training set in batch_size windows.
        offset = (step * batch_size) % (train_labels.shape[0] - batch_size)
        batch_data = train_set[offset:(offset + batch_size), :, :, :]
        batch_labels = train_labels[offset:(offset + batch_size), :]
        feed_dict = {tf_train_dataset : batch_data, tf_train_labels : batch_labels}
        _, l, predictions = session.run(
        [optimizer, loss, train_prediction], feed_dict=feed_dict)
        if (step % 50 == 0):
            print('Minibatch loss at step %d: %f' % (step, l))
            print('Minibatch accuracy: %.1f%%' % accuracy(predictions, batch_labels))
            print('Validation accuracy: %.1f%%' % accuracy(
            valid_prediction.eval(), validation_labels))
    saver.save(session, "./save2.ckpt")
    print('Test accuracy: %.1f%%' % accuracy(test_prediction.eval(), test_labels))
| Python | 320 | 26.603125 | 99 | /Train_model.py | 0.633247 | 0.595338 |
HoeYeon/Basic_Cnn | refs/heads/master |
# coding: utf-8
# Restore the CNN trained by Train_model and classify one test image.
# In[2]:
import numpy as np
import tensorflow as tf
import requests
import urllib
from PIL import Image
import os
import matplotlib.pyplot as plt
import cv2 as cv2
get_ipython().magic('matplotlib inline')
# In[3]:
# Load and resize the input image to the 1x32x32x3 shape the graph expects.
os.chdir("C:\\Users\\USER\\python studyspace\\Deep learning\\Project")
pic = Image.open("cat_test.jpg")
new_image = pic.resize((32,32))
test1 = np.array(new_image)
test1 = test1.reshape(1,32,32,3)
print(test1.shape)
# In[5]:
plt.imshow(pic)
# In[6]:
# Restore the checkpointed graph and fetch tensors by the names assigned
# during training ('train_pred', 'train_dataset', 'train_label').
sess = tf.Session()
saver = tf.train.import_meta_graph('save2.ckpt.meta')
saver.restore(sess, tf.train.latest_checkpoint('./'))
graph = tf.get_default_graph()
y_pred = graph.get_tensor_by_name("train_pred:0")
x = graph.get_tensor_by_name("train_dataset:0")
y_true = graph.get_tensor_by_name("train_label:0")
# Dummy labels: only the prediction output is of interest here.
y_test_images = np.zeros((1,2))
feed_dict_testing = {x: test1, y_true: y_test_images}
result=sess.run(y_pred, feed_dict=feed_dict_testing)
# In[7]:
# Softmax probabilities for [class 0 (cat), class 1 (monkey)].
print(result)
# In[ ]:
| Python | 59 | 15.762712 | 70 | /Prediction.py | 0.683787 | 0.654582 |
gagan1411/COVID-19 | refs/heads/master | # -*- coding: utf-8 -*-
"""
Created on Sun May 10 23:34:29 2020
@author: HP USER
"""
import urllib.request, urllib.error, urllib.parse
import json
import sqlite3
import pandas as pd
from datetime import datetime
import matplotlib.pyplot as plt
import matplotlib
import numpy as np
#retrieve json file and decode it
jsonFile = urllib.request.urlopen('https://api.covid19india.org/data.json').read()
data = json.loads(jsonFile)
conn = sqlite3.connect('Covid19Data.sqlite')
cur = conn.cursor()
#create a table in database if the table does not exists
cur.executescript('''
CREATE TABLE IF NOT EXISTS dailyCases(
dailyConfirmed INTEGER NOT NULL,
dailyDeceased INTEGER NOT NULL,
dailyRecovered INTEGER NOT NULL,
date TEXT NOT NULL UNIQUE,
totalConfirmed INTEGER NOT NULL,
totalDeceased INTEGER NOT NULL,
totalRecovered INTEGER NOT NULL
);''')
#%%
#update the data in database for each date
# NOTE(review): dailyData[0..6] relies on the JSON object's key order
# (dailyconfirmed, dailydeceased, dailyrecovered, date, total*) -- verify
# against the API if it ever changes.
for daily in data['cases_time_series']:
    dailyData = list(daily.values())
    cur.execute('''SELECT * FROM dailyCases WHERE date=?''', (dailyData[3], ))
    result = cur.fetchone()
    # Insert the row if this date has never been stored.
    if result is None:
        cur.execute('''
                    INSERT INTO dailyCases (dailyConfirmed, dailyDeceased, dailyRecovered, date,
                    totalConfirmed, totalDeceased, totalRecovered) VALUES ( ?, ?, ?, ?, ?, ?, ?)''',
                    (int(dailyData[0]), int(dailyData[1]), int(dailyData[2]), dailyData[3],
                     int(dailyData[4]), int(dailyData[5]), int(dailyData[6])))
    # Otherwise refresh totalConfirmed when the API reports a higher total.
    elif result[4] < int(dailyData[4]):
        cur.execute('''
                    UPDATE dailyCases
                    SET totalConfirmed=?
                    WHERE date=?''',
                    (int(dailyData[4]), dailyData[3]))
conn.commit()
#%%
total = pd.read_sql('SELECT * FROM dailyCases', conn)
#convert date to python datetime type object
def fun(x):
    # The API dates look like '30 January ' -- append the current year.
    return datetime.strptime(x+str((datetime.today().year)), '%d %B %Y')
total['date'] = total['date'].apply(fun)
#plot figure for total cases for each day
fig = plt.figure()
plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))
plt.plot(total['date'], total['totalConfirmed'], '-o', ms=1)
plt.title('Total cases in India for each day')
plt.xlabel('Dates', fontsize=12)
plt.ylabel('Total cases', labelpad=0.1, fontsize=12)
def slide(event):
    # Click handler: map the clicked x position to a DataFrame row and
    # redraw the figure with that day's total annotated.
    date = int(event.xdata)
    print(event.xdata)
    # NOTE(review): the +2 offset between tick locations and row indices is
    # empirical; confirm it still holds if the date range changes.
    dateIndex = date - dateLoc[0]+2
    date = total['date'].iloc[dateIndex]
    strDate = date.strftime('%d %b')
    #text for displaying the total cases for each day
    # (local name `str` shadows the builtin inside this handler)
    str = 'Total cases on {} were {}'.format(strDate, total['totalConfirmed'].iloc[dateIndex])
    plt.cla()
    plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))
    plt.plot(total['date'], total['totalConfirmed'], '-o', ms=1)
    plt.text(x=dateLoc[0], y=50000, s=str)
    plt.title('Total cases in India for each day')
    plt.xlabel('Dates', fontsize=12)
    plt.ylabel('Total cases', labelpad=0.1, fontsize=12)
    plt.draw()
# Integer tick locations used by the click handlers to locate rows.
dateLoc = (plt.gca().xaxis.get_majorticklocs())
dateLoc = dateLoc.astype(np.int64)
fig.canvas.mpl_connect('button_press_event', slide)
#plot the figure for new cases reported for each day
fig2 = plt.figure()
fig2.set_figheight(9)
fig2.set_figwidth(16)
fig2.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))
plt.bar(total['date'], total['dailyConfirmed'], width=0.8, alpha=0.8)
plt.plot(total['date'], total['dailyConfirmed'], c='red', alpha=0.8)
plt.title('New cases reported in India for each day')
plt.xlabel('Dates', fontsize=12)
plt.ylabel('New cases reported', labelpad=10, fontsize=12)
def slide2(event):
    # Click handler: annotate the bar chart with the clicked day's count.
    date = int(round(event.xdata))
    print(event.xdata)
    # Same empirical +2 tick-to-row offset as slide() above.
    dateIndex = date - dateLoc[0]+2
    date = total['date'].iloc[dateIndex]
    strDate = date.strftime('%d %b')
    # print(plt.gcf().texts())
    str = 'Total cases reported on {} were {}'.format(strDate, total['dailyConfirmed'].iloc[dateIndex])
    plt.cla()
    plt.gca().xaxis.set_major_formatter(matplotlib.dates.DateFormatter('%d %b'))
    plt.bar(total['date'], total['dailyConfirmed'], alpha=0.8)
    plt.plot(total['date'], total['dailyConfirmed'], c='red', alpha=0.8)
    plt.annotate(xy=(event.xdata, total['dailyConfirmed'].iloc[dateIndex]),
                 xytext=(dateLoc[0], 4000), s=str,
                 arrowprops={'arrowstyle':'->'})
    plt.title('New cases reported in India for each day')
    plt.xlabel('Dates', fontsize=12)
    plt.ylabel('New cases reported', fontsize=12, labelpad=10)
    plt.draw()
fig2.canvas.mpl_connect('button_press_event', slide2)
plt.show()
conn.close()
jbaquerot/Python-For-Data-Science | refs/heads/master | # IPython log file
import json
path = 'ch02/usagov_bitly_data2012-03-16-1331923249.txt'
records = [json.loads(line) for line in open(path)]
import json
path = 'ch2/usagov_bitly_data2012-03-16-1331923249.txt'
records = [json.loads(line) for line in open(path)]
import json
path = 'ch2/usagov_bitly_data2012-11-13-1352840290.txt'
records = [json.loads(line) for line in open(path)]
time_zones = [rec['tz'] for rec in records if 'tz' in rec]
get_ipython().magic(u'logstart')
ip_info = get_ipython().getoutput(u'ifconfig eth0 | grep "inet "')
ip_info[0].strip()
ip_info = get_ipython().getoutput(u'ifconfig en0 | grep "inet "')
ip_info[0].strip()
ip_info = get_ipython().getoutput(u'ifconfig en1 | grep "inet "')
ip_info[0].strip()
pdc
get_ipython().magic(u'debug')
def f(x, y, z=1):
    """Return (x + y) / z -- toy function used for the debugger demos."""
    return (x + y) / z
get_ipython().magic(u'debug (f, 1, 2, z = 3)')
get_ipython().magic(u'debug (f, 1, 2, z = 3)')
get_ipython().magic(u'debug (f, 1, 2, z = 3)')
def set_trace():
    # Drop into an IPython-colored pdb at the *caller's* frame.
    from IPython.core.debugger import Pdb
    Pdb(color_scheme='Linux').set_trace(sys._getframe().f_back)
def debug(f, *args, **kwargs):
    # Run f(*args, **kwargs) under the IPython debugger.
    from IPython.core.debugger import Pdb
    pdb = Pdb(color_scheme='Linux')
    return pdb.runcall(f, *args, **kwargs)
debug (f, 1, 2, z = 3)
set_trace()
class Message:
    def __init__(self, msg):
        self.msg = msg
# Redefined below with a __repr__; this second definition replaces the first.
class Message:
    def __init__(self, msg):
        self.msg = msg
    def __repr__(self):
        return 'Message: %s' % self.msg
x = Message('I have a secret')
x
solarkyle/lottery | refs/heads/main | import random
def lottery_sim(my_picks, num_tickets):
    """Simulate *num_tickets* 6-of-52 lottery drawings against *my_picks*.

    Returns a dict mapping match counts 3..6 to how many tickets hit that
    many of the player's numbers.  (The original kept a `ticket` counter
    that was never used; it has been removed.)
    """
    winners = {3: 0, 4: 0, 5: 0, 6: 0}
    for _ in range(num_tickets):
        drawing = random.sample(range(1, 53), 6)
        correct = sum(1 for pick in my_picks if pick in drawing)
        # Only 3+ matches pay out, so fewer matches are not tracked.
        if correct >= 3:
            winners[correct] += 1
    return winners
lottery_sim([17,3,44,22,15,37], 100000) | Python | 27 | 21.185184 | 48 | /lottery.py | 0.473244 | 0.397993 |
valentecaio/caiotile | refs/heads/master | #!/usr/bin/python3
import argparse
import subprocess
import re
HEIGHT_OFFSET = 60
class Rectangle:
    """Axis-aligned rectangle: origin (x, y) plus width/height (w, h)."""
    def __init__(self, x, y, w, h):
        self.x = int(x)  # origin x
        self.y = int(y)  # origin y
        self.w = int(w)  # width
        self.h = int(h)  # height

    def __str__(self):
        # Comma-separated form, matching wmctrl's geometry argument.
        return str(self.x) + ',' + str(self.y) + ',' \
               + str(self.w) + ',' + str(self.h)

    def __repr__(self):
        # Fixed: the original omitted the '(' before the size pair, so the
        # parentheses were unbalanced ("size: 10,20)").
        return "position: (" + str(self.x) + \
               "," + str(self.y) + ')'\
               ", size: (" + str(self.w) + \
               "," + str(self.h) + ')'
# example ['1366x768+1024+373', '1024x768+0+0']
def get_displays():
    """Parse `xrandr` output into one Rectangle per connected display.

    Each resolution string looks like 'WxH+X+Y'.  The height is reduced by
    HEIGHT_OFFSET to leave room for panels/docks.
    """
    out = str(execute('xrandr'))
    # remove occurrences of 'primary' substring
    out = out.replace("primary ", "")
    # we won't match displays that are disabled (no resolution)
    out = out.replace("connected (", "")
    start_flag = " connected "
    end_flag = " ("
    resolutions = []
    for m in re.finditer(start_flag, out):
        # start substring in the end of the start_flag
        start = m.end()
        # end substring before the end_flag
        end = start + out[start:].find(end_flag)
        resolutions.append(out[start:end])
    displays = []
    for r in resolutions:
        width = r.split('x')[0]
        height, x, y = r.split('x')[1].split('+')
        displays.append(Rectangle(x, y, width, int(height)-HEIGHT_OFFSET))
    return displays
def parse_arguments():
    """Build and evaluate the CLI: tiling, display switching, maximize."""
    parser = argparse.ArgumentParser(description='Tile tool')
    parser.add_argument('-t', '--tile', dest='tile',
                        choices=['left', 'right', 'top', 'bottom'],
                        help='tile relatively to display')
    parser.add_argument('-w', '--tile-window', dest='tile_w',
                        choices=['left', 'right', 'top', 'bottom'],
                        help='tile relatively to window itself')
    parser.add_argument('-s', '--switch-display', dest='switch_display',
                        action='store_true',
                        help='move window to next display')
    parser.add_argument('-c', '--change-to-display', dest='display',
                        type=int, help='move window to specified display')
    parser.add_argument('-m', '--maximize', dest='maximize',
                        action='store_true', help='maximize window')
    return parser.parse_args()
def execute(cmd):
    """Echo *cmd* and run it through bash, returning its stdout as bytes."""
    print('$ ' + cmd)
    return subprocess.check_output(['bash', '-c', cmd])
def get_active_window():
    """Return the focused window's geometry as a Rectangle.

    Parses the str() of xdotool's bytes output, which is why the geometry
    field ends at the escaped sequence "\\n" rather than a real newline.
    """
    raw = str(execute('xdotool getactivewindow getwindowgeometry'))
    # "Position: X,Y (screen: N)"
    pos_begin = raw.find("Position: ") + len("Position: ")
    position = raw[pos_begin:raw.find(" (screen:")]
    # "Geometry: WxH" terminated by the (escaped) trailing newline
    geom_begin = raw.find("Geometry: ") + len("Geometry: ")
    geometry = raw[geom_begin:raw.rfind("\\n")]
    origin = position.split(',')
    size = geometry.split('x')
    return Rectangle(origin[0], origin[1], size[0], size[1])
def window_is_in_display(w, d):
    """Return True when window *w*'s origin lies within display *d*'s bounds."""
    inside_horizontally = d.x <= w.x <= d.x + d.w
    inside_vertically = d.y <= w.y <= d.y + d.h
    return inside_horizontally and inside_vertically
def get_display(displays, active):
    """Return a display filtered by whether it holds the focused window.

    active=True  -> the display containing the focused window.
    active=False -> the first display NOT containing it.
    Returns None when no display matches.
    """
    window = get_active_window()
    for display in displays:
        # keep the display exactly when its containment matches 'active'
        if window_is_in_display(window, display) == active:
            return display
def get_active_display(displays):
    """Return the display that currently contains the focused window."""
    return get_display(displays, True)
def get_inactive_display(displays):
    """Return the first display that does NOT contain the focused window."""
    return get_display(displays, False)
def set_window(x, y, w, h):
    """Move and resize the active window with wmctrl (gravity 0)."""
    geometry = str(x) + ',' + str(y) + ',' + str(w) + ',' + str(h)
    execute('wmctrl -r ":ACTIVE:" -e 0,' + geometry)
def tile(direction, basis, display):
    """Snap the active window to one half of *display*.

    Starts from *basis*'s geometry (a window or the display itself) and
    overrides one axis according to *direction*; any other direction value
    leaves the basis geometry untouched.
    """
    x, y, w, h = basis.x, basis.y, basis.w, basis.h
    half_width = int(display.w / 2)
    half_height = int(display.h / 2)
    if direction in ('left', 'right'):
        w = half_width
        x = display.x if direction == 'left' else display.x + half_width
    elif direction in ('top', 'bottom'):
        h = half_height
        y = display.y if direction == 'top' else display.y + half_height
    set_window(x, y, w, h)
def main():
    """Entry point: dispatch each requested action on the active window."""
    args = parse_arguments()
    displays = get_displays()

    if args.tile:
        # tile relative to the display: the display is both basis and target
        active = get_active_display(displays)
        tile(args.tile, active, active)

    if args.tile_w:
        active = get_active_display(displays)
        window = get_active_window()
        # xdotool reports x two pixels beyond the real value; compensate
        window.x -= 2
        tile(args.tile_w, window, active)

    # 'is not None' because display index 0 is a valid (but falsy) value
    if args.display is not None:
        target = displays[args.display]
        set_window(target.x, target.y, target.w, target.h)

    if args.switch_display:
        target = get_inactive_display(displays)
        set_window(target.x, target.y, target.w, target.h)

    if args.maximize:
        target = get_active_display(displays)
        set_window(target.x, target.y, target.w, target.h)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| Python | 182 | 26.175825 | 74 | /caiotile.py | 0.538617 | 0.530732 |
Jmitch13/Senior-Honors-Project | refs/heads/main | import requests
import sqlite3
from sqlite3 import Error
from bs4 import BeautifulSoup
# Create the batter pool database
BatterPool = sqlite3.connect('TeamBatterPool.db')
# Fielding positions — appears unused in this chunk; verify before removing.
positionList = ['c', '1b', '2b', 'ss', '3b', 'rf', 'cf', 'lf', 'dh']
# Seasons of interest for the batter pool.
yearList = ['2010', '2011', '2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
# Team identifiers used to build per-team-season SQLite table names.
# NOTE(review): several are misspelled ('Tamba_Bay_Rays', 'Cincinatti_Reds',
# 'Colarado_Rockies', 'Washingon_Nationals'); kept as-is since existing
# database tables were created under these exact names.
teamList = ["Los_Angeles_Angels", "Baltimore_Orioles", "Boston_Red_Sox", "White_Sox", "Cleveland_Indians", "Detroit_Tigers", "Kansas_City_Royals", "Minnesota_Twins", "New_York_Yankees", "Oakland_Athletics", "Seattle_Mariners", "Tamba_Bay_Rays", "Texas_Rangers", "Toronto_Blue_Jays", "Arizona_Diamondbacks", "Atlanta_Braves", "Chicago_Cubs", "Cincinatti_Reds", "Colarado_Rockies", "Miami_Marlins", "Houston_Astros", "Los_Angeles_Dodgers", "Milwaukee_Brewers", "Washingon_Nationals", "New_York_Mets", "Philadelphia_Phillies", "Pittsburgh_Pirates", "St_Louis_Cardinals", "San_Diego_Padres", "San_Francisco_Giants"]
# Sample Baseball-Reference player page (Mike Trout) — appears unused below;
# web_scrape builds its own URL. TODO confirm before deleting.
source = "https://www.baseball-reference.com/players/t/troutmi01.shtml"
def batter_pool_table(team_name, year):
    """Create the batting-stats table for one team-season if it is missing."""
    cursor = BatterPool.cursor()
    # table names cannot be bound as SQL parameters, so the DDL is concatenated
    columns = '(Player_Name TEXT, Age INTEGER, Position TEXT, WAR REAL, WPA REAL, wRCplus REAL, PA INTEGER, AVG REAL, OBP REAL, SLG REAL, OPS REAL, BABIP REAL, wOBA REAL, BBperc REAL, Kperc REAL, SPD REAL, DEF REAL, Worth TEXT)'
    cursor.execute('CREATE TABLE IF NOT EXISTS _' + year + team_name + columns)
    cursor.close()
def data_entry(team_name, year, player_name, age, position, war, wpa, rcplus, pa, avg, obp, slg, ops, babip, oba, bbpec, kperc, speed, defense, worth):
    """Insert one batter's season line into the _<year><team> table."""
    cursor = BatterPool.cursor()
    # the table name is concatenated (not parameterizable); every value goes
    # through a '?' placeholder
    statement = "INSERT INTO _" + year + team_name + " (Player_Name, Age, Position, WAR, WPA, wRCplus, PA, AVG, OBP, SLG, OPS, BABIP, wOBA, BBperc, Kperc, SPD, DEF, Worth) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    values = (player_name, age, position, war, wpa, rcplus, pa, avg, obp, slg, ops, babip, oba, bbpec, kperc, speed, defense, worth)
    cursor.execute(statement, values)
    BatterPool.commit()
    cursor.close()
def web_scrape(playerList):
    """Append one list of cell texts per row of the 'batting_value' table.

    Scrapes a hard-coded Baseball-Reference player page (Vladimir Guerrero)
    and mutates *playerList* in place.
    """
    page = requests.get("https://www.baseball-reference.com/players/g/guerrvl01.shtml#all_br-salaries").text
    soup = BeautifulSoup(page, "html.parser")
    value_table = soup.find('table', id = 'batting_value')
    for table_row in value_table.find_all('tr'):
        cells = table_row.find_all('td')
        # header-cell (<th>) capture was disabled in the original:
        # th = table_row.find('th'); row.append(th.text)
        playerList.append([cell.text for cell in cells])
    # A second scrape of the 'batting_standard' table was also left disabled
    # in the original; kept here for reference:
    # table = soup.find('table', id = 'batting_standard')
    # for tr in table.find_all('tr'):
    #     td = tr.find_all('td')
    #     th = tr.find('th')
    #     row = [i.text for i in td]
    #     row.append(th.text)
    #     playerList.append(row)
# Module-level driver: scrape the hard-coded player page and dump the rows.
# NOTE(review): this runs on import; consider guarding with
# if __name__ == "__main__" like the other scripts.
playerList = []
web_scrape(playerList)
print(playerList)
| Python | 55 | 52.981819 | 611 | /TeamBatterPool.py | 0.641534 | 0.624669 |
Jmitch13/Senior-Honors-Project | refs/heads/main | import requests
import sqlite3
from sqlite3 import Error
from bs4 import BeautifulSoup
# Create the pitcher pool database
PitcherPool = sqlite3.connect('TeamPitcherPool1.db')
# Seasons of interest for the pitcher pool.
yearList = ['2012', '2013', '2014', '2015', '2016', '2017', '2018', '2019']
# Team identifiers used to build per-team-season SQLite table names.
# NOTE(review): several are misspelled ('Tamba_Bay_Rays', 'Cincinatti_Reds',
# 'Colarado_Rockies', 'Washingon_Nationals'); kept as-is since existing
# database tables were created under these exact names.
teamList = ["Los_Angeles_Angels", "Baltimore_Orioles", "Boston_Red_Sox", "White_Sox", "Cleveland_Indians", "Detroit_Tigers", "Kansas_City_Royals", "Minnesota_Twins", "New_York_Yankees", "Oakland_Athletics", "Seattle_Mariners", "Tamba_Bay_Rays", "Texas_Rangers", "Toronto_Blue_Jays", "Arizona_Diamondbacks", "Atlanta_Braves", "Chicago_Cubs", "Cincinatti_Reds", "Colarado_Rockies", "Miami_Marlins", "Houston_Astros", "Los_Angeles_Dodgers", "Milwaukee_Brewers", "Washingon_Nationals", "New_York_Mets", "Philadelphia_Phillies", "Pittsburgh_Pirates", "St_Louis_Cardinals", "San_Diego_Padres", "San_Francisco_Giants"]
# Sample FanGraphs leaderboard URL — appears unused below; web_scrape builds
# its own URL. TODO confirm before deleting.
source = "https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=0&type=c,3,59,45,118,6,117,42,7,13,36,40,48,60,63&season=2011&month=0&season1=2011&ind=0&team=1&rost=0&age=0&filter=&players=0&startdate=2011-01-01&enddate=2011-12-31"
# Function to create the tables from 2012-2019
def pitcher_pool_table(year, team_name):
    """Create the pitching-stats table for one team-season if it is missing."""
    cursor = PitcherPool.cursor()
    # table names cannot be bound as SQL parameters, so the DDL is concatenated
    columns = '(Player_Name TEXT, Age INTEGER, IP REAL, WAR REAL, WPA REAL, FIPx REAL, FIPXminus REAL, ERA REAL, ERAminus REAL, WHIP REAL, Kper9 REAL, HRper9 REAL, GBperc REAL, Worth TEXT)'
    cursor.execute('CREATE TABLE IF NOT EXISTS _' + year + team_name + columns)
    cursor.close()
# Function to enter the data into the respective SQLite table
def data_entry(team_name, year, player_name, age, innings_pitched, war, wpa, fipx, fipx_minus, era, era_minus, whip, kPer9, hrPer9, gb_percentage, worth):
    """Insert one pitcher's season line into the _<year><team> table."""
    cursor = PitcherPool.cursor()
    # the table name is concatenated (not parameterizable); every value goes
    # through a '?' placeholder
    statement = "INSERT INTO _" + year + team_name + " (Player_Name, Age, IP, WAR, WPA, FIPx, FIPXminus, ERA, ERAminus, WHIP, Kper9, HRper9, GBperc, Worth) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"
    values = (player_name, age, innings_pitched, war, wpa, fipx, fipx_minus, era, era_minus, whip, kPer9, hrPer9, gb_percentage, worth)
    cursor.execute(statement, values)
    PitcherPool.commit()
    cursor.close()
# Function to web scrape FanGraphs for every pitcher on every team
def web_scrape(playerList, year, team):
    """Scrape one team-season FanGraphs leaderboard into *playerList*.

    *team* is a 0-based index into teamList; FanGraphs numbers teams from 1,
    hence the str(team + 1) below. Only complete 16-column rows are kept,
    which filters out header/footer rows.
    NOTE(review): startdate/enddate are fixed at 2011 regardless of *year* —
    presumably ignored by FanGraphs when month=0; confirm.
    """
    url = ("https://www.fangraphs.com/leaders.aspx?pos=all&stats=pit&lg=all&qual=0&type=c,3,59,45,118,6,117,42,7,13,36,40,48,60,63&season=" + year
           + "&month=0&season1=" + year
           + "&ind=0&team=" + str(team + 1)
           + "&rost=0&age=0&filter=&players=0&startdate=2011-01-01&enddate=2011-12-31")
    page = requests.get(url).text
    soup = BeautifulSoup(page, "html.parser")
    stats_table = soup.find('table', class_ = 'rgMasterTable')
    for table_row in stats_table.find_all('tr'):
        cells = table_row.find_all('td')
        row = [cell.text for cell in cells]
        if len(row) == 16:
            playerList.append(row)
# main function to add the desired pitcher stats for every team from 2012 to 2019
def main():
    """Populate every team-season pitching table and print the total row count."""
    total_rows = 0
    for year in yearList:
        for team_index in range(30):
            pitcher_pool_table(year, teamList[team_index])
            scraped = []
            web_scrape(scraped, year, team_index)
            # column indices below map FanGraphs leaderboard columns onto
            # data_entry's parameter order
            for row in scraped:
                total_rows += 1
                data_entry(teamList[team_index], year, row[1], row[2], row[10], row[3], row[15], row[4], row[5], row[6], row[7], row[8], row[11], row[12], row[13], row[14])
    print(total_rows)
# Run only when executed as a script, not when imported.
if __name__ == "__main__":
    main()
| Python | 60 | 63.133335 | 611 | /TeamPitcherPool.py | 0.660696 | 0.610287 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.