index int64 | repo_name string | branch_name string | path string | content string | import_graph string |
|---|---|---|---|---|---|
56,442 | adrianomiranda14/framework-predictor | refs/heads/main | /helpers.py | import pandas as pd
def lower_case(dataframe):
    """Lower-case every string value in the object-dtype columns of *dataframe*.

    The frame is modified in place and also returned for chaining.
    """
    text_columns = [c for c in dataframe.columns if dataframe[c].dtypes == 'object']
    for column in text_columns:
        dataframe[column] = dataframe[column].str.lower()
    return dataframe
def clean_columns(path):
    """Read the CSV at *path* and normalise its column names.

    Names are lower-cased, spaces become underscores, and commas and
    parentheses are stripped.  Returns the loaded DataFrame.
    """
    df = pd.read_csv(path, low_memory=False)
    normalised = []
    for name in df.columns:
        name = name.lower().replace(" ", "_")
        for unwanted in (",", ")", "("):
            name = name.replace(unwanted, "")
        normalised.append(name)
    df.columns = normalised
    #df['job_role'] = df['job_role'].str.lower()
    return df
def job_title_fun(vectorizer, model, string):
    """Vectorise *string* with *vectorizer* and return *model*'s prediction."""
    features = vectorizer.transform([string])
    return model.predict(features)
def predict_job_title_prob(string):
    """Return the three most probable job-title classes for *string*.

    NOTE(review): relies on module-level globals ``vectorizer``, ``nb`` and
    ``np`` that are not defined in this file -- presumably supplied by the
    importing namespace (e.g. a notebook); confirm before reuse.
    """
    x = vectorizer.transform([string])   # vectorise the raw text
    y = nb.predict_proba(x)              # class probabilities, shape (1, n_classes)
    z = nb.classes_[np.argsort(-y)]      # classes sorted by descending probability
    a = z[0][0:3]                        # top three labels
    return a
56,479 | douglasbastos/acrux_blog | refs/heads/master | /acrux_blog/tests/functional/test_views.py | #coding: utf-8
from django.test import TestCase
from django.contrib.auth.models import User
from ...views import Post, Tag, Base
class BaseTest(TestCase, Base):
    """Functional tests for the Base view mixin (authors_with_post)."""

    @classmethod
    def setUpClass(cls):
        # BUG FIX: Django's TestCase.setUpClass performs class-level setup
        # (atomics/fixtures); skipping super() breaks test isolation.
        super(BaseTest, cls).setUpClass()
        cls.author1 = User.objects.create(username='Author 1')
        cls.author2 = User.objects.create(username='Author 2')
        cls.tag = Tag.objects.create(name='Esportes')
        cls.post = Post.objects.create(title='Um titulo teste', subtitled='Um subtitulo', slug='um-titulo-teste', content='Lorem Ipsum', tag_id=cls.tag.pk, author_id=cls.author1.pk)

    @classmethod
    def tearDownClass(cls):
        cls.post.delete()
        cls.tag.delete()
        cls.author2.delete()
        cls.author1.delete()
        # Mirror setUpClass: let TestCase roll back its class-level state.
        super(BaseTest, cls).tearDownClass()

    def test_authors_with_post(self):
        # author2 has no posts, so only author1 should be counted.
        self.assertEqual(self.authors_with_post.count(), 1)
| {"/acrux_blog/tests/functional/test_views.py": ["/acrux_blog/views.py"], "/acrux_blog/views.py": ["/acrux_blog/models.py"], "/acrux_blog/tests/functional/tests_urls.py": ["/acrux_blog/models.py"], "/acrux_blog/admin.py": ["/acrux_blog/models.py"]} |
56,480 | douglasbastos/acrux_blog | refs/heads/master | /acrux_blog/views.py | # coding: utf-8
from django.views.generic import ListView, DetailView
from django.contrib.auth.models import User
from .models import Post, Tag
class Base:
    """Mixin providing shared template-context helpers for the blog views."""

    @property
    def tags(self):
        """All tags, for the sidebar tag cloud."""
        return Tag.objects.all()

    @property
    def authors_with_post(self):
        """Users that have authored at least one post.

        BUG FIX: the original drained the set of author ids through a
        pop() loop driven by ``xrange`` (Python 2 only, so this module
        crashed on Python 3); the set of unique ids can be handed to
        ``pk__in`` directly with identical results.
        """
        posts = Post.objects.select_related('author').all()
        author_ids = set(post.author.pk for post in posts)
        return User.objects.filter(pk__in=author_ids)
class ListPosts(ListView, Base):
    """Paginated list of all posts, newest first, for the blog home page."""
    model = Post
    template_name = 'acrux_blog/home.html'
    context_object_name = 'posts'
    paginate_by = 10

    def get_queryset(self):
        # Newest publications first.
        return Post.objects.order_by('-date_publication')

    def get_context_data(self, **kwargs):
        """Add the tag cloud and the authors-with-posts list to the context."""
        context = super(ListPosts, self).get_context_data(**kwargs)
        context['tags'] = self.tags
        context['authors'] = self.authors_with_post
        return context
class ListPostsAuthor(ListPosts):
    """Posts filtered by the author username captured from the URL."""
    def get_queryset(self, **kwargs):
        # NOTE(review): raises User.DoesNotExist (-> 500) for unknown usernames.
        author = User.objects.get(username=self.kwargs['author'])
        return Post.objects.filter(author_id=author.id).order_by('-date_publication')
class ListPostsTag(ListPosts):
    """Posts filtered by the tag slug captured from the URL."""
    def get_queryset(self):
        # NOTE(review): raises Tag.DoesNotExist (-> 500) for unknown slugs.
        tag = Tag.objects.get(slug=self.kwargs['tag'])
        return Post.objects.filter(tag_id=tag.id).order_by('-date_publication')
class DetailPost(DetailView, Base):
    """Single-post page with tag cloud, related posts and author list."""
    model = Post
    template_name = 'acrux_blog/post.html'
    context_object_name = "post"

    def get_context_data(self, **kwargs):
        context = super(DetailPost, self).get_context_data(**kwargs)
        context['tags'] = self.tags
        # Up to five other posts sharing this post's tag.
        # NOTE(review): [::-1] materialises the whole queryset in Python
        # before slicing; an order_by(...)[:5] would do this in the database.
        context['tags_related'] = Post.objects.filter(tag_id=context['post'].tag_id)\
            .exclude(slug=self.kwargs['slug'])[::-1][:5]
        context['authors'] = self.authors_with_post
        return context
| {"/acrux_blog/tests/functional/test_views.py": ["/acrux_blog/views.py"], "/acrux_blog/views.py": ["/acrux_blog/models.py"], "/acrux_blog/tests/functional/tests_urls.py": ["/acrux_blog/models.py"], "/acrux_blog/admin.py": ["/acrux_blog/models.py"]} |
56,481 | douglasbastos/acrux_blog | refs/heads/master | /acrux_blog/urls.py | # coding: utf-8
from django.conf.urls import url
from django.conf import settings
from django.views.decorators.cache import cache_page
from . import views
# Route table: every view is exposed twice -- a plain route and a cached
# twin wrapped in cache_page(settings.TIME_CACHE) (TTL in seconds).
urlpatterns = [
    # without cache
    url(r'^$', views.ListPosts.as_view(), name='page_index'),
    url(r'^post/author/(?P<author>[a-zA-Z0-9\-]+)/$', views.ListPostsAuthor.as_view(), name='author_posts'),
    url(r'^post/tag/(?P<tag>[a-z0-9\-]+)$', views.ListPostsTag.as_view(), name='tags_posts'),
    url(r'^post/slug/(?P<slug>[a-z0-9\-]+)/$', views.DetailPost.as_view(), name='post'),
    # with cache
    url(r'^cache/', cache_page(settings.TIME_CACHE)(views.ListPosts.as_view()), name='page_index_cache'),
    url(r'^post_cache/author/(?P<author>[a-zA-Z0-9\-]+)/$', cache_page(settings.TIME_CACHE)(views.ListPostsAuthor.as_view()), name='author_posts_cache'),
    url(r'^post_cache/tag/(?P<tag>[a-z0-9\-]+)$', cache_page(settings.TIME_CACHE)(views.ListPostsTag.as_view()), name='tags_posts_cache'),
    url(r'^post_cache/slug/(?P<slug>[a-z0-9\-]+)/$', cache_page(settings.TIME_CACHE)(views.DetailPost.as_view()), name='post_cache'),
]
| {"/acrux_blog/tests/functional/test_views.py": ["/acrux_blog/views.py"], "/acrux_blog/views.py": ["/acrux_blog/models.py"], "/acrux_blog/tests/functional/tests_urls.py": ["/acrux_blog/models.py"], "/acrux_blog/admin.py": ["/acrux_blog/models.py"]} |
56,482 | douglasbastos/acrux_blog | refs/heads/master | /acrux_blog/migrations/0002_auto_20151019_2144.py | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import datetime
class Migration(migrations.Migration):
    """Auto-generated schema migration.

    Adds Tag.slug (the datetime.date default is a one-off value used only to
    back-fill existing rows; preserve_default=False drops it afterwards) and
    alters several Post/Tag field options (verbose names, null, unique).
    """

    dependencies = [
        ('acrux_blog', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='tag',
            name='slug',
            field=models.SlugField(default=datetime.date(2015, 10, 19), verbose_name=b'Slug'),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='post',
            name='date_edition',
            field=models.DateTimeField(auto_now=True, verbose_name=b'\xc3\x9altima altera\xc3\xa7\xc3\xa3o', null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='date_publication',
            field=models.DateTimeField(auto_now_add=True, verbose_name=b'Criado em', null=True),
        ),
        migrations.AlterField(
            model_name='post',
            name='slug',
            field=models.SlugField(verbose_name=b'SlugSlug'),
        ),
        migrations.AlterField(
            model_name='post',
            name='title',
            field=models.CharField(unique=True, max_length=55, verbose_name=b'T\xc3\xadtulo'),
        ),
        migrations.AlterField(
            model_name='tag',
            name='name',
            field=models.CharField(unique=True, max_length=55),
        ),
    ]
| {"/acrux_blog/tests/functional/test_views.py": ["/acrux_blog/views.py"], "/acrux_blog/views.py": ["/acrux_blog/models.py"], "/acrux_blog/tests/functional/tests_urls.py": ["/acrux_blog/models.py"], "/acrux_blog/admin.py": ["/acrux_blog/models.py"]} |
56,483 | douglasbastos/acrux_blog | refs/heads/master | /acrux_blog/tests/functional/tests_urls.py | #coding: utf-8
from django.test import TestCase, Client
from django.contrib.auth.models import User
from ...models import Post, Tag
class PostTest(TestCase):
    """URL-level tests for post detail pages, slash redirects and tag context.

    NOTE(review): the URLs exercised here assume detail pages are mounted at
    the site root ('/um-titulo-teste/'), which does not match the
    'post/slug/...' patterns in acrux_blog/urls.py -- verify the project-level
    URLconf.
    """

    def setUp(self):
        self.tag1 = Tag.objects.create(name='Esportes')
        self.tag2 = Tag.objects.create(name='Cinema')
        self.author1 = User.objects.create(username='author')
        self.post1 = Post.objects.create(title='Um titulo teste', subtitled='Um subtitulo', slug='um-titulo-teste', content='Lorem Ipsum', tag_id=self.tag1.pk, author_id=self.author1.pk)
        self.c = Client()

    def tearDown(self):
        # Delete in reverse creation order to respect FK dependencies.
        self.post1.delete()
        self.author1.delete()
        self.tag2.delete()
        self.tag1.delete()

    def test_post_por_slug(self):
        # Existing slug resolves to the detail page.
        response = self.c.get('/um-titulo-teste/')
        self.assertEqual(response.status_code, 200)

    def test_post_sem_barra(self):
        # Missing trailing slash triggers the APPEND_SLASH redirect.
        response = self.c.get('/um-titulo-teste')
        self.assertEqual(response.status_code, 301)

    def test_slug_nao_existe(self):
        # Unknown slug returns 404.
        response = self.c.get('/um-titulo/')
        self.assertEqual(response.status_code, 404)

    def test_get_tags(self):
        response = self.c.get('/um-titulo-teste/')
        tags = response.context['tags'].values()
        self.assertEqual(tags[0]['name'], u'Esportes')

    def test_count_tags(self):
        # Both tags appear in the context even though only tag1 has a post.
        response = self.c.get('/um-titulo-teste/')
        tags = response.context['tags'].values()
        self.assertEqual(tags.count(), 2)
| {"/acrux_blog/tests/functional/test_views.py": ["/acrux_blog/views.py"], "/acrux_blog/views.py": ["/acrux_blog/models.py"], "/acrux_blog/tests/functional/tests_urls.py": ["/acrux_blog/models.py"], "/acrux_blog/admin.py": ["/acrux_blog/models.py"]} |
56,484 | douglasbastos/acrux_blog | refs/heads/master | /setup.py | # coding: utf-8
from setuptools import find_packages, setup
# Packaging metadata for the acrux_blog distribution.
setup(
    name='acrux_blog',
    description='Blog que adiciona dados em um Banco Relacional e no Redis, para seu comparativo',
    author='Douglas Bastos',
    author_email='douglashsb@gmail.com',
    url='',
    version='0.0.1',
    packages=find_packages(),
    include_package_data=True,
    # Runtime dependency is pinned; Django itself is assumed to be provided
    # by the deploying project (NOTE(review): confirm -- it is not listed).
    install_requires=[
        'redis==2.10.3'
    ]
)
| {"/acrux_blog/tests/functional/test_views.py": ["/acrux_blog/views.py"], "/acrux_blog/views.py": ["/acrux_blog/models.py"], "/acrux_blog/tests/functional/tests_urls.py": ["/acrux_blog/models.py"], "/acrux_blog/admin.py": ["/acrux_blog/models.py"]} |
56,485 | douglasbastos/acrux_blog | refs/heads/master | /acrux_blog/models.py | # coding: utf-8
from django.db import models
from django.contrib.auth.models import User
from django.template.defaultfilters import slugify
class Tag(models.Model):
    """A post category; the slug is regenerated from the name on every save."""
    name = models.CharField('Categoria', max_length=55, unique=True)
    slug = models.SlugField('Slug')

    def __unicode__(self):  # Python 2 display name
        return self.name

    def save(self, *args, **kwargs):
        # Slug always mirrors the slugified name, including on updates.
        self.slug = slugify(self.name)
        super(Tag, self).save(*args, **kwargs)
class Post(models.Model):
    """A blog post belonging to one Tag and one author (auth User).

    date_publication is set once on creation; date_edition on each save.
    The slug is regenerated from the title on every save.
    """
    title = models.CharField('Título', max_length=55, unique=True)
    subtitled = models.CharField('Subtítulo', max_length=55)
    slug = models.SlugField('Slug')
    content = models.TextField('Texto')
    date_publication = models.DateTimeField('Criado em', editable=False, auto_now_add=True, null=True)
    date_edition = models.DateTimeField('Última alteração', editable=False, auto_now=True, null=True)
    tag = models.ForeignKey(Tag)
    author = models.ForeignKey(User, verbose_name='Autor do post')

    class Meta:
        verbose_name = u'Post'
        verbose_name_plural = u'Posts'

    def __unicode__(self):  # Python 2 display name
        return self.title

    def save(self, *args, **kwargs):
        # Slug always mirrors the slugified title.
        self.slug = slugify(self.title)
        super(Post, self).save(*args, **kwargs)
| {"/acrux_blog/tests/functional/test_views.py": ["/acrux_blog/views.py"], "/acrux_blog/views.py": ["/acrux_blog/models.py"], "/acrux_blog/tests/functional/tests_urls.py": ["/acrux_blog/models.py"], "/acrux_blog/admin.py": ["/acrux_blog/models.py"]} |
56,486 | douglasbastos/acrux_blog | refs/heads/master | /acrux_blog/admin.py | # coding: utf-8
from django.contrib import admin
from .models import Post, Tag
class TagAdmin(admin.ModelAdmin):
    """Admin for Tag: only the name is editable; slug is derived on save."""
    list_display = ('name',)
    fieldsets = ((None, {
        'fields': ('name',),
    }),)
class PostAdmin(admin.ModelAdmin):
    """Admin for Post: slug and dates are shown but not edited (auto-set)."""
    list_display = ('title', 'subtitled', 'slug',
                    'date_publication', 'date_edition',
                    'tag', 'author')
    search_fields = ['title']
    fieldsets = ((None, {
        'fields': ('title', 'subtitled', 'content', 'tag', 'author'),
    }),)

# Register both models with their customised admin classes.
admin.site.register(Post, PostAdmin)
admin.site.register(Tag, TagAdmin)
| {"/acrux_blog/tests/functional/test_views.py": ["/acrux_blog/views.py"], "/acrux_blog/views.py": ["/acrux_blog/models.py"], "/acrux_blog/tests/functional/tests_urls.py": ["/acrux_blog/models.py"], "/acrux_blog/admin.py": ["/acrux_blog/models.py"]} |
56,490 | beppe712/honours-project | refs/heads/master | /HDL_gen/ssd_wrapper_gen.py | from hdl_utils import *
def gen_ssd_wrapper(r,outpath="ssd_wrapper.sv"):
    """Generate ssd_wrapper.sv: drives two 7-segment displays with OUT.

    r -- width in bits of the CLU result bus OUT; when r < 16 the bus is
    zero-extended to the 16 bits consumed by the two 2-digit displays.
    """
    outfile = open(outpath,'w+')
    outfile.write(gen_heading())
    outfile.write(gen_description("ssd_wrapper","Two Seven Segment Display Wrapper","ssd_driver"))
    # Module ports; OUT is r bits wide.
    module_io = """
module ssd_wrapper(
input CLK,
input DONE,
input [{}:0] OUT,
output [6:0] SSD_A[1:0],
output SSD_C
);
""".format(r-1)
    outfile.write(module_io)
    # Variant 1: zero-extend OUT up to 16 bits (used when r < 16).
    digits_wrapper_1 = """
wire [15:0] digits;
assign digits = {{ {}'d0, OUT }};
""".format(16-r)
    # Variant 2: OUT already fills all 16 bits (r == 16).
    digits_wrapper_2 = """
wire [15:0] digits;
assign digits = OUT;
""".format()
    if (r < 16):
        outfile.write(digits_wrapper_1)
    else:
        outfile.write(digits_wrapper_2)
    # Free-running counter multiplexes the display cathode; one ssd_driver
    # instance per display, each fed an 8-bit slice of `digits`.
    module_logic_2 = """
reg [20:0] counter_r;
initial
counter_r = 0;
always @(posedge CLK)
counter_r <= counter_r + 1;
assign SSD_C = counter_r[20];
genvar ssdNo;
generate
for (ssdNo = 0; ssdNo < 2; ssdNo = ssdNo + 1)
begin: SSDInstantiation
ssd_driver u_ssd_driver (
.done (DONE),
.ssd_input (digits[(ssdNo * 8)+7 : (ssdNo * 8)]),
.ssd_c (SSD_C),
.ssd_a (SSD_A[ssdNo])
);
end
endgenerate
""".format()
    outfile.write(module_logic_2)
    outfile.write("endmodule")
    outfile.close()
def main():
    """Interactive entry point (Python 2: raw_input)."""
    r = int(raw_input("Insert number of CLUs: "))
    gen_ssd_wrapper(r)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,491 | beppe712/honours-project | refs/heads/master | /DICE/utils.py | import re
import itertools
# Check string file
def check_string(string):
    """Return True iff *string* contains only the DNA letters A, C, G, T."""
    valid_bases = {'A', 'C', 'G', 'T'}
    return all(char in valid_bases for char in string)
# Pattern for generating CBGs
def gen_pattern(pat, g):
    """Build every '.'-wildcarded variant of *pat* with g wildcards inserted.

    Each variant is a string of length len(pat)+g in which the pattern's
    letters keep their relative order and g positions are the regex wildcard.
    """
    m = len(pat)
    cbgs = []
    for wild_index in itertools.combinations(range(m + g), g):
        wild_positions = set(wild_index)
        letters = iter(pat)
        chars = ['.' if i in wild_positions else next(letters)
                 for i in range(m + g)]
        cbgs.append(''.join(chars))
    return cbgs
def gen_pattern2(pat, g):
    """Like gen_pattern, but each variant is a list of (index, letter) pairs.

    Only the non-wildcard positions are listed, which lets the matcher test
    exactly the constrained characters instead of running a regex.
    """
    m = len(pat)
    cbgs = []
    for wild_index in itertools.combinations(range(m + g), g):
        wild_positions = set(wild_index)
        letters = iter(pat)
        cbgs.append([(i, next(letters))
                     for i in range(m + g) if i not in wild_positions])
    return cbgs
# Matching
def find_matches(cbgs, string):
    """Slide a window of len(cbgs[0]) over *string*; record (pos, window)
    for every position where at least one regex variant matches."""
    matches = []
    window = len(cbgs[0])
    for start in range(len(string) - window + 1):
        chunk = string[start:start + window]
        if any(re.search(pattern, chunk) for pattern in cbgs):
            matches.append((start, chunk))
    return matches
def find_matches2(cbgs, string, m_plus_g):
    """Constraint-list matcher: record (pos, window) wherever some variant's
    (index, letter) constraints all hold inside the m_plus_g-wide window."""
    matches = []
    for start in range(len(string) - m_plus_g + 1):
        chunk = string[start:start + m_plus_g]
        for constraints in cbgs:
            if all(chunk[idx] == letter for idx, letter in constraints):
                matches.append((start, chunk))
                break  # one matching variant per position is enough
    return matches
# Convert string into 0 and 1's
def gen_bin_string(string):
    """Encode DNA letters as 2-bit strings: A=00, C=01, G=10, T=11.

    Characters outside {A, C, G, T} are silently skipped, mirroring the
    original branch chain.
    """
    encoding = {'A': '00', 'C': '01', 'G': '10', 'T': '11'}
    return [encoding[ch] for ch in string if ch in encoding]
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,492 | beppe712/honours-project | refs/heads/master | /DICE/rand_dna_gen.py | import sys
from random import randint
def gen_rand_dna(str_len, file_name):
    """Write a uniformly random DNA string of length *str_len* to *file_name*."""
    alphabet = ('A', 'C', 'G', 'T')
    sequence = ''.join(alphabet[randint(0, 3)] for _ in range(str_len))
    outfile = open(file_name, 'w+')
    outfile.write(sequence)
    outfile.close()
def main():
    """Interactive entry point (Python 2: raw_input)."""
    str_len = int(raw_input("Insert string length: "))
    file_name = raw_input("Insert output file name: ")
    gen_rand_dna(str_len, file_name)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,493 | beppe712/honours-project | refs/heads/master | /HDL_gen/clu_gen.py | import sys
from hdl_utils import *
import itertools
def gen_cbg(m, g):
    """Build the SystemVerilog comparison expression for every gap placement.

    Each expression AND-combines STR/PAT equality checks at the non-wildcard
    positions of one gap placement; the CLU ORs them all together.
    """
    cbgs = []
    for wild_index in itertools.combinations(range(m + g), g):
        terms = []
        pat_pos = 0
        for str_pos in range(m + g):
            if str_pos not in wild_index:
                terms.append('(STR[{}] == PAT[{}])'.format(str_pos, pat_pos))
                pat_pos += 1
        cbgs.append('(' + ' && '.join(terms) + ')')
    return cbgs
def gen_clu(m,g,outpath="clu.sv"):
    """Generate clu.sv: one Comparison Logic Unit.

    The CLU matches a length-m pattern against an (m+g)-symbol window of the
    string, tolerating up to g inserted gaps; OUT is the OR of one AND-term
    per gap placement (from gen_cbg).
    """
    outfile = open(outpath,'w+')
    outfile.write(gen_heading())
    outfile.write(gen_description("clu","Comparison Logic Unit","None"))
    module_io = """
module clu(
input [1:0] STR[0:{}],
input [1:0] PAT[0:{}],
output OUT
);
""".format(m+g-1,m-1)
    outfile.write(module_io)
    module_logic = """
assign OUT = ("""
    cbgs = gen_cbg(m,g)
    # Join the per-placement expressions with '||', one per generated line.
    for (i,cbg) in enumerate(cbgs):
        module_logic += cbg
        if (i < (len(cbgs)-1)):
            module_logic += """
|| """
    module_logic += ");\n\n"
    outfile.write(module_logic)
    outfile.write("endmodule")
    outfile.close()
def main():
    """Interactive entry point (Python 2: raw_input)."""
    m = int(raw_input("Insert length of the pattern: "))
    g = int(raw_input("Insert maximum gap parameter: "))
    gen_clu(m,g)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,494 | beppe712/honours-project | refs/heads/master | /HDL_gen/buf_gen.py | from hdl_utils import *
def gen_buf(m,g,r,str_len,outpath="buffer.sv",sim=False):
    """Generate buffer.sv: streams string windows and the pattern to the CLUs.

    m -- pattern length, g -- max gaps, r -- number of CLUs,
    str_len -- encoded string length in symbols.
    sim=True emits a free-running variant that advances every clock;
    sim=False emits the board variant stepped by the push buttons.
    """
    outfile = open(outpath,'w+')
    outfile.write(gen_heading())
    outfile.write(gen_description("buffer","String and Pattern buffer","None"))
    module_io = """
module buffer(
input CLK,
input RESET,
input [1:0] BUTTONS,
output reg [1:0] STR[0:{}],
output reg [1:0] PAT[0:{}],
output reg DONE,
output [1:0] LEDS
);
""".format(m+g+r-2,m-1)
    outfile.write(module_io)
    # Number of window positions and the counter width needed to index them.
    # NOTE(review): '/' is Python-2 integer division here; use '//' if this
    # is ever ported to Python 3 (bit_length() would fail on a float).
    count_times = (str_len-(m+g+r-1))/r + 1
    count_size = count_times.bit_length()
    def gen_count_case():
        # One case arm per window position, slicing the string memory.
        out = ""
        for i in range(count_times):
            out += """
{}'d{}: STR <= str_mem[{}:{}];""".format(count_size,i,i*r,i*r+m+g+r-2)
        return out
    module_logic = ""
    if (sim):
        # Simulation variant: counter advances on every clock edge.
        module_logic = """
reg [1:0] str_mem[0:{}];
reg [1:0] pat_mem[0:{}];
reg [{}:0] count;
initial begin
$readmemb("pat.list", pat_mem);
$readmemb("string.list", str_mem);
count = 0;
DONE = 0;
end
always@(posedge CLK or posedge RESET) begin
if (RESET) begin
count <= 0;
DONE <= 0;
end
else if (count >= {})
DONE <= 1;
else
count <= count + 1;
end
always@(posedge CLK) begin
case (count){}
endcase
PAT <= pat_mem[0:{}];
end
assign LEDS = 2'b00;
""".format(str_len-1,m-1,count_size-1,count_times,gen_count_case(),m-1)
    else:
        # Board variant: alternate button presses advance the counter and the
        # LEDs indicate which button is expected next.
        module_logic = """
reg [1:0] str_mem[0:{}];
reg [1:0] pat_mem[0:{}];
reg [{}:0] count;
initial begin
$readmemb("pat.list", pat_mem);
$readmemb("string.list", str_mem);
count = 0;
DONE = 0;
end
always@(posedge CLK or posedge RESET) begin
if (RESET) begin
count <= 0;
DONE <= 0;
end
else if (count >= {})
DONE <= 1;
else if (((count[0] == 1'b0) && BUTTONS[0]) || ((count[0] == 1'b1) && BUTTONS[1]))
count <= count + 1;
end
always@(posedge CLK) begin
case (count){}
endcase
PAT <= pat_mem[0:{}];
end
assign LEDS = (count[0] == 1'b0) ? 2'b01 : 2'b10;
""".format(str_len-1,m-1,count_size-1,count_times,gen_count_case(),m-1)
    outfile.write(module_logic)
    outfile.write("endmodule")
    outfile.close()
def main():
    """Interactive entry point (Python 2: raw_input)."""
    m = int(raw_input("Insert length of the pattern: "))
    g = int(raw_input("Insert maximum gap parameter: "))
    r = int(raw_input("Insert number of CLUs: "))
    str_len = int(raw_input("Insert length of the string: "))
    sim = False
    if (raw_input("Is this module for simulation? ") in ['yes','y','Yes']):
        sim = True
    gen_buf(m,g,r,str_len,sim=sim)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,495 | beppe712/honours-project | refs/heads/master | /DICE/search.py | import sys
import time
import datetime
from utils import *
# Print
def print_matches(matches, resultpath):
    """Print *matches* to stdout and, when *resultpath* is truthy, also write
    them to that file as '<position> <substring>' lines.

    BUG FIX: the original opened the result file and then hit the
    empty-matches early return without closing it, leaking the handle; the
    file is now always closed (the open still happens first so an empty run
    still truncates/creates the file, as before).
    """
    resultfile = open(resultpath, 'w+') if resultpath else None
    try:
        if not matches:
            print("No matches found")
            return
        for match in matches:
            print(match)
            if resultfile:
                resultfile.write('{} {}\n'.format(match[0], match[1]))
    finally:
        if resultfile:
            resultfile.close()
    return
# Main
# Main
def main():
    """CLI: search <string_file> <pattern_file> [result_file] (Python 2)."""
    stringfpath = sys.argv[1]
    patfpath = sys.argv[2]
    resultpath = False
    if len(sys.argv) > 3:
        resultpath = sys.argv[3]
    stringfile = open(stringfpath, 'r')
    string = stringfile.read().replace('\n', '')
    stringfile.close()
    patfile = open(patfpath, 'r')
    pat = patfile.read()
    patfile.close()
    # NOTE(review): str.upper() returns a new string and the result is
    # discarded here, so lower-case input still fails check_string --
    # likely should be `string = string.upper()`.
    string.upper()
    if not check_string(string):
        print('The string contains an invalid character')
    g = int(raw_input("Insert maximum gap parameter: "))
    cbgs = gen_pattern2(pat,g)
    start_time = datetime.datetime.now()
    matches = find_matches2(cbgs,string, len(pat)+g)
    print('Elapsed time: {}'.format(datetime.datetime.now() - start_time))
    print_matches(matches,resultpath)
    return

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,496 | beppe712/honours-project | refs/heads/master | /DICE/pat_to_bin.py | from string_to_bin import *
def main():
    """CLI: pat_to_bin <pattern_file> [out_path]; defaults into the Vivado tree."""
    stringfpath = sys.argv[1]
    outpath = "vivado-project/vivado-project.srcs/sources_1/imports/DICE/pat.txt"
    if len(sys.argv) > 2:
        outpath = sys.argv[2]
    # r defaults to 0 inside make_bin, i.e. flat pattern-memory mode.
    make_bin(stringfpath,outpath)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,497 | beppe712/honours-project | refs/heads/master | /HDL_gen/top_gen.py | from hdl_utils import *
def gen_top(m,g,r,outpath="top.sv"):
    """Generate top.sv: wires the buffer, the CLU array and the SSD wrapper.

    m -- pattern length, g -- max gaps, r -- number of CLUs (bits of OUT).
    """
    outfile = open(outpath,'w+')
    outfile.write(gen_heading())
    outfile.write(gen_description("top","Top level module","clu, clu_complex, buffer, ssd_driver, ssd_wrapper"))
    module_io = """
module top(
input CLK,
input RESET,
input [1:0] BUTTONS,
output [6:0] SSD_A_0,
output [6:0] SSD_A_1,
output [1:0] SSD_C,
output [1:0] LEDS
);
""".format()
    outfile.write(module_io)
    # Instantiate and connect the three submodules; the single cathode signal
    # is fanned out to both displays at the end.
    module_logic = """
wire [1:0] STR[0:{}];
wire [1:0] PAT[0:{}];
wire DONE;
wire [{}:0] OUT;
wire SSD_C_SINGLE;
buffer u_buffer(
.CLK (CLK),
.RESET (RESET),
.BUTTONS(BUTTONS),
.STR (STR),
.PAT (PAT),
.DONE (DONE),
.LEDS (LEDS)
);
clu_complex u_clu_complex(
.STR (STR),
.PAT (PAT),
.OUT (OUT)
);
ssd_wrapper u_ssd_wrapper(
.CLK (CLK),
.DONE (DONE),
.OUT (OUT),
.SSD_A ({{SSD_A_1, SSD_A_0}}),
.SSD_C (SSD_C_SINGLE)
);
assign SSD_C = {{2{{SSD_C_SINGLE}}}};
""".format(m+g+r-2,m-1,r-1)
    outfile.write(module_logic)
    outfile.write("endmodule")
    outfile.close()
def main():
    """Interactive entry point (Python 2: raw_input)."""
    m = int(raw_input("Insert length of the pattern: "))
    g = int(raw_input("Insert maximum gap parameter: "))
    r = int(raw_input("Insert number of CLUs: "))
    gen_top(m,g,r)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,498 | beppe712/honours-project | refs/heads/master | /decoder.py | import sys
from DICE.utils import *
def decode(string_file, m, g, r, result_file):
    """Translate the hardware's hex result dump back into printed matches.

    Each line of *result_file* is an r-bit hex word for one string window;
    bit j set means CLU j matched, so the match starts at line_index*r + j
    and spans m+g symbols of the string.
    """
    with open(string_file, 'r') as handle:
        string = handle.read()
    with open(result_file, 'r') as handle:
        results = [line.strip() for line in handle.readlines()]
    str_len = len(string)
    for i, line in enumerate(results):
        val = int(line, 16)
        if val <= 0:
            continue
        # Left-pad the bit string to exactly r digits.
        bits = bin(val)[2:].zfill(r)
        for j, bit in enumerate(bits):
            if bit == '1':
                pos = i * r + j
                if pos + m + g <= str_len:
                    print("({}, '{}')".format(pos, string[pos:pos + m + g]))
def main():
    """CLI: decoder <result_file> <string_file> <pattern_file> (Python 2)."""
    g = int(raw_input("Insert maximum gap parameter: "))
    r = int(raw_input("Insert number of CLUs: "))
    result_file = sys.argv[1]
    string_file = sys.argv[2]
    pat_path = sys.argv[3]
    patfile = open(pat_path, 'r')
    pat = patfile.read()
    patfile.close()
    m = len(pat)  # pattern length is taken from the file contents
    decode(string_file, m, g, r, result_file)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,499 | beppe712/honours-project | refs/heads/master | /DICE/string_to_bin.py | import sys
from utils import *
def make_bin(stringfpath, outpath, m=0, g=0, r=0):
    """Encode the DNA text in *stringfpath* as 2-bit binary lines in *outpath*.

    With r == 0 the file is written as a flat pattern memory (one encoded
    symbol per line).  Otherwise the string is emitted in buffer-sized
    groups: a first window of m+g+r-1 symbols, then further groups of r
    symbols, blank-line separated and zero-padded at the end.

    Returns the number of symbol positions consumed (including padding).
    """
    stringfile = open(stringfpath, 'r')
    string = stringfile.read()
    stringfile.close()
    # BUG FIX: str.upper() returns a new string; the original discarded the
    # result, so lower-case input failed validation AND was silently dropped
    # by gen_bin_string, producing a truncated memory file.
    string = string.upper()
    if not check_string(string):
        print('The string contains an invalid character')
    outfile = open(outpath, 'w+')
    bin_list = gen_bin_string(string)
    str_len = len(bin_list)
    i = 0
    if (r == 0):
        # for pattern: flat dump, one symbol per line
        for a in range(str_len):
            outfile.write(bin_list[a] + '\n')
        i = str_len
    else:
        # for strings: first window spans m+g+r-1 symbols
        for j in range(m+g+r-1):
            if (i < str_len):
                outfile.write(bin_list[i] + '\n')
            else:  # Padding
                outfile.write('00\n')
            i += 1
        outfile.write('\n')
        # subsequent groups advance the window by r symbols each
        while (i < str_len):
            for j in range(r):
                if (i < str_len):
                    outfile.write(bin_list[i] + '\n')
                else:  # Padding
                    outfile.write('00\n')
                i += 1
            outfile.write('\n')
    outfile.close()
    return i
# Main
def main():
    """CLI: string_to_bin <string_file> -> string.list (Python 2)."""
    stringfpath = sys.argv[1]
    outpath = "string.list"
    m = int(raw_input("Insert length of the pattern: "))
    g = int(raw_input("Insert maximum gap parameter: "))
    r = int(raw_input("Insert number of CLUs: "))
    make_bin(stringfpath,outpath,m,g,r)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,500 | beppe712/honours-project | refs/heads/master | /HDL_gen/clu_complex_gen.py | from hdl_utils import *
def gen_clu_complex(m,g,r,outpath="clu_complex.sv"):
    """Generate clu_complex.sv: an array of r CLUs over a shared string window.

    CLU number ``cluNo`` sees the window starting at offset cluNo; its result
    is placed at bit r-1-cluNo of OUT (leftmost CLU drives the MSB).
    """
    outfile = open(outpath,'w+')
    outfile.write(gen_heading())
    outfile.write(gen_description("clu_complex","Comparison Logic Unit","clu"))
    module_io = """
module clu_complex(
input [1:0] STR[0:{}],
input [1:0] PAT[0:{}],
output [{}:0] OUT
);
""".format(m+g+r-2,m-1,r-1)
    outfile.write(module_io)
    module_logic = """
genvar cluNo;
generate
for (cluNo = 0; cluNo < {}; cluNo = cluNo + 1)
begin: CLUInstantiation
clu u_clu (.STR(STR[cluNo:cluNo + {}]), .PAT(PAT), .OUT(OUT[{} - cluNo]));
end
endgenerate
""".format(r, m+g-1, r-1)
    outfile.write(module_logic)
    outfile.write("endmodule")
    outfile.close()
def main():
    """Interactive entry point (Python 2: raw_input)."""
    m = int(raw_input("Insert length of the pattern: "))
    g = int(raw_input("Insert maximum gap parameter: "))
    r = int(raw_input("Insert number of CLUs: "))
    gen_clu_complex(m,g,r)

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,501 | beppe712/honours-project | refs/heads/master | /HDL_gen/hdl_utils.py | import time
import datetime
# indent_level = 0
def gen_heading():
    """Return the standard generated-file heading: the simulation timescale
    directive followed by the default net type."""
    heading_lines = ("`timescale 1ns / 1ps", "`default_nettype wire", "")
    return "\n".join(heading_lines)
def gen_description(module="", descr="", depends=""):
    """Return the standard comment banner describing a generated HDL module.

    The Create Date field is stamped with the current local time.
    """
    border = "//////////////////////////////////////////////////////////////////////////////////\n"
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    banner = [
        border,
        "// Company: The University of Edinburgh\n",
        "// Engineer: Giuseppe Li (s1402587)\n",
        "//\n",
        "// Create Date: {}\n".format(timestamp),
        "// Design name: FPGA accelerator for DNA pattern matching\n",
        "// Module Name: {}\n".format(module),
        "// Project Name: 4th Year Honours Project (BSc (Hons) Artificial Intelligence and Computer Science)\n",
        "// Description: {}\n".format(descr),
        "//\n",
        "// Dependences: {}\n".format(depends),
        "//\n",
        "// Additional Comments: This file has been automatically generated\n",
        "//\n",
        border,
    ]
    return "".join(banner)
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,502 | beppe712/honours-project | refs/heads/master | /make_sys.py | from DICE.string_to_bin import make_bin
from HDL_gen.buf_gen import gen_buf
from HDL_gen.clu_complex_gen import gen_clu_complex
from HDL_gen.clu_gen import gen_clu
from HDL_gen.ssd_wrapper_gen import gen_ssd_wrapper
from HDL_gen.top_gen import gen_top
import os
import errno
import sys
from subprocess import call
# Output locations inside the Vivado project tree; both currently point at
# the same imports directory.
strings_path = "vivado-project/vivado-project.srcs/sources_1/imports/HDL_gen/"
hdl_path = "vivado-project/vivado-project.srcs/sources_1/imports/HDL_gen/"
# Create the directories up front; tolerating EEXIST makes makedirs race-free
# on Python 2, which has no exist_ok flag.
for path in [strings_path,hdl_path]:
    if not os.path.exists(path):
        try:
            os.makedirs(path)
        except OSError as exc:
            if exc.errno != errno.EEXIST:
                raise
def make_sys(string_file, pat_file, g, r):
    """Generate all binaries and SystemVerilog sources for one search run.

    The pattern length m is taken from make_bin's pattern-mode return value
    and sizes both the string encoding and every generated HDL module.
    """
    # Make DNA string binaries
    m = make_bin(pat_file, strings_path + "pat.list")
    str_len = make_bin(string_file, strings_path + "string.list", m, g, r)
    print("Generated string binary files")
    # Make HDL SystemVerilog files
    gen_buf(m,g,r,str_len,hdl_path+"buffer.sv")
    print("Generated buffer HDL")
    gen_clu_complex(m,g,r,hdl_path+"clu_complex.sv")
    print("Generated clu complex HDL")
    gen_clu(m,g,hdl_path+"clu.sv")
    print("Generated clu HDL")
    gen_ssd_wrapper(r,hdl_path+"ssd_wrapper.sv")
    print("Generated SSD wrapper HDL")
    gen_top(m,g,r,hdl_path+"top.sv")
    print("Generated top HDL")
def main():
    """CLI: make_sys <string_file> <pattern_file> [run_vivado] (Python 2)."""
    string_file = sys.argv[1]
    pat_file = sys.argv[2]
    g = int(raw_input("Insert maximum gap parameter: "))
    r = int(raw_input("Insert number of CLUs: "))
    if (r > 16):
        # The two 7-segment displays can show at most 16 result bits.
        r = int(raw_input("The physical system supports up to 16 CLUs.\nInsert number of CLUs: "))
    make_sys(string_file, pat_file, g, r)
    if (len(sys.argv) > 3):
        # Any third argument triggers the batch Vivado project setup.
        call(['bash','-c',"vivado -nolog -nojournal -mode batch -source setup.tcl"])

if __name__ == "__main__":
    # execute only if run as a script
    main()
| {"/decoder.py": ["/DICE/utils.py"], "/make_sys.py": ["/DICE/string_to_bin.py", "/HDL_gen/buf_gen.py", "/HDL_gen/clu_complex_gen.py", "/HDL_gen/clu_gen.py", "/HDL_gen/ssd_wrapper_gen.py", "/HDL_gen/top_gen.py"]} |
56,503 | williamium3000/pytorch-DQN | refs/heads/main | /test.py | import numpy as np
import torch
# Sanity check: row-wise argmax of a 3x3 tensor.
t = torch.tensor([[1, 4, 5], [2, 3, 7], [4, 6, 9]])
# BUG FIX: use torch.argmax rather than np.argmax on a torch.Tensor --
# the NumPy reduction relies on implicit tensor->ndarray conversion and
# yields an ndarray instead of a tensor.
print(torch.argmax(t, dim=1))
| {"/agent_pong.py": ["/Q_network_pong.py"], "/train.py": ["/Q_network.py"], "/eval.py": ["/Q_network.py"]} |
56,504 | williamium3000/pytorch-DQN | refs/heads/main | /agent_pong.py | import sys
import os
sys.path.append("DQN")
import Q_network_pong
import numpy as np
import torch
from torch import nn
from torch.optim import lr_scheduler
import copy
class DQN_agent():
    def __init__(self, num_act, dim_obs, gamma, lr, e_greedy, e_greed_decrement):
        """DQN agent with a target network and epsilon-greedy exploration.

        num_act -- size of the discrete action space
        dim_obs -- observation shape/dimension passed to the Q network
        gamma -- reward discount factor
        lr -- Adam learning rate
        e_greedy -- initial exploration probability
        e_greed_decrement -- per-sample epsilon decay
        """
        self.model = Q_network_pong.Q_network(dim_obs, num_act)
        self.target_model = Q_network_pong.Q_network(dim_obs, num_act)
        # Start the target network as an exact copy of the online network.
        self.target_model.load_state_dict(copy.deepcopy(self.model.state_dict()))
        self.Loss = nn.MSELoss()
        self.device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        self.dim_obs = dim_obs
        self.num_act = num_act
        self.gamma = gamma
        self.lr = lr
        self.global_step = 0
        # Copy the online weights into the target network every N learn() calls.
        # NOTE(review): the original (Chinese) comment said every 1000 training
        # steps, but the value here is 100 -- confirm which is intended.
        self.update_target_steps = 100
        self.optim = torch.optim.Adam(self.model.parameters(), self.lr)
        self.scheduler = lr_scheduler.StepLR(self.optim, step_size=500, gamma=0.05)
        self.e_greedy = e_greedy  # probability of taking a random (exploratory) action
        self.e_greedy_decrement = e_greed_decrement  # epsilon decays as training converges
def sample(self, obs):
sample = np.random.rand()
if sample < self.e_greedy:
action = np.random.choice(self.num_act)
else:
action = self.predict(obs)
self.e_greedy = max(0.02, self.e_greedy - self.e_greedy_decrement)
return action
def predict(self, obs):
self.model.eval()
with torch.no_grad():
obs = np.expand_dims(obs, axis = 0)
obs = torch.tensor(obs, dtype = torch.float32)
# print("obs.shape:{}".format(obs.shape))
self.model.to(self.device)
obs = obs.to(self.device)
pred_Q = self.model(obs)
pred_Q = pred_Q.cpu()
# print("pred_Q.shape:{}".format(pred_Q.shape))
pred_Q = np.squeeze(pred_Q, axis=0)
# print("pred_Q.shape:{}".format(pred_Q.shape))
act = np.argmax(pred_Q).item() # 选择Q最大的下标,即对应的动作
return act
def learn(self, obs, act, reward, next_obs, terminal):
if self.global_step % self.update_target_steps == 0:
self.sync_target()
self.model.to(self.device)
self.model.train()
self.global_step += 1
act = np.expand_dims(act, -1)
reward = np.expand_dims(reward, -1)
terminal = np.expand_dims(terminal, -1)
obs, act, reward, next_obs, terminal = torch.tensor(obs, dtype = torch.float32), torch.tensor(act, dtype = torch.int64), torch.tensor(reward, dtype = torch.float32), torch.tensor(next_obs, dtype = torch.float32), torch.tensor(terminal, dtype = torch.float32)
obs, act, reward, next_obs, terminal = obs.to(self.device), act.to(self.device), reward.to(self.device), next_obs.to(self.device), terminal.to(self.device)
self.target_model.to(self.device)
next_pred_value = self.target_model(next_obs)
best_value = torch.max(next_pred_value, -1, keepdim = True)[0]
target = reward + (1.0 - terminal) * self.gamma * best_value
y = self.model(obs)
y = torch.gather(y, 1, act)
# print(y[0], target[0])
# print(target.shape)
# print(y.shape)
loss = self.Loss(y, target)
self.optim.zero_grad()
loss.backward()
# for param in self.model.parameters():
# param.grad.data.clamp_(-1, 1)
self.optim.step()
# self.scheduler.step()
def save(self, name):
torch.save(self.model, name + ".pth")
def load(self, path):
self.model = torch.load(path, map_location="cuda:0" if torch.cuda.is_available() else "cpu")
self.sync_target()
def sync_target(self):
print("sync model to target model")
self.target_model.load_state_dict(copy.deepcopy(self.model.state_dict()))
self.target_model.eval()
| {"/agent_pong.py": ["/Q_network_pong.py"], "/train.py": ["/Q_network.py"], "/eval.py": ["/Q_network.py"]} |
56,505 | williamium3000/pytorch-DQN | refs/heads/main | /Q_network.py | import torch
from torch import nn
import random
import numpy as np
import torchvision
import torchvision.utils
import torch
import torch.nn as nn
from torchvision import models
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
# 设置随机数种子
setup_seed(20)
class Q_network(nn.Module):
def __init__(self, num_obs, num_act):
super(Q_network, self).__init__()
self.fc1 = nn.Linear(num_obs, 128, True)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(128, 128, True)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(128, num_act, True)
def forward(self, x):
x = self.fc1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.relu2(x)
x = self.fc3(x)
return x
if __name__ == "__main__":
test = Q_network((210, 160, 3), 2)
t = torch.rand((10, 3, 210, 160))
print(test(t).size())
| {"/agent_pong.py": ["/Q_network_pong.py"], "/train.py": ["/Q_network.py"], "/eval.py": ["/Q_network.py"]} |
56,506 | williamium3000/pytorch-DQN | refs/heads/main | /train.py | import gym
import sys
import agent
import Q_network
import experience_replay
import torch
import numpy as np
import logging
from torch.utils.tensorboard import SummaryWriter
def run_episode(env, agent, rpm):
total_reward = 0
obs = env.reset()
step = 0
while True:
step += 1
action = agent.sample(obs)
next_obs, reward, done, _ = env.step(action)
rpm.append((obs, action, reward, next_obs, done))
# train model
if (len(rpm) > opt["MEMORY_WARMUP_SIZE"] and (step % opt["LEARN_FREQ"] == 0)):
(batch_obs, batch_action, batch_reward, batch_next_obs, batch_done) = rpm.sample(opt["BATCH_SIZE"])
agent.learn(batch_obs, batch_action, batch_reward, batch_next_obs, batch_done) # s,a,r,s',done
total_reward += reward
obs = next_obs
if done:
break
return total_reward, step
# 评估 agent, 跑 5 个episode,总reward求平均
def evaluate(times, env, agent, render=False):
with torch.no_grad():
eval_reward = []
for i in range(times):
obs = env.reset()
episode_reward = 0
while True:
action = agent.predict(obs) # 预测动作,只选最优动作
obs, reward, done, _ = env.step(action)
episode_reward += reward
if render:
env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
def train(episodes, env, env_name, agent, save):
rpm = experience_replay.ReplayMemory(opt["MEMORY_SIZE"])
while len(rpm) < opt["MEMORY_WARMUP_SIZE"]:
run_episode(env, agent, rpm)
for episode in range(episodes):
reward, steps = run_episode(env, agent, rpm)
writer.add_scalar(env_name + "-reward", reward, episode)
# reward, steps = run_episode_with_sarsa(env, agent, False)
print("train episode {} : reward {}, steps {}".format(episode + 1, reward, steps))
logging.warning("train episode {} : reward {}, steps {}".format(episode + 1, reward, steps))
if episode % 50 == 0:
eval_reward = evaluate(5, env, agent, render = False)
print("evaluate {} episodes : e_greedy {}, reward {}".format(5, agent.e_greedy, eval_reward))
logging.warning("evaluate 5 episodes : e_greedy {}, reward {}".format(agent.e_greedy, eval_reward))
if save:
agent.save(env_name)
return agent
opt = {
"LEARN_FREQ" : 5, # 训练频率,不需要每一个step都learn,攒一些新增经验后再learn,提高效率
"MEMORY_SIZE" : 200000, # replay memory的大小,越大越占用内存
"MEMORY_WARMUP_SIZE" : 500, # replay_memory 里需要预存一些经验数据,再开启训练
"BATCH_SIZE" : 128, # 每次给agent learn的数据数量,从replay memory随机里sample一批数据出来
"LEARNING_RATE" : 0.001, # 学习率
"GAMMA" : 0.99, # reward 的衰减因子,一般取 0.9 到 0.999 不等
"E_GREEDY" : 0.1,
"E_GREEDY_DECREMENT" : 1e-6, # 1e-6
"max_episode" : 2000
}
if __name__ == "__main__":
writer = SummaryWriter()
env_name = "CartPole-v0"
# env_name = "MountainCar-v0"
logging.basicConfig(filename = "{}.log".format(env_name))
env = gym.make(env_name)
logging.warning("DQN trained on {}".format(env_name))
logging.warning(opt)
num_act = env.action_space.n
num_obs = env.observation_space.shape[0]
dqn_agent = agent.DQN_agent(num_act, num_obs, opt["GAMMA"], opt["LEARNING_RATE"], opt["E_GREEDY"], opt["E_GREEDY_DECREMENT"])
# dqn_agent.load("CartPole-v0.pth")
# print("evaluate on {} episode: reward {}".format(20, evaluate(20, env, dqn_agent, True)))
train(opt["max_episode"], env, env_name, dqn_agent, True) | {"/agent_pong.py": ["/Q_network_pong.py"], "/train.py": ["/Q_network.py"], "/eval.py": ["/Q_network.py"]} |
56,507 | williamium3000/pytorch-DQN | refs/heads/main | /Q_network_pong.py | import torch
from torch import nn
import random
import numpy as np
import torchvision
import torchvision.utils
import torch
import torch.nn as nn
from torchvision import models
class ravel(nn.Module):
def __init__(self):
super(ravel, self).__init__()
def forward(self, x):
return x.view(x.shape[0], -1)
class Q_network(nn.Module):
def __init__(self, num_obs, num_act):
super(Q_network, self).__init__()
features = (num_obs[0] // 8) * (num_obs[1] // 8) * 64
self.backbone = nn.Sequential(
nn.Conv2d(num_obs[2], 32, 7, stride=1, padding=3),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(32, 64, 5, stride=1, padding=2),
nn.ReLU(),
nn.MaxPool2d(2, 2),
nn.Conv2d(64, 64, 3, stride=1, padding=1),
nn.ReLU(),
nn.MaxPool2d(2, 2),
ravel(),
nn.Linear(features, 64, True),
nn.ReLU(),
nn.Linear(64, num_act, True),
)
# self.backbone = models.resnet18(pretrained=True)
# feature_extraction = False
# if feature_extraction:
# for param in self.backbone.parameters():
# param.requires_grad = False
# self.backbone.fc = nn.Linear(in_features = 512, out_features = 256)
# self.relu = nn.ReLU()
# self.fc = nn.Linear(in_features = 256, out_features = num_act)
def forward(self, x):
x = self.backbone(x)
# x = self.relu(x)
# x = self.fc(x)
return x
if __name__ == "__main__":
test = Q_network((80, 80, 1), 2)
t = torch.rand((10, 1, 80, 80))
print(test(t).size())
| {"/agent_pong.py": ["/Q_network_pong.py"], "/train.py": ["/Q_network.py"], "/eval.py": ["/Q_network.py"]} |
56,508 | williamium3000/pytorch-DQN | refs/heads/main | /eval.py | import sys
import agent
import Q_network
import experience_replay
import gym
import numpy as np
import torch
def evaluate(times, env, agent, render=False):
with torch.no_grad():
eval_reward = []
for i in range(times):
obs = env.reset()
episode_reward = 0
while True:
action = agent.predict(obs) # 预测动作,只选最优动作
obs, reward, done, _ = env.step(action)
episode_reward += reward
if render:
env.render()
if done:
break
eval_reward.append(episode_reward)
return np.mean(eval_reward)
opt = {
"LEARN_FREQ" : 3, # 训练频率,不需要每一个step都learn,攒一些新增经验后再learn,提高效率
"MEMORY_SIZE" : 20000, # replay memory的大小,越大越占用内存
"MEMORY_WARMUP_SIZE" : 300, # replay_memory 里需要预存一些经验数据,再开启训练
"BATCH_SIZE" : 64, # 每次给agent learn的数据数量,从replay memory随机里sample一批数据出来
"LEARNING_RATE" : 0.001, # 学习率
"GAMMA" : 0.99, # reward 的衰减因子,一般取 0.9 到 0.999 不等
"E_GREEDY" : 0.1,
"E_GREEDY_DECREMENT" : 1e-6,
"max_episode" : 1000
}
if __name__ == "__main__":
env_name = "CartPole-v0"
# env_name = "MountainCar-v0"
env = gym.make(env_name)
num_act = env.action_space.n
num_obs = env.observation_space.shape[0]
dqn_agent = agent.DQN_agent(num_act, num_obs, opt["GAMMA"], opt["LEARNING_RATE"], opt["E_GREEDY"], opt["E_GREEDY_DECREMENT"])
dqn_agent.load("{}.pth".format(env_name))
print("evaluate on {} episode: reward {}".format(30, evaluate(5, env, dqn_agent, True))) | {"/agent_pong.py": ["/Q_network_pong.py"], "/train.py": ["/Q_network.py"], "/eval.py": ["/Q_network.py"]} |
56,509 | harzo/reward-calculator | refs/heads/master | /common.py | from decimal import Decimal
import typing
AVERAGE_BLOCK_TIME_SECONDS = 600
AVERAGE_BLOCKS_COUNT_PER_DAY = 144
DAY_IN_SECONDS = 24 * 60 * 60
def average(_list: typing.Collection):
if len(_list) == 0:
return 0
return sum(_list) / len(_list)
def share_to_hashrate(share: int, _seconds: int) -> Decimal:
if not share or not _seconds:
return Decimal(0)
return (Decimal(share << 32) / Decimal(_seconds)).quantize(Decimal('1.000'))
def difficulty_to_hashrate(difficulty: Decimal, duration: int = AVERAGE_BLOCK_TIME_SECONDS):
return difficulty * pow(2, 32) / duration
def satoshi_to_btc(satoshi: Decimal) -> Decimal:
return round(satoshi / Decimal(1e8), 8)
| {"/btccom.py": ["/common.py"], "/__main__.py": ["/btccom.py", "/common.py", "/reward.py"], "/tests/test_reward.py": ["/common.py", "/reward.py"], "/tests/test_btccom.py": ["/btccom.py"]} |
56,510 | harzo/reward-calculator | refs/heads/master | /btccom.py | import logging
from datetime import datetime, date, timedelta
from decimal import Decimal
from typing import TypedDict, TypeVar, Generic, Optional, Tuple, List, NamedTuple, Dict
import pytz
from apiclient import APIClient, retry_request, endpoint, JsonResponseHandler, JsonRequestFormatter
from common import DAY_IN_SECONDS, average
BTCCOM_API_URL = 'https://chain.api.btc.com/v3'
T = TypeVar('T')
class BtcComBlock(TypedDict):
height: int
version: int
mrkl_root: str
curr_max_timestamp: int
timestamp: int
bits: int
nonce: int
hash: str
size: int
pool_difficulty: int
difficulty: int
tx_count: int
reward_block: int
reward_fees: int
created_at: int
confirmations: int
extras: dict
prev_block_hash: Optional[str]
next_block_hash: Optional[str]
BtcComBlockList = List[BtcComBlock]
class BtcComResponse(Dict, Generic[T]):
data: T
err_code: int
err_no: int
message: str
status: str
@endpoint(base_url=BTCCOM_API_URL)
class BtcComEndpoint:
block_list = "block/date/{date}"
class BtcComClient(APIClient):
def __init__(self, host=BtcComEndpoint):
super().__init__(response_handler=JsonResponseHandler, request_formatter=JsonRequestFormatter)
self.endpoint = host
@retry_request
def get_block_list(self, d: date) -> BtcComResponse[BtcComBlockList]:
"""
:param: d Blocks returned mining date
:type: date
:return: block_list List of blocks
:type: BtcComResponse[BtcComBlock]
"""
return self.get(BtcComEndpoint.block_list.format(date=d.strftime("%Y%m%d")))
class BlocksStats(NamedTuple):
average_reward: Decimal
average_difficulty: Decimal
total_fees: Decimal
total_blocks: int
class BtcComData:
def __init__(self):
self.client = BtcComClient()
def get_blocks_stats(self, dt: datetime, delta: int = DAY_IN_SECONDS) -> BlocksStats:
response = self.client.get_block_list(dt.date())
if response['status'] != 'success':
logging.error(f'btccom.get_block_list unsuccessful: {response["message"]}')
return BlocksStats(Decimal(0), Decimal(0), Decimal(0))
rewards, fees, difficulty = [], [], []
total_blocks = 0
for block in response['data']:
block_dt = datetime.utcfromtimestamp(block['timestamp'])
block_dt = block_dt.replace(tzinfo=pytz.UTC)
if dt <= block_dt <= dt + timedelta(seconds=delta):
rewards.append(block['reward_block'])
fees.append(block['reward_fees'])
difficulty.append(block['difficulty'])
total_blocks += 1
return BlocksStats(Decimal(average(rewards)),
Decimal(average(difficulty)),
Decimal(sum(fees)),
total_blocks)
| {"/btccom.py": ["/common.py"], "/__main__.py": ["/btccom.py", "/common.py", "/reward.py"], "/tests/test_reward.py": ["/common.py", "/reward.py"], "/tests/test_btccom.py": ["/btccom.py"]} |
56,511 | harzo/reward-calculator | refs/heads/master | /__main__.py | import argparse
import logging
import pytz as pytz
import sys
from btccom import BtcComData
from common import difficulty_to_hashrate, AVERAGE_BLOCKS_COUNT_PER_DAY, DAY_IN_SECONDS, satoshi_to_btc
from datetime import datetime
from reward import calculate_fpps
parser = argparse.ArgumentParser(prog='reward-calculator', description='Rewards Calculator')
parser.add_argument('hashrate', nargs='?', type=float, help='Mining hashrate (H/s) used to calculate reward')
parser.add_argument('datetime', nargs='?', type=datetime.fromisoformat, help='Date (and time) when mining has started')
parser.add_argument('-m', '--method', nargs='?', choices=['fpps', 'pps'], default='fpps',
help='Reward calculation method')
parser.add_argument('-p', '--period', nargs='?', choices=['day', 'hour'], default='day',
help='Reward calculation period (mining duration from the start)')
logging.basicConfig(format='reward-calculator: %(levelname)s: %(message)s', level=logging.DEBUG)
if __name__ == '__main__':
args = parser.parse_args()
if not args.hashrate:
logging.error(f'missing `hashrate` argument')
sys.exit()
elif not args.datetime:
logging.error(f'missing `datetime` argument')
sys.exit()
hashrate = args.hashrate
dt = args.datetime
dt = dt.replace(tzinfo=pytz.UTC, hour=0, minute=0, second=0, microsecond=0)
btccom_data = BtcComData()
blocks_stats = btccom_data.get_blocks_stats(dt, delta=DAY_IN_SECONDS)
difficulty = blocks_stats.average_difficulty
reward_per_block = blocks_stats.average_reward
total_fees = blocks_stats.total_fees
total_blocks = blocks_stats.total_blocks
network_hashrate = difficulty_to_hashrate(difficulty)
block_rewards = AVERAGE_BLOCKS_COUNT_PER_DAY * reward_per_block
print(f'Difficulty: {difficulty} ({difficulty / pow(10, 12)} x 10^12)')
print(f'Network hashrate: {network_hashrate} ({network_hashrate / pow(10, 18)} EH)')
print(f'Average coinbase rewards: {satoshi_to_btc(block_rewards)} from {AVERAGE_BLOCKS_COUNT_PER_DAY} blocks')
print(f'Total txs fees: {satoshi_to_btc(total_fees)} from {total_blocks} blocks')
print(f'\nMining stats:')
print(f'- Hashrate: {hashrate} ({hashrate / pow(10, 15)} PH)')
fpps_reward = calculate_fpps(hashrate, network_hashrate, block_rewards, total_fees, total_blocks,
AVERAGE_BLOCKS_COUNT_PER_DAY)
print(f'- FPPS reward: {satoshi_to_btc(fpps_reward)} BTC')
| {"/btccom.py": ["/common.py"], "/__main__.py": ["/btccom.py", "/common.py", "/reward.py"], "/tests/test_reward.py": ["/common.py", "/reward.py"], "/tests/test_btccom.py": ["/btccom.py"]} |
56,512 | harzo/reward-calculator | refs/heads/master | /reward.py | from decimal import Decimal
def calculate_pps(user_hashrate: int, network_hashrate: int, blocks_rewards: Decimal):
if not network_hashrate:
return Decimal(0)
return Decimal(user_hashrate)/Decimal(network_hashrate) * Decimal(blocks_rewards)
def calculate_fpps(user_hashrate: int, network_hashrate: int, blocks_rewards: Decimal, blocks_fees: Decimal,
actual_blocks_count: int, avg_blocks_count: int):
if not network_hashrate:
return Decimal(0)
pps_reward = calculate_pps(user_hashrate, network_hashrate, blocks_rewards)
fee_reward = Decimal(user_hashrate) / Decimal(network_hashrate) * Decimal(
blocks_fees/actual_blocks_count * avg_blocks_count)
return pps_reward + fee_reward
| {"/btccom.py": ["/common.py"], "/__main__.py": ["/btccom.py", "/common.py", "/reward.py"], "/tests/test_reward.py": ["/common.py", "/reward.py"], "/tests/test_btccom.py": ["/btccom.py"]} |
56,513 | harzo/reward-calculator | refs/heads/master | /tests/test_reward.py | from decimal import Decimal
import pytest
from common import AVERAGE_BLOCKS_COUNT_PER_DAY, share_to_hashrate, difficulty_to_hashrate
from reward import calculate_fpps
daily_stats = [
(20823531150111.52, AVERAGE_BLOCKS_COUNT_PER_DAY, 153, 6.25, 146.92, 54262120448, 0.01878856),
(20823531150111.52, AVERAGE_BLOCKS_COUNT_PER_DAY, 150, 6.25, 134.241, 131915972608, 0.04526282),
(20823531150111.52, AVERAGE_BLOCKS_COUNT_PER_DAY, 159, 6.25, 166.88621843, 131827384320, 0.04621152),
(20823531150111.52, AVERAGE_BLOCKS_COUNT_PER_DAY, 169, 6.25, 143.84104968, 146015387648 + 33910784, 0.04980496),
(21349873786209.2, AVERAGE_BLOCKS_COUNT_PER_DAY, 159, 6.25, 118.30299223, 157934997504, 0.05173821),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 156, 6.25, 84.69085027, 173906698240, 0.05511371),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 162, 6.25, 134.17229408, 170159624192, 0.05619136),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 143, 6.25, 196.39438398, 170671919104, 0.06070141),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 143, 6.25, 168.16327939, 168169627648 + 2544932061184, 0.93995668),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 129, 6.25, 185.34915975, 166377984000 + 6673153871872, 2.45279678),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 142, 6.25, 202.39953166, 164935897088 + 6684052074496, 2.45252406),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 157, 6.25, 144.37461607, 160511279104 + 6673859739648, 2.28602635),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 148, 6.25, 104.20186702, 128912842752 + 12720912191488, 4.16893203),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 121, 6.25, 120.02466217, 154536284160 + 19335180537856, 6.58489969),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 121, 6.25, 120.02466217, 154536284160 + 19335180537856, 6.58489969),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 138, 6.25, 141.62327207, 172597325824 + 19545865674752, 6.69375414),
(21434395961348.92, AVERAGE_BLOCKS_COUNT_PER_DAY, 137, 6.25, 147.75229392, 160268738560 + 20616356478976, 7.10359012),
(21455226865438.402, AVERAGE_BLOCKS_COUNT_PER_DAY, 153, 6.25, 160.80575280, 155456135168 + 19682727641088, 6.75075298),
(21724134900047.27, AVERAGE_BLOCKS_COUNT_PER_DAY, 146, 6.25, 147.82061349, 132320653312 + 24581014765568, 8.26177230),
(21724134900047.27, AVERAGE_BLOCKS_COUNT_PER_DAY, 141, 6.25, 115.10198626, 142570143744 + 24444839677952, 7.99767897),
(21724134900047.27, AVERAGE_BLOCKS_COUNT_PER_DAY, 133, 6.25, 131.78857835, 144470233088 + 24289250353152, 8.14402609),
(21724134900047.27, AVERAGE_BLOCKS_COUNT_PER_DAY, 159, 6.25, 189.53161494, 98967334912 + 21950965149696, 7.55363100)
]
@pytest.mark.parametrize("difficulty,avg_blocks,total_blocks,reward_per_block,total_fees,shares,reward", daily_stats)
def test_calculate_fpps_daily(difficulty, avg_blocks, total_blocks, reward_per_block, total_fees, shares, reward):
day_in_seconds = 24 * 60 * 60
hashrate = share_to_hashrate(shares, day_in_seconds)
network_hashrate = difficulty_to_hashrate(Decimal(difficulty))
block_rewards = AVERAGE_BLOCKS_COUNT_PER_DAY * reward_per_block
fpps_reward = calculate_fpps(Decimal(hashrate), network_hashrate, block_rewards, Decimal(total_fees), total_blocks,
avg_blocks)
assert round(fpps_reward, 8) == round(Decimal(reward), 8)
| {"/btccom.py": ["/common.py"], "/__main__.py": ["/btccom.py", "/common.py", "/reward.py"], "/tests/test_reward.py": ["/common.py", "/reward.py"], "/tests/test_btccom.py": ["/btccom.py"]} |
56,514 | harzo/reward-calculator | refs/heads/master | /tests/test_btccom.py | from datetime import datetime
from decimal import Decimal
from btccom import BtcComClient, BtcComData
client = BtcComClient()
data = BtcComData()
def test_get_blocks_list_on_01012021():
d = datetime.strptime('2021-01-01', "%Y-%m-%d").date()
block_list = client.get_block_list(d)
assert len(block_list['data']) == 149
assert block_list['data'][0]['hash'] == '0000000000000000000e5ac8accffaa7ba73e200354b799133a29464cac7b8a6'
assert block_list['data'][-1]['hash'] == '00000000000000000003f8a45967fe6a84a22a9deb86ddea0b2b78b0b859ea1d'
def test_get_blocks_stats_on_01012021():
d = datetime.strptime('2021-01-01', "%Y-%m-%d")
stats = data.get_blocks_stats(d)
assert stats.average_difficulty == Decimal(18599593048299)
assert stats.average_reward == Decimal(625000000)
assert stats.total_fees == Decimal(4894270140)
assert stats.total_blocks == 149
| {"/btccom.py": ["/common.py"], "/__main__.py": ["/btccom.py", "/common.py", "/reward.py"], "/tests/test_reward.py": ["/common.py", "/reward.py"], "/tests/test_btccom.py": ["/btccom.py"]} |
56,515 | ataddei/r22sdf | refs/heads/master | /r22sdf.py | from myhdl import *
from math import *
from numpy import *
def Butterfly (i_data_a,i_data_b,o_data_a,o_data_b):
@always_comb
def bf():
o_data_a.next=i_data_a+i_data_b
o_data_b.next=i_data_a-i_data_b
return bf
def Butterfly21 (i_control_s,i_data_aa,i_data_bb,o_data_aa,o_data_bb):
selfc=Signal(complex(0,0))
selfd=Signal(complex(0,0))
u_bf=Butterfly(i_data_aa,i_data_bb,selfc,selfd)
@always_comb
def bf21():
if i_control_s:
o_data_aa.next=selfd
o_data_bb.next=selfc
else:
o_data_aa.next=i_data_bb
o_data_bb.next=i_data_aa
return instances()
def Butterfly22 (i_control_t,i_control_s,i_data_aa,i_data_bb,o_data_aa,o_data_bb):
selfa=Signal(complex(0,0))
selfb=Signal(complex(0,0))
selfc=Signal(complex(0,0))
selfd=Signal(complex(0,0))
u_bf=Butterfly(selfa,selfb,selfc,selfd)
@always_comb
def bf22():
if i_control_s==False:
selfa.next=i_data_aa
selfb.next=i_data_bb
o_data_aa.next=i_data_bb
o_data_bb.next=i_data_aa
else:
selfa.next=i_data_aa
if i_control_t:
selfb.next=i_data_bb
else:
selfb.next=i_data_bb*complex(0,-1)
o_data_aa.next=selfd
o_data_bb.next=selfc
return instances()
def stage(i_data,reset,clock,o_data,counter_pin,index,N=1,FFT=16):
fifo1=[Signal(complex(0,0)) for ii in range(int(2**(2*N-1)))]
fifo2=[Signal(complex(0,0)) for ii in range(int(2**(2*N-2)))]
a=Signal(complex(0,0))
b=Signal(complex(0,0))
c=Signal(complex(0,0))
d=Signal(complex(0,0))
counter_s=Signal(0)
counter_t=Signal(0)
#counter_tw=Signal(modbv(0,0,FFT))
@always_comb
def control_muxes():
counter_s.next=counter_pin.next[2*(N-1)+1]
counter_t.next=counter_pin.next[2*(N-1)]
u_bf22=Butterfly22(counter_s,counter_t,fifo2[len(fifo2)-1],b,c,d)
u_bf21=Butterfly21(counter_s,fifo1[len(fifo1)-1],i_data,a,b)
@always (clock.posedge,reset)
def fifos():
if reset==False:
#print b,o_data,control_t,control_s
for i in range(len(fifo1)):
fifo1[i].next = a if i==0 else fifo1[i-1]
for i in range(len(fifo2)):
fifo2[i].next = c if i==0 else fifo2[i-1]
@always_comb
def out_twiddle():
counter_tw=mod(counter_pin+(2**(2*(N-1))),FFT)
if (N!=1):
o_data.next=d*(e**(complex(0,-2*pi*index[counter_tw]/(1.0*FFT))))
else:
o_data.next=d
@always (clock.negedge)
def print_values():
if (N==1):
counter_tw=mod(counter_pin+6,FFT)
#print N,counter_pin,counter_s,counter_t,i_data,b,d,index[counter_tw],'counter_tw: ',counter_tw
return instances()
def r22sdf_top(i_data,reset,clock,o_data,N=1):
FFT=2**(2*N)
counter=Signal(modbv(0,0,2**(N*2)))
stage_data_in_wire=[Signal(complex(0,0)) for ii in range(N)]
stage_data_out_wire=[Signal(complex(0,0)) for ii in range(N)]
butterflies=[None for i in range(N)]
index=twiddle_calc(2**(2*N))
@always(clock.posedge,reset)
def counter_seq():
if reset==True:
counter.next=0
else:
counter.next=counter+1
#print counter,stage_data_in_wire[0],stage_data_out_wire[0]
for i in range(N):
if i==0:
stage_data_in_wire[i]=i_data
else:
stage_data_in_wire[i]=stage_data_out_wire[i-1]
if i==(N-1):
stage_data_out_wire[i]=o_data
butterflies[i]=stage(stage_data_in_wire[i],reset,clock,stage_data_out_wire[i],counter,[0]*FFT,1,FFT)
else:butterflies[i]=stage(stage_data_in_wire[i],reset,clock,stage_data_out_wire[i],counter,index[i],N-i,FFT)
return instances()
def twiddle_calc(N=16):
k=range(int(ceil((log2(N)/log2(4)))-1))
a=[range(N/2**(2*i)) for i in k]
t=[]
for j in k:
m=(N/2**(2+2*j))
p=[]
p=p+([0 for i in range(m)])
p=p+([2*(2**(2*j))*(i-m) for i in range(m,2*m)])
p=p+([(2**(2*j))*(i-2*m) for i in range(2*m,3*m)])
p=p+([(3*2**(2*j))*(i-3*m) for i in range(3*m,4*m)])
if j>0:
t.append(p*(j*4))
else:
t.append(p)
return t if k else [0]*N
NB_TW=8
FFT=16
p=twiddle_calc(FFT)
tw_fid=open('twiddles_r22sdf_{}.v'.format(FFT),'w')
tw_fid.write('wire signed [NB_TW-1:0] twiddle_real [{}-1:0][{}-1:0];\n'.format(FFT,len(p)))
tw_fid.write('wire signed [NB_TW-1:0] twiddle_imag [{}-1:0][{}-1:0];\n'.format(FFT,len(p)))
for i in range(len(p)):
for j in range(len(p[i])):
var=(e**(complex(0,-2*pi*p[i][j]/(FFT))))
tw_fid.write( "assign twiddle_real[{}][{}]={};\n".format(j,i,int(real(var)*(2**NB_TW-1)/2)))
tw_fid.write( "assign twiddle_imag[{}][{}]={};\n".format(j,i,int(imag(var)*(2**NB_TW-1)/2)))
tw_fid.close()
p=twiddle_calc(64)
| {"/r22sdf_tb.py": ["/r22sdf.py", "/test_r22sdf.py"]} |
56,516 | ataddei/r22sdf | refs/heads/master | /r22sdf_tb.py | import myhdl
from myhdl import *
from math import *
from numpy import *
from r22sdf import *
import unittest
from random import *
from test_r22sdf import gen_bitreverse
N_LOG2 = 4
N=2**N_LOG2
TF_WDTH = 8
DIN_WDTH = 8
META_WDTH = 1
DOUT_WDTH = 11
def gen_check(stim):
fft_reference = (fft.fft( [ complex(i[0],i[1]) for i in stim] ) )
fft_reference_r = [ fft_reference[i] for i in gen_bitreverse(int(log(N)/log(2)))]
out_re=tuple([ int ( real(i).round())/N for i in fft_reference_r])
out_im=tuple([ int ( imag(i).round())/N for i in fft_reference_r])
return out_re,out_im
impulse = (2**DIN_WDTH-1)/2
zero = 0
stim = [[impulse]*2 if i in range(6) else [zero]*2 for i in range(2**N_LOG2) ]
stim_re = tuple([i[0] for i in stim])
stim_im = tuple([i[1] for i in stim])
t_check_re,t_check_im = gen_check(stim)
def tb():
clk = Signal(bool(0))
reset = Signal(bool(0))
din_meta = Signal(intbv(0)[META_WDTH:])
din_re = Signal(intbv(-1)[ DIN_WDTH:])
din_im = Signal(intbv(-1)[ DIN_WDTH:])
din_nd = Signal(bool(1))
dout_meta = Signal(intbv(0)[META_WDTH:])
dout_re = Signal(intbv(0)[DOUT_WDTH:])
dout_im = Signal(intbv(0)[DOUT_WDTH:])
check_re = Signal(intbv(-1)[DOUT_WDTH:])
check_im = Signal(intbv(-1)[DOUT_WDTH:])
dout_nd = Signal(bool(1))
stim_counter = Signal(modbv(0,0,N))
@instance
def tbclk():
clk.next = False
while True:
yield delay(3)
clk.next = not clk
@instance
def stimulus():
reset.next = True
for i in range(2**N_LOG2):
yield delay(1)
yield clk.posedge
reset.next=False
@always(clk.posedge)
def stim():
if (reset==False):
stim_counter.next = stim_counter+1
stim_counter_aux = stim_counter+N-1
din_re.next[:] = stim_re[stim_counter]
din_im.next[:] = stim_im[stim_counter]
check_re.next[:] = t_check_re[stim_counter]
check_im.next[:] = t_check_im[stim_counter]
else:
stim_counter.next=0
return instances()
toVerilog(tb)
| {"/r22sdf_tb.py": ["/r22sdf.py", "/test_r22sdf.py"]} |
56,517 | ataddei/r22sdf | refs/heads/master | /test_r22sdf.py | import myhdl
from myhdl import *
from math import *
from numpy import *
from r22sdf import *
import unittest
from random import *
import pdb
Ntest = 0
class TestDefault(unittest.TestCase):
def setUp(self):
self.N=4
self.n_bf=1
self.latency=self.N-1
self.collect=[]
self.a=Signal(complex(0,0))
self.d=Signal(complex(0,0))
self.reset=ResetSignal(1,1,False)
self.clock = Signal(bool(0))
self.uut = r22sdf_top(self.a,self.reset,self.clock,self.d,N=1)
self.gg=self.input_generator()
def tearDown(self):
self.N =[]
self.latency =[]
self.collect =[]
self.a =[]
self.d =[]
self.reset =[]
self.clock =[]
self.uut =[]
self.gg =[]
def input_generator(self):
self.inputs=[complex(1,0) for i in range(self.N)]*2
for i in self.inputs:
yield i
def runTest(self):
"""Verifies butterfly r2^2 functional behavior as a serial FFT N={}""".format(self.N)
global Ntest
def _test():
#self.uut = r22sdf_top(self.a,self.reset,self.clock,self.d,N=1)
uut = r22sdf_top(self.a,self.reset,self.clock,self.d,N=self.n_bf)
@always(delay(1))
def clkgen():
self.clock.next = not self.clock
@instance
def stimulus():
self.reset.next = True
self.a.next=next(self.gg)
for i in range(self.N):
yield delay(1)
yield delay(1)
self.reset.next=False
'''Driving stimulus in positive cycle and strobing in negative cycle to avoid race coinditions'''
while True:
yield delay(1)
@always(self.clock.posedge)
def stim():
if (self.reset==False):
self.a.next= next(self.gg)
@always(self.clock.negedge)
def strobe():
if (self.reset==False):
self.collect.append(self.d.val)
return uut, clkgen, stimulus, stim, strobe
traceSignals.name = "test_r22sdf_{}".format(Ntest)
trc = traceSignals(_test)
sim=Simulation(trc)
sim.run(self.N**2)
Ntest += 1
# hack?? not sure why this lock exists or if it should
myhdl._simulator._tracing = 0
self.check()
def check(self):
'''Checks sorted values'''
self.fft_reference = (fft.fft(self.inputs[0:self.N]))
self.fft_test = self.collect[self.latency-1:self.latency-1+self.N]
self.fft_test_o = [ self.fft_test[i] for i in gen_bitreverse(int(log(self.N)/log(2)))]
self.fft_reference_r = [complex128(i).round(8) for i in self.fft_reference]
self.fft_test_r = [complex128(i).round(8) for i in self.fft_test_o]
self.assertListEqual(self.fft_reference_r,self.fft_test_r)
class TestDefaultRandomReal(TestDefault):
def input_generator(self):
self.inputs=[complex128(complex(random(),0.0)).round(8) for i in range(self.N)]*2
for i in self.inputs:
yield i
class TestDefaultRandom(TestDefault):
def input_generator(self):
self.inputs=[complex128(complex(round(random()-0.5,5),round(random()-0.5,5))).round(8) for i in range(self.N)]*2
for i in self.inputs:
yield i
class TestDefaultZero(TestDefault):
def input_generator(self):
self.inputs=[complex(0,0) for i in range(self.N)]*2
for i in self.inputs:
yield i
class TestDefaultImpulse0(TestDefault):
def input_generator(self):
self.inputs=[(complex(random(),random())) if i==0 else complex(0,0) for i in range(self.N)]*2
for i in self.inputs:
yield i
class TestDefaultImpulse1(TestDefault):
def input_generator(self):
self.inputs=[complex(random(),random()) if i==1 else complex(0,0) for i in range(self.N)]*2
for i in self.inputs:
yield i
class TestDefaultImpulse2(TestDefault):
def input_generator(self):
self.inputs=[complex(1,0) if i==2 else complex(0,0) for i in range(self.N)]*2
for i in self.inputs:
yield i
class TestDefaultImpulse3(TestDefault):
def input_generator(self):
self.inputs=[complex(random(),0) if i==3 else complex(0,0) for i in range(self.N)]*2
for i in self.inputs:
yield i
class TestDefaultFFT (TestDefault):
def setUp(self):
self.N=16
self.n_bf=2
self.latency=self.N-1
self.collect=[]
self.a=Signal(complex(0,0))
self.d=Signal(complex(0,0))
self.reset=ResetSignal(1,1,False)
self.clock = Signal(bool(0))
self.uut = r22sdf_top(self.a,self.reset,self.clock,self.d,N=2)
self.gg=self.input_generator()
def tearDown(self):
self.N =[]
self.latency =[]
self.collect =[]
self.a =[]
self.d =[]
self.reset =[]
self.clock =[]
self.uut =[]
self.gg =[]
def input_generator(self):
self.inputs=[complex(1,0) for i in range(self.N)]*2
for i in self.inputs:
yield i
# 16-point FFT input-pattern variants; only input_generator() differs.
class TestDefaultFFTZero(TestDefaultFFT):
    # All-zero frames.
    def input_generator(self):
        self.inputs=[complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFTImpulse0(TestDefaultFFT):
    # Random complex impulse at index 0.
    def input_generator(self):
        self.inputs=[(complex(random(),random())) if i==0 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFTImpulse1(TestDefaultFFT):
    # Random complex impulse at index 1.
    def input_generator(self):
        self.inputs=[complex(random(),random()) if i==1 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFTImpulse2(TestDefaultFFT):
    # Unit real impulse at index 2.
    def input_generator(self):
        self.inputs=[complex(1,0) if i==2 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFTImpulse3(TestDefaultFFT):
    # Random real impulse at index 3.
    def input_generator(self):
        self.inputs=[complex(random(),0) if i==3 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFTImpulse3(TestDefaultFFT):
    # NOTE(review): duplicate class name -- this re-definition shadows the
    # identical class directly above; one of the two is dead code.
    def input_generator(self):
        self.inputs=[complex(random(),0) if i==3 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFTImpulseRandom(TestDefaultFFT):
    # Up to two unit impulses at random positions.
    def input_generator(self):
        imp=[randint(0,self.N-1), randint(0,self.N-1) ]
        self.inputs=[complex128(complex(1,0)) if i in imp else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFTRandom(TestDefaultFFT):
    # Random complex samples in [-0.5, 0.5), rounded to 8 dp.
    def input_generator(self):
        self.inputs=[complex128(complex(round(random()-0.5,5),round(random()-0.5,5))).round(8) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFTSaw(TestDefaultFFT):
    # Linearly increasing ("saw") ramp on both real and imaginary parts.
    def input_generator(self):
        self.inputs=[complex128(complex(i,i)).round(8) for i in range(self.N)]*2
        for i in self.inputs:
            yield i
class TestDefaultFFTRandomImpulseSweep(TestDefaultFFTImpulseRandom):
    # Sweeps a single unit impulse across all 16 input positions and
    # re-runs the inherited check once per position.
    def setUp(self,idx=0):
        self.N=16
        self.n_bf=2
        self.latency=self.N-1
        self.collect=[]
        self.a=Signal(complex(0,0))
        self.d=Signal(complex(0,0))
        self.reset=ResetSignal(1,1,False)
        self.clock = Signal(bool(0))
        self.uut = r22sdf_top(self.a,self.reset,self.clock,self.d,N=2)
        self.idx=idx                     # current impulse position
        self.inputs=[complex(1,0) if i == idx else complex(0,0) for i in range(self.N)]*2
        self.gg=self.input_generator()
    def input_generator(self):
        # Inputs are precomputed in setUp; just replay them.
        for i in self.inputs:
            yield i
    def runTest(self):
        self.setUp()
        self.count=[]                    # impulse positions that failed
        for i in range(16):
            try:
                self.setUp(i)
                super(TestDefaultFFTRandomImpulseSweep,self).runTest()
                self.tearDown()
            except Exception as e:
                #print e,"Exception Failed in: ",self.idx
                self.count.append(i)
        if len(self.count)>0:
            print "Failing impulses: ",self.count
            raise self.failureException
# Aggregate the 16-point FFT input-pattern cases into one suite.
FFT16Suite = unittest.TestSuite()
# add one TestSuite per case class
FFT16Suite.addTest(unittest.makeSuite(TestDefaultFFT))
FFT16Suite.addTest(unittest.makeSuite(TestDefaultFFTImpulse0))
FFT16Suite.addTest(unittest.makeSuite(TestDefaultFFTImpulse1))
FFT16Suite.addTest(unittest.makeSuite(TestDefaultFFTImpulse2))
FFT16Suite.addTest(unittest.makeSuite(TestDefaultFFTImpulse3))
FFT16Suite.addTest(unittest.makeSuite(TestDefaultFFTImpulseRandom))
FFT16Suite.addTest(unittest.makeSuite(TestDefaultFFTRandom))
FFT16Suite.addTest(unittest.makeSuite(TestDefaultFFTZero))
class TestDefaultFFT64 (TestDefaultFFT64Impulse0 if False else TestDefaultFFTImpulse0):
    pass
# 64-point FFT input-pattern variants; only input_generator() differs.
class TestDefaultFFT64Zero(TestDefaultFFT64):
    # All-zero frames.
    def input_generator(self):
        self.inputs=[complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFT64Impulse0(TestDefaultFFT64):
    # Random complex impulse at index 0.
    def input_generator(self):
        self.inputs=[(complex(random(),random())) if i==0 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFT64Impulse1(TestDefaultFFT64):
    # Random complex impulse at index 1.
    def input_generator(self):
        self.inputs=[complex(random(),random()) if i==1 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFT64Impulse2(TestDefaultFFT64):
    # Unit real impulse at index 2.
    def input_generator(self):
        self.inputs=[complex(1,0) if i==2 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFT64Impulse3(TestDefaultFFT64):
    # Random real impulse at index 3.
    def input_generator(self):
        self.inputs=[complex(random(),0) if i==3 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFT64Impulse3(TestDefaultFFT64):
    # NOTE(review): duplicate class name -- shadows the identical class
    # directly above; one of the two is dead code.
    def input_generator(self):
        self.inputs=[complex(random(),0) if i==3 else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFT64ImpulseRandom(TestDefaultFFT64):
    # Up to two unit impulses at random positions.
    def input_generator(self):
        imp=[randint(0,self.N-1), randint(0,self.N-1) ]
        self.inputs=[complex128(complex(1,0)) if i in imp else complex(0,0) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFT64Random(TestDefaultFFT64):
    # Random complex samples in [-0.5, 0.5), rounded to 8 dp.
    def input_generator(self):
        self.inputs=[complex128(complex(round(random()-0.5,5),round(random()-0.5,5))).round(8) for i in range(self.N)]*2
        for i in self.inputs:
            yield i

class TestDefaultFFT64Saw(TestDefaultFFT64):
    # Linearly increasing ("saw") ramp on both real and imaginary parts.
    def input_generator(self):
        self.inputs=[complex128(complex(i,i)).round(8) for i in range(self.N)]*2
        for i in self.inputs:
            yield i
class TestDefaultFFT64RandomImpulseSweep(TestDefaultFFT64ImpulseRandom):
    # Sweeps a single unit impulse across the input positions.
    # NOTE(review): this setUp configures a 16-point DUT (N=16, r22sdf_top N=2)
    # even though the class name says FFT64 -- looks like a copy-paste from the
    # 16-point sweep; confirm the intended transform size.
    def setUp(self,idx=0):
        self.N=16
        self.n_bf=2
        self.latency=self.N-1
        self.collect=[]
        self.a=Signal(complex(0,0))
        self.d=Signal(complex(0,0))
        self.reset=ResetSignal(1,1,False)
        self.clock = Signal(bool(0))
        self.uut = r22sdf_top(self.a,self.reset,self.clock,self.d,N=2)
        self.idx=idx                     # current impulse position
        self.inputs=[complex(1,0) if i == idx else complex(0,0) for i in range(self.N)]*2
        self.gg=self.input_generator()
    def input_generator(self):
        # Inputs are precomputed in setUp; just replay them.
        for i in self.inputs:
            yield i
    def runTest(self):
        self.setUp()
        self.count=[]                    # impulse positions that failed
        for i in range(16):
            try:
                self.setUp(i)
                super(TestDefaultFFT64RandomImpulseSweep,self).runTest()
                self.tearDown()
            except Exception as e:
                #print e,"Exception Failed in: ",self.idx
                self.count.append(i)
        if len(self.count)>0:
            print "Failing impulses: ",self.count
            raise self.failureException
def gen_bitreverse(N=4):
    """Return the bit-reversal permutation of the indices 0..2**N-1 as a list."""
    perm = array([0])
    for _ in range(N):
        # Double every entry (shift left), then append the same values with
        # the new least-significant bit set -- classic interleave construction.
        shifted = list(perm * 2)
        perm = array(shifted * 2) + array([0] * len(perm) + [1] * len(perm))
    return list(perm)
# Run every discovered test when this file is executed directly.
if __name__ == '__main__':
    unittest.main()
#comment for merging again cfelton-master
| {"/r22sdf_tb.py": ["/r22sdf.py", "/test_r22sdf.py"]} |
56,520 | edv862/djangonto | refs/heads/master | /apps/rdf_manager/migrations/0002_auto_20180531_1756.py | # Generated by Django 2.0.5 on 2018-05-31 17:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: allow Ontology.namespaces and
    Ontology.ontology_files to be left blank (blank=True)."""

    dependencies = [
        ('rdf_manager', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='ontology',
            name='namespaces',
            field=models.ManyToManyField(blank=True, related_name='ontology_namespaces', to='rdf_manager.NameSpace'),
        ),
        migrations.AlterField(
            model_name='ontology',
            name='ontology_files',
            field=models.ManyToManyField(blank=True, to='rdf_manager.OntoFile'),
        ),
    ]
| {"/apps/rdf_manager/admin.py": ["/apps/rdf_manager/models.py"]} |
56,521 | edv862/djangonto | refs/heads/master | /apps/rdf_manager/apps.py | from django.apps import AppConfig
class RdfManagerConfig(AppConfig):
    """Django application configuration for the rdf_manager app."""
    name = 'rdf_manager'
| {"/apps/rdf_manager/admin.py": ["/apps/rdf_manager/models.py"]} |
56,522 | edv862/djangonto | refs/heads/master | /apps/rdf_manager/admin.py | from django.contrib import admin
from .models import NameSpace, Ontology, OntoFile
# Expose the RDF models in the default Django admin site.
admin.site.register(NameSpace)
admin.site.register(Ontology)
admin.site.register(OntoFile)
| {"/apps/rdf_manager/admin.py": ["/apps/rdf_manager/models.py"]} |
56,523 | edv862/djangonto | refs/heads/master | /graph.py | import rdflib
from rdflib.namespace import Namespace, NamespaceManager
import pprint
# Exploratory script: load the 912-onto root ontology, bind the project
# namespace prefixes, and dump the graph as RDF/XML.
graph = rdflib.Graph()
graph.parse('912-onto-ontologies/912-onto/root-ontology.owl')
#graph.parse('n3-ontology.rdf', format='n3')

# aux = graph.subject_objects(
#     predicate=rdflib.term.URIRef('http://www.w3.org/2000/01/rdf-schema#label')
# )

#for s,p,o in graph:
#    print(s,p,o)

# Adding custom namespaces for imported ontologies.
# TODO: Generalize to allow registering them from a py file.
ssn_namespace = Namespace('http://www.w3.org/ns/ssn/')
sosa_namespace = Namespace('http://www.w3.org/ns/sosa/')
mssn_namespace = Namespace('http://mssn.sigappfr.org/mssn/')
emergency_namespace = Namespace('http://webprotege.stanford.edu/')

graph.bind('ssn', ssn_namespace)
graph.bind('sosa', sosa_namespace)
graph.bind('mssn', mssn_namespace)
graph.bind('911-onto', emergency_namespace)

# all_ns = [n for n in graph.namespace_manager.namespaces()]
# for x in all_ns:
#     print(x)

#aux = graph.value(
#    subject=rdflib.term.URIRef('http://webprotege.stanford.edu/RDWcD1WossrmXH0BeGQAE8d'),
#    predicate=rdflib.term.URIRef('http://mssn.sigappfr.org/mssn/above'),
#)

# print(graph.namespace_manager.normalizeUri(aux))

# Normalizing data.
# for x in graph.objects():
#     try:
#         print(graph.namespace_manager.normalizeUri(x))
#     except:
#         print("No se pudo normalizar.")

#aux = graph.value(
#    subject=rdflib.term.URIRef('http://webprotege.stanford.edu/RDWcD1WossrmXH0BeGQAE8d'),
#    predicate=rdflib.term.URIRef('http://mssn.sigappfr.org/mssn/above'),
#)

print(graph.serialize(format='xml').decode('utf8'))
56,524 | edv862/djangonto | refs/heads/master | /apps/rdf_manager/models.py | from django.db import models
class NameSpace(models.Model):
    # A named RDF namespace: display name plus its URI.
    name = models.CharField(max_length=30)
    uri = models.CharField(max_length=50)

    def __str__(self):
        return self.name


class Ontology(NameSpace):
    # Multi-table inheritance: an Ontology is a NameSpace plus its own
    # imported namespaces and uploaded source files.
    # NOTE(review): lazy reference spelled 'Namespace' while the model is
    # NameSpace -- Django resolves model names case-insensitively, but confirm.
    namespaces = models.ManyToManyField(
        'Namespace',
        related_name='ontology_namespaces',
        blank=True,
    )
    ontology_files = models.ManyToManyField('OntoFile', blank=True)

    class Meta:
        verbose_name_plural = "Ontologies"


class OntoFile(models.Model):
    # An uploaded ontology source file.
    file = models.FileField()
| {"/apps/rdf_manager/admin.py": ["/apps/rdf_manager/models.py"]} |
56,525 | edv862/djangonto | refs/heads/master | /apps/rdf_manager/rdf_functions.py | import rdflib
from rdflib.namespace import Namespace, NamespaceManager
def load_graph(fname):
    """Create an rdflib Graph from the ontology file *fname* and bind the
    project's namespace prefixes.

    Bug fix: the original ignored *fname* and always parsed the hard-coded
    912-onto root ontology; the parameter is now actually used.
    """
    # Create and load Graph from file
    graph = rdflib.Graph()
    graph.parse(fname)

    # Define & Bind custom ontologies namespaces to the graph
    ssn_namespace = Namespace('http://www.w3.org/ns/ssn/')
    sosa_namespace = Namespace('http://www.w3.org/ns/sosa/')
    mssn_namespace = Namespace('http://mssn.sigappfr.org/mssn/')
    emergency_namespace = Namespace('http://webprotege.stanford.edu/')

    graph.bind('ssn', ssn_namespace)
    graph.bind('sosa', sosa_namespace)
    graph.bind('mssn', mssn_namespace)
    graph.bind('911-onto', emergency_namespace)

    return graph
| {"/apps/rdf_manager/admin.py": ["/apps/rdf_manager/models.py"]} |
56,528 | chawallid/UCF101FewShot | refs/heads/master | /test.py | import torch
import os
import sys
import argparse
import torch.nn.functional as F
from models import R2Plus1D, Resnet
from torch.utils.data import DataLoader
from torch.autograd import Variable
from UCF101 import UCF101, CategoriesSampler
from utils import printer, mean_confidence_interval
if __name__ == "__main__":
    # Few-shot evaluation entry point: parse CLI options and resolve the
    # checkpoint to evaluate.
    parser = argparse.ArgumentParser()
    parser.add_argument("--frames-path", type=str, default="../Data/UCF101_frames/")
    parser.add_argument("--labels-path", type=str, default="./UCF101_few_shot_labels/")
    parser.add_argument("--load-path", type=str, default="./save/train1")
    parser.add_argument("--use-best", action="store_true")
    parser.add_argument("--frame-size", type=int, default=112)
    parser.add_argument("--num-epochs", type=int, default=10)
    parser.add_argument("--test-iter-size", type=int, default=600)
    parser.add_argument("--metric", type=str, default="cosine")
    parser.add_argument("--sequence-length", type=int, default=35)
    parser.add_argument("--num-layers", type=int, default=1)
    parser.add_argument("--hidden-size", type=int, default=512)
    parser.add_argument("--bidirectional", action="store_true")
    parser.add_argument("--model", type=str, default='resnet')
    parser.add_argument("--way", type=int, default=5)
    parser.add_argument("--shot", type=int, default=1)
    parser.add_argument("--query", type=int, default=5)
    args = parser.parse_args()

    # check options
    # Bug fix: the original message interpolated an undefined name `setname`,
    # so a failing assert raised NameError instead of the intended message.
    assert args.model in ["resnet", "r2plus1d"], "'{}' model is invalid".format(args.model)
    assert args.metric in ["cosine", "euclidean", "relation"], "'{}' metric is invalid.".format(args.metric)

    # Resolve which checkpoint file to load.
    if args.use_best:
        load_path = os.path.join(args.load_path, "best.pth")
    else:
        load_path = os.path.join(args.load_path, "last.pth")

    # load_path check
    assert os.path.exists(load_path), "'{}' file is not exists !!".format(load_path)
    # Episodic test split; augmentation/randomness disabled for evaluation.
    test_dataset = UCF101(
        model=args.model,
        frames_path=args.frames_path,
        labels_path=args.labels_path,
        frame_size=args.frame_size,
        sequence_length=args.sequence_length,
        setname='test',
        # pad option
        random_pad_sample=False,
        pad_option='default',
        # frame sampler option
        uniform_frame_sample=True,
        random_start_position=False,
        max_interval=7,
        random_interval=False,
    )
    print("[test] number of videos / classes: {} / {}".format(len(test_dataset), test_dataset.num_classes))
    print("total testing episodes: {}".format(args.num_epochs * args.test_iter_size))

    test_sampler = CategoriesSampler(test_dataset.classes, args.test_iter_size, args.way, args.shot, args.query)
    # in windows has some issue when try to use DataLoader in pytorch, i don't know why..
    test_loader = DataLoader(dataset=test_dataset, batch_sampler=test_sampler, num_workers=0 if os.name == 'nt' else 4, pin_memory=True)

    # Build the requested model variant.
    if args.model == 'resnet':
        model = Resnet(
            way=args.way,
            shot=args.shot,
            query=args.query,
            num_layers=args.num_layers,
            hidden_size=args.hidden_size,
            bidirectional=args.bidirectional,
        )
    if args.model == 'r2plus1d':
        model = R2Plus1D(
            way=args.way,
            shot=args.shot,
            query=args.query,
            metric=args.metric,
        )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)
    model.load_state_dict(torch.load(load_path))
    model.eval()

    total_loss = 0
    epoch_acc = 0
    total_acc = []
    print("test... {}-way {}-shot {}-query".format(args.way, args.shot, args.query))
    with torch.no_grad():
        for e in range(1, args.num_epochs+1):
            test_acc = []
            test_loss = []
            for i, (datas, _) in enumerate(test_loader):
                datas = datas.to(device)
                # Episode layout: the first way*shot clips are the support set.
                pivot = args.way * args.shot
                shot, query = datas[:pivot], datas[pivot:]
                labels = torch.arange(args.way).repeat(args.query).to(device)
                # one_hot_labels = Variable(torch.zeros(args.way*args.query, args.way).scatter_(1, labels.view(-1, 1), 1)).to(device)
                pred = model(shot, query)

                # calculate loss
                loss = F.cross_entropy(pred, labels).item()
                test_loss.append(loss)
                total_loss = sum(test_loss)/len(test_loss)

                # calculate accuracy (percentage)
                acc = 100 * (pred.argmax(1) == labels).type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor).mean().item()
                test_acc.append(acc)
                total_acc.append(acc)
                epoch_acc = sum(test_acc)/len(test_acc)

                printer("test", e, args.num_epochs, i+1, len(test_loader), loss, total_loss, acc, epoch_acc)

            # get mean confidence interval per epochs
            m, h = mean_confidence_interval(test_acc, confidence=0.95)
            print(" => {} episodes [{:.2f} +-{:.2f}]".format(args.test_iter_size, m, h))

    # get total mean confidence interval
    m, h = mean_confidence_interval(total_acc, confidence=0.95)
    print("total {} episodes Result: {:.2f}+-{:.2f}".format(args.num_epochs * args.test_iter_size, m, h))
56,529 | chawallid/UCF101FewShot | refs/heads/master | /models.py | import torch
import torch.nn as nn
import torch.nn.functional as F
from torchvision.models import resnet18, resnet50
from torchvision.models.video import r2plus1d_18
from torchvision.models.video.resnet import BasicBlock, Conv2Plus1D
from utils import freeze_all, freeze_layer, freeze_bn, initialize_linear, initialize_3d
# torch.backends.cudnn.enabled = False
class R2Plus1D(nn.Module):
    """Few-shot video classifier on a pretrained R(2+1)D-18 backbone.

    Support ("shot") and query clips are embedded by a frozen trunk
    (stem + layers 1-3); similarity between query embeddings and class
    prototypes uses the configured metric: 'cosine', 'euclidean', or a
    learned 'relation' module.
    """
    def __init__(self, way=5, shot=1, query=5, metric="cosine"):
        super(R2Plus1D, self).__init__()
        self.way = way
        self.shot = shot
        self.query = query
        self.metric = metric

        # r2plus1d_18 (pretrained backbone)
        model = r2plus1d_18(pretrained=True)

        # encoder(freezing): stem + layers 1-3 stay fixed
        self.encoder_freeze = nn.Sequential(
            model.stem,
            model.layer1,
            model.layer2,
            model.layer3,
        )
        self.encoder_freeze.apply(freeze_all)

        # encoder(for cosine similarity): layer4 remains trainable
        if self.metric == "cosine" or self.metric == "euclidean":
            self.encoder_tune = nn.Sequential(
                model.layer4,
                nn.AdaptiveAvgPool3d(output_size=(1, 1, 1)),
            )

        # relation module; note the final sigmoid is commented out, so the
        # relation scores are unbounded.
        if self.metric == "relation":
            self.relation1 = nn.Sequential(
                BasicBlock(512, 256, Conv2Plus1D, stride=2, downsample=self._downsample(512, 256)),
                nn.AdaptiveAvgPool3d(output_size=(1, 1, 1)),
            )
            self.relation2 = nn.Sequential(
                nn.Linear(256, 128),
                nn.Softplus(),
                nn.Linear(128, 1),
                # torch.sigmoid(),
            )
            self.relation2.apply(initialize_linear)

        # scaler: learnable temperature applied to the logits
        self.scaler = nn.Parameter(torch.tensor(5.0))

    def _downsample(self, inplanes, outplanes):
        # 1x1x1 strided conv to match channels/stride for the residual branch.
        return nn.Sequential(
            nn.Conv3d(inplanes, outplanes, kernel_size=1, stride=2, bias=False),
            nn.BatchNorm3d(outplanes),
        )

    def forward(self, shot, query):
        # Concatenate support and query clips into one batch.
        x = torch.cat((shot, query), dim=0)

        # encoder
        x = x.transpose(1, 2).contiguous() # b, c, d, h, w
        x = self.encoder_freeze(x)

        if self.metric == "cosine" or self.metric == "euclidean":
            x = self.encoder_tune(x).squeeze()
            shot, query = x[:shot.size(0)], x[shot.size(0):]

            # make prototype: average the shot embeddings per class
            shot = shot.reshape(self.shot, self.way, -1).mean(dim=0)

            # euclidean distance (negated so larger is more similar)
            if self.metric == "euclidean":
                shot = shot.unsqueeze(0).repeat(self.way*self.query, 1, 1)
                query = query.unsqueeze(1).repeat(1, self.way, 1)
                logits = -((shot - query)**2).sum(dim=-1)

            # cosine similarity
            if self.metric == "cosine":
                shot = F.normalize(shot, dim=-1)
                query = F.normalize(query, dim=-1)
                logits = torch.mm(query, shot.t())

        if self.metric == "relation":
            # b, c, d, h, w feature maps (layer3 output, no pooling)
            shot, query = x[:shot.size(0)], x[shot.size(0):]
            shot = shot.reshape([self.shot, self.way] + list(shot.size()[1:])).sum(dim=0)
            # Pair every query with every class prototype:
            # q, s(way), c, d, h, w
            #        shot  shot  shot
            # query ---o-----x-----x--
            # query ---x-----o-----x--
            # query ---x-----x-----o--

            # change shot shape
            shot = shot.unsqueeze(0).repeat(self.way*self.query, 1, 1, 1, 1, 1)
            shot = shot.reshape([-1] + list(shot.size()[2:]))

            # change query shape
            query = query.unsqueeze(1).repeat(1, self.way, 1, 1, 1, 1)
            query = query.reshape([-1] + list(query.size()[2:]))

            relation_pair = torch.cat((shot, query), dim=1) # relation pair (cat by channels)
            logits = self.relation1(relation_pair).squeeze()
            logits = self.relation2(logits)
            logits = logits.reshape(self.way*self.query, self.way)

        return logits * self.scaler
class Resnet(nn.Module):
    """Few-shot video classifier: frozen ResNet-18 frame encoder + GRU.

    Frames are embedded independently by the frozen CNN, aggregated over
    time by a GRU (mean over timesteps), projected by a linear layer, and
    compared to class prototypes with cosine similarity.
    """
    def __init__(self, way=5, shot=1, query=5, hidden_size=512, num_layers=1, bidirectional=True):
        super(Resnet, self).__init__()
        self.way = way
        self.shot = shot
        self.query = query

        # resnet18(freezing): entire conv trunk through avgpool stays fixed
        model = resnet18(pretrained=True)
        self.encoder_freeze = nn.Sequential(
            model.conv1,
            model.bn1,
            model.relu,
            model.maxpool,
            model.layer1,
            model.layer2,
            model.layer3,
            model.layer4,
            model.avgpool,
        )
        self.encoder_freeze.apply(freeze_all)
        self.last_dim = model.fc.in_features   # per-frame embedding size

        # gru: temporal aggregation over the frame embeddings
        self.gru = nn.GRU(input_size=self.last_dim, hidden_size=hidden_size, batch_first=True, num_layers=num_layers, dropout=0.5 if num_layers > 1 else 0, bidirectional=bidirectional)

        # linear projection (doubled input width when bidirectional)
        self.linear = nn.Linear(int(hidden_size*2) if bidirectional else hidden_size, hidden_size)
        self.linear.apply(initialize_linear)

        # scaler: learnable temperature for the cosine logits
        self.scaler = nn.Parameter(torch.tensor(5.0))

    def forward(self, shot, query):
        x = torch.cat((shot, query), dim=0)
        b, d, c, h, w = x.shape

        # encoder: fold the time dimension into the batch for the 2D CNN
        x = x.view(b * d, c, h, w)
        x = self.encoder_freeze(x)

        # gru
        x = x.view(b, d, self.last_dim)
        x = (self.gru(x)[0]).mean(1) # this may be helful for generalization

        # linear
        x = self.linear(x)
        shot, query = x[:shot.size(0)], x[shot.size(0):]

        # make prototype: average the shot embeddings per class
        shot = shot.reshape(self.shot, self.way, -1).mean(dim=0)

        # cosine similarity
        shot = F.normalize(shot, dim=-1)
        query = F.normalize(query, dim=-1)
        logits = torch.mm(query, shot.t())
        return logits * self.scaler
56,530 | chawallid/UCF101FewShot | refs/heads/master | /few_sequence_detector.py | import os
from glob import glob
# Report every extracted-video directory with fewer frames than required.
frames_path = "../Data/UCF101/UCF101_frames/"
hit_sequence_length = 35        # minimum frame count a video must have

frames_path_list = glob(os.path.join(frames_path, "*"))
# check total videos(from frames path)
print("total videos: {}".format(len(frames_path_list)))

counter = 0
for frame_path in frames_path_list:
    # Number of frame files inside this video's directory.
    sequence_length = len(glob(os.path.join(frame_path, "*")))
    if hit_sequence_length > sequence_length:
        print("hit ! {}, sequence length: {}".format(frame_path, sequence_length))
        counter += 1
print("total {} videos has short sequence rather than {}".format(counter, hit_sequence_length))
56,531 | chawallid/UCF101FewShot | refs/heads/master | /splitter.py | import os
import glob
import numpy as np
import pandas as pd
import argparse
# Randomly split UCF101 categories into train/test label CSVs.
parser = argparse.ArgumentParser()
parser.add_argument("--frames-path", type=str, default="../UCF101FrameExtractor/UCF101_frames/")
parser.add_argument("--save-path", type=str, default="./UCF101_few_shot_labels/")
parser.add_argument("--categories", type=str, default="./categories.txt")
parser.add_argument("--number-of-train", type=int, default=71)
parser.add_argument("--number-of-test", type=int, default=30)
args = parser.parse_args()

# check frames path
assert os.path.exists(args.frames_path), "'{}' path does not exist.".format(args.frames_path)

# check save directory
# Bug fix: the original message interpolated args.frames_path (and had a typo),
# reporting the wrong path when the save directory already existed.
assert not os.path.exists(args.save_path), "'{}' directory already exists !!".format(args.save_path)
os.makedirs(args.save_path)

# Shuffle the category names so the train/test split is random per run.
with open(args.categories) as f:
    categories = f.read().splitlines()
categories = np.random.permutation(categories)

# Collect every extracted-video directory name.
video_names = []
for d in glob.glob(args.frames_path + "*"):
    if os.path.isdir(d):
        video_names.append(d.split("\\" if os.name == 'nt' else "/")[-1])
video_names = pd.DataFrame(video_names)

# save train labels ("<class-index>,<video-name>" rows)
with open(os.path.join(args.save_path, "train.csv"), 'w') as f:
    first = True
    for i, c in enumerate(categories[:args.number_of_train]):
        print("writing... {} ".format(c))
        lines = np.concatenate(video_names[video_names[0].str.contains("_" + c + "_")].values.tolist(), axis=0)
        for line in lines:
            f.write(str(i+1) + ',' + line) if first else f.write('\n' + str(i+1) + ',' + line)
            first = False

# save test labels
with open(os.path.join(args.save_path, "test.csv"), 'w') as f:
    first = True
    for i, c in enumerate(categories[args.number_of_train:]):
        print("writing... {} ".format(c))
        lines = np.concatenate(video_names[video_names[0].str.contains("_" + c + "_")].values.tolist(), axis=0)
        for line in lines:
            f.write(str(i+1) + ',' + line) if first else f.write('\n' + str(i+1) + ',' + line)
            first = False
56,532 | chawallid/UCF101FewShot | refs/heads/master | /train.py | import torch
import os
import sys
import argparse
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter
from torch.autograd import Variable
from utils import path_check, args_print_save, printer
from models import R2Plus1D, Resnet
from UCF101 import UCF101, CategoriesSampler
if __name__ == "__main__":
    # Episodic few-shot training entry point: build train/val episode
    # loaders over UCF101 frame folders.
    parser = argparse.ArgumentParser()
    parser.add_argument("--frames-path", type=str, default="../Data/UCF101/UCF101_frames/")
    parser.add_argument("--labels-path", type=str, default="./UCF101_few_shot_labels/")
    parser.add_argument("--save-path", type=str, default="./save/train1/")
    parser.add_argument("--tensorboard-path", type=str, default="./tensorboard/train1")
    parser.add_argument("--frame-size", type=int, default=112)
    parser.add_argument("--num-epochs", type=int, default=30)
    parser.add_argument("--train-iter-size", type=int, default=100)
    parser.add_argument("--val-iter-size", type=int, default=200)
    parser.add_argument("--metric", type=str, default="cosine")
    # ===========================UCF101.py options==================================
    # pad options
    parser.add_argument("--random-pad-sample", action="store_true")
    parser.add_argument("--pad-option", type=str, default="default")
    # frame options
    parser.add_argument("--uniform-frame-sample", action="store_true")
    parser.add_argument("--random-start-position", action="store_true")
    parser.add_argument("--max-interval", type=int, default=7)
    parser.add_argument("--random-interval", action="store_true")
    # ===============================================================================
    parser.add_argument("--sequence-length", type=int, default=35)
    parser.add_argument("--model", type=str, default="resnet")
    parser.add_argument("--num-layers", type=int, default=1)
    parser.add_argument("--hidden-size", type=int, default=512)
    parser.add_argument("--bidirectional", action="store_true")
    parser.add_argument("--learning-rate", type=float, default=1e-4)
    parser.add_argument("--scheduler-step-size", type=int, default=10)
    parser.add_argument("--scheduler-gamma", type=float, default=0.9)
    parser.add_argument("--way", type=int, default=5)
    parser.add_argument("--shot", type=int, default=1)
    parser.add_argument("--query", type=int, default=5)
    args = parser.parse_args()

    # check options
    assert args.model in ["resnet", "r2plus1d"], "'{}' model is invalid.".format(args.model)
    assert args.metric in ["cosine", "euclidean", "relation"], "'{}' metric is invalid.".format(args.metric)

    # path to save (interactive confirmation if it already exists)
    path_check(args.save_path)

    # path to tensorboard
    writer = SummaryWriter(args.tensorboard_path)

    # print args and save it in the save_path
    args_print_save(args)

    train_dataset = UCF101(
        model=args.model,
        frames_path=args.frames_path,
        labels_path=args.labels_path,
        frame_size=args.frame_size,
        sequence_length=args.sequence_length,
        setname='train',
        # pad options
        random_pad_sample=args.random_pad_sample,
        pad_option=args.pad_option,
        # frame sample options
        uniform_frame_sample=args.uniform_frame_sample,
        random_start_position=args.random_start_position,
        max_interval=args.max_interval,
        random_interval=args.random_interval,
    )

    # do not use the autoaugment on the validation or test dataset
    val_dataset = UCF101(
        model=args.model,
        frames_path=args.frames_path,
        labels_path=args.labels_path,
        frame_size=args.frame_size,
        sequence_length=args.sequence_length,
        setname='test',
        # pad options
        random_pad_sample=False,
        pad_option='default',
        # frame sample options
        uniform_frame_sample=True,
        random_start_position=False,
        max_interval=7,
        random_interval=False,
    )
    print("[train] number of videos / classes: {} / {}, [val] number of videos / classes: {} / {}".format(len(train_dataset), train_dataset.num_classes, len(val_dataset), val_dataset.num_classes))
    print("total training episodes: {}".format(args.num_epochs * args.train_iter_size))

    train_sampler = CategoriesSampler(train_dataset.classes, args.train_iter_size, args.way, args.shot, args.query)
    val_sampler = CategoriesSampler(val_dataset.classes, args.val_iter_size, args.way, args.shot, args.query)

    # in windows has some issue when try to use DataLoader in pytorch, i don't know why...
    train_loader = DataLoader(dataset=train_dataset, batch_sampler=train_sampler, num_workers=0 if os.name == 'nt' else 4, pin_memory=True)
    val_loader = DataLoader(dataset=val_dataset, batch_sampler=val_sampler, num_workers=0 if os.name == 'nt' else 4, pin_memory=True)
    # select a model, i prepaired two models for simple testing
    if args.model == "resnet":
        model = Resnet(
            way=args.way,
            shot=args.shot,
            query=args.query,
            num_layers=args.num_layers,
            hidden_size=args.hidden_size,
            bidirectional=args.bidirectional,
        )
    if args.model == "r2plus1d":
        model = R2Plus1D(
            way=args.way,
            shot=args.shot,
            query=args.query,
            metric=args.metric,
        )
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = model.to(device)

    optimizer = torch.optim.SGD(model.parameters(), lr=args.learning_rate, momentum=0.9, weight_decay=5e-4)
    # optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
    lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=args.scheduler_step_size, gamma=args.scheduler_gamma)

    best = 0 # top1 best accuracy
    total_loss = 0
    total_acc = 0
    n_iter_train = 0
    n_iter_val = 0
    print("train... {}-way {}-shot {}-query".format(args.way, args.shot, args.query))
    for e in range(1, args.num_epochs+1):
        train_acc = []
        train_loss = []
        model.train()
        for i, (datas, _) in enumerate(train_loader):
            datas = datas.to(device)
            # Episode layout: the first way*shot clips are the support set.
            pivot = args.way * args.shot
            shot, query = datas[:pivot], datas[pivot:]
            labels = torch.arange(args.way).repeat(args.query).to(device)
            pred = model(shot, query)

            # calculate loss
            # onehot_labels = Variable(torch.zeros(args.way*args.query, args.way).scatter_(1, torch.arange(args.way).repeat(args.query).view(-1, 1), 1)).to(device)
            loss = F.cross_entropy(pred, labels)
            # loss = F.mse_loss(pred, onehot_labels)
            train_loss.append(loss.item())
            total_loss = sum(train_loss)/len(train_loss)

            # update weight
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

            # calculate accuracy
            acc = (pred.argmax(1) == labels).type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor).mean().item()
            train_acc.append(acc)
            total_acc = sum(train_acc) / len(train_acc)

            # print result
            printer("train", e, args.num_epochs, i+1, len(train_loader), loss.item(), total_loss, acc * 100, total_acc * 100)

            # tensorboard
            writer.add_scalar("Loss/train", loss.item(), n_iter_train)
            writer.add_scalar("Accuracy/train", acc, n_iter_train)
            n_iter_train += 1
        print("")

        # Validation pass (no gradients).
        val_acc = []
        val_loss = []
        model.eval()
        with torch.no_grad():
            for i, (datas, _) in enumerate(val_loader):
                datas = datas.to(device)
                pivot = args.way * args.shot
                shot, query = datas[:pivot], datas[pivot:]
                labels = torch.arange(args.way).repeat(args.query).to(device)
                pred = model(shot, query)

                # calculate loss
                # onehot_labels = Variable(torch.zeros(args.way*args.query, args.way).scatter_(1, torch.arange(args.way).repeat(args.query).view(-1, 1), 1)).to(device)
                loss = F.cross_entropy(pred, labels).item()
                # loss = F.mse_loss(pred, onehot_labels).item()
                val_loss.append(loss)
                total_loss = sum(val_loss)/len(val_loss)

                # calculate accuracy
                acc = (pred.argmax(1) == labels).type(torch.cuda.FloatTensor if torch.cuda.is_available() else torch.FloatTensor).mean().item()
                val_acc.append(acc)
                total_acc = sum(val_acc)/len(val_acc)

                # print result
                printer("val", e, args.num_epochs, i+1, len(val_loader), loss, total_loss, acc * 100, total_acc * 100)

                # tensorboard
                writer.add_scalar("Loss/val", loss, n_iter_val)
                writer.add_scalar("Accuracy/val", acc, n_iter_val)
                n_iter_val += 1

        # Checkpoint: keep both the best-so-far and the latest weights.
        if total_acc > best:
            best = total_acc
            torch.save(model.state_dict(), os.path.join(args.save_path, "best.pth"))
        torch.save(model.state_dict(), os.path.join(args.save_path, "last.pth"))
        print("Best: {:.2f}%".format(best * 100))
        lr_scheduler.step()
56,533 | chawallid/UCF101FewShot | refs/heads/master | /utils.py | import os
import sys
import shutil
import scipy.stats as stats
import numpy as np
import torch.nn as nn
def printer(status, epoch, num_epochs, batch, num_batchs, loss, loss_mean, acc, acc_mean):
    """Render a one-line, carriage-return-refreshed progress report on stdout."""
    template = ("\r[{}]-[Epoch {}/{}] [Batch {}/{}] "
                "[Loss: {:.4f} (mean: {:.4f}), Acc: {:.2f}% (mean: {:.2f}%)] ")
    message = template.format(status, epoch, num_epochs, batch, num_batchs,
                              loss, loss_mean, acc, acc_mean)
    sys.stdout.write(message)
def path_check(path):
    """Create *path*; if it already exists, interactively ask whether to wipe it.

    Blocks on input() until 'y' or 'n' is entered; exits the process on 'n'.
    """
    if os.path.exists(path):
        while True:
            print("'{}' path is already exist, do you want continue after remove ? [y/n]".format(path))
            response = input()
            if response == 'y' or response == 'n':
                break
        if response == 'y':
            # Wipe and recreate the directory.
            shutil.rmtree(path)
            os.makedirs(path)
        if response == 'n':
            print("this script was terminated by user")
            sys.exit()
    else:
        os.makedirs(path)
def args_print_save(args):
    """Echo every parsed argument to stdout and persist the same listing
    to ``<save_path>/args.txt``."""
    bar = "================================================="
    # print
    print(bar)
    for arg in vars(args):
        print("{}:{}".format(arg, getattr(args, arg)))
    print(bar)
    # save
    with open(os.path.join(args.save_path, "args.txt"), "w") as f:
        f.write(bar + "\n")
        for arg in vars(args):
            f.write("{}:{}\n".format(arg, getattr(args, arg)))
        f.write(bar + "\n")
def mean_confidence_interval(data, confidence=0.95):
    """Return (mean, half-width) of the Student-t confidence interval.

    The interval for the true mean is [mean - h, mean + h].
    """
    values = 1.0 * np.array(data)
    count = len(values)
    mean = np.mean(values)
    stderr = stats.sem(values)
    half_width = stderr * stats.t.ppf((1 + confidence) / 2., count - 1)
    return mean, half_width  # interval is mean +- half_width
def freeze_all(model):
    """Disable gradient updates for every parameter of *model*."""
    for param in model.parameters():
        param.requires_grad = False


def freeze_layer(model, layer):
    """Freeze only the *layer*-th (1-based) direct child module of *model*.

    Bug fix: the counter was compared against the undefined name ``num``,
    which raised NameError on every call; it must be compared to ``layer``.
    """
    n = 0
    for module in model.children():
        n += 1
        if n == layer:
            freeze_all(module)
def freeze_bn(model):
    """Put every 2-D/3-D batch-norm submodule of *model* into eval mode,
    so running statistics are no longer updated."""
    bn_types = (nn.BatchNorm3d, nn.BatchNorm2d)
    for module in model.modules():
        if isinstance(module, bn_types):
            module.eval()
def initialize_linear(model):
    """Xavier-init the weight and constant-init the bias of an nn.Linear.

    Uses an exact type match (not isinstance), as in the original, so
    subclasses of nn.Linear are left untouched.
    """
    if type(model) is not nn.Linear:
        return
    nn.init.xavier_uniform_(model.weight)
    model.bias.data.fill_(0.01)
def initialize_3d(model):
    """Initializer for 3-D conv networks, suitable for Module.apply.

    Conv3d: Xavier-uniform weights; bias (if present) filled with 0.01.
    BatchNorm3d: weight set to 1, bias set to 0.
    """
    if type(model) == nn.Conv3d:
        nn.init.xavier_uniform_(model.weight)
        # `is not None` instead of `!= None`: `!=` on a Parameter invokes
        # tensor comparison semantics; identity is what is intended here.
        if model.bias is not None:
            model.bias.data.fill_(0.01)
    if type(model) == nn.BatchNorm3d:
        nn.init.constant_(model.weight, 1)
        nn.init.constant_(model.bias, 0)
56,558 | Niektory/Danmaku | refs/heads/master | /scripts/client.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from __future__ import print_function
import time
from config import version
from network import ClientConnection
class Client(object):
    """Minimal interactive console client: forwards typed lines to the
    server until "quit" is entered or the connection closes."""

    def run(self):
        print("Welcome to {} {} client!".format(version.game_name, version.version))
        with ClientConnection() as connection:
            while not connection.connection.closed:
                time.sleep(0.05)
                # Drain (and print, via Connection.read) pending server messages.
                while connection.connection.read():
                    pass
                # NOTE: raw_input implies Python 2; blocks until the user hits enter.
                message = raw_input("> ")
                # Drain again: messages may have arrived while the user was typing.
                while connection.connection.read():
                    pass
                if message == "quit":
                    return
                connection.connection.send(message)
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,559 | Niektory/Danmaku | refs/heads/master | /scripts/incident.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from deck import Deck
class IncidentDeck(Deck):
    """The shuffled draw pile of incident cards used during a game."""

    def __init__(self):
        Deck.__init__(self, "Incident Deck")
        incidents = ["crisis of faith", "crossing to Higan", "endless party", "eternal night",
            "five impossible requests", "great barrier weakening", "great fairy wars",
            "Lily White", "overdrive", "rekindle blazing hell", "Saigyou Ayakashi blooming",
            "scarlet weather rhapsody", "spring snow", "undefined fantastic object",
            "voyage to Makai", "worldly desires"]
        self.deck = incidents
        self.shuffle()
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,560 | Niektory/Danmaku | refs/heads/master | /run_server.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from __future__ import print_function
from scripts.error import LogException
if __name__ == '__main__':
    # LogException (from scripts.error) is a context manager; presumably it
    # records any uncaught exception before the process dies — see scripts/error.
    with LogException():
        # Imported lazily so import-time failures are also caught by LogException.
        from scripts.server import Server
        server = Server()
        server.run()
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,561 | Niektory/Danmaku | refs/heads/master | /scripts/deck.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
import random
class Deck(object):
    """A simple named pile of cards with shuffle/draw/lookup helpers."""

    def __init__(self, name="deck"):
        self.deck = []   # top of the pile is the end of the list
        self.name = name

    def shuffle(self):
        """Randomize the pile in place."""
        random.shuffle(self.deck)

    def draw(self):
        """Remove and return the top card; raises IndexError when empty."""
        return self.deck.pop()

    def findCard(self, to_find):
        """Return the first card whose ID equals *to_find*, else None."""
        return next((card for card in self.deck if to_find == card.ID), None)
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,562 | Niektory/Danmaku | refs/heads/master | /scripts/server.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from __future__ import print_function
from config import version
from network import ServerConnections
from gamesession import GameSession
class Server(object):
    """Console game server: accepts connections, relays commands, and
    drives a single GameSession from logged-in users' text messages."""

    def run(self):
        print("Welcome to {} {} server!".format(version.game_name, version.version))
        with ServerConnections() as connections:
            game_session = GameSession()
            history_processed = 0  # index of the next unprinted history entry
            message = None
            while message != "shutdown":
                connections.prune()      # drop closed sockets / detach their users
                connections.accept()     # pick up any newly arrived connections
                connections.broadcast()  # empty payload: just flushes pending send buffers
                user, message = connections.read()
                # list the players
                if message == "players":
                    for player in game_session.players:
                        connections.message(user, "{} ({} life)".format(player.name, player.life))
                # join the game as player
                elif message == "join":
                    if game_session.addPlayer(user):
                        connections.broadcast("{} joined the game".format(user))
                # start the game with the players that joined so far
                elif message == "start":
                    if game_session.start():
                        connections.broadcast("Starting a {}-player game"
                            .format(len(game_session.players)))
                # perform a game action
                elif message.startswith("act:"):
                    game_session.playerInput(user, message.split(":",1)[1])
                # list the cards in hand
                elif message == "hand":
                    if game_session.findPlayer(user):
                        connections.message(user, game_session.findPlayer(user).hand.deck)
                game_session.run()
                # echo any new game-history entries to the server console
                while history_processed < len(game_session.history):
                    print("History:", game_session.history[history_processed])
                    history_processed += 1
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,563 | Niektory/Danmaku | refs/heads/master | /scripts/network.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from __future__ import print_function
import socket
import select
IP = "127.0.0.1"    # address the client connects to (server binds all interfaces)
PORT = 12347        # TCP port shared by client and server
BUFFER_SIZE = 4096  # max bytes pulled from a socket per recv() call
class ServerConnections(object):
    """Owns the listening socket plus all live client connections and the
    registry of known users (name / password / current connection)."""

    def __enter__(self):
        self.connections = []
        self.users = []
        self.server_socket = socket.socket()
        self.server_socket.bind(("", PORT))
        self.server_socket.listen(5)
        self.server_socket.setblocking(0)  # accept() must never block the main loop
        return self

    def accept(self):
        """Accept every connection currently pending on the listen socket."""
        while select.select([self.server_socket], [], [], 0)[0]:
            connection, address = self.server_socket.accept()
            print("Connection address:", address)
            self.connections.append(Connection(connection, address))

    def prune(self):
        """Drop closed connections and detach them from their users."""
        for connection in self.connections[:]:
            if connection.closed:
                self.connections.remove(connection)
        for user in self.users:
            if not user.connection:
                continue
            if user.connection.closed:
                user.connection = None

    def findUser(self, to_find):
        """Look up a user by name or by Connection object; None if absent."""
        for user in self.users:
            if to_find in (user.name, user.connection):
                return user

    def read(self):
        """Poll every connection for one message, handling built-in commands.

        Returns (user_name, message) for the first unhandled message from a
        logged-in user, or ("", "") when there is nothing for the caller.
        """
        for connection in self.connections:
            message = connection.read()
            if not message:
                continue
            # login as user  ("user:<name>:<password>")
            if message.startswith("user:") and message.count(":") >= 2 and message.split(":")[1]:
                self.login(message.split(":")[1], message.split(":",2)[2], connection)
            # list users
            elif message == "users":
                for user in self.users:
                    if user.connection:
                        connection.send("{} : {}".format(user.name, user.connection.address))
                    else:
                        connection.send("{} : not connected".format(user.name))
            # list connections
            elif message == "connections":
                for i_connection in self.connections:
                    connection.send(i_connection.address)
            # broadcast to all users
            elif self.findUser(connection) and message.startswith("say:"):
                self.broadcast("{} says: {}"
                    .format(self.findUser(connection).name, message.split(":",1)[1]))
            # unhandled message: hand it to the caller (only for logged-in users)
            elif self.findUser(connection):
                return self.findUser(connection).name, message
        # no messages
        return "", ""

    def login(self, name, password, connection):
        """Attach *connection* to user *name*, creating the account on first
        login; rejects a wrong password for an existing account."""
        # if already logged in, log out first
        for user in self.users:
            if connection == user.connection:
                user.connection = None
        # if an user already exists check if password matches
        for user in self.users:
            if name == user.name:
                if password == user.password:
                    user.connection = connection
                    print("{} logged in as {}".format(connection.address, name))
                    connection.send("Logged in as {}".format(name))
                else:
                    print("{} failed to log in as {}: wrong password"
                        .format(connection.address, name))
                    connection.send("Failed to log in: wrong password")
                return
        # no user with this name: create a new user
        self.users.append(User(name, password, connection))
        print("{} logged in as {}".format(connection.address, name))
        connection.send("Logged in as {}".format(name))

    def message(self, user, message=""):
        """Send *message* to one user (looked up by name or connection)."""
        self.findUser(user).connection.send(message)

    def broadcast(self, message=""):
        """Queue *message* for every connection; with the default empty
        message this only flushes pending send buffers."""
        for connection in self.connections:
            connection.send(message)

    def __exit__(self, exc_type, exc_value, traceback):
        for connection in self.connections:
            connection.close()
        self.server_socket.close()
class User(object):
    """A named account: login name, plain-text password, and the live
    Connection object (None while the user is offline)."""

    def __init__(self, name, password, connection):
        self.name, self.password, self.connection = name, password, connection
class ClientConnection(object):
    """Context manager that opens a TCP connection to the configured game
    server and exposes it as ``self.connection``."""

    def __enter__(self):
        client_socket = socket.socket()
        client_socket.connect((IP, PORT))  # blocking connect
        self.connection = Connection(client_socket, (IP, PORT))
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.connection.close()
class Connection(object):
    """One non-blocking socket wrapped with the wire protocol: every message
    is prefixed with its length as a 3-digit decimal string (max 999)."""

    def __init__(self, socket, address):
        self.socket = socket
        self.address = address
        self.read_buffer = ""   # raw inbound data not yet split into messages
        self.send_buffer = ""   # outbound data not yet accepted by the socket
        self.closed = False

    def read(self):
        """Return the next complete inbound message, or None if none is ready.

        Closes the connection on socket errors, on EOF, or when the length
        prefix is not numeric.
        """
        # read from the socket and add data to the buffer
        if not self.closed and select.select([self.socket], [], [], 0)[0]:
            try:
                data = self.socket.recv(BUFFER_SIZE)
            except socket.error as msg:
                print("Socket error: {}".format(msg))
                self.close()
            else:
                if data:
                    #print("Received data:", data)
                    self.read_buffer += data
                else:
                    # recv returning empty data means the peer closed
                    self.close()
        # check if we got a complete message
        if len(self.read_buffer) < 3:
            return
        try:
            message_length = int(self.read_buffer[:3])
        except ValueError:
            print("Error: Malformed message")
            self.close()
            return
        if len(self.read_buffer) < 3 + message_length:
            return
        # return a single message
        message, self.read_buffer \
            = self.read_buffer[3:3+message_length], self.read_buffer[3+message_length:]
        print("Received message: {}".format(message))
        return message

    def send(self, message=""):
        """Queue *message* (length-prefixed) and flush as much of the send
        buffer as the socket will take without blocking."""
        # add message to the buffer
        if message:
            if len(str(message)) > 999:
                print("Error: Message too long")
            else:
                self.send_buffer += str(len(str(message))).zfill(3) + str(message)
        # attempt sending the buffer
        if not self.closed and self.send_buffer \
                and select.select([], [self.socket], [], 0)[1]:
            try:
                characters_sent = self.socket.send(self.send_buffer)
            except socket.error as msg:
                print("Socket error: {}".format(msg))
                self.close()
            else:
                if characters_sent:
                    self.send_buffer = self.send_buffer[characters_sent:]
                else:
                    self.close()

    def close(self):
        """Close the socket once; repeated calls are no-ops."""
        if self.closed:
            return
        self.socket.close()
        self.closed = True
        print("{} disconnected".format(self.address))
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,564 | Niektory/Danmaku | refs/heads/master | /scripts/maindeck.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from __future__ import print_function
import copy
from deck import Deck
class MainDeck(Deck):
    """The main draw pile, built from DEFAULT_MAIN_DECK card counts; it
    refills itself from the shared discard pile when it runs out."""

    def __init__(self, discard_pile):
        Deck.__init__(self, "Main Deck")
        self.deck = []
        # NOTE: iteritems/xrange imply Python 2.
        for card, number in DEFAULT_MAIN_DECK.iteritems():
            for i in xrange(number):
                self.deck.append(card)
        self.shuffle()
        self.discard_pile = discard_pile

    def draw(self):
        """Draw the top card; on exhaustion, recycle the discard pile."""
        try:
            return Deck.draw(self)
        except IndexError:
            print("Main Deck empty; making a new one from the cards in the discard pile")
            self.deck = self.discard_pile.deck
            self.discard_pile.deck = []
            self.shuffle()
            return Deck.draw(self)
class MainDeckCard(object):
    """Base class for main-deck cards; subclasses override these class
    attributes with their Action/Reaction/Item behaviours."""
    Action = None
    Reaction = None
    Item = None
    danmaku = False


class ActionBase(object):
    """Default (never-playable, zero-cost) action behaviour for a card."""

    @staticmethod
    def conditionsSatisfied(state):
        return False

    @staticmethod
    def illegalPlay(state):
        return False

    @staticmethod
    def payCosts(state):
        pass


class ItemBase(object):
    """Marker base class for item behaviours."""
# The classes below carry their game rules as shorthand comments:
# "action [cost/target]" lines say when the card may be played, "> effect"
# lines what it does. None of these rules are implemented as Action/Reaction
# members yet except Shoot (further down in this file).
class OneUp(MainDeckCard):
    ID = "1up"
    name = "1UP"
    # healing
    # action [target player]
    # > [that player] gains 1 life
    # reaction [player reduced to 0 life]
    # > [that player] gains 1 life
class Bomb(MainDeckCard):
    ID = "bomb"
    name = "Bomb"
    # invocation
    # > activate spell card
    # reaction [danmaku card played]
    # > cancel [that card]
    # reaction [spell card activated]
    # > cancel [that spell card]
class Borrow(MainDeckCard):
    ID = "borrow"
    name = '"Borrow"'
    # action [target item]
    # > gain control of [that item]
class CaptureSpellCard(MainDeckCard):
    ID = "capture spell card"
    name = "Capture Spell Card"
    # invocation [target player]
    # > activate [that player]'s spellcard
class Focus(MainDeckCard):
    ID = "focus"
    name = "Focus"
    # defense
    # item
    # > passive: [owner] has +2 distance
class Graze(MainDeckCard):
    ID = "graze"
    name = "Graze"
    # dodge
    # reaction [owner attacked]
    # > avoid [that attack]
    # reaction [other player attacked][discard target danmaku card]
    # > avoid [that attack]
class Grimoire(MainDeckCard):
    ID = "grimoire"
    name = "Grimoire"
    # action
    # > [owner] draws 2 cards
class Kourindou(MainDeckCard):
    ID = "kourindou"
    name = "Kourindou"
    # action [discard any number of cards]
    # > draw 1 + [number of discarded cards]
class LaserShot(MainDeckCard):
    ID = "laser shot"
    name = "Laser Shot"
    # danmaku
    # action [target player]
    # > unavoidable attack [that player]
class LastWord(MainDeckCard):
    ID = "last word"
    name = "Last Word"
    # danmaku
    # action
    # > attack [all other players]
class MasterPlan(MainDeckCard):
    ID = "master plan"
    name = "Master Plan"
    # action
    # > resolve [current incident]
    # > look at the top 3 cards of [target deck], place them on the [top or bottom] in [any order]
class Melee(MainDeckCard):
    ID = "melee"
    name = "Melee"
    # danmaku
    # action [target player]
    # > attack [that player]
    # > [that player] can [discard target danmaku card] to copy this action
class MiniHakkero(MainDeckCard):
    ID = "mini hakkero"
    name = "Mini-Hakkero"
    # artifact
    # item
    # > passive: [owner] has +3 range
    # > action [discard 2 cards]
    # >> activate spell card
class Party(MainDeckCard):
    ID = "party"
    name = "Party"
    # action
    # > temp draw [number of active players] cards
    # > for each active player place [one of the drawn cards] in their hand
    # > draw a card
class Power(MainDeckCard):
    ID = "power"
    name = "Power"
    # powerup
    # item
    # > passive: [owner] has +1 range
    # > passive: [owner] has +1 danmaku limit per round
class SealAway(MainDeckCard):
    ID = "seal away"
    name = "Seal Away"
    # danmaku
    # action [target player]
    # > optional: force to discard [target item that player controls]
    # > if [that player] is in range, attack [that player]
class Shoot(MainDeckCard):
    ID = "shoot"
    name = "Shoot"
    danmaku = True
    # danmaku
    # action [discard any number of danmaku cards]
    # [target player in range (extended by discarded cards)]
    # > attack [that player]
    class Action(ActionBase):
        @staticmethod
        def conditionsSatisfied(state):
            """Validate the play and stash the target on ``state.targets``.

            ``state.message`` format: "<target name>[:<danmaku card id>...]".
            """
            target = state.session.findPlayer(state.message.split(":")[0])
            # check if target is another active player
            if not target or target == state.player or target.defeated:
                return False
            temp_hand = copy.deepcopy(state.player.hand)
            # check if all the cards to discard are in hand and of danmaku type
            for to_discard in state.message.split(":")[1:]:
                card = temp_hand.findCard(to_discard)
                if not card or not card.danmaku:
                    return False
                temp_hand.deck.remove(card)
            # check if in range (modified by number of discarded cards)
            if state.session.distance(state.player, target) \
                    > state.player.range + state.message.count(":"):
                return False
            state.targets = target
            return True
        @staticmethod
        def payCosts(state):
            # discard the extra danmaku cards named in the message
            for to_discard in state.message.split(":")[1:]:
                state.player.hand.deck.remove(state.player.hand.findCard(to_discard))
        @staticmethod
        def execute(state):
            # the attack itself: deal 1 damage to the stashed target
            state.targets.life -= 1
# More rules-as-comments card stubs (same shorthand as the classes above).
class SorcerersSutraScroll(MainDeckCard):
    ID = "sorcerers sutra scroll"
    name = "Sorcerer's Sutra Scroll"
    # artifact
    # item
    # > on play: draw a card
    # > passive: [owner] draws +1 card during her draw step
    # > passive: [owner] max hand size = 7 (+role modifier)
class SpiritualAttack(MainDeckCard):
    ID = "spiritual attack"
    name = "Spiritual Attack"
    # invocation
    # > activate spell card
class Stopwatch(MainDeckCard):
    ID = "stopwatch"
    name = "Stopwatch"
    # artifact
    # item
    # > passive: [owner] has +1 distance
    # > passive: [owner] has +2 danmaku limit per round
class SupernaturalBorder(MainDeckCard):
    ID = "supernatural border"
    name = "Supernatural Border"
    # defense
    # powerup
    # item
    # > reaction [owner attacked]
    # >> flip the [top card of the deck]
    # >> if [that card] is a spring or summer card, avoid that attack
class Tempest(MainDeckCard):
    ID = "tempest"
    name = "Tempest"
    # action
    # > all players discard their hand and draw 3 cards
class Voile(MainDeckCard):
    ID = "voile"
    name = "Voile"
    # action
    # > draw 3 cards
    # > place a card from your hand on top of the deck
# The full card mix for a real game, disabled (kept inside a string literal)
# while testing; restore it by removing the quotes and the test dict below.
"""
DEFAULT_MAIN_DECK = {
    OneUp: 2,
    Bomb: 4,
    CaptureSpellCard: 1,
    Focus: 3,
    Graze: 12,
    Grimoire: 2,
    Kourindou: 2,
    LaserShot: 1,
    LastWord: 1,
    MasterPlan: 1,
    Melee: 1,
    MiniHakkero: 1,
    Party: 1,
    Power: 7,
    SealAway: 4,
    Shoot: 23,
    SorcerersSutraScroll: 1,
    SpiritualAttack: 6,
    Stopwatch: 1,
    SupernaturalBorder: 2,
    Tempest: 1,
    Voile: 1,
    Borrow: 2
}
"""
# Test-only deck: 23 copies of Shoot and nothing else.
DEFAULT_MAIN_DECK = {
    Shoot: 23
}
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,565 | Niektory/Danmaku | refs/heads/master | /run_client.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from __future__ import print_function
from scripts.error import LogException
if __name__ == '__main__':
    # LogException (from scripts.error) is a context manager; presumably it
    # records any uncaught exception before the process dies — see scripts/error.
    with LogException():
        # Imported lazily so import-time failures are also caught by LogException.
        from scripts.client import Client
        client = Client()
        client.run()
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,566 | Niektory/Danmaku | refs/heads/master | /scripts/gamesession.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from __future__ import print_function
from player import Player
import gamestate
class GameSession(object):
    """Holds per-game data plus a stack of game-state handlers; the handler
    at the top of the stack (state[-1]) drives the game and consumes input."""

    def __init__(self):
        # The list is treated as a stack: PreGame (last element) is active
        # first; start() pops it to advance the game.
        self.state = [gamestate.PlayTurns(self), gamestate.DealHands(self),
            gamestate.InitLife(self), gamestate.RevealHeroine(self),
            gamestate.DealCharacters(self), gamestate.DealRoles(self),
            gamestate.InitDecks(self), gamestate.PreGame(self)]
        self.history = []
        self.players = []

    def addPlayer(self, name):
        """Add a uniquely named player; only allowed before the game starts."""
        if not isinstance(self.state[-1], gamestate.PreGame):
            return False
        if self.findPlayer(name):
            return False
        self.players.append(Player(name))
        return True

    def findPlayer(self, to_find):
        """Return the player named *to_find*, or None."""
        return next((p for p in self.players if to_find == p.name), None)

    @property
    def current_player(self):
        return self.players[self.current_player_i]

    @property
    def active_players(self):
        """All players who have not yet been defeated."""
        return [player for player in self.players if not player.defeated]

    def distance(self, player1, player2):
        """Shortest seating distance between two players, skipping defeated
        seats; the destination seat itself is counted as a step."""
        def walk(src, dst):
            seats = len(self.players)
            i = self.players.index(src)
            end = self.players.index(dst)
            steps = 0
            while i % seats != end:
                i += 1
                if not self.players[i % seats].defeated:
                    steps += 1
            return steps
        return min(walk(player1, player2), walk(player2, player1))

    def start(self):
        """Begin the game by popping the PreGame state and recording the
        roster in the history log; returns False if already started."""
        # start the game if there's enough players
        #if self.state[-1] == "waiting for players" and 4 <= len(self.players) <= 8:
        if isinstance(self.state[-1], gamestate.PreGame): # for testing
            self.state.pop()
            self.history.append("start game")
            self.history.append(len(self.players))
            for player in self.players:
                self.history.append(player.name)
            return True
        return False

    def run(self):
        self.state[-1].run()

    def playerInput(self, name, message):
        self.state[-1].playerInput(self.findPlayer(name), message)
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,567 | Niektory/Danmaku | refs/heads/master | /scripts/character.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from deck import Deck
class CharacterDeck(Deck):
    """The shuffled draw pile of playable character cards."""

    def __init__(self):
        Deck.__init__(self, "Character Deck")
        characters = ["Alice", "Cirno", "Reimu", "Byakuren", "Tenshi", "Meiling", "Suika",
            "Sakuya", "Keine", "Nitori", "Yuuka", "Marisa", "Sanae", "Satori", "Youmu",
            "Futo", "Patchouli", "Reisen", "Utsuho", "Remilia", "Aya", "Miko", "Eirin",
            "Yukari", "Player 2"]
        self.deck = characters
        self.shuffle()
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,568 | Niektory/Danmaku | refs/heads/master | /scripts/player.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from deck import Deck
DEFAULT_LIFE = 4       # starting and (current) maximum life
DEFAULT_HAND_SIZE = 4  # base maximum hand size
DEFAULT_RANGE = 1      # base attack reach measured in seats


class Player(object):
    """One participant: identity, secret role, character, life and hand."""

    def __init__(self, name):
        self.name = name
        self.role = None       # assigned when roles are dealt
        self.character = None  # assigned when characters are dealt
        self.life = DEFAULT_LIFE
        self.hand = Deck()
        self.defeated = False

    @property
    def max_life(self):
        """Life cap; currently just the flat default."""
        return DEFAULT_LIFE

    @property
    def max_hand_size(self):
        """Hand-size cap; currently just the flat default."""
        return DEFAULT_HAND_SIZE

    @property
    def range(self):
        """Attack reach; currently just the flat default."""
        return DEFAULT_RANGE
56,569 | Niektory/Danmaku | refs/heads/master | /scripts/config/version.py | # -*- coding: utf-8 -*-
game_name = "Danmaku!!"  # display name used in client/server greetings
version = "0.0"          # version string shown alongside the game name
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,570 | Niektory/Danmaku | refs/heads/master | /scripts/role.py | # -*- coding: utf-8 -*-
# Copyright 2018 Tomasz "Niektóry" Turowski
from deck import Deck
class RoleDeck(Deck):
    """The role cards dealt for a game of *players* people, assembled from
    the partner / stage-boss / ex-boss sub-decks per the player count."""

    def __init__(self, players, simplified=False):
        Deck.__init__(self, "Role Deck")
        partner_deck = PartnerDeck(players, simplified)
        stage_boss_deck = StageBossDeck(players, simplified)
        ex_boss_deck = ExBossDeck(players, simplified)
        # 4-player base: two stage bosses, one ex boss, and the heroine.
        self.deck = \
            [stage_boss_deck.draw(), stage_boss_deck.draw(), ex_boss_deck.draw(), "heroine"]
        if players >= 5:
            self.deck.append(partner_deck.draw())
        if players >= 6:
            self.deck.append(stage_boss_deck.draw())
        if players >= 7:
            self.deck.append(partner_deck.draw())
        if players == 8:
            self.deck.append("rival")
        # NOTE(review): deal order is deterministic while shuffling stays disabled.
        #self.shuffle() # commented for testing
class PartnerDeck(Deck):
    """Partner-type role cards available for the given player count."""

    def __init__(self, players, simplified=False):
        Deck.__init__(self, "Partner role cards")
        roles = []
        if players >= 5:
            roles += ["partner", "partner"]
            if not simplified:
                roles.append("ex midboss")
        if players >= 7 and not simplified:
            roles.append("one true partner")
        self.deck = roles
        self.shuffle()
class StageBossDeck(Deck):
    """Stage Boss-type role cards available for the given player count."""

    def __init__(self, players, simplified=False):
        Deck.__init__(self, "Stage Boss role cards")
        roles = ["stage boss", "stage boss", "stage boss"]
        if players >= 5 and not simplified:
            roles += ["final boss", "challenger", "anti-heroine"]
        self.deck = roles
        self.shuffle()
class ExBossDeck(Deck):
    """Extra Boss-type role cards; the phantom boss only joins full games."""

    def __init__(self, players, simplified=False):
        Deck.__init__(self, "Extra Boss role cards")
        roles = ["ex boss"]
        if not simplified:
            roles.append("phantom boss")
        self.deck = roles
        self.shuffle()
| {"/run_server.py": ["/scripts/server.py"], "/run_client.py": ["/scripts/client.py"]} |
56,571 | hypebeast/bundesliga-cli | refs/heads/master | /bundesliga/cli.py | # -*- coding: utf-8 -*-
import click
from .openligadb import OpenLigaDB
from . import helpers
# Decorator that injects the shared OpenLigaDB client into subcommands.
pass_openligadb = click.make_pass_decorator(OpenLigaDB)


@click.group(invoke_without_command=True)
@click.pass_context
def cli(ctx):
    """
    Bundesliga results and stats for hackers.

    bundesliga-cli is a CLI tool that provides access to Bundesliga
    results and stats.

    Uses openligadb-json-api.heroku.com API which is itself a JSON wrapper
    around the OpenligaDB API (http://www.openligadb.de).
    """
    # One OpenLigaDB instance shared by every subcommand via @pass_openligadb.
    ctx.obj = OpenLigaDB()
@cli.command()
@click.option('--matchday', '-d', help='Defines the matchday')
@click.option('--league', '-l', help='Defines the league (e.g. bl1, bl2, bl3)')
@click.option('--season', '-s', help='Defines the season (e.g. 2014, 2013)')
@pass_openligadb
def matchday(openligadb, season, matchday, league):
    """
    Match results for the given matchday.

    Get all available league shortcuts with 'buli leagues'.

    Season format: e.g. 2014 or 2011
    """
    if not league:
        league = openligadb.ERSTE_LIGA
    if not matchday:
        matchday = openligadb.getNextMatchday(league)
    if not season:
        season = openligadb.getCurrentSeason(league)  # ['name']
    matches = openligadb.getMatchdayResults(matchday, season, league)
    # helpers.process_matches returns None for an empty day; guard so the
    # loop below doesn't crash with "'NoneType' is not iterable".
    rows = helpers.process_matches(matches) or []
    table = helpers.create_results_table()
    for row in rows:
        table.add_row(row)
    print(table)
@cli.command()
@click.option('--league', '-l', help='Defines the league')
@pass_openligadb
def next(openligadb, league):
    """
    Shows the match results for the next/current matchday.

    Get all available league shortcuts with 'buli leagues'.
    """
    if not league:
        league = openligadb.ERSTE_LIGA
    matchday = openligadb.getNextMatchday(league)
    season = openligadb.getCurrentSeason(league)
    matches = openligadb.getMatchdayResults(matchday=matchday, season=season,
        league=league)
    # Guard: helpers.process_matches returns None when there are no matches.
    rows = helpers.process_matches(matches) or []
    table = helpers.create_results_table()
    for row in rows:
        table.add_row(row)
    print(table)
@cli.command()
@click.option('--league', '-l', help='Defines the league')
@pass_openligadb
def last(openligadb, league):
    """
    Shows the match results for the last matchday.

    Get all available league shortcuts with 'buli leagues'.
    """
    # NOTE(review): unlike `next`, the matchday lookup here is not
    # league-aware — confirm getRecentMatchday's signature upstream.
    matchday = openligadb.getRecentMatchday()
    if not league:
        league = openligadb.ERSTE_LIGA
    season = openligadb.getCurrentSeason(league)
    matches = openligadb.getMatchdayResults(matchday=matchday, season=season,
        league=league)
    # Guard: helpers.process_matches returns None when there are no matches.
    rows = helpers.process_matches(matches) or []
    table = helpers.create_results_table()
    for row in rows:
        table.add_row(row)
    print(table)
@cli.command()
@click.option('--league', '-l', help='Defines the league')
@click.option('--season', '-s', help='Defines the season')
@pass_openligadb
def table(openligadb, league, season):
    """
    Shows the league table.

    By default the league table for the 1. Bundesliga and the current
    season is displayed.

    Get all available league shortcuts with 'buli leagues'.

    Season format: e.g. 2014 or 2011
    """
    if not league:
        league = openligadb.ERSTE_LIGA
    if not season:
        season = openligadb.getCurrentSeason(league) # ['name']
    # The standings are computed client-side from all finished matches.
    table_stats = openligadb.getTable(season, league)
    rows = helpers.process_table_stats(table_stats)
    click.echo(helpers.create_table_table(rows))
@cli.command()
@click.option('--league', '-l', help='Defines the league')
@click.option('--season', '-s', help='Defines the season')
@pass_openligadb
def teams(openligadb, league, season):
    """
    Shows the teams for a league and season.

    If no season is specified, the current season will be used.
    If no league is specified, the 1. Fussball Bundesliga will be used.

    League format: 'bl1' for 1. Bundesliga, 'bl2' for 2. Bundesliga, etc.
    Get all available league shortcuts with 'buli leagues'.

    Season format: e.g. 2014 or 2011
    """
    if not league:
        league = openligadb.ERSTE_LIGA
    if not season:
        season = openligadb.getCurrentSeason(league)
    table = helpers.create_teams_table()
    teams = openligadb.getTeams(season, league)
    for team in teams:
        # One row per club; only the club name is displayed.
        row = [team['TeamName']]
        table.add_row(row)
    print(table)
# depreceated
# @pass_openligadb
# def leagues(openligadb):
# """
# Shows all available soccer leagues.
# The 'league shortcut' can be used to specify the league option for
# the other options.
# """
# table = helpers.create_leagues_table()
# leagues = openligadb.getAvailLeagues()
# for l in leagues:
# row = [l['leagueName'], l['leagueSaison'], l['leagueShortcut']]
# table.add_row(row)
# print(table)
if __name__ == '__main__':
    # Entry point for direct execution; the initial obj is replaced by an
    # OpenLigaDB instance inside cli().
    cli(obj={})
| {"/bundesliga/cli.py": ["/bundesliga/openligadb.py"]} |
56,572 | hypebeast/bundesliga-cli | refs/heads/master | /bundesliga/helpers.py | # -*- coding: utf-8 -*-
import datetime
from prettytable import PrettyTable
def parseDateTime(time):
    """Parse an ISO-like 'YYYY-MM-DDTHH:MM:SS' string into a datetime."""
    fmt = '%Y-%m-%dT%H:%M:%S'
    return datetime.datetime.strptime(time, fmt)
def from_utc(utcTime, fmt="%Y-%m-%dT%H:%M:%S.%fZ"):
    """
    Convert a UTC ISO-8601 time string to a (naive) datetime.datetime.
    """
    parsed = datetime.datetime.strptime(utcTime, fmt)
    return parsed
def create_results_table():
    """Empty PrettyTable for per-match results; rows are added by the CLI."""
    x = PrettyTable(["Home", "Away", "Result", "Goals", "Status", "Date"])
    x.align["Home"] = "l"
    x.align["Away"] = "l"
    x.align["Date"] = "l"
    x.align["Goals"] = "l"
    return x
def create_table_table(rows):
    """PrettyTable of league standings, pre-filled with *rows*."""
    # NOTE(review): the constructor `align` kwarg requires a recent
    # prettytable release — confirm against the pinned version.
    x = PrettyTable(["Rank", "Club", "Matches", "Wins", "Draws", "Losses",
        "Goals", "GD", "Points"], align="r")
    x.align["Rank"] = "r"
    x.align["Club"] = "l"
    x.align["Matches"] = "r"
    x.align["Wins"] = "r"
    x.align["Draws"] = "r"
    x.align["Losses"] = "r"
    x.align["GD"] = "r"
    x.align["Points"] = "r"
    for row in rows:
        x.add_row(row)
    return x
def create_leagues_table():
    """Empty PrettyTable for the (currently disabled) leagues listing."""
    x = PrettyTable(["Name", "Season", "Shortcut"])
    x.align["Name"] = "l"
    return x
def create_teams_table():
    """Empty single-column PrettyTable for listing team names."""
    x = PrettyTable(["Name"])
    x.align["Name"] = "l"
    return x
def current_season():
    """Return the year the current Bundesliga season started, as a string.

    Seasons start in (late) summer: before August we are still in the
    season that began the previous calendar year.  Replaces a stale
    hard-coded "2014".
    """
    today = datetime.date.today()
    year = today.year if today.month >= 8 else today.year - 1
    return str(year)
def process_matches(matches):
    """Turn raw OpenLigaDB match dicts into display rows.

    Each row is [home, away, result, goals info, status, date].  Returns an
    empty list for falsy input (previously returned None, which crashed the
    CLI's row-adding loops).
    """
    if not matches:
        return []
    results = []
    for match in matches:
        match_result = []
        # match = match['Matchdata']
        # now = datetime.datetime.utcnow()
        now = datetime.datetime.now()
        okParsingMatchDate = False
        try:
            # matchDate = parseDateTime(match['MatchDateTimeUTC'])
            matchDate = parseDateTime(match['MatchDateTime'])
            date = matchDate.strftime('%H:%M %d.%m.%Y')
            okParsingMatchDate = True
        except (KeyError, TypeError, ValueError):
            # Missing or unparseable kickoff time; show a placeholder.
            date = "-"
        status = "-"
        if okParsingMatchDate:
            if matchDate > now:
                status = "Not started"
            else:
                if match['MatchIsFinished'] == True:
                    status = 'Finished'
                else:
                    status = 'Running'
        # Default score placeholder depends on whether the game has begun.
        if status == 'Not started':
            score = '-:-'
        else:
            score = '0:0'
        if match['Goals']:
            goalsInfo = []
            for goals in match['Goals']:
                score = (str(goals['ScoreTeam1']) + ':' +
                    str(goals['ScoreTeam2']))
                if goals['GoalGetterName']:
                    goalsInfo.append(score + ' ' + goals['GoalGetterName'])
                else:
                    goalsInfo.append(score)
            goalsInfo = ', '.join(goalsInfo)
            # Wrap long goal listings onto a second line near column 50.
            maxLength = 50
            if len(goalsInfo) > maxLength:
                index = goalsInfo.rfind(',', 0, maxLength-1)
                while index < 0:
                    index = goalsInfo.rfind(',', 0, maxLength)
                    maxLength += 1
                goalsInfo = (goalsInfo[:index] + '\n' +
                    goalsInfo[index+2:])
        else:
            goalsInfo = ''
        points = parse_points(match, score)
        if not points:
            points = '-:-'
        match_result.append(match['Team1']['TeamName'])
        match_result.append(match['Team2']['TeamName'])
        match_result.append(points)
        match_result.append(goalsInfo)
        match_result.append(status)
        match_result.append(date)
        results.append(match_result)
    return results
def parse_points(match, score):
    """Format a match result as "<final> (<halftime>)" when available.

    Falls back to "<score> (<halftime>)" while only the halftime result is
    recorded, or to *score* unchanged when no results exist yet.
    """
    recorded = match['MatchResults'] or []
    halftime_parts = ['(' + str(r['PointsTeam1']) +
        ':' + str(r['PointsTeam2']) + ')'
        for r in recorded
        if r['ResultName'] == 'Halbzeitergebnis']
    fulltime_parts = [str(r['PointsTeam1']) + ':' + str(r['PointsTeam2'])
        for r in recorded
        if r['ResultName'] == 'Endergebnis']
    if fulltime_parts:
        return fulltime_parts[0] + ' ' + halftime_parts[0]
    if halftime_parts:
        return score + ' ' + halftime_parts[0]
    return score
def process_table_stats(stats):
    """Convert per-club stat dicts into standings rows.

    Each row is [rank, club, matches, wins, draws, losses, goals, GD, points];
    rank simply follows the order of *stats*.
    """
    rows = []
    for rank, club in enumerate(stats, start=1):
        played = club['wins'] + club['losses'] + club['draws']
        goals = str(club['goals']) + ':' + str(club['received_goals'])
        goal_diff = int(club['goals']) - int(club['received_goals'])
        rows.append([
            rank,
            club['team_name'],
            played,
            club['wins'],
            club['draws'],
            club['losses'],
            goals,
            goal_diff,
            club['points'],
        ])
    return rows
| {"/bundesliga/cli.py": ["/bundesliga/openligadb.py"]} |
56,573 | hypebeast/bundesliga-cli | refs/heads/master | /setup.py | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import os, sys, subprocess
import bundesliga
try:
    from setuptools import setup
except ImportError:
    # Fall back to distutils on minimal Python installs.
    from distutils.core import setup
# `python setup.py publish` shortcut: build an sdist and upload a signed copy.
if sys.argv[-1] == 'publish':
    subprocess.call(['python', 'setup.py', 'sdist', 'upload', '--sign'])
    sys.exit()
README = open('README.md').read()
LICENSE = open("LICENSE").read()
setup(
    name='bundesliga-cli',
    version=bundesliga.__version__,  # single-sourced from the package
    description='Bundesliga results and stats for hackers',
    long_description=(README),
    author='Sebastian Ruml',
    author_email='sebastian@sebastianruml.name',
    url='https://github.com/hypebeast/bundesliga-cli',
    include_package_data=True,
    install_requires=[
        'click >= 3.3.0',
        'prettytable >= 0.7.2'
    ],
    license=(LICENSE),
    keywords='python, cli, bundesliga, germany',
    packages=['bundesliga'],
    scripts=['bin/buli']  # `buli` console entry point
)
| {"/bundesliga/cli.py": ["/bundesliga/openligadb.py"]} |
56,574 | hypebeast/bundesliga-cli | refs/heads/master | /bundesliga/openligadb.py | # -*- coding: utf-8 -*-
import json
import re
from urllib.request import urlopen
class OpenLigaDB:
    """
    Thin wrapper around the OpenLigaDB REST API
    (https://www.openligadb.de/api).
    """

    # League shortcuts as they appear in OpenLigaDB request paths.
    ERSTE_LIGA = 'bl1'
    ZWEITE_LIGA = 'bl2'
    DRITTE_LIGA = 'bl3'
    FUSSBALL_SPORT_ID = 1

    def __init__(self):
        self.openLigaDBApiUrl = 'https://www.openligadb.de/api'

    def getMatchdayResults(self, matchday=0, season="", league=""):
        """
        Return the raw match data for the given matchday.

        Falsy arguments are replaced with defaults: the upcoming matchday,
        the current season and the first league.
        """
        if matchday == 0:
            matchday = self.getNextMatchday()
        if season == "":
            # BUG FIX: this used to call self.getSeason(), which does not
            # exist -- the method is named getCurrentSeason().
            season = self.getCurrentSeason()
        if league == "":
            league = self.ERSTE_LIGA
        requestUrl = (self.openLigaDBApiUrl + '/getmatchdata/' +
                      league + '/' + season + '/' + str(matchday))
        data = json.load(urlopen(requestUrl))
        return data

    def getTable(self, season, league=ERSTE_LIGA):
        """
        Compute the league table for the given season and league from the
        individual finished matches.
        """
        # Seed one accumulator row per team, keyed by team id.
        teams = self.getTeams(season, league)
        table = {}
        for team in teams:
            table[team['TeamId']] = {
                'team_name': team['TeamName'],
                'points': 0,
                'wins': 0,
                'losses': 0,
                'draws': 0,
                'goals': 0,
                'received_goals': 0
            }
        matchData = self.getMatchDataByLeagueSaison(season, league)
        for match in matchData:
            if not match['MatchIsFinished']:
                continue
            team1 = match['Team1']['TeamId']
            team2 = match['Team2']['TeamId']
            # The last MatchResults entry holds the full-time score.
            goals_team1 = int(match['MatchResults'][-1]['PointsTeam1'])
            goals_team2 = int(match['MatchResults'][-1]['PointsTeam2'])
            teamData1 = table[team1]
            teamData2 = table[team2]
            teamData1['goals'] += goals_team1
            teamData2['goals'] += goals_team2
            teamData1['received_goals'] += goals_team2
            teamData2['received_goals'] += goals_team1
            if goals_team1 > goals_team2:
                teamData1['points'] += 3
                teamData1['wins'] += 1
                teamData2['losses'] += 1
            elif goals_team1 < goals_team2:
                teamData2['points'] += 3
                teamData2['wins'] += 1
                teamData1['losses'] += 1
            else:
                teamData1['points'] += 1
                teamData2['points'] += 1
                teamData1['draws'] += 1
                teamData2['draws'] += 1
        # Rank by points, then goal difference, then goals scored.
        return sorted([value for key, value in table.items()],
                      key=lambda k: (k['points'],
                                     k['goals'] - k['received_goals'],
                                     k['goals']),
                      reverse=True)

    def getNextMatchday(self, league=ERSTE_LIGA):
        """
        Return the next matchday number (this may still be the matchday
        currently in progress).
        """
        requestUrl = (self.openLigaDBApiUrl +
                      '/getmatchdata/' + league)
        data = json.load(urlopen(requestUrl))
        return data[0]['Group']['GroupOrderID']

    def getRecentMatchday(self):
        """
        Return the most recently played matchday as a string (minimum "1").
        """
        previousMatchday = int(self.getNextMatchday()) - 1
        if previousMatchday < 1:
            previousMatchday = 1
        return str(previousMatchday)

    def getCurrentSeason(self, league=ERSTE_LIGA):
        """
        Extract the current season's four-digit start year (e.g. "2016")
        from the league name of the upcoming matchday.
        """
        requestUrl = (self.openLigaDBApiUrl +
                      '/getmatchdata/' + league)
        data = json.load(urlopen(requestUrl))
        leagueName = data[0]['LeagueName']
        season = re.sub(r'.*?(\d{4}).*', r'\1', leagueName)
        return season

    def getTeams(self, season, league=ERSTE_LIGA):
        """
        Return the list of team records for the given season and league.
        """
        requestUrl = (self.openLigaDBApiUrl +
                      '/getavailableteams/' + league + '/' + season)
        data = json.load(urlopen(requestUrl))
        return list(data)

    def getMatchDataByLeagueSaison(self, season, league=ERSTE_LIGA):
        """
        Return all match records for the given season and league.
        """
        requestUrl = (self.openLigaDBApiUrl +
                      '/getmatchdata/' + league + '/' + season)
        data = json.load(urlopen(requestUrl))
        return list(data)

    def getMatchesByTeam(self, team):
        """Not implemented yet."""
        pass
| {"/bundesliga/cli.py": ["/bundesliga/openligadb.py"]} |
56,577 | YamiOmar88/flow-networks | refs/heads/master | /main.py | # main.py
# Author: Yamila Omar
# Date: 22nd May 2018
# Driver script: builds a flow network from a process graph, computes the
# maximum flow with Ford-Fulkerson and reports edge utilization and
# bottleneck nodes.
from graphfile import GraphFile
from graph import Graph
from capacity import Capacity
from fordfulkerson import FordFulkerson
# Input data
# ==========
graph_to_study = input("Choose graph to study: F1, F2 or F3? ")
# Load graph
# ==========
filename = "data/" + graph_to_study + ".txt"
edges = GraphFile(filename).read_edges_from_file()
F = Graph(edges)
# Get edges capacity
# ==================
nodes_capacity = GraphFile("data/nodes_capacity.txt").read_nodes_capacity_from_file()
C = Capacity(nodes_capacity, 'i', 'f')
C_edges = C.get_edges_capacity(F, "weight")
# Edges touching the source 'i' or sink 'f' keep their float capacity;
# all others are truncated to int, then zero-capacity edges are dropped.
for k,v in C_edges.items():
    if ("i" in k) or ("f" in k):
        pass
    else:
        C_edges[k] = int(v)
C_edges = {k:v for k,v in C_edges.items() if v > 0}
# Flow Network
# ============
# Break each antiparallel pair (u,v)/(v,u) by routing (v,u) through a
# fresh intermediate node (synthetic node ids start at 100).
flow_network = Graph(C_edges.copy())
antiparallel_edges = flow_network.find_antiparallel_edges()
counter = 100
while len(antiparallel_edges) > 0:
    edge = antiparallel_edges.pop(0)
    anti = (edge[1],edge[0])
    antiparallel_edges.remove( anti )
    w = flow_network.edges[anti]
    flow_network.deleteEdge(anti[0], anti[1])
    flow_network.addEdge(i=edge[1], j=counter, w_ij=w)
    flow_network.addEdge(i=counter, j=edge[0], w_ij=w)
    counter += 1
# Maximum Flow
# ============
flow, residual_network = FordFulkerson(flow_network, startNode='i', endNode='f')
# Final flow
# ==========
flow = {k:v for k,v in flow.items() if v > 0}
flow_fraction = {k:round(v/C_edges[k],2) for k,v in flow.items()}
# Total items to produce daily
# ============================
# Sum the flow on every edge entering the sink 'f'.
count = 0
for k,v in flow.items():
    if k[1] == "f": count += v
print("Total items to produce per day: ", count)
# Save flow fraction
# ==================
filename = "results/flow_fraction_" + graph_to_study + ".txt"
outFile = GraphFile(filename)
outFile.write_graph_to_file(flow_fraction)
# Bottlenecks
# ===========
# A node is a bottleneck when every one of its outgoing edges is used at
# full capacity (flow fraction == 1).
C_graph = Graph(C_edges.copy())
ingoing, outgoing = C_graph.adjacencyList
for i,j_list in outgoing.items():
    bottleneck = True
    if len(j_list) == 0: continue
    for j in j_list:
        fraction = flow_fraction.get((i,j), 0)
        if fraction != 1:
            bottleneck = False
            break
    print("Is {} a bottleneck? {}".format(i, bottleneck))
56,578 | YamiOmar88/flow-networks | refs/heads/master | /graphfile.py | # Useful functions
# Author: Yamila M. Omar
# Date: 4/4/2019
# ======================
class GraphFile:
    """Read/write helpers for the plain-text graph formats used by this
    project (one whitespace- or separator-delimited record per line)."""

    def __init__(self, fileName):
        '''Initialize class with name of file.'''
        self.filename = fileName

    @staticmethod
    def _coerce(value):
        '''Return value as int when it parses, otherwise unchanged. (helper)'''
        try:
            return int(value)
        except ValueError:
            return value

    def read_edges_from_file(self):
        '''Read graph from file. The file contains one edge (i,j)
        and its weight w_ij per line as follows:
        i j w_ij
        Node names that look like integers become ints; other names stay
        strings. Lines without exactly three fields are skipped.'''
        edges = {}
        with open(self.filename) as fileHandle:
            for line in fileHandle:
                fields = line.strip().split()
                if len(fields) != 3:
                    continue
                i = self._coerce(fields[0])
                j = self._coerce(fields[1])
                edges[(i, j)] = float(fields[2])
        return edges

    def write_graph_to_file(self, G):
        '''Write graph G to file. G must be a dictionary.
        Keys are tuples (i,j) of edges and values are weights w_ij.
        Returns True on success.'''
        with open(self.filename, 'w') as f:
            for k, v in G.items():
                f.write('{} {} {}\n'.format(k[0], k[1], v))
        return True

    def read_dictionary_from_file(self, separator=' '):
        '''Read a two-column file (key, value) into a dict of strings.
        Columns are split on `separator` (default: space).'''
        my_dict = dict()
        with open(self.filename) as f:
            for line in f:
                fields = line.strip().split(separator)
                my_dict[fields[0]] = fields[1]
        return my_dict

    def read_centrality_values_from_file(self, separator=' '):
        '''Read centrality values from file. The file must contain
        one node per line and its centrality value as follows:
        i c_i'''
        # BUG FIX: the separator argument used to be silently dropped
        # (the inner call always used the default); it is now forwarded.
        d = self.read_dictionary_from_file(separator)
        C = dict()
        for k, v in d.items():
            # BUG FIX: replaced a bare `except:` (which swallowed even
            # KeyboardInterrupt) with the narrow int-coercion helper.
            C[self._coerce(k)] = float(v)
        return C

    def read_nodes_capacity_from_file(self, separator=' '):
        '''Read nodes capacity from file. The file must contain
        one node per line and its capacity value: i c_i. Both columns
        must be integers.'''
        d = self.read_dictionary_from_file(separator)
        return {int(k): int(v) for k, v in d.items()}

    def write_centrality_values_to_file(self, C):
        '''Write centrality values to file. C must be a dictionary.
        Keys are nodes i and values are centrality values c_i.
        Returns True on success.'''
        with open(self.filename, 'w') as f:
            for k, v in C.items():
                f.write('{} {}\n'.format(k, v))
        return True
56,579 | YamiOmar88/flow-networks | refs/heads/master | /graph.py | # Entropy Centrality
# Author: Yamila M. Omar
# Date: 5/4/2019
from math import log
class Graph:
    """Directed weighted graph stored as {(i, j): w_ij}, with path
    enumeration and the entropy centrality of Tutzauer (2007)."""
    def __init__(self, edges=dict()):
        '''Initializes a Graph. Variables:
        - edges: dictionary with edge tuples as keys (i,j) and
        weight w_ij as values.'''
        # NOTE(review): the mutable default dict() is shared by every
        # instance constructed without arguments -- confirm intended.
        self.edges = edges
    def addEdge(self, i, j, w_ij):
        '''Allows to add and edge (i,j) and its weight w_ij to the graph'''
        self.edges[(i,j)] = w_ij
    def deleteEdge(self, i, j):
        '''Allows to delete an edge (i,j) and its associated weight'''
        try:
            self.edges.pop((i,j))
        except KeyError:
            # NOTE(review): this fires when (i, j) is NOT in the graph,
            # but the message says "in Graph" -- wording looks inverted.
            print("{0} cannot be deleted. {0} in Graph.".format((i,j)))
    def normalize(self):
        '''This function allows to set edge weights in a 0 to 1 scale.
        The normalization constant is the total weight leaving the
        source node 'i'.'''
        totSum = 0
        for k,v in self.edges.items():
            if k[0] == 'i':
                totSum += v
        normalized_edges = {}
        for k,v in self.edges.items():
            normalized_edges[k] = round(v/totSum, 5)
        return normalized_edges
    def _remove_edges_below_tolerance(self, edges, tolerance):
        '''This function is used by remove_underutilized_edges and should
        not be called by users. It takes two input variables:
        - edges: dictionary with tuples of edges (i,j) as keys,
        - tolerance: and integer or float used to filter out edges.
        The function returns a new dictionary of edges.'''
        new_dict = {}
        for k,v in edges.items():
            if v >= tolerance:
                new_dict[k] = v
        return new_dict
    def remove_underutilized_edges(self, tolerance, normalize=False):
        ''' This function removes edges whose weight is below a tolerance.
        Input variables:
        - tolerance (integer or float) used to filter out edges,
        - normalize (default value False) whether the weighted edges are to
        be normalized.
        The function returns a dictionary of edges.'''
        if normalize:
            normalized_edges = self.normalize()
            return self._remove_edges_below_tolerance(normalized_edges, tolerance)
        else:
            return self._remove_edges_below_tolerance(self.edges, tolerance)
    def find_antiparallel_edges(self):
        '''This function finds pairs of antiparallel edges. Both members
        of each pair appear in the returned (sorted) list.'''
        antiparallel = []
        for k in self.edges.keys():
            antiparallel_edge = (k[1],k[0])
            # NOTE(review): .get(..., False) also treats an existing edge
            # with weight 0 (or any falsy weight) as absent -- confirm.
            if self.edges.get(antiparallel_edge, False):
                antiparallel.append(k)
        antiparallel.sort()
        return antiparallel
    def searchPaths(self, i, j, visited, path):
        '''Searches all possible paths from node i to node j.
        Recursive depth-first enumeration over the outgoing adjacency
        list; `visited` guards against cycles along the current path.'''
        # Set current node as visited and store it in path
        visiting = dict(visited)
        visiting[i] = True
        aux = list(path)
        aux.append(i)
        # If current node is not same as destination, recursively
        # search adjacent nodes.
        all_paths = []
        if i != j:
            for u in self.adjacencyList[1].get(i, []):
                if visiting[u] == False:
                    all_paths += self.searchPaths(u, j, visiting, aux)
        else:
            all_paths += [aux[:]]
        return all_paths
    def findAllPaths(self, i, j):
        '''Find all possible paths from node i to node j.'''
        # Set all nodes as not visited
        visited = {n: False for n in self.nodes}
        # Create a list to store the path
        path = []
        # Call recursive function to search for paths
        return self.searchPaths(i, j, visited, path)
    def _downstream_degree(self, t, path):
        '''Determine the downstream degree of t. Input variables:
        - node t for which the downstream degree is required,
        - path in which this downstream degree is to be calculated.
        The function returns the downstream degree of node t as
        defined by Tutzauer (2007). The function is generalized to
        also work with weighted graphs.'''
        ingoing, outgoing = self.adjacencyList
        downstream_degree = 0
        t_index = path.index(t)
        # Only neighbors not already visited earlier on this path count.
        for adj_node in outgoing[t]:
            if adj_node not in path[:t_index]:
                downstream_degree += self.edges[ (t, adj_node) ]
        return downstream_degree
    def _transfer_probability(self, t, path):
        '''Determine the transfer probability of path k. Input variables:
        - node t for which the transfer probability is required,
        - path in which this transfer probability is to be calculated.
        The function returns the transfer probability of node t as
        defined by Tutzauer (2007). The function is generalized to
        also work with weighted graphs.'''
        D_t = self._downstream_degree(t, path)
        if D_t == 0:
            T_k = 0
        else:
            t_index = path.index(t)
            edge = (t, path[t_index + 1])
            T_k = self.edges[edge] / D_t
        return T_k
    def _stopping_probability(self, t, path):
        '''Determine the stopping probability of path k. Input variables:
        - node t for which the stopping probability is required,
        - path in which this stopping probability is to be calculated.
        The function returns the stopping probability of node t as
        defined by Tutzauer (2007). The function is generalized to
        also work with weighted graphs. In order to work for looped
        graphs, the edge (t,t) must explicitly show up in self.edges!'''
        D_t = self._downstream_degree(t, path)
        if D_t == 0:
            sigma_k = 1
        else:
            edge = (t, t)
            sigma_k = self.edges.get(edge, 0) / D_t
        return sigma_k
    def _probability_path_ij(self, i, j):
        '''Calculate the probability of path i -> j. This is done
        following the general formulae on Tutzauer (2007).'''
        prob_ij = 0
        all_paths = self.findAllPaths(i, j)
        for path in all_paths:
            # Product of transfer probabilities along the path, times the
            # stopping probability at the destination.
            product = 1
            for node in path[:-1]:
                T_k = self._transfer_probability(node, path)
                product = product * T_k
            product = product * self._stopping_probability(j, path)
            prob_ij += product
        return prob_ij
    @property
    def nodes(self):
        '''Returns the set of nodes for this graph'''
        edges = list(self.edges.keys())
        nodes = [i[0] for i in edges] + [i[1] for i in edges]
        return set(nodes)
    @property
    def adjacencyList(self):
        '''Returns the adjacency list as (ingoing, outgoing) dicts of
        node -> set of neighbors.'''
        ingoing, outgoing = {k:[] for k in self.nodes}, {k:[] for k in self.nodes}
        for edge in self.edges.keys():
            i, j = edge[0], edge[1]
            outgoing[i] = outgoing.get(i, []) + [j]
            ingoing[j] = ingoing.get(j, []) + [i]
        ingoing = {k:set(v) for k,v in ingoing.items()}
        outgoing = {k:set(v) for k,v in outgoing.items()}
        return ingoing, outgoing
    @property
    def degree(self):
        '''Calculate the degree of each node. Returns (inDegree, outDegree).'''
        ingoing, outgoing = self.adjacencyList
        inDegree = {k:len(ingoing[k]) if k in ingoing else 0 for k in self.nodes}
        outDegree = {k:len(outgoing[k]) if k in outgoing else 0 for k in self.nodes}
        return inDegree, outDegree
    @property
    def strength(self):
        '''Calculate the strength (sum of incident edge weights) of each
        node. Returns (inStrength, outStrength).'''
        inStrength, outStrength = {k:0 for k in self.nodes}, {k:0 for k in self.nodes}
        for edge,weight in self.edges.items():
            i, j = edge[0], edge[1]
            inStrength[j] = inStrength[j] + weight
            outStrength[i] = outStrength[i] + weight
        return inStrength, outStrength
    @property
    def entropyCentrality(self):
        '''Calculate the entropy centrality of each node: the negated sum
        over destinations j of p_ij * log2(p_ij).'''
        C_H = {k:0 for k in self.nodes}
        for i in self.nodes:
            for j in self.nodes:
                p_ij = self._probability_path_ij(i, j)
                if p_ij != 0: C_H[i] = C_H[i] + p_ij * log(p_ij, 2)
            C_H[i] = - C_H[i]
        return C_H
56,581 | zainsci/redbot | refs/heads/master | /popup.py | import tkinter as tk
from tkinter import ttk
import webbrowser
from time import sleep
def open_reddit(permalink, window, interval):
    """Open the post in the default browser, close the popup and, when an
    interval is configured, block for that many seconds (simple rate limit
    between notifications)."""
    webbrowser.open(f"https://www.reddit.com{permalink}")
    window.destroy()
    # Idiom fix: compare to None with `is not None` instead of `!= None`.
    if interval is not None:
        print(f"If Commenting Wait {interval / 60} Min")
        sleep(int(interval))
def next_post(window):
    """Dismiss the popup without opening the post."""
    window.destroy()
def notification(sub, title, score, comments, time, permalink, interval):
    """Show a blocking Tk popup describing one Reddit post.

    'Open' launches the post in a browser (and, via open_reddit, may sleep
    `interval` seconds); 'Next' just dismisses the window. Blocks in
    mainloop() until the window is destroyed.
    """
    win = tk.Tk()
    win.title("Reddit Notification")
    # Outer frame labeled with the subreddit name.
    group = tk.LabelFrame(win, text=sub, padx=10,
                          pady=10)
    group.pack(padx=10, pady=10)
    tk.Label(
        group, text=title, font=(None, 12), wraplength=300, anchor="n").grid(row=0, column=1, pady=12, sticky='n')
    # Left column: score / comments / time stat boxes.
    inner_group = tk.Frame(group, padx=10, pady=10)
    inner_group.grid(row=0, column=0)
    lbl_1 = tk.LabelFrame(inner_group, text="Score", padx=5)
    tk.Label(lbl_1, text=score).pack(pady=5)
    lbl_2 = tk.LabelFrame(inner_group, text="Comments", padx=5)
    tk.Label(lbl_2, text=comments).pack(pady=5)
    lbl_3 = tk.LabelFrame(inner_group, text="Time", padx=5)
    tk.Label(lbl_3, text=time).pack(pady=5)
    lbl = [lbl_1, lbl_2, lbl_3]
    for i in lbl:
        i.grid(sticky="nswe")
        i.rowconfigure(0, weight=1)
        i.columnconfigure(0, weight=1)
    # Bottom row: Open / Next buttons.
    btn_group = tk.Frame(win, padx=10, pady=5)
    btn_group.pack(padx=10, pady=10)
    ttk.Button(btn_group, text="Open", command=lambda: open_reddit(permalink, win, interval)).grid(
        row=0, column=0, padx=10, pady=5)
    ttk.Button(btn_group, text="Next", command=lambda: next_post(win)).grid(
        row=0, column=1, padx=10, pady=5)
    win.mainloop()
if __name__ == "__main__":
notification("Tkinter", "Tkinter Tkinter Tkinter Tkinter Tkinter Tkinter Tkinter Tkinter Tkinter Tkinter Tkinter",
"20K", "2.1K", "15:15 PM", "Link")
| {"/bot.py": ["/popup.py"]} |
56,582 | zainsci/redbot | refs/heads/master | /bot.py | from popup import notification
import requests
import argparse
from datetime import datetime
from time import sleep
headers = {
"user-agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.135 Safari/537.36"
}
QUESTIONS = ["What is", "What are", "What will"]
def make_request(args):
    """Fetch one subreddit listing from Reddit's public JSON endpoint.

    Uses args.sub (default "AskReddit"), args.limit (default 50) and
    args.time (sort bucket: hot/new/top/rising) from the parsed CLI
    options. Returns the list of post records, or None on a non-200
    response.
    """
    # BUG FIX: the defaults used to be chained with `elif` (so the limit
    # default was skipped whenever sub was unset) and the limit default
    # was assigned to a misspelled attribute (`args.imit`).
    if not args.sub:
        args.sub = "AskReddit"
    if not args.limit:
        args.limit = 50

    if args.time:
        req = requests.get(
            f"https://www.reddit.com/r/{args.sub}/{args.time}/.json",
            headers=headers,
            params={"limit": args.limit},
        )
    else:
        req = requests.get(
            f"https://www.reddit.com/r/{args.sub}/.json", headers=headers
        )

    if req.status_code == 200:
        return req.json()["data"]["children"]
    # Explicit None so callers can treat HTTP failure as "no posts".
    return None
def k_or_m(value):
    """Format a count with a K/M suffix (truncating division), or return
    the value unchanged when it is 1000 or less."""
    n = int(value)
    if n > 1000000:
        # Integer division keeps the Python 2 truncating behavior.
        return str(n // 1000000) + "M"
    if n > 1000:
        # BUG FIX: the original computed the scaled value into a throwaway
        # variable (`score`) and returned the UNscaled input with a "K"
        # suffix, e.g. "2500K" instead of "2K".
        return str(n // 1000) + "K"
    return value
def convert_time(time):
    """Render a POSIX timestamp as a local-time "HH:MM" string."""
    return datetime.fromtimestamp(time).strftime("%H:%M")
def _post_fields(post):
    """Extract (sub, title, score, comments, time, permalink) for one
    listing entry. (helper)"""
    data = post["data"]
    return (
        data["subreddit"],
        data["title"],
        k_or_m(data["score"]),
        k_or_m(data["num_comments"]),
        convert_time(data["created_utc"]),
        data["permalink"],
    )


def parse_req(response, keyword, flare, interval):
    """Filter the fetched posts and pop up a notification for each match.

    Priority of filters: exact flair match, then whole-word keyword match
    in the title, otherwise any title containing one of the QUESTIONS
    prefixes. Returns None.
    """
    # Robustness: make_request returns None on HTTP failure; treat that
    # like an empty listing instead of crashing on len(None).
    if not response:
        print("No response")
        return
    if flare:
        for post in response:
            if flare == post["data"]["link_flair_text"]:
                sub, title, score, comments, time, link = _post_fields(post)
                notification(sub, title, score, comments, time, link, interval)
    elif keyword:
        for post in response:
            sub, title, score, comments, time, link = _post_fields(post)
            if keyword in title.split():
                notification(sub, title, score, comments, time, link, interval)
    else:
        for post in response:
            sub, title, score, comments, time, link = _post_fields(post)
            for q in QUESTIONS:
                if q in title:
                    notification(sub, title, score, comments, time, link, interval)
def main():
    """Parse CLI options, fetch one listing and show notifications."""
    parser = argparse.ArgumentParser(description="RedditBot for subreddits")
    parser.add_argument("--sub", type=str, default=None, help="SubReddit to search for")
    parser.add_argument(
        "--limit",
        type=int,
        default=None,
        help="No of posts to search through at a time",
    )
    parser.add_argument(
        "--time", type=str, default=None, help="Hot, New, Top or Rising posts"
    )
    parser.add_argument(
        "--keyword", type=str, default=None, help="Any specific keyword to search for"
    )
    # NOTE(review): "Falre" typo in the help text below (runtime string,
    # left unchanged here).
    parser.add_argument("--flare", type=str, default=None, help="Falre to lookup for")
    parser.add_argument(
        "--interval",
        type=int,
        default=None,
        help="Set timer between each post that appears",
    )
    args = parser.parse_args()
    response = make_request(args)
    parse_req(response, args.keyword, args.flare, args.interval)
if __name__ == "__main__":
while True:
main()
sleep(600)
| {"/bot.py": ["/popup.py"]} |
56,629 | spawn3/python-util | refs/heads/master | /lich/lich/runner.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import functools
import subprocess
import time
def timethis(func):
    """Decorator that reports the wall-clock duration, call arguments and
    return value of every invocation on stdout, then passes the return
    value through unchanged."""
    @functools.wraps(func)
    def timed(*args, **kwargs):
        started = time.time()
        result = func(*args, **kwargs)
        elapsed = time.time() - started
        print('[{}]: {}.{} {} {}'.format(elapsed, func.__module__, func.__name__, args, kwargs))
        print('\t{}'.format(result))
        # Kept verbatim: prints a blank line on Python 2, no-op on Python 3.
        print
        return result
    return timed
@timethis
def _exec(cmd):
    """Run a command (string, split on single spaces) and return
    (returncode, list_of_output_lines).

    Return code is 0 on success, the process's exit code on
    CalledProcessError, and -1 when the executable cannot be started.
    """
    cmd = cmd.split(' ')
    try:
        out = subprocess.check_output(cmd)
        res = 0, out.splitlines()
    except subprocess.CalledProcessError as e:
        # NOTE(review): CalledProcessError has no `.message` on Python 3
        # (and on Python 2 it is not the captured output either) --
        # `e.output` is likely what was meant; confirm.
        res = e.returncode, e.message.splitlines()
    except OSError as e:
        res = -1, str(e).splitlines()
    except Exception as e:
        raise e
    return res
def local_runner(exc_handler=None):
    """Decorator factory: the wrapped method must return a shell command
    string, which is then executed locally via _exec(). Exceptions are
    passed to exc_handler(e, *args, **kw) when provided, otherwise
    re-raised. (Python 2 syntax: "except Exception, e".)
    """
    def decorator(func):
        @functools.wraps(func)
        def wrapper(self, *args, **kw):
            res = None
            try:
                cmd = func(self, *args, **kw)
                res = _exec(cmd)
            except Exception, e:
                # TODO
                # traceback.print_exc()
                if exc_handler:
                    exc_handler(e, *args, **kw)
                else:
                    raise
            return res
        return wrapper
    return decorator
| {"/setup.py": ["/ask/__init__.py"]} |
56,630 | spawn3/python-util | refs/heads/master | /ask/algo/ds.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import collections
import itertools
import functools
# collections.defaultdict
# collections.namedtuple
| {"/setup.py": ["/ask/__init__.py"]} |
56,631 | spawn3/python-util | refs/heads/master | /ask/nas/r.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import datetime
import time
from redis import Redis
import json
import traceback
from optparse import OptionParser
from repoze.lru import lru_cache
ROOT_NODEID = '1'
ROOT_PATH = '/'
META = '@'
DEBUG = True
def print2(*args):
    """Print args space-separated, but only when the module DEBUG flag is on."""
    if DEBUG:
        print ' '.join([str(x) for x in args])
class Meta(object):
    """Placeholder for per-node metadata operations -- not implemented."""
    def chown(self):
        pass
    def chmod(self):
        pass
    def repnum(self):
        pass
class Fs(object):
    """Toy filesystem whose namespace lives in Redis hashes.

    Layout: every node id is a Redis hash. The '@' (META) field holds the
    node's metadata ({'n': name, 't': 'd'|'f'}); every other field maps a
    child name to that child's node id. The root hash has fixed id '1'.
    """
    def __init__(self):
        self.r = Redis()
        self.counter = 0
        self.version = int(time.time())
        #
        # NOTE(review): the root meta is stored as a raw dict repr here,
        # while _newnode() json.dumps()es metadata -- _meta() on the root
        # would fail to json.loads it; confirm.
        meta = {'t': 'd', 'n': ROOT_PATH}
        self.r.hset(ROOT_NODEID, META, meta)
    def _newid(self):
        """Mint a new node id: '<counter>_<startup-timestamp>'."""
        self.counter += 1
        return '%d_%s' % (self.counter, self.version)
    def _split_path(self, path):
        """Split a path into components, with '/' as the first element."""
        l = path.split('/')
        if path[0] == ROOT_PATH:
            l[0] = '/'
        else:
            l.insert(0, '/')
        return l
    def _meta(self, node_id):
        """Return the node's decoded metadata dict, or a falsy value."""
        meta = self.r.hget(node_id, META)
        if meta:
            return json.loads(meta)
        else:
            return meta
    def _get_nodeid(self, parent_id=None, name='/'):
        """Look up a child's node id under parent_id; root is special-cased."""
        if parent_id is None or name == '/':
            return ROOT_NODEID
        node = self.r.hget(parent_id, name)
        return node
    def _exists(self, parent_id, name):
        node = self._get_nodeid(parent_id, name)
        if node:
            return True
        return False
    # @lru_cache(maxsize=10000, timeout=600)
    def _path2pair(self, path):
        """ TODO
        - rename
        - hardlink/symlink
        Resolve a path component by component.
        :param path:
        :return: (parent_id, node_id); node_id is falsy when the final
                 component does not exist.
        """
        res = None
        l = self._split_path(path)
        parent_id = None
        for i, x in enumerate(l):
            # first is /
            node_id = self._get_nodeid(parent_id, x)
            print2('-- find %s/%s %s' % (parent_id, x, node_id))
            res = (parent_id, node_id)
            if node_id:
                parent_id = node_id
            else:
                break
        return res
    def _newnode(self, parent_id, node_id, name, meta):
        """Persist a new node: its meta hash plus the parent's child link."""
        # hmset
        print2('-- hset', node_id, META, meta)
        self.r.hset(node_id, META, json.dumps(meta))
        print2('-- hset: %s/%s %s' % (parent_id, name, node_id))
        self.r.hset(parent_id, name, node_id)
    def _delnode(self, parent_id, node_id, name, meta):
        pass
    def _scan(self, node_id, depth=1):
        """Recursively pretty-print the subtree rooted at node_id."""
        if depth == 1:
            print2(node_id, '@', self.r.hget(node_id, META))
        count = 0
        for k, v in self.r.hscan_iter(node_id):
            if k != META:
                print2('\t' * depth, k, v)
                self._scan(v, depth=depth+1)
                count += 1
    def scan(self):
        """Dump the tree starting at the root."""
        parent_id = ROOT_NODEID
        self._scan(parent_id)
    def scan2(self):
        """Dump the subtree of every key in the database."""
        for k in self.r.scan_iter():
            self._scan(k)
    def mkdir(self, path):
        """Create a directory node; raises if the path already exists."""
        print2('mkdir', path)
        parent_id, node_id = self._path2pair(path)
        if node_id:
            raise
        #
        l = self._split_path(path)
        node_id = self._newid()
        name = l[-1]
        meta = {'n': name, 't': 'd'}
        self._newnode(parent_id, node_id, name, meta)
    def _delete(self, path, is_dir=False):
        """Delete a file or (empty) directory node; raises on mismatch,
        missing path, or non-empty directory."""
        parent_id, node_id = self._path2pair(path)
        if not node_id:
            raise
        meta = self._meta(node_id)
        typ = 'd' if is_dir else 'f'
        if meta['t'] != typ:
            raise
        # empty
        if not self.is_empty(node_id):
            raise
        print2('-- hdel', parent_id, meta['n'])
        self.r.hdel(parent_id, meta['n'])
        print2('-- delete', node_id)
        self.r.delete(node_id)
    def rmdir(self, path):
        print2('rmdir', path)
        return self._delete(path, is_dir=True)
    def nchildren(self, node_id):
        # Every node hash has the META field plus one field per child.
        return self.r.hlen(node_id) - 1
    def is_empty(self, node_id):
        return self.nchildren(node_id) == 0
    def ls(self, path):
        """Print the raw child entries (and META field) of a path."""
        parent_id, node_id = self._path2pair(path)
        if not node_id:
            raise
        # TODO
        for k, v in self.r.hscan_iter(node_id):
            print k, v
    def touch(self, path):
        """Create an empty file node; parent must be an existing directory."""
        print2('touch', path)
        parent_id, node_id = self._path2pair(path)
        if node_id:
            raise
        meta = self._meta(parent_id)
        if meta['t'] != 'd':
            raise
        #
        l = self._split_path(path)
        node_id = self._newid()
        name = l[-1]
        meta = {'n': name, 't': 'f'}
        self._newnode(parent_id, node_id, name, meta)
    def unlink(self, path):
        print2('unlink', path)
        return self._delete(path, is_dir=False)
    def write(self, buf, n):
        pass
    def read(self, n):
        pass
TOTAL = 10
def test_xxx():
    """Benchmark: write TOTAL 100-byte string values into Redis, with a
    progress line every 100k keys."""
    r = Redis()
    for i in range(TOTAL):
        r.set('%d' % i, 'abcd'*25)
        if i % 100000 == 0:
            print datetime.datetime.now(), '%d/%d' % (i, TOTAL)
def test_mkdir():
    """Exercise the node lifecycle: mkdir, ls, touch, unlink, rmdir."""
    fs = Fs()
    try:
        fs.mkdir('a')
        fs.ls('a')
        fs.touch('a/0')
        fs.unlink('a/0')
        fs.rmdir('a')
    except:
        # traceback.print_exc()
        pass
    return
    # NOTE(review): everything below is unreachable -- the early return
    # above always fires. Confirm whether the bulk-touch loop is wanted.
    for i in range(10):
        try:
            fs.touch('a/%s' % i)
        except:
            # traceback.print_exc()
            pass
        finally:
            print2()
        if i % 100000 == 0:
            print datetime.datetime.now(), '%d/%d' % (i, TOTAL)
def test_scan():
    """Dump every key's subtree via Fs.scan2()."""
    fs = Fs()
    fs.scan2()
if __name__ == '__main__':
    # CLI entry: select which exercise to run via boolean flags.
    parser = OptionParser()
    # parser.add_option("-f", "--file", dest="filename", help="write report to FILE", metavar="FILE")
    # parser.add_option("-q", "--quiet", action="store_false", dest="verbose", default=True, help="don't print status messages to stdout")
    parser.add_option("-d", "--dir", action="store_true", dest="dir", default=False, help="test dir")
    parser.add_option("-s", "--scan", action="store_true", dest="scan", default=False, help="test scan")
    parser.add_option("-f", "--find", action="store_true", dest="find", default=False, help="test find")
    (options, args) = parser.parse_args()
    print options, args
    if options.dir:
        test_mkdir()
    if options.scan:
        test_scan()
    if options.find:
        fs = Fs()
        fs._path2pair('/a')
| {"/setup.py": ["/ask/__init__.py"]} |
56,632 | spawn3/python-util | refs/heads/master | /spider/gdc/updatebili.py | #!/usr/bin/python
# coding=utf-8
import pymongo
from webcrawl.handleRequest import requGet
if __name__ == '__main__':
    # Scan videos sourced from Bilibili and report the ids of entries
    # whose URL is not a direct .mp4 link.
    mc = pymongo.MongoClient('localhost')
    ddj = mc['dandan-jiang']
    for one in ddj.video.find({'src':'哔哩哔哩'}):
        if not 'mp4' in one['url']:
            print one['_id']
56,633 | spawn3/python-util | refs/heads/master | /spider/gdc/setting.py | #!/usr/bin/env python
# coding=utf-8
# Spider service credentials and endpoint.
USER = 'root'
# Shared secret -- an MD5 hex digest (presumably of the password; verify).
SECRET = '900150983cd24fb0d6963f7d28e17f72'
HOST = 'http://127.0.0.1:7001/'
| {"/setup.py": ["/ask/__init__.py"]} |
56,634 | spawn3/python-util | refs/heads/master | /ask/func/gen.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import math
import types
import itertools
from contextlib import contextmanager
def fibonacci():
    """Infinite generator of Fibonacci numbers: 1, 1, 2, 3, 5, ..."""
    prev, cur = 0, 1
    while True:
        yield cur
        prev, cur = cur, prev + cur
def f(*args, **kwargs):
    """Demo: show how positional and keyword arguments are captured (Py2)."""
    print args
    print kwargs
def gensquares(N):
    """Yield the squares 0**2, 1**2, ..., (N-1)**2 lazily."""
    for value in range(N):
        yield value * value
def countdown(n):
    """Generator counting from n down to 1; prints a banner when started.
    NOTE(review): "Couting" typo in the banner (runtime string, unchanged)."""
    print 'Couting down!'
    while n > 0:
        yield n
        n -= 1
def coroutine(func):
    """Decorator that advances a generator to its first yield so it is
    immediately ready to receive .send() values (Python 2: g.next())."""
    def start(*args, **kwargs):
        g = func(*args, **kwargs)
        g.next()
        return g
    return start
@coroutine
def receiver():
    """Coroutine that prints every value sent into it."""
    print 'Ready to receive'
    while True:
        n = (yield)
        print 'Got %s' % n
@coroutine
def line_splitter(delimiter=None):
    """Coroutine: for each line sent in, yield the line split on delimiter
    (whitespace by default). The first yield produces None."""
    print 'Ready to split'
    result = None
    while True:
        line = (yield result)
        result = line.split(delimiter)
def is_prime(number):
    """Return True when number is prime (trial division by odd factors
    up to the square root)."""
    if number < 2:
        return False
    if number == 2:
        return True
    if number % 2 == 0:
        return False
    return all(number % divisor
               for divisor in range(3, int(math.sqrt(number) + 1), 2))
def get_primes(input_list):
    """Lazily yield the primes contained in input_list (generator expr)."""
    return (x for x in input_list if is_prime(x))
def test():
    """Demo of two-way generator communication: .send() resumes the yield
    and its value lands in `res`."""
    print 'step 1...'
    res = yield 10
    print 'step 2...', res
def test1():
    """Generator yielding 0, 1, 2; prints whatever is sent back in (None
    when driven with plain next())."""
    for i in range(3):
        m = yield i
        print m
if __name__ == '__main__':
    gen = test1()
    # Python 2 style .next(); the fourth call exhausts the generator and
    # raises StopIteration (presumably intentional, to demonstrate it).
    print gen.next()
    print gen.next()
    print gen.next()
    print gen.next()
| {"/setup.py": ["/ask/__init__.py"]} |
56,635 | spawn3/python-util | refs/heads/master | /lich/utils.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
def GB(n):
    """Convert gibibytes to bytes: n * 1024**3."""
    return n * 1024 ** 3
def timeit():
    """Placeholder for a timing helper -- not implemented yet."""
    pass
def list_is_equal(l1, l2):
    """Return True when both lists have the same length and every element
    of l1 appears somewhere in l2 (membership check -- multiplicity is
    deliberately not compared)."""
    if len(l1) != len(l2):
        return False
    return all(item in l2 for item in l1)
| {"/setup.py": ["/ask/__init__.py"]} |
56,636 | spawn3/python-util | refs/heads/master | /ask/right/__init__.py | # from .. import util
from b import (BA)
from exceptions import (AError, BError)
__all__ = ['BA', 'AError', 'BError']
print '...import package %s' % __name__
| {"/setup.py": ["/ask/__init__.py"]} |
56,637 | spawn3/python-util | refs/heads/master | /spider/gdc/task/role/spiderRole.py | #!/usr/bin/env python
# coding=utf-8
import os, re, copy, json, time, commands
from pymongo import MongoClient
from datetime import timedelta
from datetime import datetime
from webcrawl.handleRequest import requGet
from webcrawl.handleRequest import requPost
from webcrawl.handleRequest import getHtmlNodeContent
from webcrawl.handleRequest import getXmlNodeContent
from webcrawl.work import retry
from webcrawl.work import index
from webcrawl.work import initflow
from webcrawl.handleRequest import getJsonNodeContent
from webcrawl.work import store
from webcrawl.work import timelimit
from webcrawl.work import next
from webcrawl.handleRequest import ensureurl
from webcrawl.handleRequest import parturl
from rolespider import Data
from rolespider import TIMEOUT
from rolespider import withData, RDB, WDB
from rolespider import SpiderRoleOrigin
from webcrawl.character import unicode2utf8
#_print, logger = logprint(modulename(__file__), modulepath(__file__))
role_re = re.compile('<[^>]*>')
class SpiderRole(SpiderRoleOrigin):
    """
    Data crawler for Bangumi/Bilibili character pages: reads dumped
    character records from a local text file and writes normalized role
    documents to the data store.
    """
    def __init__(self, worknum=6, queuetype='P', worktype='COROUTINE', timeout=-1, tid=0):
        super(SpiderRole, self).__init__(worknum=worknum, queuetype=queuetype, worktype=worktype, timeout=timeout, tid=tid)
        self.clsname = self.__class__.__name__
        self.headers = {}
        # Default crawl window: the last 7 days.
        self.end = datetime.now()
        self.begin = self.end - timedelta(days=7)
    @store(withData(WDB), Data.insert, update=True, method='MANY')
    @timelimit(3)
    def writedata(self, url, additions={}, timeout=TIMEOUT, implementor=None):
        """Normalize one scraped character record (passed via
        additions['data']) into a Data document and yield it for storage.
        NOTE(review): mutable default `additions={}` -- confirm the
        framework never mutates it."""
        data = additions['data']
        name_cn = data.get('name_ch')
        info = data.get('detail', [])
        name = data.get('name_jp')
        avatar = data.get('cover', '')
        # Protocol-relative bangumi CDN URLs are pinned to http.
        avatar = 'http:%s' % avatar if avatar.startswith('//lain.bgm.tv') else avatar
        nick = []
        gender = ''
        character = ''
        birth = None
        # Strip HTML tags from the description blob.
        desc = re.sub(role_re, '', data.get('dec', ''))
        job = ''
        relative = []
        refkey = 'bangumi.tv-%s' % str(url)
        actor = []
        tag = []
        # Each detail entry is "label:value"; known labels (birthday,
        # alias, gender, Chinese name) are mapped to fields, the rest
        # are collected as tags.
        for one in info:
            one = one.split(':')
            flag = True
            if '生日' in one[0]:
                birth = one[-1].strip()
                flag = False
            if '别名' in one[0]:
                nick.append(one[-1].strip())
                name = name or one[-1].strip()
                flag = False
            if '性别' in one[0]:
                gender = 'm' if '男' in one[-1] else '女'
                flag = False
            if '中文' in one[0]:
                name_cn = name_cn or one[-1].strip()
                name = name or one[-1].strip()
                flag = False
            if flag:
                tag.append(one[-1].strip())
        print ','.join(tag)
        # The Chinese name leads the deduplicated nickname list.
        if name_cn:
            nick = [name_cn] + nick
        nick = list(set(nick))
        nick = ','.join(nick)
        uid = None
        atime = datetime.now()
        page_data = Data(name=name, avatar=avatar, nick=nick, gender=gender, character=character, birth=birth, desc=desc, job=job, relative=relative,
            refkey=refkey,
            actor=actor,
            uid=uid, atime=atime
        )
        yield page_data
    @next(writedata)
    @timelimit(20)
    @initflow('www')
    def readtxt(self, additions={}, timeout=TIMEOUT, implementor=None):
        """Read one python-literal record per line from the local dump and
        feed each to writedata. Unparseable lines are skipped.
        NOTE(review): hard-coded absolute path -- confirm intended."""
        with open('/Users/uni/Downloads/character.txt', 'r') as f:
            for line in f:
                try:
                    data = eval(line)
                    data = unicode2utf8(data)
                except:
                    continue
                yield {'url':data['id'], 'additions': {'data':data}}
if __name__ == '__main__':
    # Entry point: run the file-backed character import end to end (Py2 prints).
    print 'start'
    spider = SpiderRole(worknum=6, queuetype='P', worktype='COROUTINE')
    spider.fetchDatas('www', 0)
    spider.statistic()
    print 'end'
| {"/setup.py": ["/ask/__init__.py"]} |
56,638 | spawn3/python-util | refs/heads/master | /lich/base.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from lich.umptypes import UmpPath
from lich.pool import LichPool
from lich.volume import LichVolume
from lich.snapshot import LichSnapshot
# Success return code shared by all lich CLI wrappers exercised below.
RET_OK = 0
class TestBase(unittest.TestCase):
    """Shared fixture/helper base for lich pool, volume and snapshot tests.

    Wraps the manager objects' CRUD calls with assertions on the expected
    return code so concrete test classes stay terse.
    """
    def setUp(self):
        # Fresh manager facades per test; construction has no remote effect.
        self.lich_pool = LichPool()
        self.lich_volume = LichVolume()
        self.lich_snapshot = LichSnapshot()
    def tearDown(self):
        pass
    def find(self, manager, name):
        """Return the first listed record whose name matches, else None.

        NOTE(review): asserts resp.status_code == RET_OK (0) -- confirm the
        response really reports 0 on success rather than e.g. HTTP 200.
        """
        ret, resp = manager.list(skip=0, limit=100)
        self.assertEqual(resp.status_code, RET_OK)
        if resp.records:
            for item in resp.records:
                if name == item['name']:
                    return item
        return None
    def find_and_delete(self, manager, name):
        # Best-effort cleanup: delete the named record if it exists.
        item = self.find(manager, name)
        if item:
            self._delete(manager, item['id'])
    def _test_list(self, manager, find_id=None, fields=[]):
        """List up to 100 records, check each carries `fields`, and (when
        find_id is given) assert that id appears and stat() it."""
        ret, resp = manager.list(skip=0, limit=100)
        self.assertEqual(ret, RET_OK)
        found = False
        if resp.records:
            for item in resp.records:
                try:
                    for field in fields:
                        self.assertIn(field, item)
                    _id = item['id']
                    if find_id:
                        if _id == find_id:
                            found = True
                            manager.stat(_id)
                except ValueError, e:
                    print e
        if find_id:
            self.assertEqual(found, True)
    # utilities
    def _delete(self, manager, path, status_code=RET_OK):
        # Delete only when present so repeated cleanup calls stay green.
        exists = self.exists(manager, path)
        if exists:
            ret, resp = manager.delete(path)
            self.assertEqual(ret, status_code)
            return ret, resp
    def stat(self, manager, path, status_code=RET_OK):
        ret, resp = manager.stat(path)
        self.assertEqual(ret, status_code)
        return ret, resp
    def exists(self, manager, path):
        return manager.exists(path)
    # POOL CRUD
    def create_pool(self, path, pool_quota=None, status_code=RET_OK):
        # NOTE(review): pool_quota is accepted but never forwarded to create().
        ret, resp = self.lich_pool.create(path)
        self.assertEqual(ret, status_code)
        return ret, resp
    def del_pool(self, path, status_code=RET_OK):
        return self._delete(self.lich_pool, path, status_code=status_code)
    def stat_pool(self, path, status_code=RET_OK):
        return self.stat(self.lich_pool, path, status_code)
    def list_pools(self):
        # NOTE(review): hard-coded sample path 'a/b@c' -- verify list()
        # really requires a path argument here.
        path = UmpPath('a/b@c')
        rc, pools = self.lich_pool.list(path)
        return pools
    # VOLUME CRUD
    def create_volume(self, path, size, status_code=RET_OK):
        ret, resp = self.lich_volume.create(path, size)
        self.assertEqual(ret, status_code)
        return ret, resp
    def del_volume(self, path, status_code=RET_OK):
        return self._delete(self.lich_volume, path, status_code=status_code)
    def stat_volume(self, path, status_code=RET_OK):
        return self.stat(self.lich_volume, path, status_code)
    # SNAPSHOT CRUD
    def create_snapshot(self, path, status_code=RET_OK):
        ret, resp = self.lich_snapshot.create(path)
        self.assertEqual(ret, status_code)
        return ret, resp
    def del_snapshot(self, path, status_code=RET_OK):
        return self._delete(self.lich_snapshot, path, status_code)
    def stat_snapshot(self, path, status_code=RET_OK):
        return self.stat(self.lich_snapshot, path, status_code)
| {"/setup.py": ["/ask/__init__.py"]} |
56,639 | spawn3/python-util | refs/heads/master | /spider/gdc/task/comic/spiderHentai.py | #!/usr/bin/env python
# coding=utf-8
import os, re, copy, json, time, random
from pymongo import MongoClient
from datetime import timedelta
from datetime import datetime
from webcrawl.request import requGet
from webcrawl.request import requPost
from webcrawl.request import getHtmlNodeContent
from webcrawl.request import getXmlNodeContent
from webcrawl.task import retry
from webcrawl.task import index
from webcrawl.task import initflow
from webcrawl.request import getJsonNodeContent
from webcrawl.task import store
from webcrawl.task import timelimit
from webcrawl.task import next
from webcrawl.request import ensureurl
from webcrawl.request import parturl
from model.setting import withData, datacfg
from comicspider import Data
from comicspider import TIMEOUT
from comicspider import SpiderComicOrigin
from bson import ObjectId
from webcrawl.request import FILE
import lxml.etree as ET
# Local root directory the downloaded page images are written under.
FILE.dir = '/home/yada/img/'
#_print, logger = logprint(modulename(__file__), modulepath(__file__))
# Matches the inline initIntroData([...]) payload embedded in dmzj pages.
dmzj_re = re.compile('initIntroData\(\[.*\]\);')
# Desktop browser User-Agent strings rotated per request to avoid throttling.
UA = [
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/47.0.2526.80 Safari/537.36",
    "Mozilla/5.0 (Macintosh; U; Intel Mac OS X 10_6_8; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50 safari 5.1 – Windows",
    "Mozilla/5.0 (Windows; U; Windows NT 6.1; en-us) AppleWebKit/534.50 (KHTML, like Gecko) Version/5.1 Safari/534.50 IE 9.0",
    "Mozilla/5.0 (compatible; MSIE 9.0; Windows NT 6.1; Trident/5.0; IE 8.0",
    "Mozilla/4.0 (compatible; MSIE 8.0; Windows NT 6.0; Trident/4.0) IE 7.0",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 6.0) IE 6.0",
    "Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1) Firefox 4.0.1 – MAC",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10.6; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Firefox 4.0.1 – Windows",
    "Mozilla/5.0 (Windows NT 6.1; rv:2.0.1) Gecko/20100101 Firefox/4.0.1 Opera 11.11 – MAC",
    "Opera/9.80 (Macintosh; Intel Mac OS X 10.6.8; U; en) Presto/2.8.131 Version/11.11 Opera 11.11 – Windows",
    "Opera/9.80 (Windows NT 6.1; U; en) Presto/2.8.131 Version/11.11 Chrome 17.0 – MAC",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_7_0) AppleWebKit/535.11 (KHTML, like Gecko) Chrome/17.0.963.56 Safari/535.11",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Maxthon 2.0)TT",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; TencentTraveler 4.0) The World 2.x",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1) The World 3.x",
    "?Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; The World)",
    "?Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Trident/4.0; SE 2.X MetaSr 1.0; SE 2.X MetaSr 1.0; .NET CLR 2.0.50727; SE 2.X MetaSr 1.0) 360SE",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; 360SE) Avant",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1; Avant Browser) Green Browser",
    "Mozilla/4.0 (compatible; MSIE 7.0; Windows NT 5.1)"
]
# Candidate politeness delays (seconds) picked at random between requests.
SPAN = [
    3, 4, 3, 4, 5, 6, 2, 7, 3, 4, 4, 5, 7, 2, 7, 7
]
class SpiderEhentai(SpiderComicOrigin):
    """
    Gallery spider for g.e-hentai.org: walks listing pages, follows every
    gallery, records per-page metadata and downloads each page image under
    FILE.dir.  (The original docstring was a copy-pasted "Bilibili
    official-site data crawler" note.)
    """
    def __init__(self, worknum=6, queuetype='P', worktype='COROUTINE', timeout=-1, tid=0):
        super(SpiderEhentai, self).__init__(worknum=worknum, queuetype=queuetype, worktype=worktype, timeout=timeout, tid=tid)
        self.clsname = self.__class__.__name__
        self.headers = {}
        # Default crawl window: the last seven days.
        self.end = datetime.now()
        self.begin = self.end - timedelta(days=7)
    @store(withData(datacfg.W), Data.insert, update=True, method='MANY')
    @timelimit(3)
    @index('url')
    def fetchDetail(self, url, additions={}, timeout=TIMEOUT, implementor=None):
        """Scrape one gallery page: first yields the next page URL of the same
        gallery (None on the last page), then one Data record per comic page,
        downloading its full-size image as a side effect."""
        # NOTE(review): 'Host' appears twice in this literal; the second wins.
        headers = {'Host': 'g.e-hentai.org',
                   'Accept-Language': 'en-US,en;q=0.8',
                   'Accept-Encoding': 'gzip, deflate, sdch',
                   'Cache-Control': 'max-age=0',
                   'Host': 'g.e-hentai.org',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                   'Upgrade-Insecure-Requests': '1'}
        # Rotate the UA and sleep a random interval to look less like a bot.
        headers['User-Agent'] = random.choice(UA)
        time.sleep(random.choice(SPAN))
        result = requGet(url, headers=dict(headers, **{'Referer':url}), timeout=timeout, format='HTML')
        comics = result.findall('.//div[@id="gdt"]//div[@class="gdtm"]//a')
        # A full gallery page lists 40 thumbnails; fewer means the last page.
        if len(comics) < 40:
            nextpage = None
        else:
            # Bump the trailing ?p= index to form the next page URL.
            index = url.split('=')
            index[-1] = int(index[-1]) + 1
            # if index[-1] >5:
            #     nextpage = None
            # else:
            #     index[-1] = str(index[-1])
            #     nextpage = '='.join(index)
            index[-1] = str(index[-1])
            nextpage = '='.join(index)
        yield nextpage
        info = result.findall('.//div[@id="gdd"]//table//tr')
        cat = ['doujin', 'hentai']
        tag = [getHtmlNodeContent(one, 'TEXT') for one in result.findall('.//div[@id="taglist"]//a')]
        name = getHtmlNodeContent(result.find('.//h1[@id="gn"]'), 'TEXT').strip()
        desc = getHtmlNodeContent(result.find('.//h1[@id="gj"]'), 'TEXT')
        cover = getHtmlNodeContent(result.find('.//div[@id="gd1"]//img'), {'ATTR':'src'})
        author = ''
        atime = None
        owner = {}
        owner['url'] = getHtmlNodeContent(result.find('.//div[@id="gdn"]//a'), 'TEXT')
        # Second request: uploader profile page, for the display name.
        headers['User-Agent'] = random.choice(UA)
        time.sleep(random.choice(SPAN))
        user_result = requGet(getHtmlNodeContent(result.findall('.//div[@id="gdn"]//a')[-1], {'ATTR':'href'}), headers=headers, timeout=timeout, format='HTML')
        owner['name'] = getHtmlNodeContent(user_result.find('.//div[@id="profilename"]//font'), 'TEXT')
        # owner['avatar'] = getHtmlNodeContent(user_result.findall('.//table[@class="ipbtable"]//tr//td//div')[1].find('.//img'), {'ATTR':'src'})
        owner['avatar'] = ''
        relate_page = {}
        snum = 0
        src = 'e-hentai'
        host = 'g.e-hentai.org'
        language = ''
        parody = ''
        format = 'image'
        # Metadata table rows: posted time, parent gallery, language.
        for one in info:
            if getHtmlNodeContent(one.find('.//td[@class="gdt1"]'), 'TEXT') == 'Posted:':
                atime = datetime.strptime(getHtmlNodeContent(one.find('.//td[@class="gdt2"]'), 'TEXT')+':00', '%Y-%m-%d %H:%M:%S')
            if getHtmlNodeContent(one.find('.//td[@class="gdt1"]'), 'TEXT') == 'Parent:':
                phref = getHtmlNodeContent(one.find('.//td[@class="gdt2"]//a'), {'ATTR':'href'})
                if phref and not phref == 'None:':
                    ppage = phref + '?p=0'
                    ppid = str(hash(ppage))
                    relate_page = {ppid:ppage}
            if getHtmlNodeContent(one.find('.//td[@class="gdt1"]'), 'TEXT') == 'Language:':
                language = getHtmlNodeContent(one.find('.//td[@class="gdt2"]'), 'TEXT').split(' ')[0].strip()
        # Tag table rows: parody and artist names.
        info = result.findall('.//div[@id="taglist"]//table//tr')
        for one in info:
            if getHtmlNodeContent(one.find('.//td[@class="tc"]'), 'TEXT') == 'parody:':
                parody = getHtmlNodeContent(one.find('.//div[@class="gt"]//a'), 'TEXT')
            if getHtmlNodeContent(one.find('.//td[@class="tc"]'), 'TEXT') == 'artist:':
                author = getHtmlNodeContent(one.find('.//div[@class="gt"]//a'), 'TEXT')
        parent_page_id = hash(url)
        # Gallery id is the first path element after /g/.
        parent_id = url.replace('http://g.e-hentai.org/g/', '').split('/')[0]
        if not os.path.exists(os.path.join(FILE.dir, '%s/' % src)):
            os.makedirs(os.path.join(FILE.dir, '%s/' % src))
        # One Data record (plus image download) per comic page thumbnail.
        for index, page in enumerate(comics):
            page_url = getHtmlNodeContent(page, {'ATTR':'href'})
            page_id = hash(page_url)
            time.sleep(random.choice(SPAN))
            headers['User-Agent'] = random.choice(UA)
            time.sleep(random.choice(SPAN))
            page_result = requGet(page_url, headers=dict(headers, **{'Referer':url}), timeout=timeout, format='HTML')
            srcs = page_result.findall('.//img')
            img_url = ''
            download = False
            snum = int(page_url.split('-')[-1])
            # Pick the first <img> that is not site chrome served by ehgt.org.
            for one in srcs:
                img_url = getHtmlNodeContent(one, {'ATTR':'src'})
                if 'http://ehgt.org/g' in img_url:
                    img_url = ''
                else:
                    download = True
                    break
            if img_url:
                headers['User-Agent'] = random.choice(UA)
                # time.sleep(random.choice(SPAN))
                try:
                    requGet(img_url, headers=dict(headers, **{'Referer':url}), timeout=timeout, format='TEXT', filepath='%s/%s-%s' % (src, parent_id, str(snum)))
                except:
                    # Keep the metadata but mark the image as not downloaded.
                    download = False
            page_data = Data(cat=cat, url=None, tag=tag, name=name,
                    desc=desc, cover=cover, author=author,
                    owner=owner, snum=snum,
                    src=src, host=host, language=language, parody=parody, relate_page=relate_page, page_url=page_url,
                    page_id=page_id, parent_page_id=parent_page_id,
                    atime=atime, tid=self.tid, download=download)
            yield page_data
    @next(fetchDetail)
    @timelimit(20)
    @index('url')
    @initflow('www')
    def fetchList(self, url, additions={}, timeout=TIMEOUT, implementor=None):
        """Walk a listing page: yields the next listing URL (None when fewer
        than 15 entries remain), then one job dict per gallery found."""
        headers = {'Host': 'g.e-hentai.org',
                   'Accept-Language': 'en-US,en;q=0.8',
                   'Accept-Encoding': 'gzip, deflate, sdch',
                   'Cache-Control': 'max-age=0',
                   'Host': 'g.e-hentai.org',
                   'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',
                   'Upgrade-Insecure-Requests': '1'}
        headers['User-Agent'] = random.choice(UA)
        headers['Referer'] = url
        time.sleep(random.choice(SPAN))
        result = requGet(url, headers=headers, timeout=timeout, format='HTML')
        comics = result.findall('.//div[@class="it5"]')
        if len(comics) < 15:
            nextpage = None
        else:
            # Bump the trailing /N path element to form the next listing URL.
            index = url.split('/')
            index[-1] = int(index[-1]) + 1
            # if index[-1] >5:
            #     nextpage = None
            # else:
            #     index[-1] = str(index[-1])
            #     nextpage = '/'.join(index)
            index[-1] = str(index[-1])
            nextpage = '/'.join(index)
        yield nextpage
        for one in comics:
            url = getHtmlNodeContent(one.find('.//a'), {'ATTR':'href'}) + '?p=0'
            yield {'url':url}
if __name__ == '__main__':
    # Entry point: crawl the non-h listing from page 0 (Python 2 prints).
    print 'start'
    spider = SpiderEhentai(worknum=6, queuetype='P', worktype='COROUTINE')
    spider.fetchDatas('www', 0, 'http://g.e-hentai.org/non-h/0')
    spider.statistic()
    print 'end'
| {"/setup.py": ["/ask/__init__.py"]} |
56,640 | spawn3/python-util | refs/heads/master | /lich/lich/base.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import json
import traceback
import sys
from pprint import pprint
class LichSetting(object):
    """Filesystem layout settings for a lich installation.

    The install prefix defaults to the stock location but can now be
    overridden (e.g. for test installs); the no-arg call used by existing
    callers behaves exactly as before.
    """
    def __init__(self, lich_home='/opt/fusionstack'):
        # Root directory the lich tree is installed under.
        self.lich_home = lich_home
class LichBase(object):
    """Base class exposing the absolute paths of the lich CLI tools.

    Every path is derived from LichSetting.lich_home: admin scripts live
    under lich/admin, worker binaries under lich/libexec.
    """
    def __init__(self):
        self.settings = LichSetting()
        self.lich_home = self.settings.lich_home
        admin_dir = self.lich_home + '/lich/admin'
        libexec_dir = self.lich_home + '/lich/libexec'
        # commands
        self.lich = admin_dir + '/cluster.py'
        self.lich_cluster = admin_dir + '/cluster.py'
        self.lich_node = admin_dir + '/node.py'
        self.lichfs = libexec_dir + '/lichfs'
        self.lichbd = libexec_dir + '/lichbd'
        self.lich_snapshot = libexec_dir + '/lich.snapshot'
        self.lich_inspect = libexec_dir + '/lich.inspect'
        self.lich_admin = libexec_dir + '/lich.admin'
        self.lich_license = libexec_dir + '/lich.license'
        self.lich_syncump = admin_dir + '/syncump.py'
if __name__ == '__main__':
    # Smoke check: constructing LichBase only composes paths, no side effects.
    lich = LichBase()
| {"/setup.py": ["/ask/__init__.py"]} |
56,641 | spawn3/python-util | refs/heads/master | /setup.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from ask import __version__
try:
from setuptools import setup, find_packages
except ImportError:
from distutils.core import setup
# Read the long description from the README shipped next to this file.
with open(os.path.join(os.path.dirname(__file__), 'README.md')) as f:
    long_description = f.read()

setup(
    name='ask',
    version=__version__,
    # Was "Python client for Redis key-value store" -- metadata copy-pasted
    # from redis-py's setup.py; corrected to describe this project.
    description='Python utility library',
    long_description=long_description,
    url='http://github.com/spawn3/python-util',
    # NOTE(review): author/maintainer fields still look inherited from
    # redis-py -- verify against the actual project owner.
    author='Andy McCurdy',
    author_email='sedrik@gmail.com',
    maintainer='Andy McCurdy',
    maintainer_email='toctls@163.com',
    keywords=['Python', 'Util'],
    license='MIT',
    packages=find_packages(),
    test_suite='tests.all_tests',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'Environment :: Console',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Programming Language :: Python :: 2.5',
        'Programming Language :: Python :: 2.6',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
    ]
)
| {"/setup.py": ["/ask/__init__.py"]} |
56,642 | spawn3/python-util | refs/heads/master | /spider/gdc/webcrawl/pjq.py | #!/usr/bin/python
# coding=utf-8
import json
import heapq
import redis
import beanstalkc
import threading
import queue
# Alias the stdlib queue module onto threading so LocalQueue() below can
# subclass threading.queue.Queue uniformly.
threading.queue = queue
import cPickle as pickle
from bson import ObjectId
from character import unicode2utf8
try:
    from kokolog.aboutfile import modulename, modulepath
    from kokolog.prettyprint import logprint
except:
    # kokolog is optional; degrade to no-op metadata helpers and a
    # print-based logger with the same call shape.
    def modulename(n):
        return None
    def modulepath(p):
        return None
    def logprint(n, p):
        def _wraper(*args, **kwargs):
            print(' '.join(args))
        return _wraper, None
_print, logger = logprint(modulename(__file__), modulepath(__file__))
# Job status codes <-> labels.
# NOTE(review): the 'READY':10 entry is inverted relative to the other pairs
# (label used as the key), so DESCRIBE.get(code) can never return 'READY';
# traversal() below also compares a list position against DESCRIBE['READY'].
# Confirm the intent before touching it.
DESCRIBE = {0:'ERROR', 1:'COMPLETED', 2:'WAIT', 'READY':10, 3:'RUNNING', 4:'RETRY', 5:'ABANDONED'}
class RedisQueue(object):
    """Priority work queue on Redis: one list per (tube, priority) pair, jobs
    pickled as dicts, per-job status kept in the 'pholcus-state' hash (codes
    in DESCRIBE).  Per-tube bookkeeping (unfinished counter, completion
    event, priority weights) is shared across instances through the
    class-level `conditions` map.
    """
    conditions = {}
    def __init__(self, host='localhost', port=6379, db=0, tube='default', timeout=30, items=None, unfinished_tasks=None, init=True, weight=[]):
        # BUG FIX: the `host` argument used to be ignored (hard-coded
        # 'localhost'), so a remote Redis server could never be addressed.
        self.rc = redis.StrictRedis(host=host, port=port, db=db)
        self.tube = tube
        self.unfinished_tasks = 0
        if self.tube in RedisQueue.conditions:
            pass
        else:
            RedisQueue.conditions[self.tube] = {'unfinished_tasks': unfinished_tasks or 0, 'event': threading.Event(), 'mutex':threading.Lock(), 'weight':weight}
            RedisQueue.conditions[self.tube]['event'].set()
        if init:
            self.clear()
        if items:
            for item in items:
                self.put(item)
    def sid(self):
        # Fresh unique job id (Mongo ObjectId rendered as a string).
        return str(ObjectId())
    def put(self, item):
        """Enqueue a 7-tuple job onto the list for its priority and record
        its initial state (WAIT on first attempt, RETRY otherwise)."""
        priority, methodId, methodName, times, args, kwargs, tid = item
        # self.rc.zadd(self.tube, pickle.dumps({'priority': priority, 'methodId': methodId,
        #                        'times': times, 'args': args, 'kwargs': kwargs}), priority)
        sid = self.sid()
        self.rc.lpush('-'.join([str(self.tube), str(priority)]), pickle.dumps({'priority': priority, 'methodId': methodId, 'methodName':methodName, 'times': times, 'args': args, 'kwargs': kwargs, 'tid':tid, 'sid':sid}))
        if times == 0:
            self.rc.hset('pholcus-state', sid, 2)
        else:
            self.rc.hset('pholcus-state', sid, 4)
        RedisQueue.conditions[self.tube]['unfinished_tasks'] += 1
        RedisQueue.conditions[self.tube]['event'].clear()
    def get(self, block=True, timeout=0):
        """Blocking pop across the per-priority lists in weight order; returns
        ((priority, methodId, methodName, times, args, kwargs, tid), sid) or
        None when the job was abandoned or the pop timed out."""
        # item = self.rc.zrangebyscore(self.tube, float('-inf'), float('+inf'), start=0, num=1)
        item = self.rc.brpop(['-'.join([str(self.tube), str(one)]) for one in RedisQueue.conditions[self.tube]['weight']], timeout=timeout)
        if item:
            item = item[-1]
            item = pickle.loads(item)
            # NOTE(review): hget returns a string, so this `== 5` integer
            # comparison looks permanently false and the ABANDONED drop path
            # dead -- confirm and fix separately.
            if self.rc.hget('pholcus-state', item['sid']) == 5:
                self.rc.hdel('pholcus-state', item['sid'])
                _print('', tid=item['tid'], sid=item['sid'], type='ABANDONED', status=2, sname='', priority=item['priority'], times=item['times'], args='(%s)' % ', '.join([str(one) for one in item['args']]), kwargs=json.dumps(item['kwargs'], ensure_ascii=False), txt=None)
                return None
            else:
                self.rc.hset('pholcus-state', item['sid'], 3)
                return (item['priority'], item['methodId'], item['methodName'], item['times'], tuple(item['args']), item['kwargs'], item['tid']), item['sid']
        else:
            return None
    def empty(self):
        # True when every weighted per-priority list is drained.
        total = sum([self.rc.llen(one) for one in ['-'.join([str(self.tube), str(one)]) for one in RedisQueue.conditions[self.tube]['weight']]])
        return total == 0
    def copy(self):
        pass
    def task_done(self, item, force=False):
        """Log completion, drop the job's state entry, and decrement the
        shared counter; sets the tube's event once everything is done."""
        if item is not None:
            tid, sname, priority, times, args, kwargs, sid = item
            _print('', tid=tid, sid=sid, type='COMPLETED', status=1, sname=sname, priority=priority, times=times, args='(%s)' % ', '.join([str(one) for one in args]), kwargs=json.dumps(kwargs, ensure_ascii=False), txt=None)
            self.rc.hdel('pholcus-state', sid)
        if RedisQueue.conditions[self.tube]['unfinished_tasks'] <= 0:
            # raise ValueError('task_done() called too many times')
            pass
        RedisQueue.conditions[self.tube]['unfinished_tasks'] -= 1
        if RedisQueue.conditions[self.tube]['unfinished_tasks'] == 0 or force:
        # if self.empty() or force:
            RedisQueue.conditions[self.tube]['event'].set()
    def join(self):
        # Block until every produced job has been task_done()'d.
        RedisQueue.conditions[self.tube]['event'].wait()
    def clear(self):
        # Drop every Redis key belonging to this tube.
        for one in self.rc.keys():
            if one.startswith(self.tube):
                self.rc.delete(one)
    def rank(self, weight):
        # Extend and re-sort the shared priority weights under the tube mutex.
        RedisQueue.conditions[self.tube]['mutex'].acquire()
        RedisQueue.conditions[self.tube]['weight'].extend(weight)
        RedisQueue.conditions[self.tube]['weight'].sort()
        RedisQueue.conditions[self.tube]['mutex'].release()
    def total(self):
        # Total queued jobs across all of this tube's lists.
        total = 0
        for one in self.rc.keys():
            if one.startswith(self.tube):
                total += self.rc.llen(one)
        return total
    def abandon(self, sid):
        # Mark a job ABANDONED so get() can drop it instead of running it.
        self.rc.hset('pholcus-state', sid, 5)
    def traversal(self, skip=0, limit=10):
        """Page through the tube's queued jobs (skip/limit across the sorted
        per-priority lists), annotating each with its status code/label."""
        tubes = [one for one in self.rc.keys() if one.startswith(self.tube)]
        tubes.sort()
        result = []
        start = skip
        end = skip + limit - 1
        flag = False
        for tube in tubes:
            for item in self.rc.lrange(tube, start, end):
                item = pickle.loads(item)
                item['status_num'] = self.rc.hget('pholcus-state', item['sid']) or 3
                # NOTE(review): comparing a page position against
                # DESCRIBE['READY'] (= 10) looks suspicious -- see the
                # DESCRIBE definition above.
                if len(result) + skip > DESCRIBE['READY']:
                    item['status_desc'] = DESCRIBE.get(int(item['status_num']))
                else:
                    item['status_desc'] = 'ready'
                result.append(item)
                if len(result) == limit:
                    flag = True
                    break
            else:
                start = 0
                end = limit - len(result) - 1
            if flag:
                break
        return result
    def __repr__(self):
        return "<" + str(self.__class__).replace(" ", "").replace("'", "").split('.')[-1]
    def collect(self):
        # Drop this tube's shared bookkeeping entry.
        if self.tube in RedisQueue.conditions:
            del RedisQueue.conditions[self.tube]
    def __del__(self):
        del self.rc
class BeanstalkdQueue(object):
    """Work queue backed by a beanstalkd tube: same pickled 7-tuple job
    contract as RedisQueue, but without per-job state tracking or priority
    weights -- beanstalkd's native priority orders delivery.

    Per-tube bookkeeping (unfinished counter + completion event) is shared
    across instances via the class-level `conditions` map.
    """
    conditions = {}
    def __init__(self, host='localhost', port=11300, tube='default', timeout=30, items=None, unfinished_tasks=None):
        self.bc = beanstalkc.Connection(host, port, connect_timeout=timeout)
        self.tube = tube
        # Produce into and consume from the same named tube.
        self.bc.use(self.tube)
        self.bc.watch(self.tube)
        if self.tube in BeanstalkdQueue.conditions:
            pass
        else:
            BeanstalkdQueue.conditions[self.tube] = {'unfinished_tasks': unfinished_tasks or 0, 'event': threading.Event()}
            self.clear()
        BeanstalkdQueue.conditions[self.tube]['event'].set()
        if items:
            for item in items:
                self.put(item)
    def put(self, item):
        priority, methodId, methodName, times, args, kwargs, tid = item
        self.bc.put(pickle.dumps({'priority': priority, 'methodId': methodId, 'methodName':methodName,
                    'times': times, 'args': args, 'kwargs': kwargs, 'tid':tid}), priority=priority)
        BeanstalkdQueue.conditions[self.tube]['unfinished_tasks'] += 1
        BeanstalkdQueue.conditions[self.tube]['event'].clear()
    def get(self, block=True, timeout=0):
        # Reserve-and-delete: a fetched job is gone even if processing fails.
        item = self.bc.reserve(timeout=timeout)
        if item:
            item.delete()
            item = pickle.loads(item.body)
            return (item['priority'], item['methodId'], item['methodName'], item['times'], tuple(item['args']), item['kwargs'], item['tid']), None
        else:
            return None
    def empty(self):
        return self.bc.stats_tube(self.tube)['current-jobs-ready'] == 0
    def copy(self):
        pass
    def task_done(self, item, force=False):
        # Decrement the shared counter; set the event when everything is done.
        if BeanstalkdQueue.conditions[self.tube]['unfinished_tasks'] <= 0:
            raise ValueError('task_done() called too many times')
        BeanstalkdQueue.conditions[self.tube]['unfinished_tasks'] -= 1
        if BeanstalkdQueue.conditions[self.tube]['unfinished_tasks'] == 0 or force:
        # if self.empty() or force:
            BeanstalkdQueue.conditions[self.tube]['event'].set()
    def join(self):
        # Block until every produced job has been task_done()'d.
        BeanstalkdQueue.conditions[self.tube]['event'].wait()
    def clear(self):
        # Drain the tube by reserving/deleting until empty.
        while not self.empty():
            item = self.get(timeout=10)
            del item
    def rank(self, weight):
        pass
    def __repr__(self):
        return "<" + str(self.__class__).replace(" ", "").replace("'", "").split('.')[-1]
    def collect(self):
        # Drop this tube's shared bookkeeping entry.
        if self.tube in BeanstalkdQueue.conditions:
            del BeanstalkdQueue.conditions[self.tube]
    def __del__(self):
        del self.bc
# class LocalQueue(threading.queue.Queue):
# def __new__(cls):
# return threading.queue.Queue.__new__(cls)
def LocalQueue():
    """Build and return an in-process priority-queue class.

    The returned class subclasses ``threading.queue.Queue`` (the stdlib queue
    module is aliased onto ``threading`` at import time) and mirrors the
    RedisQueue/BeanstalkdQueue interface: heap-ordered storage, a
    ``task_done(item, force)`` signature, ``get()`` returning ``(job, None)``,
    and no-op ``rank``/``collect``.  When the Queue class has been
    gevent-patched (no ``join`` attribute), an Event-based completion flag
    replaces the condition-variable bookkeeping.
    """
    def __init__(self, maxsize=None, items=None, unfinished_tasks=None):
        # Gevent's patched Queue lacks join(); detect that once per instance.
        self.is_patch = not 'join' in dir(threading.queue.Queue)
        self.maxsize = maxsize or 0
        self.items = items
        # BUG FIX: pass the normalised self.maxsize -- the raw argument may
        # be None, which breaks stdlib Queue.put's `maxsize > 0` check on
        # Python 3 (identical behaviour on Python 2, where None < 0 < any int).
        self.parent = threading.queue.Queue.__init__(self, self.maxsize)
        if self.is_patch:
            from gevent.event import Event
            self._cond = Event()
            self._cond.set()
        if unfinished_tasks:
            self.unfinished_tasks = unfinished_tasks
        elif items:
            self.unfinished_tasks = len(items)
        else:
            self.unfinished_tasks = 0
        if self.is_patch and self.unfinished_tasks:
            self._cond.clear()
    def _init(self, maxsize):
        # Called back from Queue.__init__; seed storage from initial items.
        # BUG FIX: was `list(items)` -- a NameError, since `items` is only a
        # local of __init__ above, not of this enclosing factory.
        if self.items:
            self.queue = list(self.items)
        else:
            self.queue = []
    def _put(self, item, heappush=heapq.heappush):
        heappush(self.queue, item)
        if self.is_patch:
            # The patched Queue does not track unfinished tasks itself.
            self.unfinished_tasks += 1
            self._cond.clear()
    def _get(self, heappop=heapq.heappop):
        # Second element mirrors RedisQueue.get's (job, sid) shape.
        return heappop(self.queue), None
    def task_done(self, item, force=False):
        if self.is_patch:
            if self.unfinished_tasks <= 0:
                raise ValueError('task_done() called too many times')
            self.unfinished_tasks -= 1
            if self.unfinished_tasks == 0 or force:
                self._cond.set()
        else:
            self.all_tasks_done.acquire()
            try:
                unfinished = self.unfinished_tasks - 1
                if unfinished <= 0 or force:
                    if unfinished < 0:
                        raise ValueError('task_done() called too many times')
                    self.all_tasks_done.notify_all()
                self.unfinished_tasks = unfinished
            finally:
                self.all_tasks_done.release()
    def join(self):
        if self.is_patch:
            self._cond.wait()
        else:
            # self.parent.join()
            self.all_tasks_done.acquire()
            try:
                while self.unfinished_tasks:
                    self.all_tasks_done.wait()
            finally:
                self.all_tasks_done.release()
    def rank(self, weight):
        pass
    def collect(self):
        pass
    return type('PriorityQueue', (threading.queue.Queue, ), {'__init__':__init__, '_init':_init, '_put':_put, '_get':_get, 'task_done':task_done, 'join':join, 'rank':rank, 'collect':collect})
if __name__ == '__main__':
    from gevent.queue import JoinableQueue
    import gevent
    # Smoke demo: ten coroutine workers draining a locally produced queue.
    # NOTE(review): LocalQueue() returns a *class*, yet it is used below as
    # an instance (q.put/q.get), and task_done() is called without the
    # required `item` argument -- this demo block looks stale.
    q = LocalQueue()
    def worker():
        while True:
            item = q.get()
            try:
                gevent.sleep(5)
            finally:
                q.task_done()
    def consume():
        for i in range(10):
            gevent.spawn(worker)
    def produce():
        for item in range(20):
            q.put((20 - item, item))
            gevent.sleep(0.1)
    import threading
    consume()
    produce()
    # a = threading.Thread(target=consume)
    # b = threading.Thread(target=produce)
    # a.start()
    # b.start()
    q.join()
| {"/setup.py": ["/ask/__init__.py"]} |
56,643 | spawn3/python-util | refs/heads/master | /ask/th.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from threading import Thread
class MyThread(Thread):
    """Thin Thread subclass placeholder.

    The original __init__ overrode Thread.__init__ with a bare ``pass``,
    leaving the thread uninitialised (no ``_started`` event etc.), so
    instances could never be start()ed.  Delegate to the base initialiser
    instead, forwarding any arguments -- backward compatible with the
    previous no-argument call.
    """
    def __init__(self, *args, **kwargs):
        super(MyThread, self).__init__(*args, **kwargs)
56,644 | spawn3/python-util | refs/heads/master | /stock/lib/tjts.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import os, sys
import argparse
from prettytable import PrettyTable
def stock_list(begin_price, end_price, factor=0.04, partition=4, money=20000):
    """Print a grid-trading ladder table between begin_price and end_price.

    Each rung's Buy price grows geometrically by *factor*; Sell is the next
    rung up, Diff the per-rung spread, and Partition subdivides that spread
    into *partition* buy/sell bands.  Stock1 is the raw share count
    affordable with *money*; Stock2 rounds it down to whole lots of 100.
    """
    pt = PrettyTable(['IDX', 'Buy', 'Sell', 'Diff', 'Diff/4', 'Partition', 'Money', 'Stock1', "Stock2"])
    pt.hrules = 1
    # Build the geometric price ladder.
    price_list = []
    price = begin_price
    while price < end_price:
        price_list.append(price)
        price = price * (1 + factor)
    for i, price in enumerate(price_list):
        stock = int(money / price)
        buy_sell_list = []
        for j in range(partition):
            price1 = price * (1 + factor * j / partition)
            price2 = price * (1 + factor * (j + 1) / partition)
            line = u'{:.2f}-{:.2f}'.format(price1, price2)
            buy_sell_list.append(line)
        pt.add_row([i,
                    u'{:.2f}'.format(price),
                    u'{:.2f}'.format(price * (1 + factor)),
                    u'{:.2f}'.format(price * factor),
                    u'{:.2f}'.format(price * factor / partition),
                    '\n'.join(buy_sell_list),
                    money,
                    stock,
                    # BUG FIX: floor-divide to round down to 100-share lots;
                    # plain `/` only floored under Python 2 int division.
                    stock // 100 * 100])
    print(pt)
if __name__ == '__main__':
    # CLI, e.g.: tjts.py stock -b 20.98 -e 36 -f 4 -p 4 -m 20000
    parser = argparse.ArgumentParser()
    subparsers = parser.add_subparsers()
    def _stock_list(args):
        # --factor is given in percent; convert it to a fraction here.
        stock_list(args.begin, args.end, factor=args.factor * 0.01, partition=args.partition, money=args.money)
    parser_stock_List = subparsers.add_parser('stock', help='list stock')
    parser_stock_List.add_argument('-b', '--begin', required=False, type=float, default=20.98, help="begin_price")
    parser_stock_List.add_argument('-e', '--end', required=False, type=float, default=36, help="end_price")
    parser_stock_List.add_argument('-f', '--factor', required=False, type=float, default=4, help="factor")
    parser_stock_List.add_argument('-p', '--partition', required=False, type=int, default=4, help="partition")
    parser_stock_List.add_argument('-m', '--money', required=False, type=float, default=20000, help="money")
    parser_stock_List.set_defaults(func=_stock_list)
    args = parser.parse_args()
    args.func(args)
| {"/setup.py": ["/ask/__init__.py"]} |
56,645 | spawn3/python-util | refs/heads/master | /ask/__init__.py | __version__ = '0.1.0'
VERSION = tuple(map(int, __version__.split('.')))
print '...import package ask'
| {"/setup.py": ["/ask/__init__.py"]} |
56,646 | spawn3/python-util | refs/heads/master | /ask/dt.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import datetime
def get_daystr(dt=None, day=0):
    """Format *dt* (default: now) shifted by *day* days as 'YYYYMMDD'."""
    base = dt or datetime.datetime.now()
    shifted = base + datetime.timedelta(days=day)
    return '{:04d}{:02d}{:02d}'.format(shifted.year, shifted.month, shifted.day)
def total_seconds(delta):
    """Whole seconds in a timedelta (days folded in; microseconds dropped)."""
    seconds_per_day = 24 * 3600
    return delta.seconds + seconds_per_day * delta.days
| {"/setup.py": ["/ask/__init__.py"]} |
56,647 | spawn3/python-util | refs/heads/master | /test/all.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
class TestPool(unittest.TestCase):
    """Skeleton test case used to exercise the suite() aggregation below.

    NOTE(review): test_stat asserts 0 == 1 and therefore always fails --
    presumably a deliberate runner check; confirm before 'fixing' it.
    """
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_stat(self):
        self.assertEqual(0, 1)
    def test_list(self):
        self.assertEqual(1, 1)
self.assertEqual(1, 1)
def suite():
    """Collect every TestCase in this module into one TestSuite."""
    collected = unittest.TestSuite()
    loader = unittest.defaultTestLoader
    for case_class in (TestPool,):
        collected.addTests(loader.loadTestsFromTestCase(case_class))
    return collected
if __name__ == '__main__':
    # Run the aggregated suite with the plain text runner.
    t = unittest.TextTestRunner()
    t.run(suite())
| {"/setup.py": ["/ask/__init__.py"]} |
56,648 | spawn3/python-util | refs/heads/master | /ask/clss/mc.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
class MyMeta(type):
    """Metaclass that announces (Python 2 print) each class it creates."""
    def __new__(cls, name, parents, dct):
        # Log the class under construction, then defer to type.__new__.
        print 'A new class named ' + name + ' is going to be created'
        return super(MyMeta, cls).__new__(cls, name, parents, dct)
class MyClass(object):
    """Demo class; __metaclass__ routes its creation through MyMeta.

    NOTE: the __metaclass__ attribute only has this effect on Python 2.
    """
    __metaclass__ = MyMeta
| {"/setup.py": ["/ask/__init__.py"]} |
56,649 | spawn3/python-util | refs/heads/master | /lich/all.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
import unittest
import test_pool
import test_volume
import test_snapshot
class TestPool(unittest.TestCase):
    """Always-green placeholder smoke tests run ahead of the real
    pool/volume/snapshot suites below."""
    def setUp(self):
        pass
    def tearDown(self):
        pass
    def test_stat(self):
        self.assertEqual(0, 0)
    def test_list(self):
        self.assertEqual(1, 1)
if __name__ == '__main__':
    # Run the three lich suites back to back with the plain text runner.
    t = unittest.TextTestRunner()
    t.run(test_pool.suite())
    t.run(test_volume.suite())
    t.run(test_snapshot.suite())
| {"/setup.py": ["/ask/__init__.py"]} |
56,650 | spawn3/python-util | refs/heads/master | /lich/lich/syncump.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
from base import LichBase, RemoteLocation
from runner import http_runner
class LichSyncumpParam(RemoteLocation):
    """Parameter object for LichSyncump calls; a named RemoteLocation
    carrying only the target host IP."""
    def __init__(self, host_ip=None):
        super(LichSyncumpParam, self).__init__(host_ip)
class LichSyncump(LichBase):
    """Wrappers around the lich.syncump CLI; each method returns a command
    line that the @http_runner decorator executes on the remote host."""
    @http_runner()
    def get_network(self, param):
        cmd = '%s get_network ' % (self.lich_syncump)
        return cmd
    @http_runner()
    def iscsi_port(self, param, port="check"):
        # The default "check" queries the current iSCSI port instead of
        # setting a new one.
        cmd = "%s iscsi_port --port %s" % (self.lich_syncump, port)
        return cmd
    @http_runner()
    def eth_hosts(self, param, host=None):
        # Fall back to the param's host_ip when no explicit host is given.
        # NOTE(review): method is named eth_hosts but invokes the `etc_hosts`
        # subcommand -- confirm which spelling the CLI actually expects.
        host = host or param.host_ip
        cmd = "%s etc_hosts --host %s" % (self.lich_syncump, host)
        return cmd
if __name__ == '__main__':
    # NOTE(review): LichSyncumpParam.__init__ only accepts host_ip; the extra
    # dev_name keyword below would raise TypeError if this guard ever ran.
    param = LichSyncumpParam(host_ip='192.168.120.211', dev_name='/dev/vda')
    node = LichSyncump()
    # print disk.add(param)
| {"/setup.py": ["/ask/__init__.py"]} |
56,651 | spawn3/python-util | refs/heads/master | /spider/gdc/grab.py | #!/usr/bin/env python
# coding=utf-8
import time, datetime, copy
import os, sys, json
import random
import traceback
from model.setting import withData, datacfg, WORKNUM, WORKQUEUE
from model.log import ProxyLog, Statistics, Log
from model.data import Proxy
from webcrawl.request import PROXY, requGet, requPost
from webcrawl.task import Workflows, DataQueue
from setting import USER, SECRET, HOST
from log import Producer
import task
# Register the configured work queues with the crawl framework.
DataQueue.update(**WORKQUEUE)
# NOTE(review): LIMIT's consumer is outside this chunk -- purpose unconfirmed.
LIMIT = 600
# Header prepended to spider scripts fetched and written to disk below.
INIT = """#!/usr/bin/env python
# coding=utf-8
"""
@withData(datacfg.W, resutype='DICT', autocommit=True)
def choose():
    """Pick the most recently updated proxy record with usespeed < 1.

    NOTE(review): both range conditions test `usespeed` (one against a date
    string) -- the second looks like a typo for `update_time`; also
    `proxys[0]` raises IndexError when nothing matches.  Confirm both
    before relying on this.
    """
    # `limit` is computed but unused -- presumably a leftover cutoff.
    limit = datetime.datetime.now() - datetime.timedelta(days=3)
    # Earlier SQL/Mongo selection strategies, kept for reference:
    # proxys = dbpc.handler.queryAll(""" select * from grab_proxy where usespeed < 1 and update_time > '2015-12-15 01:11:00' order by usespeed asc, refspeed asc limit 200; """)
    # proxys = Proxy.queryAll({'$and':[{'usespeed':{'$lt':1}}, {'usespeed':{'$gt':'2015-12-15 01:11:00'}}]}, sort=[('usespeed', 1), ('refspeed', 1)], skip=0, limit=200)
    # proxys = dbpc.handler.queryAll(""" select * from grab_proxy where usespeed < 1 and update_time > '2015-12-15 01:11:00' order by update_time desc; """)
    proxys = Proxy.queryAll({'$and':[{'usespeed':{'$lt':1}}, {'usespeed':{'$gt':'2015-12-15 01:11:00'}}]}, sort=[('update_time', -1)])
    # return random.choice(proxys)
    return proxys[0]
@withData(datacfg.W, resutype='DICT', autocommit=True)
def log(pid, elapse):
    """Record one proxy use: append a ProxyLog row and fold `elapse` into
    the proxy's running-average usespeed and usage counter."""
    create_time = datetime.datetime.now()
    proxylog = ProxyLog(pid=pid, elapse=elapse, create_time=create_time)
    ProxyLog.insert(proxylog)
    proxy = Proxy.queryOne({'_id':pid})
    # Incremental mean: new_avg = (old_avg * n + elapse) / (n + 1).
    proxy['usespeed'] = (proxy['usespeed'] * proxy['usenum'] + elapse)/float(proxy['usenum']+1)
    proxy['usenum'] = proxy['usenum'] + 1
    Proxy.update({'_id':pid}, {'$set':{'usespeed':proxy['usespeed'], 'usenum':proxy['usenum'], 'update_time':create_time}})
# PROXY.use = True
# PROXY.choose = choose
# PROXY.log = log
# PROXY.worker.start()
@withData(datacfg.W, resutype='DICT', autocommit=True)
def record(tid, succ, fail, timeout, elapse=None, sname=None, create_time=None):
    """Persist crawl counters: with sname=None, insert a task-level
    Statistics row and return its id; otherwise insert a per-stage Log row
    keyed by gsid=tid."""
    create_time = create_time or datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')
    if sname is None:
        statistics = Statistics(tid=tid, succ=succ, fail=fail, timeout=timeout, elapse=elapse, create_time=create_time)
        return Statistics.insert(statistics)
    else:
        log = Log(gsid=tid, sname=sname, succ=succ, fail=fail, timeout=timeout, create_time=create_time)
        Log.insert(log)
def stat(task, spider, create_time=None):
    """Flush *spider*'s accumulated counters to storage for *task*.

    Writes one overall Statistics row (record() returns its id, gsid),
    then one per-section Log row for every named stage in spider.stat.
    """
    create_time = create_time or datetime.datetime.now()
    gsid = record(task['_id'], spider.stat['total']['succ'], spider.stat['total']['fail'], spider.stat['total']['timeout'], elapse=spider.totaltime, create_time=create_time)
    for name in spider.stat.keys():
        if not name == 'total':
            record(gsid, spider.stat[name]['succ'], spider.stat[name]['fail'], spider.stat[name]['timeout'], sname=name, create_time=create_time)
def schedule():
    """Fetch the active task list from the API and enrich each task.

    For every task with status > 0, pulls its section, article, unit and
    datamodel records.  Whenever a record is flagged 'fileupdate', the
    current source file is downloaded from the server, written to its
    local path (unit files also get a package __init__.py next to them),
    and the flag is reset on the server.  Section/article/unit fields
    needed by task() are copied onto each task dict; the enriched list is
    returned.
    """
    user_id = 0  # NOTE(review): never used -- presumably leftover
    condition = {'status':{'$gt':0}}
    projection = {'_id':1, 'type':1, 'period':1, 'aid':1, 'sid':1, 'flow':1, 'params':1, 'worknum':1, 'queuetype':1, 'worktype':1, 'timeout':1, 'category':1, 'tag':1, 'name':1, 'extra':1, 'update_time':1, 'push_url':1}
    tasks = requPost('%sgds/api/task' % HOST, {'condition':json.dumps(condition), 'projection':json.dumps(projection), 'limit':'all'}, format='JSON')
    tasks = tasks['task']
    for task in tasks:
        projection = {'step':1, 'index':1, 'additions':1}
        section = requPost('%sgds/api/section/%s' % (HOST, str(task['sid'])), {'projection':json.dumps(projection), 'limit':'one'}, format='JSON')
        section = section['section']
        # NOTE(review): 'filepath' appears twice in this dict literal; the
        # duplicate is harmless but should be removed.
        projection = {'uid':1, 'filepath':1, 'name':1, 'clsname':1, 'filepath':1, 'fileupdate':1}
        article = requPost('%sgds/api/article/%s' % (HOST, str(task['aid'])), {'projection':json.dumps(projection), 'limit':'one'}, format='JSON')
        article = article['article']
        if article['fileupdate']:
            # Sync the article implementation file, then clear the flag.
            result = requGet('%sgds/static/exe/%s' % (HOST, article['filepath']), format='TEXT')
            filepath = article['filepath']
            fi = open(filepath, 'w')
            fi.write(result)
            fi.close()
            requPost('%sgds/api/article/%s' % (HOST, str(task['aid'])), {'data':json.dumps({'fileupdate':0})})
        projection = {'name':1, 'filepath':1, 'fileupdate':1, 'dmid':1}
        unit = requPost('%sgds/api/unit/%s' % (HOST, str(article['uid'])), {'projection':json.dumps(projection), 'limit':'one'}, format='JSON')
        unit = unit['unit']
        if unit['fileupdate']:
            # Sync the unit file plus a package __init__.py beside it.
            result = requGet('%sgds/static/exe/%s' % (HOST, unit['filepath']), format='TEXT')
            filepath = unit['filepath']
            fi = open(filepath, 'w')
            fi.write(result)
            fi.close()
            fi = open(os.path.join(os.path.dirname(os.path.abspath(filepath)), "__init__.py"), 'w')
            fi.write('#!/usr/bin/env python\n# coding=utf8')
            fi.close()
            requPost('%sgds/api/unit/%s' % (HOST, str(article['uid'])), {'data':json.dumps({'fileupdate':0})})
        # NOTE(review): 'filepath' is only bound inside the fileupdate
        # branches above -- if neither fired for this task, the next line
        # raises NameError (or reuses a stale value from a prior task).
        filepath = os.path.join(os.path.dirname(filepath), '__init__.py')
        if not os.path.exists(filepath):
            fi = open(filepath, 'w')
            fi.write(INIT)
            fi.close()
        projection = {'filepath':1, 'fileupdate':1}
        datamodel = requPost('%sgds/api/datamodel/%s' % (HOST, str(unit['dmid'])), {'projection':json.dumps(projection), 'limit':'one'}, format='JSON')
        datamodel = datamodel['datamodel']
        if datamodel['fileupdate']:
            result = requGet('%sgds/static/exe/%s' % (HOST, datamodel['filepath']), format='TEXT')
            filepath = datamodel['filepath']
            fi = open(filepath, 'w')
            fi.write(result)
            fi.close()
            requPost('%sgds/api/datamodel/%s' % (HOST, str(unit['dmid'])), {'data':json.dumps({'fileupdate':0})})
        # Copy the fields task() needs onto the task dict itself.
        task['step'] = section['step']
        task['index'] = section['index']
        task['additions'] = section.get('additions') or '{}'
        task['filepath'] = article['filepath']
        task['article'] = article['clsname']
        task['unit'] = unit['name']
    return tasks
def changestate(tid, status, extra=None):
    """Push a status update for task *tid* to the task API.

    NOTE(review): *extra* (error detail supplied by callers on failure) is
    accepted but never sent in the payload -- probably an omission; confirm
    against the server-side API before adding it.
    """
    requPost('%sgds/api/task/%s' % (HOST, str(tid)), {'data':json.dumps({'status':status})})
def task():
    """Main scheduler loop: poll the task API and run each due crawl task.

    FOREVER tasks share one long-lived spider per article class and are
    queued on the shared workflow; other ("one-shot") tasks build a fresh
    spider and run synchronously via fetchDatas.  Every LIMIT seconds the
    long-lived spiders' statistics are flushed.  Task status transitions:
    2 = running, 0 = done, 3 = failed (with traceback).
    """
    workflow = Workflows(WORKNUM, 'R', 'THREAD')
    workflow.start()
    last_stat = datetime.datetime.now()
    local_spider = {}  # article class name -> long-lived FOREVER spider
    while True:
        for task in schedule():
            # Derive the importable module path from the article file path.
            module_name = task['filepath'].replace('.py', '').replace('/', '.')
            task['update_time'] = datetime.datetime.strptime(task['update_time'], '%Y-%m-%d %H:%M:%S')
            cls_name = task['article']
            module = __import__(module_name, fromlist=['task.%s' % task['unit']])
            cls = getattr(module, cls_name)
            if task.get('type', 'FOREVER') == 'FOREVER':
                # Reuse (or lazily create) one spider instance per article class.
                spider = local_spider.get(cls_name, None)
                if spider is None:
                    spider = cls(worknum=20, queuetype='R', worktype='THREAD', tid=int(task['_id']))
                    local_spider[cls_name] = spider
            else:
                spider = cls(worknum=task['worknum'], queuetype=task['queuetype'], worktype=task['worktype'], tid=int(task['_id']))
            try:
                changestate(task['_id'], 2)  # mark task as running
                step = task.get('step', 1) - 1
                additions = {}
                additions['name'] = task['name']
                additions['cat'] = task['category'].split(',')
                additions['tag'] = task['tag'].split(',')
                additions = dict(json.loads(task['additions']), **additions)
                if task.get('type', 'FOREVER') == 'FOREVER':
                    # Skip FOREVER tasks that already ran within their period (hours).
                    if ((datetime.datetime.now() - task['update_time']).seconds)/3600 < task.get('period', 12):
                        continue
                    weight = spider.weight(task['flow'], once=True)
                    section = spider.section(task['flow'], step)
                    # 'params' may be empty, a JSON object, a tuple literal
                    # '(a,b)', or a single positional/keyword value whose
                    # keyword name comes from task['index'].
                    if task['params'] is None or task['params'].strip() == '':
                        workflow.task(weight, section, task['_id'], **{'additions':additions})
                    elif task['params'].startswith('{'):
                        workflow.task(weight, section, task['_id'], **dict(json.loads(task['params']), **{'additions':additions}))
                    elif task['params'].startswith('('):
                        workflow.task(weight, section, task['_id'], *tuple(task['params'][1:-1].split(',')), **{'additions':additions})
                    else:
                        if task['index'] is None or task['index'].isdigit():
                            workflow.task(weight, section, task['_id'], task['params'], **{'additions':additions})
                        else:
                            workflow.task(weight, section, task['_id'], **{task['index']:task['params'], 'additions':additions})
                else:
                    # One-shot task: run synchronously on the fresh spider
                    # (same 'params' dispatch as above).
                    if task['params'] is None or task['params'].strip() == '':
                        spider.fetchDatas(task['flow'], step, **{'additions':additions})
                    elif task['params'].startswith('{'):
                        spider.fetchDatas(task['flow'], step, **dict(json.loads(task['params']), **{'additions':additions}))
                    elif task['params'].startswith('('):
                        spider.fetchDatas(task['flow'], step, *tuple(task['params'][1:-1].split(',')), **{'additions':additions})
                    else:
                        if task['index'] is None or task['index'].isdigit():
                            spider.fetchDatas(task['flow'], step, task['params'], **{'additions':additions})
                        else:
                            spider.fetchDatas(task['flow'], step, **{task['index']:task['params'], 'additions':additions})
                    spider.statistic()
                changestate(task['_id'], 0)  # mark task as finished
                if task.get('push_url') is not None:
                    requPost(task['push_url'], {'type':'video', 'tid':task['_id']})
            except:
                # Report the failure (status 3) with the formatted traceback.
                t, v, b = sys.exc_info()
                err_messages = traceback.format_exception(t, v, b)
                extra = ','.join(err_messages)
                print extra
                changestate(task['_id'], 3, extra=extra)
            else:
                # One-shot tasks flush their statistics immediately on success.
                if not task.get('type', 'FOREVER') == 'FOREVER':
                    stat(task, spider)
            finally:
                # Periodic statistics flush for the long-lived FOREVER spiders.
                if ((datetime.datetime.now() - last_stat).seconds) >= LIMIT:
                    last_stat = datetime.datetime.now()
                    for spider in local_spider.values():
                        spider.statistic()
                        stat(task, spider, last_stat)
        time.sleep(60)
if __name__ == '__main__':
    # Run the scheduler loop as a standalone process.
    task()
| {"/setup.py": ["/ask/__init__.py"]} |
56,652 | spawn3/python-util | refs/heads/master | /lich/test_volume.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from pprint import pprint
from lich.umptypes import UmpPath
from base import TestBase
class TestAll(TestBase):
    """Volume create/delete/stat tests against pool 'volume.a'."""

    def setUp(self):
        # Start from a clean slate: remove any leftover volume/pool from a
        # previous run, then recreate the pool each test uses.
        super(TestAll, self).setUp()
        self.pool_name = UmpPath('volume.a')
        self.volume_name = UmpPath('volume.a/b')
        self.size = 1024*1024*1024  # 1 GiB
        self.del_volume(self.volume_name)
        self.del_pool(self.pool_name)
        self.create_pool(self.pool_name)
        # self.create_volume(self.volume_name, self.size)

    def tearDown(self):
        # Tests delete their own volumes; only the pool is cleaned up here.
        # self.del_volume(self.volume_name)
        self.del_pool(self.pool_name)
        pass

    def test_delete(self):
        # Creating then deleting a volume should succeed without error.
        self.create_volume(self.volume_name, self.size)
        self.del_volume(self.volume_name)

    def test_stat(self):
        # A freshly created volume should be stat-able before deletion.
        self.create_volume(self.volume_name, self.size)
        self.stat_volume(self.volume_name)
        self.del_volume(self.volume_name)
def suite():
    """Collect every TestCase class in this module into a single TestSuite."""
    loader = unittest.defaultTestLoader
    combined = unittest.TestSuite()
    for case_cls in (TestAll,):
        combined.addTests(loader.loadTestsFromTestCase(case_cls))
    return combined
if __name__ == '__main__':
    # Allow running this module's tests directly.
    unittest.main()
| {"/setup.py": ["/ask/__init__.py"]} |
56,653 | spawn3/python-util | refs/heads/master | /ask/io.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import redis
def test_file():
    """Read the file 'io.py' and print it line by line (newlines stripped).

    NOTE(review): the path is relative to the current working directory,
    not to this file's location -- fails when run from elsewhere.
    """
    with open('io.py') as f:
        for line in f:
            line = line.strip('\n')
            print line
| {"/setup.py": ["/ask/__init__.py"]} |
56,654 | spawn3/python-util | refs/heads/master | /ask/object_cache.py | # -*- coding: UTF-8 -*-
import objcache
class ObjectCache(objcache.ObjectCache):
    """Project-level subclass of objcache.ObjectCache; adds no behavior yet."""

    def __init__(self):
        super(ObjectCache, self).__init__()
def cache_get_or_create(name, load_func, expire=600):
    """Return the cache registered under ``name + 'Cache'``, creating it
    first with *load_func* (and *expire*, presumably seconds) if missing.

    NOTE(review): relies on class-level registry methods
    (exists/create/get) on ObjectCache -- presumably provided by the
    objcache library; verify their semantics there.
    """
    cache_name = name + 'Cache'
    if not ObjectCache.exists(cache_name):
        ObjectCache.create(load_func, name=cache_name, expire=expire)
    return ObjectCache.get(cache_name)
| {"/setup.py": ["/ask/__init__.py"]} |
56,655 | spawn3/python-util | refs/heads/master | /lich/config.py | #!/usr/bin/env python2
# -*- coding: utf-8 -*-
class VolumeConfig(object):
    """Static description of a test volume.

    Holds the pool-qualified volume name, the snapshot and cloned-volume
    names associated with it, and the default / resize-target sizes.
    """

    def __init__(self, name, snapshots=None, cloned_volumes=None):
        # BUGFIX: None sentinels instead of mutable list defaults.  With
        # list defaults ['snap_01'] / ['clone_01'], one shared list object
        # would be mutated across every instance created without explicit
        # arguments.
        self.name = name
        self.snapshots = ['snap_01'] if snapshots is None else snapshots
        self.cloned_volumes = ['clone_01'] if cloned_volumes is None else cloned_volumes
        self.size = 1024 * 1024 * 1024  # default volume size: 1 GiB
        self.new_size = 2 * self.size   # resize target: double the default

    @property
    def pool_name(self):
        """Pool component of *name*, i.e. everything before the first '/'.

        NOTE(review): when *name* contains no '/', find() returns -1 and
        this yields name[:-1] -- confirm names are always pool-qualified.
        """
        idx = self.name.find('/')
        return self.name[:idx]
# Volumes exercised by the test suite; extend with more VolumeConfig entries.
VOLUMES = [
    VolumeConfig('a/b')
]
| {"/setup.py": ["/ask/__init__.py"]} |
56,656 | spawn3/python-util | refs/heads/master | /ask/web.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tornado
import tornado.ioloop
import tornado.web
import tornado.httpclient
import tornado.gen
from tornado.httpserver import HTTPServer
class BaseHandler(tornado.web.RequestHandler):
    """Shared base class for this app's handlers.

    Every tornado hook is currently stubbed out as a no-op.

    NOTE(review): overriding write_error/send_error with 'pass' suppresses
    tornado's default error responses, so failing requests return nothing
    useful to the client -- confirm this is intentional.
    """

    def prepare(self):
        # Runs before the HTTP method handler; no-op.
        pass

    def initialize(self):
        # Per-request initialization hook; no-op.
        pass

    def get_current_user(self):
        # No authentication: every request is anonymous (returns None).
        pass

    def write_error(self, status_code, **kwargs):
        pass

    def send_error(self, status_code=500, **kwargs):
        pass

    def on_finish(self):
        # Post-request cleanup hook; no-op.
        pass
class MainHandler(BaseHandler):
    """Serve a small static JSON status payload at the root URL."""

    def get(self):
        payload = {'code': 0, 'msg': 'hello'}
        self.set_header('Content-Type', 'text/json')
        self.write(payload)
class AsyncHandler(BaseHandler):
    """Proxy the remote emoji 'hits' endpoint via tornado's callback API.

    @tornado.web.asynchronous keeps the request open until finish() is
    called from the fetch callback.
    """

    @tornado.web.asynchronous
    def get(self):
        url = 'http://so.picasso.adesk.com/emoji/v1/hits'
        http = tornado.httpclient.AsyncHTTPClient()
        http.fetch(url, callback=self.on_response)

    def on_response(self, response):
        # An upstream failure becomes a plain 500 for our client.
        if response.error:
            raise tornado.web.HTTPError(500)
        json = tornado.escape.json_decode(response.body)  # local name shadows any module-level 'json'
        self.write(json)
        self.finish()
class GenHandler(BaseHandler):
    """Proxy the remote emoji 'hits' endpoint using tornado.gen coroutines."""

    @tornado.gen.coroutine
    def get(self):
        url = 'http://so.picasso.adesk.com/emoji/v1/hits'
        http = tornado.httpclient.AsyncHTTPClient()
        # yield suspends this handler until the HTTP response arrives.
        response = yield http.fetch(url)
        json = tornado.escape.json_decode(response.body)  # local name shadows any module-level 'json'
        self.write(json)
def make_app():
    """Build the tornado Application: global configuration, including the
    routing table that maps request paths to their handlers."""
    routes = [
        (r'/', MainHandler),
        (r'/async', AsyncHandler),
        (r'/gen', GenHandler),
    ]
    return tornado.web.Application(routes)
if __name__ == '__main__':
    # Start the server on port 8888 and run the IOLoop forever.
    print 'Listening 8888...'
    app = make_app()
    app.listen(8888)
    tornado.ioloop.IOLoop.current().start()
| {"/setup.py": ["/ask/__init__.py"]} |
56,657 | spawn3/python-util | refs/heads/master | /ask/func/decorator.py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import functools
# Global switch: when False, trace() returns functions undecorated.
enable_tracing = True
if enable_tracing:
    # Shared log file for all traced calls; opened (and truncated) at import
    # time and kept open for the life of the process.
    debug_log = open('debug.log', 'w')
def trace(func):
    """Decorator that logs each call to, and return value of, *func*.

    Log lines are written to the module-level ``debug_log`` file.  Tracing
    is decided once, at decoration time: if ``enable_tracing`` is false
    when trace() is applied, the original function is returned unchanged
    and later changes to the flag have no effect.
    """
    if enable_tracing:
        # functools.wraps preserves func's __name__/__doc__ on the wrapper
        # (the module already imports functools for wrap() below).
        @functools.wraps(func)
        def callf(*args, **kwargs):
            debug_log.write('Calling %s: %s, %s\n' % (func.__name__, args, kwargs))
            r = func(*args, **kwargs)
            debug_log.write('%s returned %s\n' % (func.__name__, r))
            return r
        return callf
    else:
        return func
@trace
def square(x):
    """Return x squared; calls are logged via the trace decorator."""
    return x * x
def wrap(func):
    """Transparent pass-through decorator.

    The returned callable forwards all positional and keyword arguments to
    *func* unchanged; functools.wraps copies func's metadata (__name__,
    __doc__, ...) onto it.
    """
    def forward(*args, **kwargs):
        return func(*args, **kwargs)
    return functools.wraps(func)(forward)
if __name__ == '__main__':
    # Smoke test: one traced call (appends two lines to debug.log).
    square(1)
| {"/setup.py": ["/ask/__init__.py"]} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.