text stringlengths 38 1.54M |
|---|
import os
import re
import tempfile
from io import BytesIO
import networkx as nx
import requests
from bs4 import BeautifulSoup
ACCEPTED_CLASSES = [
"Actinopterygii",
"Amphibia",
"Aves",
"Insecta",
"Mammalia",
"Reptilia",
]
BASE_CLASS_URL = "https://github.com/bansallab/asnr/tree/master/Networks"
GITHUB_URL = "https://github.com"
BASE_RAW_GRAPHML = "https://raw.githubusercontent.com"
def urlopen(*args, **kwargs):
    """Open a URL via ``urllib.request.urlopen``.

    The import is deferred to call time because ``urllib.request`` pulls
    in a large slice of the stdlib that most users of this module never
    need.
    """
    from urllib.request import urlopen as _urlopen

    return _urlopen(*args, **kwargs)
class SpeciesGraph(object):
    """Lazily-downloaded social network graph for a single species.

    Parameters
    ----------
    species : str
        Species name the graph belongs to.
    graph_link : str
        Repository path of the ``.graphml`` file (a GitHub "blob" link).
    """

    def __init__(self, species, graph_link):
        self.species = species
        self._graph_link = graph_link
        self._graph = None  # populated on first access of ``graph``

    def create_graph(self):
        """Download the graphml payload and parse it into a networkx graph."""
        # Raw file content is served from raw.githubusercontent.com with
        # the "/blob" path component removed.
        url = BASE_RAW_GRAPHML + self._graph_link.replace("/blob", "")
        req = urlopen(url)
        try:
            payload = req.read()
        finally:
            req.close()
        # nx.read_graphml wants a path; use a temp dir that is cleaned up
        # even if parsing fails (the original leaked the temp dir on error,
        # and built a filename from the URL containing ':' which is invalid
        # on Windows).
        with tempfile.TemporaryDirectory() as tmpdir:
            path = os.path.join(tmpdir, "temp.graphml")
            with open(path, "wb") as tmp:
                tmp.write(payload)
            self._graph = nx.read_graphml(path)

    @property
    def graph(self):
        """The parsed graph, downloaded and cached on first access."""
        if self._graph is None:
            self.create_graph()
        return self._graph
class ASNRReader(object):
    """Scrapes the ASNR (Animal Social Network Repository) GitHub repo.

    Builds a nested dict: taxonomic class -> species -> graphs + metadata.
    NOTE(review): this relies on scraping GitHub's HTML (including a
    specific CSS class name); it is fragile and may break whenever GitHub
    changes its markup -- confirm it still works before relying on it.
    """
    def __init__(self):
        # Lazily populated by the ``species_data`` property.
        self._species_data = None
        self.base_url = "https://github.com/bansallab/asnr/tree/master/Networks"

    @staticmethod
    def create_metadata(link):
        """Scrape the metadata table of one species page into a dict."""
        metadata_url = GITHUB_URL + link
        resp = requests.get(metadata_url)
        soup = BeautifulSoup(resp.text, "html.parser")
        # Table cells after the first two alternate key/value; the regex
        # strips the HTML tags from each cell's markup.
        keys = [re.sub("<.*?>", "", str(i)) for i in soup.find_all("td")[2:][0:][::2]]
        values = [re.sub("<.*?>", "", str(i)) for i in soup.find_all("td")[2:][1:][::2]]
        metadata = dict(zip(keys, values))
        return metadata

    @staticmethod
    def find_links(url):
        """Return the hrefs of all file/directory links on a repo page."""
        resp = requests.get(url)
        soup = BeautifulSoup(resp.text, "html.parser")
        links = soup.find_all("a", {"class": "js-navigation-open link-gray-dark"})
        return list(map(lambda x: x["href"], links))

    def create_species_graphs(self, species, link):
        """Build a SpeciesGraph for every graph file listed under *link*.

        The first link on the page is skipped -- presumably a non-graph
        entry such as the parent directory; confirm against the repo.
        """
        url = GITHUB_URL + link
        links = self.find_links(url)[1:]
        species_graphs = list(map(lambda x: SpeciesGraph(species, x), links))
        return species_graphs

    def create_species_graph_dict(self, links, species_class):
        """Map species name -> {"graphs": [...], "metadata": {...}}."""
        graph_dict = {}
        for link in links:
            value_dict = {}
            # Species name is the first underscore-separated token after
            # the class directory in the link path.
            key = link.split("{}/".format(species_class))[1].split("_")[0]
            value_dict["graphs"] = self.create_species_graphs(key, link)
            value_dict["metadata"] = self.create_metadata(link)
            graph_dict[key] = value_dict
        return graph_dict

    def find_class_urls(self, species_class):
        """Return the links listed under one taxonomic class directory."""
        url = self.base_url + "/" + species_class
        links = self.find_links(url)
        return links

    def create_species_data(self):
        """Scrape every class in ACCEPTED_CLASSES and cache the result."""
        species_data = {}
        print("Gathering data from the ASNR Repository")
        for species_class in ACCEPTED_CLASSES:
            class_urls = self.find_class_urls(species_class)
            graph_dict = self.create_species_graph_dict(class_urls, species_class)
            species_data[species_class] = graph_dict
        print("Data is now available")
        self._species_data = species_data
        return

    @property
    def species_data(self):
        """Scraped data, fetched on first access and cached afterwards."""
        if self._species_data is None:
            self.create_species_data()
        return self._species_data
|
import turtle

# Band colors, drawn innermost first.
# NOTE(review): 'grey' is not a rainbow color -- 'violet' was perhaps
# intended; confirm before changing the visual output.
rainbow_colors = ['grey', 'yellow', 'green', 'blue', 'purple', 'orange', 'red']
# Pen width of a single rainbow band.
rainbow_width = 30
# Radius of the innermost band.
rainbow_size = 100
# Set up the turtle that does the drawing.
painter = turtle.Turtle()
painter.shape('turtle')
painter.pensize(rainbow_width)
# Draw one semicircle per color, growing the radius by one band width each
# time so the bands nest from the inside out.
next_rainbow_radius = rainbow_size
for rainbow_color in rainbow_colors:
    painter.penup()
    # Start on the positive x-axis at this band's radius.
    painter.setposition(next_rainbow_radius, 0)
    painter.pendown()
    painter.setheading(90)  # face up so circle() arcs to the left
    painter.pencolor(rainbow_color)
    painter.circle(next_rainbow_radius, 180)  # 180 degrees = semicircle
    next_rainbow_radius += rainbow_width
turtle.done()
|
import argparse
import codecs
import pickle
import os
import sys
from escapewords import escape_words
from stopwords import stopwords
import stems
#from stemming.porter2 import stem
# Inverted index mapping term -> {document name: occurrence count}.
inverted_index = {}
# Terms returned verbatim, bypassing special-character stripping.
ignore_list = set(['c++', 'md5', 'sha1', 'sha2', 'sha256', 'sha512'])

# Characters whose presence disqualifies a token entirely.
_REJECT_CHARS = set('%^&*[]\\/|<>~@=+:$')
# Token-splitting characters become spaces; noise characters are dropped.
_SPACE_TABLE = str.maketrans({c: ' ' for c in '-,;'})
_DROP_TABLE = str.maketrans('', '', '!#()"`1234567890?')


def remove_special_chars(word):
    """Normalise *word* for indexing.

    Returns:
        The cleaned word (possibly containing spaces, i.e. several
        sub-tokens), *word* unchanged when it is in ``ignore_list``, or
        ``None`` when it contains a disqualifying character.
    """
    if word in ignore_list:
        return word
    # One set-intersection test replaces the original chain of 16 `in`
    # checks; behaviour is identical.
    if _REJECT_CHARS.intersection(word):
        return None
    w = word.translate(_SPACE_TABLE).translate(_DROP_TABLE)
    if '.' in w:
        # A trailing dot is punctuation; an interior dot splits tokens.
        if w[-1] == '.':
            w = w.replace('.', '')
        else:
            w = w.replace('.', ' ')
    if "'" in w:
        # Strip a possessive 's, then drop any remaining apostrophes.
        # (The original indexed w[-2] unconditionally and raised
        # IndexError on one-character tokens such as "'".)
        if w.endswith("'s"):
            w = w[:-2]
        w = w.replace("'", '')
    return w
def parse_file(input_file, output=False, stem=True, filter_stopwords=False):
    """Tokenise *input_file* and merge its term counts into ``inverted_index``.

    Each term maps to ``{doc_name: count}`` where doc_name is
    ``"<basename>(<extension>)"``.

    Parameters:
        input_file: path of the file to index.
        output: echo the normalised tokens to stdout.
        stem: replace a token by its stem when one is known.
        filter_stopwords: skip tokens found in ``stopwords``.
    """
    try:
        with codecs.open(input_file, 'r', 'latin-1') as ip:
            # "report.txt" -> "report(txt)"
            file_name_parts = ip.name.split('/')[-1].split('.')
            file_name = file_name_parts[0] + '(' + file_name_parts[1] + ')'
            for line in ip:
                if output:
                    # Only emit the line separator when echoing tokens;
                    # the original printed blank lines unconditionally.
                    sys.stdout.write('\n')
                for word in line.split():
                    word = word.lower()
                    replacements = remove_special_chars(word)
                    if replacements is None:
                        continue
                    for w in replacements.split():
                        # Filter on the cleaned token, not the raw word,
                        # so e.g. "the," is recognised as stopword "the".
                        if filter_stopwords and w in stopwords:
                            continue
                        if stem and w in stems.stems:
                            w = stems.stems.get(w)
                        if output:
                            sys.stdout.write('%s ' % w)
                        doclist = inverted_index.setdefault(w, {})
                        doclist[file_name] = doclist.get(file_name, 0) + 1
    except Exception as e:
        sys.stderr.write('Failed to open: %s due to %s\n' % (input_file, str(e)))
def parse_dir_tree(rootdir, output=False, stem=True, filter_stopwords=False):
    """Index every file under *rootdir*, recursively.

    ``os.walk`` already descends into subdirectories, so no manual
    recursion is needed.  The original joined file names against *rootdir*
    instead of the directory actually being walked (producing wrong paths
    for nested files) and additionally recursed over each subtree a
    second time, losing the keyword arguments in the process.
    """
    for root, _dirs, files in os.walk(rootdir):
        for f in files:
            parse_file(os.path.join(root, f), output=output,
                       stem=stem, filter_stopwords=filter_stopwords)
def save_index():
    """Serialise the in-memory inverted index to the ``iindex`` file."""
    with open('iindex', 'wb') as fh:
        pickle.dump(inverted_index, fh)
def main():
    """CLI entry point: build an inverted index from a file or a tree.

    ``-f FILE`` indexes a single file, otherwise ``-s DIR`` indexes a
    directory tree; the index is pickled to ``iindex`` afterwards.
    """
    argparser = argparse.ArgumentParser()
    argparser.add_argument('-s', '--source', help='directory tree to index')
    # NOTE: argparse's type=bool is a classic trap -- bool('False') is
    # True, so any value given on the command line enabled the flag.
    # These are genuine on/off switches, hence action='store_true'.
    argparser.add_argument('-o', '--output', action='store_true', default=False)
    argparser.add_argument('-f', '--file', help='single file to index')
    argparser.add_argument('--no-stem', dest='no_stem',
                           action='store_true', default=False)
    argparser.add_argument('--filter-stopwords', dest='stop_words',
                           action='store_true', default=False)
    args = argparser.parse_args()
    if args.file:
        parse_file(args.file, output=args.output,
                   filter_stopwords=args.stop_words, stem=not args.no_stem)
    else:
        parse_dir_tree(args.source, output=args.output,
                       filter_stopwords=args.stop_words, stem=not args.no_stem)
    save_index()


if __name__ == '__main__':
    main()
|
#!/usr/bin/python3
import csv
import itertools
import os
import re
import sys
import tempfile
from collections import OrderedDict
from pathlib import Path
from shutil import copyfile
class bcolors:
    """ANSI terminal escape codes for coloured and styled output."""
    PURPLE = '\033[95m'
    BLUE = '\033[94m'
    GREEN = '\033[92m'
    YEL = '\033[93m'
    RED = '\033[91m'
    ENDC = '\033[0m'  # reset colour/style back to the terminal default
    BOLD = '\033[1m'
    UNDERLINE = '\033[4m'
def printc(string, color=bcolors.RED):
    """Print *string* wrapped in *color*'s ANSI code, then reset styling."""
    print("{}{}{}".format(color, string, bcolors.ENDC))
def clear():
    """Clear the terminal screen (Windows uses ``cls``, POSIX ``clear``)."""
    if os.name == 'nt':
        os.system('cls')
    else:
        os.system('clear')
# Placeholder -> pronoun lookup, keyed first by placeholder token and then
# by sex code ('M', 'F', 'N' for neutral); the 'NAME' entry is the form
# used when the placeholder is replaced by the student's name.
# XE = subject (he/she/they), XIM = object (him/her/them),
# XIS = possessive (his/her/their).
PRONOUNS = {
    'XE': {'M': "he", 'F': 'she', 'N': 'they', 'NAME': 'NAME'},
    'XIM': {'M': "him", 'F': 'her', 'N': 'them', 'NAME': 'NAME'},
    'XIS': {'M': "his", 'F': 'her', 'N': 'their', 'NAME': "NAME's"},
}
def parse_pronouns(comment, sex='N', name=None):
    """Expand XE/XIM/XIS placeholders for *sex* and NAME tokens with *name*.

    Every third pronoun placeholder is first turned into a NAME token (see
    ``replace_nth_with_name``) to avoid repetitive prose, then all
    remaining placeholders are replaced by the pronouns for *sex*.
    """
    pronouns = list(PRONOUNS.keys())
    comment = replace_nth_with_name(comment, pronouns, 3)
    for p in pronouns:
        comment = comment.replace(p, PRONOUNS[p][sex])
    if name is not None:
        # Guard: the declared default name=None crashed on .capitalize()
        # in the original.
        comment = comment.replace('NAME', name.capitalize())
    return comment
def replace_nth_with_name(text, pronouns, n=3):
    """Replace every *n*-th pronoun placeholder in *text* with a NAME token.

    Walks the words of *text*; each placeholder from *pronouns* whose
    running count is a multiple of *n* (counting from 0, so the very first
    placeholder always matches) becomes that placeholder's 'NAME' form.
    Seeing a literal 'NAME' word resets the counter to 1 so a name is not
    repeated immediately afterwards.
    (Regex alternative: https://stackoverflow.com/a/46705842/2700631)
    """
    new_text = ""
    count = 0
    for word in text.split():
        if word in pronouns:
            if count % n == 0:
                new_text += PRONOUNS[word]['NAME']
            else:
                new_text += word
            count += 1
        else:
            new_text += word
            if word == 'NAME':
                count = 1  # reset counter
        new_text += " "
    return new_text
def cap_matches(match):
    """re.sub callback: the matched text with its first letter upper-cased."""
    matched_text = match.group()
    return matched_text.capitalize()
def capitalize_sentences(text):
    """Capitalise the first word of the text, of each sentence, and of any
    word following a closing parenthesis plus whitespace.
    (Pattern from https://stackoverflow.com/a/22801162/2700631)
    """
    # Word at start of string, after sentence-ending punctuation + space,
    # or after ')' + space.
    pattern = re.compile(r'((?<=[\.\?!]\s)(\w+)|(^\w+)|(?<=\)\s)(\w+))')
    return pattern.sub(lambda m: m.group().capitalize(), text)
class Student:
    """A student and the report-card comments accumulated for them."""

    def __init__(self, firstname, lastname, sex):
        self.firstname = firstname
        self.lastname = lastname
        self.sex = sex
        self.comments = []

    def comment_string(self):
        """All comments with blue index markers, pronouns resolved."""
        joined = "".join(
            "({}{}{}) {} ".format(bcolors.BLUE, idx, bcolors.ENDC, text)
            for idx, text in enumerate(self.comments)
        )
        if joined:
            joined = parse_pronouns(joined, self.sex, self.firstname)
        return capitalize_sentences(joined)

    def final_comment_string(self):
        """All comments joined for the final report, pronouns resolved."""
        merged = ' '.join(self.comments)
        if merged:
            merged = parse_pronouns(merged, self.sex, self.firstname)
        return capitalize_sentences(merged)
class CommentGenerator:
    """Interactive report-card comment builder.

    Loads a student roster from csv and a template file whose comments are
    grouped under ``# CATEGORY`` heading lines, then walks through each
    student letting the user assemble, reorder and save comments.
    """

    def __init__(self):
        self.comment_file = None  # template path, set by get_comment_dict()
        self.students = self.generate_students(self.get_student_list())
        self.comments = self.get_comment_dict()
        self.save_file = "saved_comments.txt"

    def generate_students(self, student_list):
        """Turn parsed csv rows into Student objects."""
        students = []
        for s in student_list:
            student = Student(s["FIRST NAME"], s["LAST NAME"], s["SEX"])
            students.append(student)
        return students

    def get_student_list(self):
        """Prompt for the roster csv until one can be read; return its rows."""
        while True:
            print("Student data file should be csv with at least the following fields: "
                  "FIRST NAME, LAST NAME, SEX")
            filename = input("Student data file path (default is ~/StudentList.csv): ")
            if filename == '':
                filename = "~/StudentList.csv"
            # == instead of the original ``is``: identity comparison with a
            # string literal is implementation-dependent.
            if filename[0] == '~':  # expand ~ to home dir
                home_path = str(Path.home())
                filename = filename.replace('~', home_path)
            try:
                with open(filename) as f:
                    f.readline()  # skip first row which is just column numbers; 2nd row is headings.
                    csv_reader = csv.DictReader(f)
                    csv_reader.fieldnames = [field.strip().upper() for field in csv_reader.fieldnames]
                    student_list = list(csv_reader)
                    for i, student in enumerate(student_list):
                        # strip whitespace and remove empty keys
                        student_list[i] = {k: v.strip() for k, v in student.items() if k}
                    return student_list
            except FileNotFoundError:
                print("File not found... try again?")

    def get_comment_dict(self):
        """Prompt for the template file and parse it.

        Returns an OrderedDict mapping category name -> list of comment
        templates.  Lines starting with '#' begin a new category; leading
        uncategorised comments go under "GENERAL".
        """
        while True:
            filename = input("Enter comment file path, or [Enter] for default: ")
            if filename == '':
                filename = "comments.txt"
            elif filename[0] == '~':  # expand ~ to home dir
                home_path = str(Path.home())
                filename = filename.replace('~', home_path)
            try:
                comment_dict_import = OrderedDict()
                with open(filename) as f:
                    # read the first line separately so we can set an initial category
                    line = f.readline().strip()
                    current_list = []
                    if line and line[0] != '#':
                        current_category = "GENERAL"
                        current_list.append(line)
                    else:
                        current_category = line[1:].strip()  # remove hash and whitespace
                    # we have a category, let's do the rest now.
                    for line in f:
                        line = line.strip()
                        if line and line[0] == '#':  # comment heading
                            # save the current list with the previous heading and start a new one.
                            comment_dict_import[current_category] = current_list
                            current_category = line[1:].strip()  # remove hash
                            current_list = []  # start a new list for the new category
                        elif line:
                            current_list.append(line)
                    # add the last category
                    comment_dict_import[current_category] = current_list
                self.comment_file = filename  # save so we can add to it later!
                return comment_dict_import
            except FileNotFoundError:
                print("File not found... try again?")

    def insert_into_comment_file(self, comment, category):
        """Append *comment* at the end of *category* in the template file.

        A backup copy ("<file>_bu") is written first.
        """
        copyfile(self.comment_file, "{}_bu".format(self.comment_file))
        with open(self.comment_file) as f:
            original = f.readlines()
        with open(self.comment_file, 'w') as f:
            found_category = False
            inserted = False
            for line in original:
                if not inserted:
                    # Only heading lines ('#...') can open a category; the
                    # original also matched ordinary comment lines whose
                    # tail happened to equal the category name.
                    if (not found_category and line.startswith('#')
                            and line[1:].strip() == category):
                        found_category = True
                    elif found_category and line[0] == '#':
                        # add before the next category (at end of chosen category)
                        f.write(comment + "\n")
                        inserted = True
                f.write(line)
            if not inserted:  # chosen category was the last in the file
                f.write(comment + "\n")

    def remove_comment_from_file(self, category):
        """Interactively delete one template of *category* from the file."""
        index = int(input("Which comment template do you want to delete? "))
        comment_to_remove = self.comments[category][index]
        print(comment_to_remove)
        confirm = input("Are you sure you want to delete this template? ")
        if confirm == 'y':
            with open(self.comment_file) as f:
                original = f.readlines()
            with open(self.comment_file, 'w') as f:
                for line in original:
                    if line.strip() != comment_to_remove.strip():
                        # readlines() keeps trailing newlines; writing
                        # line + "\n" (as the original did) doubled them.
                        f.write(line)
                    else:  # found the line, also remove from current list
                        self.comments[category].remove(comment_to_remove)

    def custom_comment(self, category, save_option=True):
        """Read a custom comment, optionally saving it as a template."""
        comment = input("Enter your custom comment (use XE XIS and XIM to allow for easy reuse): ")
        prompt = "Do you want to save this comment for future use under the {} category? (y) or n: ".format(category)
        gotta_save = input(prompt) if save_option else 'n'
        if gotta_save == '' or gotta_save.lower()[0] != 'n':
            self.insert_into_comment_file(comment, category)
            self.comments[category].append(comment)
        return comment

    def remove_comment(self, student):
        """Remove one of *student*'s comments, or all of them."""
        self.print_header(student)
        index = input("Which comment do you want to remove (a for all)? ")
        if index == 'a':
            student.comments = []
            # Return here: the original fell through to pop(int('a')) and
            # raised ValueError after clearing the list.
            return
        student.comments.pop(int(index))

    def move_comment(self, student):
        """Reposition one of *student*'s comments."""
        self.print_header(student)
        index = int(input("Which comment do you want to move? "))
        comment = student.comments.pop(index)
        print(student.comment_string())
        index = int(input("Move it before which comment? "))
        student.comments.insert(index, comment)

    def update_gender(self, student):
        """Set the pronoun set used for *student* (M, F or neutral)."""
        self.print_header(student)
        choice = input("Choose a gender: M, F, or N (neutral): ")
        if choice.upper() in ['M', 'F', 'N']:
            student.sex = choice.upper()

    def update_name(self, student):
        """Override the first name used inside comments."""
        self.print_header(student)
        choice = input("First name to use in comments: ")
        student.firstname = choice

    def save(self):
        """Write every student's final comment block to the save file."""
        with open(self.save_file, 'w') as f:
            for student in self.students:
                f.write(student.firstname.upper() + " " + student.lastname.upper() + "\n")
                f.write(student.final_comment_string())
                f.write("\n\n")

    def get_category(self, index):
        """Category name at position *index* (file order)."""
        return list(self.comments.keys())[index]

    def get_categories(self):
        """All category names, in file order."""
        return list(self.comments.keys())

    def print_header(self, student):
        """Clear the screen and show the student's current comments."""
        clear()
        print("Generating comments for: {}{} {}{} ({}):".
              format(bcolors.PURPLE, student.firstname, student.lastname, bcolors.ENDC, student.sex))
        print(student.comment_string())
        printc(student.final_comment_string())

    def choose_comment(self, student):
        """One round of the interactive menu for *student*.

        Returns (comment, status) where status is "new" (append comment),
        "continue" (redisplay the menu) or "complete" (next student).
        """
        try:
            categories = self.get_categories()
            self.print_header(student)
            print("Choose a comment category")
            for i, cat in enumerate(categories):
                print("{}: {}".format(i, cat))
            print("-------- OR --------")
            print("change (g)ender or (n)ame | "
                  "(c)ustom comment | "
                  "(r)emove or (m)ove a comment | "
                  "(s)ave and ne(x)t or save and (q)uit"
                  )
            choice = input()
            if choice == 'q':
                self.save()
                clear()
                sys.exit()
            elif choice == 's':
                self.save()
                return None, "complete"
            elif choice == 'x':
                return None, "complete"
            elif choice == 'r':
                self.remove_comment(student)
                return None, "continue"
            elif choice == 'c':
                return self.custom_comment(category=None, save_option=False), "new"
            elif choice == 'g':
                self.update_gender(student)
                return None, "continue"
            elif choice == 'n':
                self.update_name(student)
                return None, "continue"
            elif choice == 'm':
                self.move_comment(student)
                return None, "continue"
            else:
                # Numeric input selects a category and opens its submenu.
                choice = int(choice)
                self.print_header(student)
                category = self.get_category(choice)
                print("Choose a {} Comment:".format(category))
                for i, com in enumerate(self.comments[category]):
                    print("{}: {}".format(i, com))
                print("-------- OR --------")
                print("a: Add a new templated comment")
                print("r: remove a templated comment")
                print("b: Go back")
                choice = input()
                if choice == 'a':
                    return self.custom_comment(category), "new"
                if choice == 'r':
                    self.remove_comment_from_file(category)
                    return None, "continue"
                elif choice == 'b':
                    return None, "continue"
                choice = int(choice)
                return self.comments[category][choice], "new"
        except (IndexError, ValueError):
            print("That wasn't a valid option!")
            return None, "continue"

    def generate_comments(self, student):
        """Loop the menu for *student* until they are marked complete."""
        adding_comments = True
        while adding_comments:
            comment, status = self.choose_comment(student)
            if status == "new":
                student.comments.append(comment)
            elif status == "complete":
                adding_comments = False

    def run(self):
        """Process every student in roster order."""
        for student in self.students:
            self.generate_comments(student)
# Entry point guard: only run the interactive generator when executed as
# a script (the original ran it on import as well).
if __name__ == '__main__':
    print("**** REPORT CARD COMMENT GENERATOR 2000 ****")
    cg = CommentGenerator()
    cg.run()
|
import torch.nn as nn
import torch
class Biaffine(nn.Module):
    """Biaffine scorer for dependency-arc scores.

    Computes s = [x; 1] W y^T with ``n_output`` independent weight
    slices; the appended ones-column gives the bias term for x.
    """

    def __init__(self, n_input, n_output=1):
        super().__init__()
        self.n_in = n_input
        self.n_out = n_output
        # One (n_in + 1) x n_in slice per output channel, e.g. 1x501x500.
        self.weight = nn.Parameter(
            torch.Tensor(self.n_out, self.n_in + 1, self.n_in)
        )
        self.reset_parameters()

    def reset_parameters(self):
        """Initialise all weights to zero."""
        nn.init.zeros_(self.weight)

    def forward(self, x, y):
        """Score every (dependent, head) pair.

        Args:
            x: dependent representations, [batch_size, seq_len, n_in].
            y: head representations, [batch_size, seq_len, n_in].

        Returns:
            Scores of shape [batch_size, n_out, seq_len, seq_len];
            s[b, o, i, j] is the score of the arc j -> i.  When n_out is
            1 the n_out axis is squeezed away automatically.
        """
        bias_col = torch.ones_like(x[..., :1])
        x_ext = torch.cat((x, bias_col), dim=-1)
        scores = torch.einsum('bxi,oij,byj->boxy', x_ext, self.weight, y)
        return scores.squeeze(1)
|
import scrapy
import csv
import os
class OilTraceSpider(scrapy.Spider):
    """Scrapes the G1 news article listing beaches hit by oil slicks."""
    name = "oiltrace"

    def start_requests(self):
        """Yield the single article request, handled by ``parse``."""
        urls = [
            'https://g1.globo.com/natureza/noticia/2019/10/08/lista-de-praias-atingidas-pelas-manchas-de-oleo-no-nordeste.ghtml'
        ]
        for url in urls:
            yield scrapy.Request(url=url, callback=self.parse)

    def parse(self, response):
        """Write every table row of the article to ../files/oilstory.csv.

        NOTE(review): the output path is built from os.getcwd() assuming
        the crawler runs one level below the project root, and assumes a
        POSIX path separator -- confirm before running elsewhere.
        """
        source_path = '/'.join(os.getcwd().split('/')[:-1])
        with open(source_path+'/files/oilstory.csv', 'w') as csv_file:
            wr = csv.writer(csv_file, delimiter=',', quoting=csv.QUOTE_ALL)
            # Each <tr>'s cell texts become one csv row.
            for row in response.xpath('//tr'):
                list_row = row.css('td::text').getall()
                wr.writerow(list_row)
|
from datetime import datetime
from django.core.management import BaseCommand
from core.facade import get_usd_cny_exchange
from core.models import CotacoesMoedas
class Command(BaseCommand):
    help = '''Atualiza cotações no banco de dados'''

    def handle(self, *args, **options):
        """Fetch USD/CNY exchange quotes and upsert them into CotacoesMoedas."""
        cotacoes_moedas = get_usd_cny_exchange()
        self._upsert_series(cotacoes_moedas['cotacao_cny'], 'cny')
        # NOTE(review): the 'cotacao_brl' series is stored in the ``usd``
        # field, mirroring the original code -- confirm this mapping.
        self._upsert_series(cotacoes_moedas['cotacao_brl'], 'usd')

    @staticmethod
    def _upsert_series(cotacoes, field):
        """Create or update one CotacoesMoedas row per quote in *cotacoes*.

        Each quote is a dict with "data" (dd/mm/YYYY) and "valor".
        """
        for cotacao in cotacoes:
            data = datetime.strptime(cotacao["data"], "%d/%m/%Y")
            valor = cotacao["valor"]
            # update_or_create replaces the duplicated get/except blocks
            # of the original and avoids the get()-then-create() race.
            CotacoesMoedas.objects.update_or_create(
                date=data, defaults={field: valor}
            )
|
from gtav_properties import properties, columns
import os
# Python 2 script: build a simple HTML gallery (out.html) with one block
# of images per person record.
out = open('out.html', "w+")
for p in properties:
    # Pair each column name with its value for this record.
    d = dict(zip(columns, p))
    out.write("""%s (%s) """ % (d["personid"], d["gender"]))
    # NOTE(review): hard-coded to images 1..4; the commented-out bound
    # suggests d['numimages']+1 was intended -- confirm.
    for idx in xrange(1, 5):#d['numimages']+1):
        fname = "jpgs/%s_%03d.jpg" % (d["personid"], idx)
        out.write("""<img src="%s" />""" % (fname))
        # Warn about referenced images that are missing on disk.
        if not os.path.exists(fname):
            print "%s does not exist" % fname
    out.write("""<br><hr><br><br><br>\n""")
out.close()
|
#!/usr/bin/env python
# /data3/wk/MPTopo/src/select_paircmp.py
import os
import sys
import libtopologycmp as lcmp
import myfunc
import copy
import subprocess
# Debug switch for tracing unmapped TM positions (0 = off).
DEBUG_UNMAPPED_TM_POSITION = 0
# Bytes read from the paircmp file per chunk in the streaming loop.
BLOCK_SIZE = 100000
# Program name used in the usage text below.
progname = os.path.basename(sys.argv[0])
usage="""
Usage: %s paircmp-file [-o OUTFILE]
Description:
Select paircmp file based on filtering schemes
OPTIONS:
-o OUTFILE Output the result to file
-q Quiet mode
-h, --help Print this help message and exit
-tableinfo FILE Set pairwise alignment table info, get more pairwise
statistics
-seqidttype INT Set sequence identity type, (default: 1)
0: seqIDT = numIDTRes /alnLength
1: seqIDT = numIDTRes / min(len1, len2)
2: seqIDT = numIDTRes / (alnLength - NumGAP)
Note: if seqidttype = 1 or 2, tableinfo file must be set
-alignrange STR Select alignment with different alignment ranges
all, full, part, (default: all)
-cmpclass STR cmpclass
-min-seqidt FLOAT (default: 0)
-max-seqidt FLOAT (default: 100)
-signalp FILE signalp file
-rmsp Remove pairs with signalp
-restrictidlist FILE Set restriction seq idlist
Created 2013-06-26, updated 2013-08-13, Nanjiang Shu
Example:
%s t1.paircmp -tableinfo t1.tableinfo -seqidttype 1 -alignrange full -cmpclass TM2SEQ -cmpclass TM2GAP_AND_TM2SEQ
"""%(progname, progname)
def PrintHelp():#{{{
    """Print the module usage text (Python 2 print statement)."""
    print usage
#}}}
def AddTableInfo(recordList, pairalnStat):#{{{
    """Attach per-pair sequence-identity stats to each record, in place.

    Looks up each record's "id1-id2" key in *pairalnStat* and, when
    present, copies seqidt1/seqidt2 onto the record.  No-op when
    *pairalnStat* is empty.
    """
    if not pairalnStat:
        return
    for record in recordList:
        pairid = "%s-%s" % (record['id1'], record['id2'])
        stat = pairalnStat.get(pairid)
        if stat is not None:
            record['seqidt1'] = stat['seqidt1']
            record['seqidt2'] = stat['seqidt2']
#}}}
def AddSignalPInfo(recordList, signalpDict): #{{{
    """Attach signal-peptide info (sp1/sp2) to each record, in place.

    Ids missing from *signalpDict* get -1.  No-op when the dict is empty.
    """
    if not signalpDict:
        return
    for record in recordList:
        record['sp1'] = signalpDict.get(record['id1'], -1)
        record['sp2'] = signalpDict.get(record['id2'], -1)
#}}}
def AddDupInfo(recordList, dupPairSet):#{{{
    """Flag each record as a duplicated pair ('isDup'), in place.

    A pair counts as duplicated when it appears in *dupPairSet* in
    either orientation.
    """
    for record in recordList:
        pair = (record['id1'], record['id2'])
        record['isDup'] = pair in dupPairSet or pair[::-1] in dupPairSet
#}}}
def FilterPairCmpResult(recordList, cmpclassList, rltyDict, #{{{
restrictIDSet):
"""
Filter paircmp result by g_params
return newList
"""
newList = []
pairListSet = set([])
numInputRecord = len(recordList)
seqidttype = g_params['seqidttype']
isRemoveSignalP = g_params['isRemoveSignalP']
isRemoveDup = g_params['isRemoveDup']
for record in recordList:
# print record['id1'], record['seqidt1']
# print record['id2'], record['seqidt1']
if record == {}:
continue
id1 = record['id1']
id2 = record['id2']
if ((g_params['isRestrictIDListSet'] == True) and
((not id1 in restrictIDSet)
or (not id2 in restrictIDSet))):
continue
if isRemoveSignalP:
if 'sp1' in record and record['sp1'] != -1:
continue
if 'sp2' in record and record['sp2'] != -1:
continue
if isRemoveDup:
if 'isDup' in record and record['isDup']:
continue
if record['isLocalAlignment'] and g_params['alignrange'] != 'all':
if record['alignrange'] != g_params['alignrange']:
continue
if rltyDict != {}:
if id1 in rltyDict:
rlty = rltyDict[id1]
# print "rlty[%s]=%.1f"%(id1, rlty)
if rlty < g_params['minRLTY'] or rlty > g_params['maxRLTY']:
continue
if id2 in rltyDict:
# print "rlty[%s]=%.1f"%(id1, rlty)
if rlty < g_params['minRLTY'] or rlty > g_params['maxRLTY']:
continue
seqidt = lcmp.GetSeqIDT(record, seqidttype)
if (seqidt < g_params['minSeqIDT'] or seqidt >=
g_params['maxSeqIDT']):
continue
if len(cmpclassList) == 0 or record['cmpclass'] in cmpclassList:
newList.append(record)
if g_params['isDEBUG']:
if numOutputRecord < numInputRecord:
print "%d pairs dropped" % (numInputRecord-numOutputRecord)
return newList
#}}}
def GetSeqIDTGroupIndex(seqidt, seqIDTGroupList):#{{{
    """Return the index of the [low, high) bin containing *seqidt*.

    *seqIDTGroupList* is a flat list of low/high bounds; returns the
    number of bins when *seqidt* falls in none of them.  Floor division
    and range() keep the behaviour identical on Python 2 while also
    working on Python 3 (the original len(...)/2 would be a float there,
    and xrange does not exist).
    """
    numGroup = len(seqIDTGroupList) // 2
    for i in range(numGroup):
        if seqidt >= seqIDTGroupList[i*2] and seqidt < seqIDTGroupList[i*2+1]:
            return i
    return numGroup
#}}}
def main(g_params):#{{{
    """Parse command-line options, stream the paircmp file block by block,
    filter the records, and write the survivors.

    Returns 0 on success, 1/-1 on usage or I/O errors.
    """
    argv = sys.argv
    numArgv=len(sys.argv)
    if numArgv < 2:
        PrintHelp()
        return 1
    infile = ""
    outpath = "./"
    isQuiet = False
    tableinfoFile = ""
    cmpclassList = []
    restrictIDListFile = ""
    signalpFile = ""
    dupFile = ""
    outfile = ""
    i = 1
    isNonOptionArg=False
    # Hand-rolled option parser: "--" forces the next token to be treated
    # as the input file even when it starts with "-".
    while i < numArgv:
        if isNonOptionArg == True:
            infile = sys.argv[i]
            isNonOptionArg = False
            i += 1
        elif sys.argv[i] == "--":
            isNonOptionArg=True
            i += 1
        elif sys.argv[i][0] == "-":
            if sys.argv[i] in [ "-h", "--help"]:
                PrintHelp()
                sys.exit()
            elif argv[i] in [ "-o", "--o"]:
                (outfile, i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in [ "-cmpclass", "--cmpclass"]:
                # May be repeated; all given classes are accepted.
                (tmpstr, i) = myfunc.my_getopt_str(argv, i)
                cmpclassList.append(tmpstr)
            elif argv[i] in [ "-signalp", "--signalp"]:
                (signalpFile, i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in [ "-restrictidlist", "--restrictidlist"]:
                (restrictIDListFile, i) = myfunc.my_getopt_str(argv, i)
                g_params['isRestrictIDListSet'] = True
            elif argv[i] in [ "-dup", "--dup", "-dupfile", "--dupfile"]:
                (dupFile, i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in [ "-rmsp", "--rmsp"]:
                g_params['isRemoveSignalP'] = True; i+=1
            elif argv[i] in [ "-rmdup", "--rmdup"]:
                g_params['isRemoveDup'] = True; i+=1
            elif argv[i] in ["-seq2fammap", "--seq2fammap"]:
                (seq2famMapfile, i) = myfunc.my_getopt_str(argv, i)
            elif argv[i] in ["-seqidttype", "--seqidttype"]:
                g_params['seqidttype'], i = myfunc.my_getopt_int(argv,i)
            elif argv[i] in ["-tableinfo", "--tableinfo"]:
                tableinfoFile, i = myfunc.my_getopt_str(argv, i)
            elif argv[i] in ["-min-seqidt", "--min-seqidt"]:
                g_params['minSeqIDT'], i = myfunc.my_getopt_float(argv, i)
            elif argv[i] in ["-max-seqidt", "--max-seqidt"]:
                g_params['maxSeqIDT'], i = myfunc.my_getopt_float(argv, i)
            elif argv[i] in ["-evodist", "--evodist"]:
                g_params['isEvodist'] = True
                i += 1
            elif argv[i] in ["-alignrange", "--alignrange"]:
                g_params['alignrange'],i = myfunc.my_getopt_str(argv,i)
                if not g_params['alignrange'] in ['all', 'full', 'part']:
                    print >> sys.stderr, "alignrange must be one of [all, full, part]"
                    return 1
                else:
                    # Map the user-facing keywords to record field values.
                    if g_params['alignrange'] == 'full':
                        g_params['alignrange'] = 'FULL_ALIGNED'
                    elif g_params['alignrange'] == 'part':
                        g_params['alignrange'] = 'PART_ALIGNED'
            elif argv[i] in ["-debug", "--debug"]:
                if argv[i+1][0].lower() == 'y':
                    g_params['isDEBUG'] = True
                else:
                    g_params['isDEBUG'] = False
                i += 2
            elif argv[i] in ["-debug-unmapped-position", "--debug-unmapped-position"]:
                # NOTE(review): this binds a *local* name; it does not
                # change the module-level DEBUG_UNMAPPED_TM_POSITION --
                # likely a latent bug, confirm intent.
                DEBUG_UNMAPPED_TM_POSITION = 1
                i += 2
            elif sys.argv[i] == "-q":
                isQuiet=True
                i += 1
            else:
                print >> sys.stderr, "Error! Wrong argument:", sys.argv[i]
                return -1
        else:
            infile = sys.argv[i]
            i += 1
    if infile == "":
        print >> sys.stderr, "infile not set. Exit."
        return -1
    elif not os.path.exists(infile):
        # NOTE(review): no return here despite the "Exit." message; a
        # missing file is only actually rejected by the open() below.
        print >> sys.stderr, "infile %s does not exists. Exit."%infile
    try:
        fpin = open(infile, "rb")
    except IOError:
        print >> sys.stderr, "Failed to open input file %s"%(infile)
        return -1
    pairalnStat = {}
    # seqidttype 1 and 2 need per-pair alignment stats from the tableinfo file.
    if g_params['seqidttype'] != 0:
        if tableinfoFile == "" or not os.path.exists(tableinfoFile):
            print >> sys.stderr, "tableinfoFile must be set when seqidttype is set to 1 or 2"
            print >> sys.stderr, "but seqidttype = %d is set. Exit."%g_params['seqidttype']
            return -1
        pairalnStat = lcmp.ReadPairAlnTableInfo(tableinfoFile)
    rootname = os.path.basename(os.path.splitext(infile)[0])
    binpath = os.path.dirname(sys.argv[0])
    signalpDict = {}
    if signalpFile != "":
        signalpDict = lcmp.ReadSignalPDict(signalpFile)
        if signalpDict != {}:
            g_params['isSignalPSet'] = True
    dupPairList = []
    if dupFile != "":
        dupPairList = lcmp.ReadDupPairList(dupFile)
        if len(dupPairList) > 0:
            g_params['isDupSet'] = True
    dupPairSet = set(dupPairList)
    restrictIDSet = set([])
    if restrictIDListFile != "":
        restrictIDSet = set(myfunc.ReadIDList(restrictIDListFile))
    rltyDict = {}
    # Write to outfile when given, otherwise to stdout.
    fpout = myfunc.myopen(outfile, sys.stdout, "w", False)
    unprocessedBuffer=""
    cntTotalReadInRecord = 0
    cntTotalOutputRecord = 0
    isEOFreached = False
    # Stream the input in BLOCK_SIZE chunks; a record straddling a chunk
    # boundary is carried over in unprocessedBuffer to the next round.
    while 1:
        buff = fpin.read(BLOCK_SIZE)
        if buff == "":
            isEOFreached = True
        buff = unprocessedBuffer + buff
        pairCmpRecordList=[]
        unprocessedBuffer = lcmp.ReadPairCmpResultFromBuffer(buff,pairCmpRecordList)
        AddTableInfo(pairCmpRecordList, pairalnStat)
        AddSignalPInfo(pairCmpRecordList, signalpDict)
        AddDupInfo(pairCmpRecordList, dupPairSet)
        cntTotalReadInRecord += len(pairCmpRecordList)
        pairCmpRecordList = FilterPairCmpResult(pairCmpRecordList, cmpclassList, rltyDict, restrictIDSet)
        if len(pairCmpRecordList) > 0:
            lcmp.WritePairCmpRecord(pairCmpRecordList, cntTotalOutputRecord, fpout)
        cntTotalOutputRecord += len(pairCmpRecordList)
        if isEOFreached == True:
            break
    fpin.close()
    print "cntTotalReadInRecord =", cntTotalReadInRecord
    print "cntTotalOutputRecord =", cntTotalOutputRecord
    myfunc.myclose(fpout)
    return 0
#}}}
def InitGlobalParameter():#{{{
    """Return the default global-parameter dictionary for this script."""
    return {
        'isDEBUG': False,
        'selecttype': 'all',
        'outpath': "",
        'minGapFraction': 0.0,
        'maxGapFraction': 1.0,
        'minRLTY': 0.0,    # minimal reliability score
        'maxRLTY': 100.0,  # maximal reliability score
        'seqidttype': 0,
        'minDGvalue': -999999.0,
        'maxDGvalue': 100,
        'minSeqIDT': 0.0,
        'maxSeqIDT': 100.0,
        'isShowProgress': True,
        'isPrintDIFFPair': False,
        'DIFFPairList': [],
        'isPrintCountPairInFam': False,
        'countPairInFam': [],
        'isPrintFileRltyCmpclass': False,
        'isPrintFileRltyHelixCmpclass': False,
        'isPrintNumTMHeatMap': True,
        'isEvodist': False,
        'pairwise_comparison_method': 0,
        'thHigh_seqidt': 30.0,
        'thHigh_evodist': 1.0,
        'isFilterPredictedSeq': False,
        'isRLTYSupplied': False,
        'isSignalPSet': False,
        'isRemoveSignalP': False,
        'isRemoveDup': False,
        'isDupSet': False,
        'numTMHeatMapMode': "full",
        'isRestrictIDListSet': False,
        'alignrange': 'all',
    }
#}}}
# Script entry point: build the default parameters and run main().
if __name__ == '__main__' :
    g_params = InitGlobalParameter()
    sys.exit(main(g_params))
|
from multiprocessing import Pool, TimeoutError
import time
import os
def f(x):
    """Return (worker pid, x squared) so results show which process ran."""
    return os.getpid(), x ** 2
if __name__ == '__main__':
    # Start 8 worker processes (the original comment said 4, the code uses 8).
    with Pool(processes=8) as pool:
        # map blocks until all 100 results are in: [(pid, 0), (pid, 1), ...]
        print(pool.map(f, range(100)))
        # BUGFIX: imap returns a lazy iterator; the original printed the
        # iterator object itself instead of the computed results.
        print(list(pool.imap(f, range(10))))
|
import modeTest  # import the whole module -- style 1 of the three import forms
# # from modeTest import add  # style 2
# # from modeTest import *    # style 3
# re = modeTest.add(1,2)  # the module's test code also ran on import; to stop
#                         # that, the module must guard its test code with an
#                         # ``if __name__ == '__main__'`` check (see the module)
# print(re)
# print(modeTest.diff(3,4))
# # with that guard in place, the output no longer includes the test code
print(modeTest.printInfo())  # with a style-1 import, even names outside __all__ can be called
# With ``from modeTest import *``, only names listed in the module's __all__
# are brought into scope:
# from modeTest import *
# print(add(1,2))
# print(diff(1,2))
# # print(printInfo())  # fails here: printInfo is not listed in __all__
|
"""General monte carlo simulation helper."""
import os
from time import time
import multiprocessing
from collections import OrderedDict
from pathos.multiprocessing import ProcessPool
import pandas as pd
import seaborn as sns
import matplotlib.pyplot as plt
from tqdm import tqdm
from mpmath import mpf
global_fn_multi = None
global_var_multi = None
def multiprocessing_func(fn_to_eval, random_var_gen, i):
    """One monte-carlo iteration, picklable for worker processes.

    Generates the random inputs for iteration *i* and evaluates the
    target function on them.
    """
    args = random_var_gen(i)
    return fn_to_eval(*args)
def monte_carlo(
    fn_to_eval,
    random_var_gen,
    num_simulations,
    num_cpus=1,
    save_every=1,
    save_name="monte_carlo_result.csv",
    headers=None,
    progress=True,
):
    """
    Full monte carlo simulation loop.

    Evaluate fn_to_eval over num_simulations iterations, with
    *random_var_gen(i) passed into fn_to_eval at each iteration i.

    Parameters:
        fn_to_eval: function evaluated once per iteration.
        random_var_gen: maps an iteration index to a tuple of arguments.
        num_simulations: number of iterations to run.
        num_cpus: >1 dispatches iterations to a pathos ProcessPool.
        save_every: fraction of the run between checkpoint saves
            (checkpointing happens on the single-process path only,
            mirroring the original).
        save_name: base filename for checkpoint csvs, written to ../mc/.
        headers: column names for the checkpoint dataframe.
        progress: show a tqdm progress bar.

    Returns:
        List of per-iteration results.
    """
    all_stats = []
    # Plain locals: the original assigned to global_fn_multi /
    # global_var_multi without a ``global`` statement, which never touched
    # the module globals anyway.
    fn = fn_to_eval
    var_gen = random_var_gen
    # Convert the save fraction to an iteration interval; clamp to >= 1 so
    # the modulo below can never divide by zero (the original crashed when
    # save_every * num_simulations rounded down to 0).
    save_every = max(1, int(save_every * num_simulations))
    pbar = tqdm(range(num_simulations), disable=not progress)
    if num_cpus > 1:
        pool = ProcessPool(nodes=num_cpus)
        print(
            "Launching {} workers for {} iterations".format(num_cpus, num_simulations)
        )
        pbar.set_description("Monte carlo loop")
        for i in pbar:
            # apipe returns an async handle; .get() blocks for the result.
            result = pool.apipe(multiprocessing_func, fn, var_gen, i)
            all_stats.append(result.get())
    else:
        pbar.set_description("Monte carlo loop")
        for i in pbar:
            random_vars = var_gen(i)
            all_stats.append(fn(*random_vars))
            if (i != 0) and (i % save_every == 0):
                # Periodic checkpoint: dump everything so far to ../mc/.
                parts = os.path.splitext(save_name)
                out_name = parts[0] + "_" + str(i) + parts[1]
                df = list_to_df(all_stats, headers)
                here = os.path.dirname(os.path.realpath(__file__))
                os.makedirs(os.path.join(here, "..", "mc"), exist_ok=True)
                print("Saving results at {} iterations to {}".format(i, out_name))
                df.to_csv(os.path.join(here, "..", "mc", out_name), index=False)
    return all_stats
def list_to_df(in_list, headers=None):
    """Convert a list of records to a DataFrame with the given headers.

    When headers is None, columns are auto-named V0..V{n-1} based on the
    width of the first record.
    """
    if headers is None:
        headers = ["V{}".format(idx) for idx in range(len(in_list[0]))]
    return pd.DataFrame.from_records(in_list, columns=headers)
def summarise_monte_carlo(
    df,
    txt_outfile=None,
    plot=True,
    to_plot=None,
    plt_outfile=None,
    do_print=False,
):
    """Summary stats of monte carlo with optional dist plot.

    Parameters
    ----------
    df : pandas.DataFrame of simulation results.
    txt_outfile : optional path; when given, the summary is written there.
    plot : draw a distribution plot of `to_plot` when True.
    to_plot : column name to plot (required when plot=True).
    plt_outfile : optional figure path; when None the figure is shown.
    do_print : print the summary when no txt_outfile is given.

    Returns
    -------
    pandas.DataFrame: df.describe() rounded to 4 decimal places.

    Raises
    ------
    ValueError when plot=True but to_plot is not given.
    """
    result = df.describe().round(4)
    if (txt_outfile is None) and do_print:
        print(result)
    elif txt_outfile is not None:
        with open(txt_outfile, "w") as f:
            # BUG FIX: file.write() requires a string; previously the
            # DataFrame object itself was passed, raising TypeError.
            f.write(result.to_string())
    if plot:
        if to_plot is None:
            raise ValueError("Please provide a column to plot")
        a = df[to_plot].to_numpy()
        # Skip plotting a constant column (every value equal to the first).
        is_unique = (a[0] == a).all()
        if not is_unique:
            sns.displot(
                df[to_plot],
                kde=True,
                rug=False,
            )
            if plt_outfile is None:
                plt.show()
            else:
                plt.savefig(plt_outfile, dpi=400)
                plt.close()
    return result
def get_distribution(result_df, column_name, num_iters):
    """Calculate the simulated distribution of column_name.

    Returns an OrderedDict mapping each observed value to its relative
    frequency (occurrences / num_iters), with keys in ascending order.
    """
    weight = 1 / num_iters
    freqs = {}
    for observed in result_df[column_name]:
        freqs[observed] = freqs.get(observed, 0) + weight
    return OrderedDict(sorted(freqs.items()))
def dist_difference(actual_distribution, expected_distribution):
    """Calculate the difference between two distributions.

    For every key of expected_distribution, returns actual - expected.
    Keys present only in actual_distribution are ignored; keys missing from
    actual_distribution raise KeyError (unchanged behaviour).
    """
    return {
        key: actual_distribution[key] - expected
        for key, expected in expected_distribution.items()
    }
|
import ttg
import prettytable
class truthTable:
    """Wraps a propositional-logic AST and renders its truth table via ttg."""

    def __init__(self, AST):
        self.AST = AST
        self.proposition = []
        self.propVar = []
        self.operations = ["and", "or", "=>", "~"]
        self.convertProposition()
        print(self.proposition)
        print(self.propVar)

    def convertProposition(self):
        """Rewrite AST operator names into ttg syntax and collect variables."""
        text = str(self.AST)
        print(text)
        # The replacement order mirrors the original sequence exactly.
        for src, dst in (("CONJ", "and"), ("DISJ", "or"), ("IMPL", "=>"),
                         ("BIMPL", "="), ("NOT", "~")):
            text = text.replace(src, dst)
        self.proposition.append(text)
        # Anything that is not an operator is a propositional variable.
        for token in text.replace("(", "").replace(")", "").split():
            if token not in self.operations and token not in self.propVar:
                self.propVar.append(token)

    def generateTruth(self):
        """Return the truth table rendered as an HTML table string."""
        table = ttg.Truths(self.propVar, self.proposition, ints=False)
        return table.as_tabulate(index=False, table_format='html')

    def getResults(self):
        """Return the proposition's truth column as a string of 0/1 digits."""
        table = ttg.Truths(self.propVar, self.proposition, ints=True)
        column = table.as_pandas()[self.proposition].values.tolist()
        return "".join(str(row[0]) for row in column)
|
"""Copyright (c) 2018 Red Hat, Inc
All rights reserved.
This software may be modified and distributed under the terms
of the BSD license. See the LICENSE file for details.
After squashing our image, verify that it has the media types that
the registry expects
"""
from __future__ import unicode_literals
from atomic_reactor.constants import (PLUGIN_GROUP_MANIFESTS_KEY, PLUGIN_VERIFY_MEDIA_KEY,
MEDIA_TYPE_DOCKER_V1, MEDIA_TYPE_DOCKER_V2_SCHEMA1,
MEDIA_TYPE_DOCKER_V2_SCHEMA2,
MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST)
from atomic_reactor.plugin import ExitPlugin
from atomic_reactor.util import get_manifest_digests, get_platforms, RegistrySession
from atomic_reactor.plugins.pre_reactor_config import (get_registries,
get_platform_to_goarch_mapping)
from copy import deepcopy
from itertools import chain
def verify_v1_image(image, registry, log, insecure=False, dockercfg_path=None):
    """Return True when the registry serves a Docker v1 manifest for *image*.

    Raises an HTTP error (via raise_for_status) when the final response of
    the redirect chain is not OK.
    """
    session = RegistrySession(registry, insecure=insecure, dockercfg_path=dockercfg_path)
    headers = {'Accept': MEDIA_TYPE_DOCKER_V1}
    url = '/v1/repositories/{0}/tags/{1}'.format(image.get_repo(), image.tag)
    log.debug("verify_v1_image: querying {0}, headers: {1}".format(url, headers))
    response = session.get(url, headers=headers)
    # Log every hop of the redirect chain, then the final response.
    for hop in list(response.history) + [response]:
        log.debug("verify_v1_image: [%s] %s", hop.status_code, hop.url)
    log.debug("verify_v1_image: response headers: %s", response.headers)
    response.raise_for_status()
    # if we returned ok, then everything is fine.
    return True
class VerifyMediaTypesPlugin(ExitPlugin):
    """Exit plugin: confirm each configured registry serves the media types
    it declares in its 'expected_media_types' configuration.

    run() returns the sorted list of media types actually found, and raises
    KeyError when any expected type is missing.
    """
    key = PLUGIN_VERIFY_MEDIA_KEY
    is_allowed_to_fail = False
    def run(self):
        """Query each registry and compare found vs. expected media types."""
        # Only run if the build was successful
        if self.workflow.build_process_failed:
            self.log.info("Not running for failed build")
            return []
        # Work out the name of the image to pull
        if not self.workflow.tag_conf.unique_images:
            raise ValueError("no unique image set, impossible to verify media types")
        if self.workflow.push_conf.pulp_registries:
            self.log.info("pulp registry configure, verify_media_types should not run")
            return
        image = self.workflow.tag_conf.unique_images[0]
        media_types = set()
        registries = deepcopy(get_registries(self.workflow, {}))
        for registry_name, registry in registries.items():
            initial_media_types = registry.get('expected_media_types', [])
            if not initial_media_types:
                continue
            expected_media_types = self.set_manifest_list_expectations(initial_media_types)
            pullspec = image.copy()
            pullspec.registry = registry_name
            insecure = registry.get('insecure', False)
            secret = registry.get('secret', None)
            # All digest-based types can be checked with one manifest query.
            check_digests = (MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST in expected_media_types or
                             MEDIA_TYPE_DOCKER_V2_SCHEMA2 in expected_media_types or
                             MEDIA_TYPE_DOCKER_V2_SCHEMA1 in expected_media_types)
            if check_digests:
                digests = get_manifest_digests(pullspec, registry_name, insecure, secret,
                                               require_digest=False)
                if digests:
                    if digests.v2_list:
                        self.log.info("Manifest list found")
                        if MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST in expected_media_types:
                            media_types.add(MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST)
                    if digests.v2:
                        self.log.info("V2 schema 2 digest found")
                        if MEDIA_TYPE_DOCKER_V2_SCHEMA2 in expected_media_types:
                            media_types.add(MEDIA_TYPE_DOCKER_V2_SCHEMA2)
                    if digests.v1:
                        self.log.info("V2 schema 1 digest found")
                        if MEDIA_TYPE_DOCKER_V2_SCHEMA1 in expected_media_types:
                            media_types.add(MEDIA_TYPE_DOCKER_V2_SCHEMA1)
            # The v1 API needs its own HTTP request.
            if MEDIA_TYPE_DOCKER_V1 in expected_media_types:
                if verify_v1_image(pullspec, registry_name, self.log, insecure, secret):
                    media_types.add(MEDIA_TYPE_DOCKER_V1)
        # sorting the media type here so the failure message is predictable for unit tests
        missing_types = []
        for media_type in sorted(expected_media_types):
            if media_type not in media_types:
                missing_types.append(media_type)
        if missing_types:
            raise KeyError("expected media types {0} ".format(missing_types) +
                           "not in available media types {0}".format(sorted(media_types)))
        return sorted(media_types)
    def set_manifest_list_expectations(self, expected_media_types):
        """Trim expectations to the manifest list only when amd64 was not built.

        Falls back to the unmodified list whenever it cannot decide (group
        manifests plugin did not run, no platform list, or no descriptors).
        """
        if not self.workflow.postbuild_results.get(PLUGIN_GROUP_MANIFESTS_KEY):
            self.log.debug('Cannot check if only manifest list digest should be returned '
                           'because group manifests plugin did not run')
            return expected_media_types
        platforms = get_platforms(self.workflow)
        if not platforms:
            self.log.debug('Cannot check if only manifest list digest should be returned '
                           'because we have no platforms list')
            return expected_media_types
        try:
            platform_to_goarch = get_platform_to_goarch_mapping(self.workflow)
        except KeyError:
            self.log.debug('Cannot check if only manifest list digest should be returned '
                           'because there are no platform descriptors')
            return expected_media_types
        for plat in platforms:
            if platform_to_goarch[plat] == 'amd64':
                self.log.debug('amd64 was built, all media types available')
                return expected_media_types
        self.log.debug('amd64 was not built, only manifest list digest is available')
        return [MEDIA_TYPE_DOCKER_V2_MANIFEST_LIST]
|
import calendar

# Print the English name of each month, January through December.
year = 2021  # kept from the original script; not used by the loop below
for month_name in calendar.month_name[1:13]:
    print(month_name)
|
import vgg16; reload(vgg16)
from vgg16 import VGG16
#from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
import numpy as np
from keras.utils.np_utils import to_categorical
from keras.models import Sequential, load_model
from keras.layers import Flatten,Dense, BatchNormalization, Activation
from keras.layers.convolutional import Convolution2D
from keras.optimizers import Adam
from keras.regularizers import l2
import bcolz
# Identifiers for the supported pre-trained base architectures.
MODEL_VGG16 = 1
MODEL_VGG19 = 2
MODEL_RESNET = 3
MODEL_INCEPTION = 4
MODEL_TOP = 5
#vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32).reshape((3,1,1))
# Per-channel (RGB order) ImageNet mean used by VGG preprocessing.
vgg_mean = np.array([123.68, 116.779, 103.939], dtype=np.float32)
def save_array(fname, arr):
    """Persist *arr* to disk as a bcolz carray rooted at *fname*."""
    carr = bcolz.carray(arr, rootdir=fname, mode='w')
    carr.flush()
def load_array(fname):
    """Read back a full in-memory copy of an array written by save_array."""
    stored = bcolz.open(fname)
    return stored[:]
def vgg_preprocess(x, num):
    """Subtract the per-channel VGG ImageNet mean and flip channels RGB->BGR.

    num is the batch size; assumes x is (num, 3, H, W) -- TODO confirm.
    """
    print("vgg process start")
    mean = np.tile(vgg_mean, (num, 1)).reshape((num, 3, 1, 1))
    x = x - mean
    print("vgg process end")
    return x[:, ::-1]  # reverse axis rgb->bgr
def concat_data(data_gen):
    """Drain a Keras-1 style generator and stack all batches into one array."""
    batches = []
    for _ in range(data_gen.nb_sample):
        batches.append(data_gen.next())
    return np.concatenate(batches)
def oneHot(x):
    """One-hot encode integer class labels via Keras' to_categorical."""
    encoded = to_categorical(x)
    return encoded
class VisionModel:
    """Holds Keras ImageDataGenerator pipelines for train/val/test folders.

    NOTE(review): every augmentation keyword argument is accepted but never
    used -- all three generators are created with default settings.
    """
    def __init__(self,train_dir,val_dir,test_dir,saved_dir,
                 width_shift_range=0, width_zoom_range=0, zoom_range=0,
                 channel_shift_range=0, height_shift_range=0, shear_range=0,
                 horizontal_flip=False):
        self.train_datagen = image.ImageDataGenerator()
        #self.train_datagen = image.ImageDataGenerator(
        #    rescale=1./255,
        #    shear_range=0.2,
        #    zoom_range=0.2,
        #    horizontal_flip=True)
        self.val_datagen = image.ImageDataGenerator()
        self.test_datagen = image.ImageDataGenerator()
        #self.val_datagen = image.ImageDataGenerator(rescale=1./255)
        #self.test_datagen = image.ImageDataGenerator(rescale=1./255)
        self.train_dir =train_dir
        self.test_dir =test_dir
        self.val_dir = val_dir
        self.saved_dir = saved_dir
    def createDataGen(self,height=224,width=224,batch=1,entropy=None):
        """Build directory-backed generators for all three data splits.

        entropy is forwarded as Keras' class_mode (e.g. 'categorical');
        shuffling is disabled so sample order matches directory order.
        """
        print("createTrainGen train")
        self.train_gen = self.train_datagen.flow_from_directory(
            self.train_dir,
            target_size=(height, width),
            batch_size=batch,
            class_mode=entropy,
            shuffle=False)
        # Keras-1 attribute: number of classes found in the train directory.
        self.num_classes = self.train_gen.nb_class
        #self.train_imgs = concat_data(self.train_gen)
        #save_array(self.saved_dir+'train_data.bc',self.train_imgs)
        #self.train_imgs = load_array(self.saved_dir+'train_data.bc')
        #self.train_imgs = vgg_preprocess(self.train_imgs,self.train_gen.nb_sample)
        print("createTrainGen val")
        self.val_gen = self.val_datagen.flow_from_directory(
            self.val_dir,
            target_size=(height, width),
            batch_size=batch,
            class_mode=entropy,
            shuffle=False)
        #self.val_imgs = concat_data(self.val_gen)
        #self.val_imgs = load_array(self.saved_dir+'val_data.bc')
        #save_array(self.saved_dir+'val_data.bc',self.val_imgs)
        #self.val_imgs = vgg_preprocess(self.val_imgs,self.val_gen.nb_sample)
        print("createTestGen")
        self.test_gen = self.test_datagen.flow_from_directory(
            self.test_dir,
            target_size=(height, width),
            batch_size=batch,
            class_mode=entropy,
            shuffle=False)
        #
        #self.test_imgs = concat_data(self.test_gen)
        #self.test_imgs = load_array(self.saved_dir+'test_data.bc')
        #save_array(self.saved_dir+'test_data.bc',self.test_imgs)
        #print("done createTesGen saving ")
        #self.test_imgs = vgg_preprocess(self.test_imgs,self.test_gen.nb_sample)
        print("createTestGenFinito")
    def getNumClasses(self):
        """Number of classes discovered by createDataGen (train split)."""
        return self.num_classes
    def getTrainLabels(self):
        """One-hot labels for the (unshuffled) training generator."""
        return oneHot(self.train_gen.classes)
    def getValLabels(self):
        """One-hot labels for the (unshuffled) validation generator."""
        return oneHot(self.val_gen.classes)
    def nukeGens(self):
        """Drop all generators (frees references; empties the attributes)."""
        self.val_gen = []
        self.train_gen = []
        self.test_gen = []
    def getTestGen(self):
        return self.test_gen
    def getValGen(self):
        return self.val_gen
    def getTrainGen(self):
        return self.train_gen
class VisionTrainedModel(VisionModel):
    """VisionModel backed by a frozen pre-trained base (VGG16 only).

    Computes "bottleneck" features: images are run through the pre-trained
    network and the activations are saved/loaded so that a small top model
    can be trained on them cheaply.
    """
    def __init__(self,train_dir,val_dir,test_dir,saved_dir,model_type,
            type=MODEL_VGG16,rotation_range=0,
            width_shift_range=0, width_zoom_range=0, zoom_range=0,
            channel_shift_range=0, height_shift_range=0, shear_range=0,
            horizontal_flip=False):
        # BUG FIX: augmentation arguments were previously hard-coded to 0
        # when forwarded to VisionModel; forward the caller's values instead.
        VisionModel.__init__(self,train_dir,val_dir,test_dir,saved_dir,
                width_shift_range=width_shift_range,
                width_zoom_range=width_zoom_range,
                zoom_range=zoom_range,
                channel_shift_range=channel_shift_range,
                height_shift_range=height_shift_range,
                shear_range=shear_range,
                horizontal_flip=horizontal_flip)
        self.type = type
        self.model_type = model_type  # "simple" keeps VGG16's classifier head
        if(self.type == MODEL_VGG16):
            if(self.model_type == "simple"):
                self.model = VGG16(include_top=True,weights='imagenet')
            else:
                self.model = VGG16(include_top=False,weights='imagenet')
        else:
            print("ERROR: only VCG16 model is supported")
    def _feature_path(self, split):
        """Path of the saved bottleneck-feature file for one data split."""
        return self.saved_dir + '/bottleneck_features_' + split + self.model_type + '.npy'
    def predictGenerator(self):
        """Compute bottleneck features for train/val/test via generators."""
        self.train_X = self.model.predict_generator(self.train_gen, self.train_gen.nb_sample)
        self.val_X = self.model.predict_generator(self.val_gen,self.val_gen.nb_sample)
        # need to know test size
        self.test_X = self.model.predict_generator(self.test_gen,self.test_gen.nb_sample)
    def predict(self):
        """Compute bottleneck features from in-memory image arrays."""
        print("trainedModel Predict train")
        self.train_X = self.model.predict(self.train_imgs,batch_size=8)
        print("trainedModel Predict val")
        self.val_X = self.model.predict(self.val_imgs,batch_size=8)
        print("trainedModel Predict test")
        self.test_X = self.model.predict(self.test_imgs,batch_size=8)
    def saveFeatures(self):
        """Persist computed features; skips splits that are None.

        BUG FIX: np.save was previously handed file objects opened in text
        mode ('w'); .npy data is binary, so let np.save manage the file.
        """
        if(self.train_X is not None ):
            print("trainedModel save train features")
            np.save(self._feature_path('train'), self.train_X)
        if(self.val_X is not None ):
            print("trainedModel save val features")
            np.save(self._feature_path('val'), self.val_X)
        if(self.test_X is not None ):
            print("trainedModel save test features")
            np.save(self._feature_path('test'), self.test_X)
    def loadFeatures(self):
        """Load previously saved bottleneck features for all three splits."""
        self.train_X = np.load(self._feature_path('train'))
        self.val_X = np.load(self._feature_path('val'))
        self.test_X = np.load(self._feature_path('test'))
    def getFeatures(self):
        """Return the (train_X, val_X, test_X) feature arrays."""
        return self.train_X, self.val_X, self.test_X
    def getFeaturesShape(self):
        """Shape of a single feature sample, or None if not yet computed."""
        if(self.train_X is not None):
            return self.train_X.shape[1:]
        else:
            print("error model has not been trained ")
            return None
class VisionTopModel:
    """Small fully-connected classifier trained on bottleneck features.

    regularizer selects weight regularization: "none", "l2", or anything
    else falls through to l1.  NOTE(review): `l1` is never imported in this
    file, so the l1 branches raise NameError -- confirm the intended import.
    """
    def __init__(self,num_classes,model_type,input_shape,regularizer):
        self.model = Sequential();
        self.num_classes = num_classes
        self.input_shape = input_shape
        self.model_type = model_type  # "simple" selects a single softmax layer
        self.regularizer = regularizer
    def fineTune(self):
        """Assemble the top model according to model_type."""
        if(self.model_type == "simple"):
            self.model = Sequential([ Dense(self.num_classes, activation='softmax', input_shape=self.input_shape) ])
        else:
            self.flatten()
            self.addFC(2,4096, batch_norm=True)
            self.addClassificationLayer()
        print("TopModel add dense ");
    def flatten(self):
        """Flatten the bottleneck feature volume into a vector."""
        self.model.add(Flatten(input_shape=self.input_shape))
    def addConv(self,num_filter,nb_row,nb_col):
        """Add a conv -> batch-norm -> relu block (Keras-1 Convolution2D API)."""
        # self.model.add(Convolution2D(filters, length, width, border='same',activation='relu'))
        self.model.add(Convolution2D(num_filter, nb_row,nb_col, border_mode='same'))
        self.model.add(BatchNormalization())
        self.model.add(Activation('relu'))
    def addFC(self,num_layers,num_neurons, batch_norm=True):
        """Add fully-connected layers.

        NOTE(review): with batch_norm=False only a single Dense layer is
        added (num_layers is ignored) -- confirm whether that is intended.
        """
        if(batch_norm == False):
            print("addFC without batch norm ")
            self.model.add(Dense(num_neurons,activation='relu', W_regularizer=l2(0.01)))
        else:
            print("addFC add batch norm ")
            for i in range(num_layers):
                if(self.regularizer == "none"):
                    self.model.add(Dense(num_neurons,activation='relu'))
                elif(self.regularizer == "l2"):
                    self.model.add(Dense(num_neurons,activation='relu',W_regularizer=l2(0.01)))
                else:
                    # NOTE(review): l1 is undefined in this module (NameError).
                    self.model.add(Dense(num_neurons,activation='relu',W_regularizer=l1(0.01)))
                self.model.add(BatchNormalization())
    def disableWeightUpdate(self):
        """Freeze every layer currently in the model."""
        for layer in self.model.layers:
            layer.trainable=False
    def addBatchNormalizationLayer(self):
        self.model.add(BatchNormalization(axis=1))
    def setLearningRate(self,rate):
        """Change the optimizer's learning rate in place."""
        self.model.optimizer.lr=rate
    def fineTuneVCG16TopLayer(self,num_out):
        """Replace the model with a 2-way softmax over VGG16's 1000 logits.

        NOTE(review): num_out is ignored; the layer is hard-coded to 2 units.
        """
        #self.model.add(Dense(num_out,activation='softmax',input_shape=(1000,)))
        self.model = Sequential([ Dense(2, activation='softmax', input_shape=(1000,)) ])
        print("TopModel add dense ");
    def addClassificationLayer(self):
        """Append the final softmax layer with the configured regularizer."""
        if(self.regularizer == "none"):
            self.model.add(Dense(self.num_classes,activation='softmax'))
        elif(self.regularizer == "l2"):
            self.model.add(Dense(self.num_classes,activation='softmax',W_regularizer=l2(0.01)))
        else:
            # NOTE(review): l1 is undefined in this module (NameError).
            self.model.add(Dense(self.num_classes,activation='softmax',W_regularizer=l1(0.01)))
    def compileModel(self, lr=0.01):
        """Compile with Adam + categorical cross-entropy."""
        # to do add binary cross entropy
        self.model.compile(optimizer=Adam(lr=lr), loss='categorical_crossentropy', metrics=['accuracy'])
    def saveModel(self,mypath,filename):
        self.model.save(mypath+ self.model_type + filename)
    def loadModel(self,mypath,filename):
        print("loading vision model")
        self.model = load_model(mypath+self.model_type + filename)
    def fit(self,train_input,train_labels,val_input,val_labels,batch=32,nbepoch=1):
        """Train on in-memory feature arrays (Keras-1 nb_epoch API)."""
        self.model.fit(train_input, train_labels,nb_epoch=nbepoch, batch_size=batch,
              validation_data=(val_input, val_labels))
    def predict(self,test_input,batch=32):
        return self.model.predict(test_input,batch_size=batch)
    def fit_generator(self,train_input,train_labels,val_input,val_labels,batch_size=32,nb_epoch=1):
        """NOTE(review): references self.visionModel, nb_train_samples,
        nb_val_samples and self.train_gen/val_gen, none of which exist on
        this class -- this method appears dead/broken as written.
        """
        if(self.model is not None):
            self.visionModel.fit_generator(
                self.train_gen,
                nb_train_samples,
                nb_epoch,
                self.val_gen,
                nb_val_samples);
        else:
            print("Error: model not created\n");
    def createTestGen(self,test_data_dir,height=224,width=224,batch=32,entropy=None):
        """NOTE(review): self.test_datagen is not defined on this class
        (it lives on VisionModel) -- confirm this method is ever called.
        """
        self.test_gen = self.test_datagen.flow_from_directory(
            test_data_dir,
            target_size=(height, width),
            batch_size=batch,
            class_mode=entropy,
            shuffle=False)
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render
from models import Post
from models import Family
from django.http import HttpResponse,Http404
from django.template.loader import get_template
from django.shortcuts import redirect
import sys
reload(sys)
sys.setdefaultencoding('utf8')
from datetime import datetime
# Create your views here.
def homepage(request):
    """Render the blog index with all posts and the current time.

    NOTE(review): the template receives ``locals()`` -- the local names
    (posts, now) are part of the template contract; do not rename them.
    """
    template = get_template('index.html')
    posts = Post.objects.all()
    now = datetime.now()
    html = template.render(locals())
    return HttpResponse(html)
def showpost(request, slug):
    """Render a single post looked up by slug; redirect to '/' on failure.

    The template receives ``locals()``, so the local names (post, slug,
    template) are part of the template contract and must not be renamed.
    """
    template = get_template('post.html')
    try:
        post = Post.objects.get(slug=slug)
        if post is not None:  # idiom fix: identity comparison with None
            html = template.render(locals())
            return HttpResponse(html)
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # are no longer swallowed; any lookup/render failure still falls
        # back to the index page.
        return redirect('/')
def showallfamily(request):
    """Render the family index page with every Family record.

    The template receives ``locals()``; the name `family` is part of the
    template contract.
    """
    template=get_template('family_index.html')
    family=Family.objects.all()
    #now = datetime.now()
    html=template.render(locals())
    return HttpResponse(html)
def showfamily(request,nona):
    """Render one family member page, looked up by nano_name.

    Raises Http404 when no matching record exists.  The template receives
    ``locals()``; the name `family` is part of the template contract.
    """
    template = get_template('family_post.html')
    try:
        family = Family.objects.get(nano_name=nona)
    except:
        # NOTE(review): bare except also hides DB errors -- consider
        # catching Family.DoesNotExist only.
        raise Http404("没有这么一个人啦~~")
    html = template.render(locals())
    return HttpResponse(html)
# coding=utf-8
import unittest
import logging
from sgcharts.nlp import ngram_gen, ngram_from_tokens_gen
logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)
class TestNgramGenerator(unittest.TestCase):
    """Unit tests for sgcharts.nlp n-gram generators (n = 2..5)."""
    def test_bigram(self):
        """ngram_from_tokens_gen over pre-split tokens, n=2."""
        inp = "Apple is looking at buying U.K. startup for $1 billion"
        e = [('Apple', 'is'), ('is', 'looking'), ('looking', 'at'), ('at', 'buying'), ('buying', 'U.K.'),
             ('U.K.', 'startup'), ('startup', 'for'), ('for', '$1'), ('$1', 'billion')]
        a = list(ngram_from_tokens_gen(inp.split(), n=2))
        log.debug('a={}'.format(repr(a)))
        self.assertListEqual(a, e)
    def test_trigram(self):
        """ngram_gen over a raw string, n=3."""
        inp = "Apple is looking at buying U.K. startup for $1 billion"
        e = [('Apple', 'is', 'looking'), ('is', 'looking', 'at'), ('looking', 'at', 'buying'),
             ('at', 'buying', 'U.K.'), ('buying', 'U.K.', 'startup'), ('U.K.', 'startup', 'for'),
             ('startup', 'for', '$1'), ('for', '$1', 'billion')]
        a = list(ngram_gen(inp, n=3))
        self.assertListEqual(a, e)
    def test_fourgram(self):
        """ngram_gen over a raw string, n=4."""
        inp = "Apple is looking at buying U.K. startup for $1 billion"
        e = [('Apple', 'is', 'looking', 'at'),
             ('is', 'looking', 'at', 'buying'),
             ('looking', 'at', 'buying', 'U.K.'),
             ('at', 'buying', 'U.K.', 'startup'),
             ('buying', 'U.K.', 'startup', 'for'),
             ('U.K.', 'startup', 'for', '$1'),
             ('startup', 'for', '$1', 'billion')]
        a = list(ngram_gen(inp, n=4))
        self.assertListEqual(a, e)
    def test_fivegram(self):
        """ngram_gen over a raw string, n=5."""
        inp = "Apple is looking at buying U.K. startup for $1 billion"
        e = [('Apple', 'is', 'looking', 'at', 'buying'),
             ('is', 'looking', 'at', 'buying', 'U.K.'),
             ('looking', 'at', 'buying', 'U.K.', 'startup'),
             ('at', 'buying', 'U.K.', 'startup', 'for'),
             ('buying', 'U.K.', 'startup', 'for', '$1'),
             ('U.K.', 'startup', 'for', '$1', 'billion')]
        a = list(ngram_gen(inp, n=5))
        self.assertListEqual(a, e)
    def test_ngram_gen(self):
        """ngram_gen over a raw string, n=2 (string-input bigram path)."""
        inp = "Apple is looking at buying U.K. startup for $1 billion"
        e = [('Apple', 'is'), ('is', 'looking'), ('looking', 'at'), ('at', 'buying'), ('buying', 'U.K.'),
             ('U.K.', 'startup'), ('startup', 'for'), ('for', '$1'), ('$1', 'billion')]
        a = list(ngram_gen(inp, n=2))
        self.assertListEqual(a, e)
|
from PyCov19.beta_models import exp, tanh
def reproduction_number(beta_model, epd_model, **kwargs):
    """Compute the reproduction number R for an epidemic model.

    Parameters
    ----------
    beta_model : name of the transmission-rate model, "exp" or "tanh".
    epd_model : "SIR" (needs gamma) or "SIRD" (needs gamma and delta).
    **kwargs : model parameters; must include num_days plus whatever the
        chosen beta model requires.  String values are coerced to float
        where possible.

    Returns
    -------
    R = beta / gamma for SIR, beta / (gamma + delta) for SIRD.

    Raises
    ------
    ValueError for an unknown beta_model or epd_model (previously the
    latter produced an UnboundLocalError on R).
    """
    for k in kwargs:
        try:
            kwargs[k] = float(kwargs[k])
        except (TypeError, ValueError):
            pass  # non-numeric parameters are passed through unchanged
    # SECURITY FIX: dispatch through an explicit table instead of eval()ing
    # the caller-supplied model name.
    beta_models = {'exp': exp, 'tanh': tanh}
    try:
        beta_fn = beta_models[beta_model]
    except KeyError:
        raise ValueError("unknown beta model: {}".format(beta_model))
    beta = beta_fn(kwargs['num_days'], **kwargs)
    if epd_model == 'SIR':
        return beta / kwargs['gamma']
    if epd_model == 'SIRD':
        return beta / (kwargs['gamma'] + kwargs['delta'])
    raise ValueError("unknown epidemic model: {}".format(epd_model))
|
# coding: UTF-8
import sys
# python filenames
args = sys.argv  # NOTE(review): never used -- input/output paths are hard-coded at the bottom
"""
ohtoさん提供の棋譜をpos形式に変換する
データの形式:
dealt { d3 c4 s4 h6 h9 ct hj hk d2 h2 jo } { c3 s6 h7 c8 s8 ht cj dq ca ha c2 } { d5 d7 s7 dt st sj sq ck sk da } { d4 h4 c5 h5 s5 c7 c9 d9 dj dk sa } { h3 s3 c6 d6 d8 h8 s9 cq hq s2 }
changed { h6 ct } { } { sk da } { c7 } { s2 }
original { d3 c4 s4 h9 hj hk sk da d2 h2 jo } { c3 s6 h7 c8 s8 ht cj dq ca ha c2 } { d5 h6 d7 s7 ct dt st sj sq ck } { d4 h4 c5 h5 s5 c9 d9 dj dk sa s2 } { h3 s3 c6 d6 c7 d8 h8 s9 cq hq }
originalがゲーム開始時の手札
play cs-4[44650] cd-9[31989] p[33022]...
と言った要領で手が列挙される[]の中の意味は謎
cs-4 ... クラブとスペードの4
d-ka2 ... ダイヤのka2の階段
cds-5(c) ... cdsの5を3枚出し。cはjokerを利用
s-678(8) ... スペードの678。8jokerを利用
"""
# Translation of the note above: converts game records provided by ohto into
# "pos" format.  "original" holds the hands at game start; the "play" line
# lists moves (the meaning of [] is unknown): cs-4 = clubs+spades 4,
# d-ka2 = diamond run at ka2, cds-5(c) = triple 5 with joker as c,
# s-678(8) = spade run 678 with 8 played as the joker.
# Convert a move's text into its parts, following the record format above.
def text2card(plytext):
    """Split one move token like "cs-4[44650]" into (head, body, joker_used).

    head is the suit letters (or "o" for a lone joker / "pass"), body is the
    rank text, and the flag reports whether the joker was used in the play.
    """
    if plytext.startswith("jk"):
        return "o", "o", True  # lone joker play
    head_end = plytext.find('-')
    body_end = plytext.find('[')
    joker_pos = plytext.find('(')
    uses_joker = joker_pos != -1
    if uses_joker:
        body_end = joker_pos
    if plytext.find('p[') != -1:
        return "pass", "pass", uses_joker
    if head_end == -1:
        # malformed token: treat it as a pass (matches original behaviour)
        return "pass", "pass", uses_joker
    if body_end == -1:
        body_end = len(plytext)
    return plytext[:head_end], plytext[head_end + 1:body_end], uses_joker
def char2rank(char):
    """Map a rank character to its internal rank number.

    Digits: '3' -> 1 ... '9' -> 7, with '2' wrapping to 13 (highest card).
    Letters: t=10, j=jack, q=queen, k=king, a=ace, o=joker.  Unknown
    characters yield 0.
    """
    if char.isdigit():
        rank = int(char) - 2
        return 13 if rank == 0 else rank
    return {"t": 8, "j": 9, "q": 10, "k": 11, "a": 12, "o": 14}.get(char, 0)
def translate(head, body):
    """Convert a parsed move (head, body) into (suit_bits, rank, handtype).

    suit_bits: c=1, d=2, h=4, s=8 combined with bitwise OR.
    handtype: 0 = pass, 1..5 = set of that many equal ranks (1 also covers
    a lone joker), >= 6 = run (kaidan) of length handtype - 3.
    """
    if head == "pass":
        return 0, 0, 0
    suit_values = {'c': 1, 'd': 2, 'h': 4, 's': 8}
    suit = 0
    if len(body) == 1:
        # Set play: every suit character adds one card to the count.
        rank = char2rank(body)
        handtype = 0
        for ch in head:
            if ch in suit_values:
                suit += suit_values[ch]
                handtype += 1
            elif ch == 'o':
                handtype = 1  # should only appear for a lone joker play
            elif ch == 'x':
                handtype += 1  # five-of-a-kind written as cdhsx
    else:
        # Run (kaidan): rank is the lowest card, length encodes the type.
        rank = char2rank(body[0])
        handtype = 3 + len(body)
        for ch in head:
            suit += suit_values.get(ch, 0)
    return suit, rank, handtype
def isyagiri(rank, handtype):
    """Return True when the play triggers 8-giri (an 8 flushes the trick).

    Rank 6 is the card "8" in the internal numbering (see char2rank).
    """
    if handtype < 6:
        # Set play: flushes exactly when the rank is an 8.
        return rank == 6
    # Run: flushes when it starts at or below 8 and extends up to it.
    return rank <= 6 and rank + handtype - 4 >= 6
def hand2bit(char):
    """Map a two-character card code like "d3" to a card index 0..52.

    Index is rank-major: (rank - 1) * 4 + suit, with suits ordered
    c=0, d=1, h=2, s=3; "jo" (joker) maps to 52.
    """
    if char == "jo":
        return 52
    suit_offset = {'d': 1, 'h': 2, 's': 3}.get(char[0], 0)
    return (char2rank(char[1]) - 1) * 4 + suit_offset
def hands2bin(hands):
    """Pack a collection of card indices (0..52) into one integer bitboard.

    Card i occupies bit i + 4 (the low 4 bits are left clear); the joker
    (52) occupies bit 60.
    """
    packed = 0
    for card in range(52):
        if card in hands:
            packed |= 1 << (card + 4)
    if 52 in hands:
        packed |= 1 << 60  # joker bit
    return packed
def geterases(erases, rank, suit, handtype, jk):
    """Append to *erases* the card indices this play removes from a hand.

    A pass (handtype 0) removes nothing.  When jk is True the joker (52) is
    consumed.  Sets remove one card per suit bit; runs remove consecutive
    ranks in a single suit (the highest set suit bit wins, as before).
    """
    if handtype == 0:
        return
    if jk:
        erases.append(52)
    base = (rank - 1) * 4
    if handtype < 6:
        # Set play: one card per suit bit, in c, d, h, s order.
        for bit, offset in ((1, 0), (2, 1), (4, 2), (8, 3)):
            if suit & bit != 0:
                erases.append(base + offset)
    else:
        # Run: handtype - 3 consecutive ranks in one suit.
        bsuit = 0
        if suit & 2 != 0:
            bsuit = 1
        if suit & 4 != 0:
            bsuit = 2
        if suit & 8 != 0:
            bsuit = 3
        for step in range(handtype - 3):
            erases.append((rank - 1 + step) * 4 + bsuit)
class board:
    """Mutable game state for one five-player Daifugo (Daihinmin) deal.

    Cards are integers 0..51 (rank-major, see hand2bit) plus 52 for the
    joker.  ``hand``/``playedhand`` are indexed by record player id, while
    ``chair`` is a seating permutation translating between turn order and
    player id (NOTE(review): see setseat/setturn for the exact mapping --
    confirm direction before changing).
    """
    def __init__(self):
        self.hand = [[],[],[],[],[]]            # cards still held, per player
        self.playedhand = [[],[],[],[],[]]      # cards already played, per player
        self.suitlock = 0                       # suit bits locked by repetition, 0 = none
        self.kakumei = 0                        # revolution flag (rank order inverted)
        self.chair = [0,1,2,3,4]                # seating permutation
        self.turn = 0                           # whose turn it is (seat order)
        self.prevsuit = 0                       # suit bits of the play on the table
        self.prevrank = 0                       # rank of the play on the table
        self.prevhandtype = 0                   # handtype of the play on the table
        self.lastplay = 0                       # seat of the last non-pass play
        self.passed = [False,False,False,False,False]  # who has passed this trick
    def clearboard(self):
        """Reset all per-deal state for a new game."""
        # Clear hands and the suit lock
        for i in range(5):
            self.hand[i].clear()
            self.playedhand[i].clear()
            self.passed[i] = False
        self.suitlock = 0
        self.prevsuit = 0
        self.prevrank = 0
        self.prevhandtype = 0
        self.lastplay = 0
        self.kakumei = 0
        self.prevjk = False
    def setturn(self):
        """Give the first turn to whoever holds d3 (card index 1)."""
        # The player holding d3 leads
        for i in range(5):
            if 1 in self.hand[i]:
                for j in range(5):
                    if self.chair[j] == i:
                        self.turn = j
                        return
        print("error no d3 player")
    def getNext(self):
        """Advance the turn, recursively skipping finished/passed players."""
        self.turn += 1
        self.turn = self.turn % 5
        # Skip a player who has already emptied their hand
        if len(self.hand[self.chair[self.turn]]) == 0:
            self.getNext()
            return
        # Skip a player who has already passed this trick
        if self.passed[self.turn] == True:
            self.getNext()
            return
    def plycard(self,suit, rank, handtype, jk):
        """Apply one move for the player on turn.

        Returns True when the trick flushes, False on an illegal move, and
        None otherwise (NOTE(review): callers only compare against False).
        """
        #print("plycard input")
        #print([suit,rank,handtype,jk,self.chair[self.turn],self.prevrank,self.suitlock])
        # Apply the move to the board -- frankly the most tedious part.
        erases = []
        if handtype == 0:
            self.passed[self.turn] = True
            isFlush = True
            # Everyone passed = the trick is flushed
            for i in range(5):
                if self.passed[i] == False and len(self.hand[self.chair[i]]) != 0:
                    isFlush = False
                    break
            if isFlush:
                self.flush()
            return True
        isjks3 = False
        if self.prevrank == 14 and rank == 1 :
            # joker followed by s3 combo
            isjks3 = True
        if handtype == 4 or handtype == 5 or handtype > 7 :
            # 4-or-5-of-a-kind or a long run triggers a revolution
            self.kakumei = not self.kakumei
        self.lastplay = self.turn
        if suit == self.prevsuit:
            self.suitlock = suit
        else:
            self.suitlock = 0
        self.prevsuit = suit
        self.prevrank = rank
        self.prevhandtype = handtype
        self.prevjk = jk
        geterases(erases, rank, suit, handtype, jk)
        # Remove the erased cards from the hand
        selfcheck = 0
        #print("eraselist")
        #print(erases)
        for erase in erases:
            if erase in self.hand[self.chair[self.turn]]:
                self.hand[self.chair[self.turn]].remove(erase)
                self.playedhand[self.chair[self.turn]].append(erase)
            else:
                selfcheck += 1
        # Legality check -- there is probably a nicer way to write this.
        # Note: making a hand with the joker when it could be made without
        # one is treated as illegal.
        if (handtype == 5 and selfcheck == 0) or (selfcheck == 0 and (not jk or (jk and handtype == 1))) or (selfcheck == 1 and jk) :
            pass
        else:
            print("something wrong in plycard")
            print([suit,rank,handtype,jk,self.turn])
            print(self.hand[self.chair[self.turn]])
            return False
        # Check whether this player has gone out
        if len(self.hand[self.chair[self.turn]]) == 0:
            #print("agari pl :" + str(self.chair[self.turn]))
            # Going out implies a pass, so the flush check must run here too
            isFlush = True
            # Everyone passed = the trick is flushed
            for i in range(5):
                if self.passed[i] == False and len(self.hand[self.chair[i]]) != 0:
                    isFlush = False
                    break
            if isFlush:
                self.flush()
            return True
        #    self.flush()
        # An 8 in the play flushes the trick immediately;
        # so does s3 played straight after a joker
        if isyagiri(rank, handtype) or isjks3:
            self.flush()
            return True
    def printposbin(self,suit, rank, handtype, jk):
        """Serialize the current position (bit-packed hands) plus the move."""
        # Dump the position; hand information is bit-packed integers here
        outstr = ""
        #print(hands2bin(self.hand[self.chair[self.turn]]))
        #print(hands2bin(self.playedhand[self.chair[self.turn]]))
        outstr += "hand " + str(hands2bin(self.hand[self.chair[self.turn]])) + "\n"
        outstr += "myply " + str(hands2bin(self.playedhand[self.chair[self.turn]])) + "\n"
        idx = 0
        for i in range(5):
            if i == self.turn :
                continue
            #print(hands2bin(self.playedhand[self.chair[i]]))
            outstr += "eply " + str(idx) + " " + str(hands2bin(self.playedhand[self.chair[i]])) + "\n"
            idx += 1
        #print([self.kakumei,self.prevsuit, self.prevrank, self.prevhandtype, self.suitlock])
        if self.kakumei :
            outstr += "kakumei 1\n"
        else:
            outstr += "kakumei 0\n"
        cboard = []
        geterases(cboard, self.prevrank, self.prevsuit, self.prevhandtype, self.prevjk)
        outstr += "cboard " + str(hands2bin(cboard)) + "\n"
        outstr += "suitlock " + str(self.suitlock) + "\n"
        outstr += "type " + str(self.prevhandtype) + "\n"
        outstr += "rank " + str(self.prevrank) + "\n"
        cboard = []
        geterases(cboard, rank, suit, handtype, jk)
        outstr += "movtype " + str(handtype) + "\n"
        outstr += "movrank " + str(rank) + "\n"
        if jk :
            outstr += "movjk 1\n"
        else:
            outstr += "movjk 0\n"
        outstr += "movc " + str(hands2bin(cboard)) + "\n"
        outstr += "eop"
        #print([suit, rank, handtype, jk])
        return outstr
    def printpos(self,suit, rank, handtype, jk):
        """Print the position in readable form (debug output only)."""
        # Dump each position -- arguably this script's reason to exist
        # debug
        for i in range(5):
            print(self.hand[i])
        for i in range(5):
            print(self.playedhand[i])
        print(self.kakumei)
        print([self.prevsuit, self.prevrank, self.prevhandtype, self.suitlock])
        print([suit, rank, handtype, jk])
    def flush(self):
        """Clear the trick: release the suit lock and pick the next leader."""
        # The trick is cleared: release the suit lock and decide who leads next
        #print("flushed lastplayed : " + str(self.lastplay))
        self.suitlock = 0
        self.prevsuit = 0
        self.prevrank = 0
        self.prevhandtype = 0
        self.prevjk = False
        self.turn = (self.lastplay + 4) % 5
        for i in range(5):
            self.passed[i] = False
    def original2hand(self,originaltext) :
        """Load the starting hands from an 'original { ... } ...' record line."""
        # Assign the hands given on the 'original' record line
        idx = 0
        datas = originaltext.split(" ")
        for data in datas :
            if data == "{" or data == "original" :
                pass
            elif data == "}" :
                idx += 1
                if idx == 5:
                    break
            else:
                self.hand[idx].append(hand2bit(data))
        # debug
        #print(self.hand)
    def setseat(self,seattext):
        """Load the seating order from a 'seat 1 4 0 2 3' style record line."""
        # Assign the seating order given as e.g. 'seat 1 4 0 2 3'
        datas = seattext.split(" ")
        for i in range(5):
            self.chair[int(datas[i+1])] = i
        # debug
        #print(self.chair)
    def domove(self,suit, rank, handtype, jk):
        """Apply one move and advance the turn; returns plycard's result."""
        result = self.plycard(suit,rank,handtype,jk)
        # Stop immediately once the game has ended
        if self.endgamecheck():
            return result
        self.getNext()
        return result
    def endgamecheck(self):
        """True once four or more players have emptied their hands."""
        endcnt = 0
        for i in range(5):
            if len(self.hand[i]) == 0:
                endcnt += 1
        if endcnt >=4 :
            return True
        return False
class glicine2pos:
    """Replays Glicine game records and writes one training position per move."""
    def __init__(self):
        self.bd = board()   # board state reused across games
        self.gamecnt = 0    # number of games processed so far
    def load(self,fname,outfname):
        """Parse the record file *fname* and write positions to *outfname*.

        Recognized record lines: 'game' starts a new deal, 'seat' sets the
        seating order, 'orig(inal)' sets the hands, and 'play' lists moves.
        Stops after 114514 games.
        """
        f = open(fname)
        lines = f.readlines()
        f.close()
        fw = open(outfname , 'w')
        for line in lines:
            if line[0:4] == "game":
                #print("gamestart")
                self.gamecnt += 1
                self.bd.clearboard()
            if line[0:4] == "seat":
                self.bd.setseat(line[4:])
            if line[0:4] == "orig":
                self.bd.original2hand(line)
                # Once the hands are fixed, the leading player is determined
                self.bd.setturn()
            if line[0:4] == "play" and not (line[0:6] == "player" ) :
                datas = (line[5:]).split(" ")
                fw.write("gamestr " + line.replace(" ", ",") + "\n")
                for data in datas:
                    head, body, jk = text2card(data)
                    suit, rank, handtype = translate(head,body)
                    # Serialize the position *before* applying the move
                    outpos = self.bd.printposbin(suit,rank,handtype,jk)
                    if (handtype != 0 and rank == 0) or handtype > 9 :
                        print("illeagal hand in the game")
                        print(line)
                        print(data)
                        break
                    #print(data)
                    result = self.bd.domove(suit,rank,handtype,jk)
                    if result == False:
                        print(line)
                        print(data)
                        break
                    fw.write(outpos + "\n")
                    if self.bd.endgamecheck():
                        break
            if self.gamecnt > 114514:
                fw.close()
                return
# Smoke-check the parser on a single joker-assisted run play.
head, body, jk = text2card("d-345(5)[39960]")
suit, rank, handtype = translate(head,body)
#print([suit,rank,handtype])
# Convert one record file; the commented-out calls are alternative inputs.
gl = glicine2pos()
#gl.load("../../record_for_learn/Glicine_50000.dat", "../reclearn/Glicine_50000.txt")
#gl.load("../../record_for_learn/Glicine_50000_1.dat", "../reclearn/Glicine_50000_1.txt")
#gl.load("../../record_for_learn/Glicine_50000_2.dat", "../reclearn/Glicine_50000_2.txt")
gl.load("../../record_for_learn/Glicine_50000_13.dat", "../reclearn/Glicine_50000_13.txt")
#gl.load("../../record_for_learn/Glicine_50000_4.dat", "../reclearn/Glicine_50000_4.txt")
#gl.load("../../record_for_learn/Glicine_50000_7.dat", "../reclearn/Glicine_50000_7.txt")
#gl.load("../../record_for_learn/Glicine_50000_9.dat", "../reclearn/Glicine_50000_9.txt")
#gl.load("../../record_for_learn/Glicine_50000_11.dat", "../reclearn/Glicine_50000_11.txt")
#gl.load("/media/shiku/04E4E0C7E4E0BC54/record_for_learn/Glicine_50000.dat", "../reclearn/Glicine_50000.txt")
# (leftover notes) parse a move given on a 'play' line:
#head, body, jk = text2card(plytext)
#suit, rank, handtype = translate(head,body)
|
from .InputEngine.InputEngineInterface import IInputEngine
from .OutputEngine.OutputEngineInterface import IOutputEngine
class IOEngine(object):
    """Facade pairing an input engine with an output engine behind one object.

    The concrete engines are injected and accessed only through the
    IInputEngine / IOutputEngine interfaces.
    """
    def __init__(self, inputEngine: IInputEngine, outputEngine: IOutputEngine):
        # Fail fast when a non-conforming engine is supplied.
        assert isinstance(inputEngine, IInputEngine)
        assert isinstance(outputEngine, IOutputEngine)
        self.inputEngine = inputEngine
        self.outputEngine = outputEngine
    def output(self, outputString: str) -> None:
        """Forward `outputString` to the wrapped output engine."""
        self.outputEngine.output(outputString)
    def getInput(self, starter: str = None):
        """Request input from the wrapped input engine, passing `starter` through."""
        return self.inputEngine.getInput(starter)
|
import os
import random
import sys
import zipfile
from operator import itemgetter
import numpy
import numpy as np
import prettytable
from prettytable import PrettyTable
from nupic.frameworks.opf.model_factory import ModelFactory
import csv
import matplotlib.pyplot as plt
# Closed set of page-category codes the model will encode and predict.
PAGE_CATEGORIES = [
    '04f2', '00a0', '0370', '05a2', '0690', '0110', '04b0', '02b0', '05a0', '0165', '0081', '018f', '02a0', '0220', '0080', '01f1', '0120', '0153', '0260', '0545', '04f1', '0043', '059b', '0587', '05e4', '05f0', '04b1', '0329', '0382', '043f', '0510', '02c0', '051a', '0050', '00a1', '0018', '0034', '0044', '0440', '0042', '04f0', '0517', '0164', '0350', '0316'
]
# print(PAGE_CATEGORIES)
# Configure the sensor/input region using the "SDRCategoryEncoder" to encode
# the page category into SDRs suitable for processing directly by the TM
SENSOR_PARAMS = {
    "verbosity": 0,
    "encoders": {
        "page": {
            "fieldname": "page",
            "name": "page",
            "type": "SDRCategoryEncoder",
            # The output of this encoder will be passed directly to the TM region,
            # therefore the number of bits should match TM's "inputWidth" parameter
            "n": 512,
            # Use ~2% sparsity (12 active bits out of 512)
            "w": 12
        },
    },
}
# Configure the temporal memory to learn a sequence of page SDRs and make
# predictions on the next page of the sequence.
TM_PARAMS = {
    "seed": 1960,
    # Use "nupic.bindings.algorithms.TemporalMemoryCPP" algorithm
    "temporalImp": "tm_cpp",
    # Should match the encoder output
    "inputWidth": 512,
    "columnCount": 2048,
    # Use 1 cell per column for first order prediction.
    # Use more cells per column for variable order predictions.
    "cellsPerColumn": 6,
}
# Configure the output region with a classifier used to decode TM SDRs back
# into pages
CL_PARAMS = {
    "implementation": "cpp",
    "regionName": "SDRClassifierRegion",
    # alpha parameter controls how fast the classifier learns/forgets. Higher
    # values make it adapt faster and forget older patterns faster.
    "alpha": 0.1,
    "steps": 1,
}
# Create a simple HTM network that will receive the current page as input, pass
# the encoded page SDR to the temporal memory to learn the sequences and
# interpret the output SDRs from the temporary memory using the SDRClassifier
# whose output will be a list of predicted next pages and their probabilities.
#
# page => [encoder] => [TM] => [classifier] => prediction
#
MODEL_PARAMS = {
    "version": 1,
    "model": "HTMPrediction",
    "modelParams": {
        # 'anomalyParams': { u'anomalyCacheRecords': None,
        # u'autoDetectThreshold': None,
        # u'autoDetectWaitRecords': None},
        "inferenceType": "TemporalAnomaly",
        "sensorParams": SENSOR_PARAMS,
        # The purpose of the spatial pooler is to create a stable representation of
        # the input SDRs. In our case the category encoder output is already a
        # stable representation of the category therefore adding the spatial pooler
        # to this network would not help and could potentially slow down the
        # learning process
        "spEnable": False,
        "spParams": {},
        "tmEnable": True,
        "tmParams": TM_PARAMS,
        "clParams": CL_PARAMS,
    },
}
def main():
    """Build the HTM prediction model, then tabulate and persist the encoded
    SDR for each page category."""
    # Create HTM prediction model and enable inference on the page field
    model = ModelFactory.create(MODEL_PARAMS)
    model.enableInference({"predictedField": "page"})
    # Use the model encoder to display the encoded SDRs the model will learn
    sdr_table = PrettyTable(field_names=["Page Category",
                                         "Encoded SDR (on bit indices)"],
                            sortby="Page Category")
    sdr_table.align = "l"
    encoder = model._getEncoder()
    # Fix: `np.bool` was a deprecated alias of the builtin `bool` and was
    # removed in NumPy 1.24; the builtin behaves identically on every version.
    sdrout = np.zeros(encoder.getWidth(), dtype=bool)
    for page in PAGE_CATEGORIES:
        encoder.encodeIntoArray({"page": page}, sdrout)
        sdr_table.add_row([page, sdrout.nonzero()[0]])
    # Persist all encoder SDRs for later reuse.
    sdrlist = encoder.getsdrs()
    numpy.save("idEn.npy", sdrlist)
if __name__ == "__main__":
    # Seed both RNGs so runs are reproducible.
    random.seed(1)
    np.random.seed(1)
    main()
|
from django.db import models
from web.models.mixins import Archivable
class ProductLegislation(Archivable, models.Model):
    """Django model for a piece of product legislation, with flags that
    drive application/certificate behaviour (see field help texts)."""
    name = models.CharField(max_length=500, verbose_name="Legislation Name")
    is_active = models.BooleanField(default=True)
    is_biocidal = models.BooleanField(
        default=False,
        verbose_name="Biocidal",
        help_text=(
            "Product type numbers and active ingredients must be entered"
            " by the applicant when biocidal legislation is selected"
        ),
    )
    is_eu_cosmetics_regulation = models.BooleanField(
        default=False,
        verbose_name="Cosmetics Regulation",
        help_text=(
            "A 'responsible person' statement may be added to the issued certificate"
            " schedule when the applicant selects EU Cosmetics Regulation legislation"
        ),
    )
    is_biocidal_claim = models.BooleanField(default=False, verbose_name="Biocidal Claim")
    gb_legislation = models.BooleanField(default=True, verbose_name="GB Legislation")
    ni_legislation = models.BooleanField(default=True, verbose_name="NI Legislation")
    @property
    def is_biocidal_yes_no(self):
        """Display helper: "Yes"/"No" rendering of `is_biocidal`."""
        return "Yes" if self.is_biocidal else "No"
    @property
    def is_biocidal_claim_yes_no(self):
        """Display helper: "Yes"/"No" rendering of `is_biocidal_claim`."""
        return "Yes" if self.is_biocidal_claim else "No"
    @property
    def is_eu_cosmetics_regulation_yes_no(self):
        """Display helper: "Yes"/"No" rendering of `is_eu_cosmetics_regulation`."""
        return "Yes" if self.is_eu_cosmetics_regulation else "No"
    @property
    def is_gb_legislation(self):
        """Display helper: "Yes"/"No" rendering of `gb_legislation`."""
        return "Yes" if self.gb_legislation else "No"
    @property
    def is_ni_legislation(self):
        """Display helper: "Yes"/"No" rendering of `ni_legislation`."""
        return "Yes" if self.ni_legislation else "No"
    def __str__(self):
        # Unsaved instances have no id yet, so fall back to a placeholder.
        if self.id:
            return self.name
        else:
            return "Product Legislation (new)"
    class Meta:
        ordering = (
            "-is_active",
            "name",
        )
|
'''
후위 표기 수식 계산
- 수식을 왼쪽부터 차례로 읽음
- 피연산자가 나타나면 스택에 push
- 연산자가 나타나면 스택에 들어있는 피연산자를 두개 pop후 연산을 적용,
그 결과를 다시 스택에 넣음
'''
class ArrayStack:
    """LIFO stack backed by a Python list; the top of the stack is the
    end of the list."""

    def __init__(self):
        self.data = []

    def size(self):
        """Return the number of stored items."""
        return len(self.data)

    def isEmpty(self):
        """Return True when the stack holds no items."""
        return len(self.data) == 0

    def push(self, item):
        """Place `item` on top of the stack."""
        self.data.append(item)

    def pop(self):
        """Remove and return the top item (IndexError when empty)."""
        return self.data.pop()

    def peek(self):
        """Return the top item without removing it (IndexError when empty)."""
        return self.data[-1]
def splitTokens(exprStr):
    """Split an infix arithmetic expression string into a token list.

    Multi-digit numbers become ints; every other non-space character
    (operators and parentheses) is kept as a one-character string.
    Spaces are ignored.
    """
    tokens = []
    val = 0
    valProcessing = False
    for c in exprStr:
        if c == ' ':
            continue
        if c in '0123456789':
            # Accumulate consecutive digits into one integer token.
            val = val * 10 + int(c)
            valProcessing = True
        else:
            if valProcessing:
                tokens.append(val)
                val = 0
                # Bug fix: the original wrote `valProcessing == False` — a
                # no-op comparison that left the flag set, so expressions
                # ending in an operator or ')' appended a spurious 0.
                valProcessing = False
            tokens.append(c)
    # Flush a trailing number, if any.
    if valProcessing:
        tokens.append(val)
    return tokens
def infixToPostfix(tokenList):
    """Convert an infix token list (ints and operator/paren strings) into a
    postfix token list using the shunting-yard algorithm.

    Precedence: '*' and '/' bind tighter than '+' and '-'; '(' has the
    lowest precedence so it is never popped by an operator.
    """
    prec = {
        '*': 3,
        '/': 3,
        '+': 2,
        '-': 2,
        '(': 1
    }
    opStack = ArrayStack()
    postfixList = []
    for token in tokenList:
        if type(token) is int:
            postfixList.append(token)
        elif token == '(':
            opStack.push(token)
        elif token == ')':
            # Bug fix: the original compared the bound method `opStack.peek`
            # (never equal to '(') and called `opStack.pop(token)` with a
            # stray argument (TypeError). Pop operators until the matching
            # '(' appears, then discard it.
            while opStack.peek() != '(':
                postfixList.append(opStack.pop())
            opStack.pop()
        else:
            # Pop operators of equal or higher precedence before pushing.
            while not opStack.isEmpty():
                if prec[token] > prec[opStack.peek()]:
                    break
                postfixList.append(opStack.pop())
            opStack.push(token)
    # Drain any remaining operators.
    while not opStack.isEmpty():
        postfixList.append(opStack.pop())
    return postfixList
def postfixEval(tokenList):
    """Evaluate a postfix token list of ints and '+', '-', '*', '/' strings
    using an operand stack; returns the final value."""
    valStack = ArrayStack()
    for token in tokenList:
        if type(token) is int:
            valStack.push(token)
            continue
        # Operator: the first pop is the right operand, the second the left.
        right = valStack.pop()
        left = valStack.pop()
        if token == '*':
            valStack.push(left * right)
        elif token == '/':
            valStack.push(left / right)
        elif token == '+':
            valStack.push(left + right)
        elif token == '-':
            valStack.push(left - right)
    return valStack.pop()
def solution(expr):
    """Tokenize the infix expression, convert it to postfix, and evaluate it."""
    return postfixEval(infixToPostfix(splitTokens(expr)))
from torchvision import datasets
import torchvision.transforms as transforms
from torch.utils.data import DataLoader
from torch.utils.data import SubsetRandomSampler
import sys
# We split the total dataset to four equal parts. Half of the dataset is Dshadow, from which half is Dshadow_train
# used for training the shadow model, and the other half is Dshadow_out for evaluation. The second half of the total
# dataset is Dtarget with is used for attack evaluation. The target model is trained on the half of that Dtarget_train,
# which serve as the members of target's training data, and the other half is the Dtarget_out as the non-member data
# points.
def subsetloader(ls_indices, start, end, trainset, batch_size):
    """
    Build a DataLoader over `trainset` restricted to the index slice
    ls_indices[start:end], sampled in random order via SubsetRandomSampler.
    """
    chosen = ls_indices[start:end]
    return DataLoader(trainset,
                      batch_size=batch_size,
                      sampler=SubsetRandomSampler(chosen))
# The main dataloader used. Can return 4 differents dataloaders for each different split based on the paper's
# methodology and a testloader, for both CIFAR10 and MNIST.
def dataloader(dataset="cifar", batch_size_train=8, batch_size_test=1000, split_dataset="shadow_train"):
    """
    Dataloader function that returns dataloader of a subset for train and test data of CIFAR10 or MNIST.

    `split_dataset` selects which contiguous quarter of the training set is
    returned ("shadow_train", "shadow_out", "target_train", "target_out");
    any other value returns the test loader instead.
    """
    try:
        if dataset == "cifar":
            # NOTE: downloads CIFAR10 into ./data on first use.
            transform = transforms.Compose(
                [transforms.ToTensor(),
                 transforms.Normalize((0.4914, 0.4822, 0.4465), (0.247, 0.243, 0.261))])
            trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)
            testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)
            testloader = DataLoader(testset, batch_size=batch_size_test, shuffle=False)
        elif dataset == "mnist":
            # NOTE: downloads MNIST into ./data on first use.
            transform = transforms.Compose([transforms.ToTensor(),
                                            transforms.Normalize((0.1307,), (0.3081,))])
            trainset = datasets.MNIST(root='./data', train=True, download=True, transform=transform)
            testset = datasets.MNIST(root="./data", train=False, download=True, transform=transform)
            testloader = DataLoader(testset, batch_size=batch_size_test, shuffle=False)
        else:
            raise NotAcceptedDataset
    except NotAcceptedDataset:
        # The exception is raised and caught locally just to print a friendly
        # message and terminate the program.
        print('Dataset Error. Choose "cifar" or "mnist"')
        sys.exit()
    # Split the training set into four equal, contiguous index ranges.
    total_size = len(trainset)
    split1 = total_size // 4
    split2 = split1 * 2
    split3 = split1 * 3
    indices = [*range(total_size)]
    if split_dataset == "shadow_train":
        return subsetloader(indices, 0, split1, trainset, batch_size_train)
    elif split_dataset == "shadow_out":
        return subsetloader(indices, split1, split2, trainset, batch_size_train)
    elif split_dataset == "target_train":
        return subsetloader(indices, split2, split3, trainset, batch_size_train)
    elif split_dataset == "target_out":
        return subsetloader(indices, split3, total_size, trainset, batch_size_train)
    else:
        return testloader
# Just a simple custom exception that is raised when the dataset argument is not accepted
class NotAcceptedDataset(Exception):
    """Raised when the requested dataset name is not supported."""
|
# Simple BMI calculator: reads height/weight from stdin, prints the BMI and a
# weight-status category (prompts and messages are in Chinese).
h = input('請輸入身高(cm):')
w= input('請輸入體重(kg):')
h = float(h)
w = float(w)
h = h / 100 # convert cm to m
bmi = w / h / h
print(bmi)
if bmi < 18.5:
    print('你的bmi值為', bmi, '體重過輕')  # underweight
elif bmi >= 18.5 and bmi < 24:
    print('你的bmi值為', bmi, '正常範圍')  # normal range
elif bmi >= 24 and bmi < 27:
    print('你的bmi值為', bmi, '過重')  # overweight
elif bmi >= 27 and bmi < 30:
    print('你的bmi值為', bmi, '輕度肥胖')  # mildly obese
elif bmi >= 30 and bmi < 35:
    print('你的bmi值為', bmi, '中度肥胖')  # moderately obese
else:
    print('你的bmi值為', bmi, '重度肥胖')  # severely obese
|
# Importing Image class from PIL module
from PIL import Image
# Opens an image in RGB mode
im = Image.open(r"test_image.jpg")
# Size of the image in pixels (size of original image)
# (This is not mandatory)
width, height = im.size
print(width, height)
# Target (width, height) in pixels for the resized copy
newsize = (1500, 800)
im1 = im.resize(newsize)
# Shows the image in image viewer
im1.show()
# Persist the resized copy as JPEG
im1.save('test_resize.jpeg', 'JPEG')
#
# @lc app=leetcode.cn id=917 lang=python3
#
# [917] 仅仅反转字母
#
# @lc code=start
class Solution:
    def reverseOnlyLetters(self, S: str) -> str:
        """
        Reverse only the alphabetic characters of S, keeping every
        non-letter character at its original index.

        Two-pointer technique: scan S left to right; each time a letter is
        needed, consume the next letter found scanning from the right end.
        O(n) time, O(n) extra space.
        """
        out = []
        tail = len(S) - 1
        for ch in S:
            if not ch.isalpha():
                # Non-letters keep their position.
                out.append(ch)
                continue
            # Advance the right pointer to the next letter and take it.
            while not S[tail].isalpha():
                tail -= 1
            out.append(S[tail])
            tail -= 1
        return "".join(out)
# @lc code=end
|
import os
import numpy as np
from itertools import product
import sys
sys.path.append('../')
from utils import partitions, weak_partitions
import pandas as pd
# Size of the pyraminx group: 11520 * 4 elements.
PYRAMINX_GROUP_SIZE = 11520 * 4
def alpha_parts():
    """Enumerate all (alpha, (parts0, parts1)) pairs, where alpha ranges over
    weak partitions of 6 into 2 parts and parts0/parts1 over the partitions
    of each component of alpha."""
    return [
        (alpha, parts)
        for alpha in weak_partitions(6, 2)
        for parts in product(partitions(alpha[0]), partitions(alpha[1]))
    ]
def pyraminx_dists(fname):
    """Parse a csv of `orientation,permutation,distance` rows into a dict
    mapping (orientation_tuple, permutation_tuple) -> int distance.

    Each of the first two fields is a digit string; every digit becomes one
    tuple element.
    """
    dist_dict = {}
    with open(fname, 'r') as f:
        for line in f:
            opart, ppart, dist = line.strip().split(',')
            key = (tuple(int(ch) for ch in opart),
                   tuple(int(ch) for ch in ppart))
            dist_dict[key] = int(dist)
    return dist_dict
def dist_df(fname):
    """Load the headerless distance csv into a DataFrame, converting the two
    leading digit-string columns into tuples of ints."""
    frame = pd.read_csv(fname, header=None, dtype={0: str, 1: str, 2: int})
    digits_to_tuple = lambda s: tuple(int(ch) for ch in s)
    frame[0] = frame[0].map(digits_to_tuple)
    frame[1] = frame[1].map(digits_to_tuple)
    return frame
def load_rep_mat(alpha, parts, prefix='/local/hopan/pyraminx/irreps_mat/'):
    """Load the stored irrep matrix array from <prefix>/<alpha>/<parts>.npy."""
    return np.load(os.path.join(prefix, str(alpha), str(parts) + '.npy'))
def load_rep_mat_sample(alpha, parts, nsample, prefix='/local/hopan/pyraminx/irreps_mat/'):
    """Load the irrep matrices for (alpha, parts) and return `nsample` of
    them, chosen uniformly at random with replacement."""
    mats = load_rep_mat(alpha, parts, prefix)
    picks = np.random.randint(len(mats), size=nsample)
    return mats[picks, :, :]
def load_mat_ift(alpha, parts, prefix='/local/hopan/pyraminx/fourier_eval/'):
    """Load the inverse-Fourier-transform array from <prefix>/<alpha>/<parts>.npy."""
    return np.load(os.path.join(prefix, str(alpha), str(parts) + '.npy'))
def load_ft_sample(alpha, parts, nsample, prefix='/local/hopan/pyraminx/fourier_sample/'):
    """Load the sampled Fourier array from <prefix>/<nsample>/<alpha>/<parts>.npy."""
    return np.load(os.path.join(prefix, str(nsample), str(alpha), str(parts) + '.npy'))
def load_mat_ift_sample(alpha, parts, nsample, prefix='/local/hopan/pyraminx/fourier_eval_sample'):
    """Load the sampled inverse-Fourier array from <prefix>/<nsample>/<alpha>/<parts>.npy."""
    return np.load(os.path.join(prefix, str(nsample), str(alpha), str(parts) + '.npy'))
|
'''
Approach
- The string contains the colors R, B, V.
- A V must not have the same color on both sides.
- The same color (R, B, V) must not appear twice in a row.
'''
# Scan the string tracking the length of the current run; a run is broken by
# a repeated character or by any 'V' on either side. Print the longest run.
N=int(input())
S=input()
top = S[0]
cnt=1
max_cnt=1
for i in S[1:]:
    if top == i or i == "V" or top =="V":
        # Run broken: record the best length so far and restart here.
        max_cnt=max(cnt,max_cnt)
        cnt=1
        top=i
    else:
        cnt+=1
        top=i
# Account for a run reaching the end of the string.
max_cnt=max(cnt,max_cnt)
print(max_cnt)
|
import gc, argparse, sys, os, errno
import numpy as np
import pandas as pd
import seaborn as sns
#sns.set()
#sns.set_style('whitegrid')
import h5py
from PIL import Image
import os
from tqdm import tqdm as tqdm
import scipy
import sklearn
from scipy.stats import pearsonr
import warnings
warnings.filterwarnings('ignore')
import time
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--text', type=str,
                    help='text type')
args = parser.parse_args()
# Which download branch to run: 'majornews', 'news', 'twitternews' or 'allcompany'.
text = args.text#'majornews'
import tushare as ts
#ts.set_token('') #one time only
ts.set_token('3e7caaeffcf8b35c3419538148106d35990847037d50d07f231d3518')
pro = ts.pro_api()
# Start and end of the date range to download.
start_date = '2016-01-01 00:00:00'
end_date = '2019-12-20 23:59:59'
# Build the list of YYYYMMDD date strings used in the tushare calls below.
from dateutil import rrule
from datetime import datetime
all_dates_list = []
for dt in rrule.rrule(rrule.DAILY,
                      dtstart=datetime.strptime(start_date.split(' ')[0], '%Y-%m-%d'),
                      until=datetime.strptime(end_date.split(' ')[0], '%Y-%m-%d')):
    all_dates_list.append(dt.strftime('%Y%m%d'))
if text == 'majornews':
    # One CSV of major news per day; days already downloaded are skipped.
    for i in tqdm(all_dates_list):
        if not os.path.exists('data/news/majornews/'+i+'.csv'):
            try:
                df = pro.major_news(src='', start_date=i+' 00:00:00', \
                    end_date=i+' 23:59:59', fields='title,content')
                df.to_csv('data/news/majornews/'+i+'.csv')
                time.sleep(60) # rate limit: at most 2 calls/minute on this endpoint
            except:
                print ('bad')
if text == 'news':
    newssrc = ['sina','wallstreetcn','10jqka','eastmoney','yuncaijing']
    # NOTE(review): `all_dates_list[i+1]` raises IndexError on the final date,
    # so the last day is silently skipped via the bare except — confirm intended.
    for i in tqdm(range(len(all_dates_list))):
        for src in newssrc:
            if not os.path.exists('data/news/news/'+all_dates_list[i]+src+'.csv'):
                try:
                    df = pro.news(src=src, start_date=all_dates_list[i], \
                        end_date=all_dates_list[i+1])
                    df.to_csv('data/news/news/'+all_dates_list[i]+src+'.csv')
                    time.sleep(7) # rate limit: at most 10 calls/minute on this endpoint
                except:
                    print ('bad')
if text == 'twitternews':
    # One CSV of exchange-related tweets per day.
    for i in tqdm(all_dates_list):
        if not os.path.exists('data/news/twitternews/'+i+'.csv'):
            try:
                df = pro.exchange_twitter(start_date=i+' 00:00:00', end_date=i+' 23:59:59', \
                    fields="id,account,nickname,content,retweet_content,media,str_posted_at,create_at")
                df.to_csv('data/news/twitternews/'+i+'.csv')
                time.sleep(1) # original note: at most 2 calls/minute on this endpoint
            except:
                print ('bad')
if text == 'allcompany':
    # Download historical announcements for every listed company.
    allcompany = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
    startdate = all_dates_list[0]
    enddate = all_dates_list[-1]
    for i in tqdm(range(len(allcompany))):
        if not os.path.exists('data/news/company/' + allcompany['ts_code'][i] + '.csv'):
            try:
                # Keep the paging cursor local: the original mutated
                # `startdate`, so one company's paging shifted the start date
                # used for every later company.
                pagedate = startdate
                singlestockalldf = pro.anns(ts_code=allcompany['ts_code'][i], start_date=pagedate, end_date=enddate)
                if singlestockalldf.shape[0] == 50:
                    # The API caps one response at 50 rows; fetch the next page
                    # starting from the last date already received.
                    # Bug fix: the original read `df.ann_date` before `df`
                    # existed (NameError swallowed by the bare except).
                    pagedate = np.array(singlestockalldf.ann_date)[-1]
                    df = pro.anns(ts_code=allcompany['ts_code'][i], start_date=pagedate, end_date=enddate)
                    singlestockalldf = pd.concat((singlestockalldf, df), axis=0)
                singlestockalldf.to_csv('data/news/company/' + allcompany['ts_code'][i] + '.csv')
                time.sleep(0)  # rate limit: at most 10 calls/minute on this endpoint
            except Exception:
                # Best-effort loop: log and continue with the next company.
                print('bad')
"""
Monte Carlo Tic-Tac-Toe Player
"""
import random
import poc_ttt_gui
import poc_ttt_provided as provided
# Constants for Monte Carlo simulator
# You may change the values of these constants as desired, but
# do not change their names.
NTRIALS = 100  # Number of trials to run
SCORE_CURRENT = 1.0  # Score for squares played by the current player
SCORE_OTHER = 1.0  # Score for squares played by the other player
# Add your functions here.
def mc_trial(board, player):
    '''Play uniformly random moves on `board`, alternating players, until
    the game ends; returns the final status from board.check_win().'''
    status = board.check_win()
    while status is None:
        open_squares = board.get_empty_squares()
        square = open_squares[random.randrange(len(open_squares))]
        board.move(square[0], square[1], player)
        player = provided.switch_player(player)
        status = board.check_win()
    return status
def mc_update_scores(scores, board, player):
    '''Fold the outcome of a finished trial board into the `scores` grid:
    squares held by the winner gain, squares held by the loser lose, empty
    squares and draws contribute nothing.'''
    other = provided.switch_player(player)
    winner = board.check_win()
    if winner == player:
        deltas = {provided.EMPTY: 0, player: SCORE_CURRENT, other: -SCORE_OTHER}
    elif winner == other:
        deltas = {provided.EMPTY: 0, player: -SCORE_CURRENT, other: SCORE_OTHER}
    else:
        # Draw (or unfinished): nothing changes.
        deltas = {provided.EMPTY: 0, player: 0, other: 0}
    dim = board.get_dim()
    for row in range(dim):
        for col in range(dim):
            scores[row][col] += deltas[board.square(row, col)]
def get_best_move(board, scores):
    '''Return an empty square whose score is maximal; ties are broken
    uniformly at random.'''
    best_score = None
    best_squares = []
    for square in board.get_empty_squares():
        value = scores[square[0]][square[1]]
        if best_score is None or value > best_score:
            best_score = value
            best_squares = [square]
        elif value == best_score:
            best_squares.append(square)
    return best_squares[random.randrange(len(best_squares))]
def mc_move(board, curplayer, ntrials):
    '''Run `ntrials` Monte Carlo playouts from `board` for `curplayer` and
    return the best-scoring empty square.'''
    dim = board.get_dim()
    scores = [[0] * dim for _ in range(dim)]
    for _ in range(ntrials):
        trial_board = board.clone()
        mc_trial(trial_board, curplayer)
        mc_update_scores(scores, trial_board, curplayer)
    return get_best_move(board, scores)
# Test game with the console or the GUI. Uncomment whichever
# you prefer. Both should be commented out when you submit
# for testing to save time.
#provided.play_game(mc_move, NTRIALS, False)
# Launch the 4x4 GUI with machine player X driven by mc_move.
poc_ttt_gui.run_gui(4, provided.PLAYERX, mc_move, NTRIALS, False)
|
import csv
import json
import os
import re
import sys
import time
import operator
#define and create folder for output
folderoutput = "_output"
if not os.path.exists(folderoutput):
    os.mkdir(folderoutput)
# Create the three output CSV files if they do not exist yet; if they do,
# ask the user (prompt is in Italian) whether to overwrite them.
dictallfiles = {"0":"extracted_ks","1":"extracted_bl","2":"extracted_top_pages"}
wannacancel = input("Sovrascrivere se ci sono dei file? y/n ")
for dictfiles in dictallfiles:
    if not os.path.exists(folderoutput + "/" + dictallfiles[dictfiles] + ".csv"):
        with open(folderoutput + "/" + dictallfiles[dictfiles] + ".csv","w") as newfile:
            newfile.closed
    elif os.path.exists(folderoutput + "/" + dictallfiles[dictfiles] + ".csv"):
        if(wannacancel == "y"):
            os.remove(folderoutput + "/" + dictallfiles[dictfiles] + ".csv")
            print(dictallfiles[dictfiles] + ".csv - Rimosso")
        elif(wannacancel =="n"):
            print("ok... fermo il processo")
            sys.exit()
        else:
            print('ops... tasto sbagliato, usa "y" o "n"')
            sys.exit()
            break
print("Caricamento...")
time.sleep(3)
# Counters: number of data folders found / files of each kind processed.
folders = 0
# NOTE(review): `global` at module level is a no-op; kept as-is.
global kscounter,blcounter,tgcounter
kscounter,blcounter,tgcounter = 0,0,0
# Global dictionaries accumulating the extracted rows.
ksdict = {}
bldict = {}
tgdict = {}
#CODE START
#--------------------------------------|#--------------------------------------|
#FUNCTION 01: Check if file name has this string then call appropriate function
#--------------------------------------|#--------------------------------------|
#create dictionary in output then for each file in folder,
#check name and start specific routines
def combinestart(foldername, folderpath):
    """Dispatch every file in `folderpath` to the matching extractor,
    chosen by a substring match on the file name; unmatched files are
    ignored."""
    handlers = (
        ("ubersuggest.", extract_ks),
        ("backlink.", extract_bl),
        ("top_pages.", extract_top_pages),
    )
    for entry in os.scandir(folderpath):
        for pattern, handler in handlers:
            if re.search(pattern, str(entry.name)):
                handler(entry)
                break
#--------------------------------------|#--------------------------------------|
#FUNCTION 02: extract data from CSV into JSON
#--------------------------------------|#--------------------------------------|
#Extract ks from file then put in json
def extract_ks(file):
    """Read one keyword CSV and merge its rows into the global `ksdict`;
    once every data folder has been processed (counter == folders), dump
    the accumulated dict to the JSON output file."""
    global kscounter
    with open(file.path, encoding='utf-8-sig') as csvf:
        for row in csv.DictReader(csvf):
            # The keyword text is the primary key.
            ksdict[row["Keywords"]] = {
                "Volume": int(row["Volume"]),
                "SEO Difficulty": int(row["SEO Difficulty"])
            }
    kscounter += 1
    if kscounter == folders:
        with open(folderoutput + "/" + dictallfiles["0"] + ".json", "a", encoding="utf-8") as jsonf:
            jsonf.write(json.dumps(ksdict, indent=4))
#Extract bl from file then put in json
def extract_bl(file):
global blcounter
#open CSV reader by DictReader
with open(file.path,encoding='utf-8-sig') as csvf:
csvReader = csv.DictReader(csvf)
#Convert into Dict and add it to data
for rows in csvReader:
#assuming column "No" exists as primary key
key = rows["Source URL"]
bldict[key] = {
"Source Page Title" : rows["Source Page Title"],
"Domain Auth" : int(rows["Domain Authority"]),
"Page Auth" : int(rows["Page Authority"]),
"Target URL" : rows["Target URL"],
"Anchor Text" : rows["Anchor Text"]
}
blcounter += 1
#open JSON writer the json.dumps()
if(blcounter == folders):
with open(folderoutput + "/" + dictallfiles["1"] + ".json","a", encoding="utf-8") as jsonf:
jsonf.write(json.dumps(bldict,indent=4))
#Extract top pages from file then put in json
def extract_top_pages(file):
global tgcounter
#open CSV reader by DictReader
with open(file.path,encoding='utf-8-sig') as csvf:
csvReader = csv.DictReader(csvf)
#Convert into Dict and add it to data
for rows in csvReader:
#assuming column "No" exists as primary key
key = rows["URL"]
tgdict[key] = {
"Title" : rows["Title"],
"Est. Visits" : int(rows["Est. Visits"]),
"Backlinks" : int(rows["Backlinks"]),
"Facebook Shares" : int(rows["Facebook Shares"]),
"Pinterest Shares" : int(rows["Pinterest Shares"])
}
tgcounter += 1
#open JSON writer the json.dumps()
if(tgcounter == folders):
with open(folderoutput + "/" + dictallfiles["2"] + ".json","a", encoding="utf-8") as jsonf:
jsonf.write(json.dumps(tgdict,indent=4))
#--------------------------------------|#--------------------------------------|
#FUNCTION 03: sort all CSV extracted by choosen column
#--------------------------------------|#--------------------------------------|
def sorterone(filename, whichlinesort):
    """Sort the CSV `filename` in place by the numeric column
    `whichlinesort`, descending, keeping the header row on top.

    Uses a temporary file under the global `folderoutput` directory as a
    scratch copy of the body rows.
    """
    tempname = folderoutput + "/temp.csv"
    with open(filename, "r", encoding="utf-8", newline='') as src:
        if os.path.exists(tempname):
            os.remove(tempname)
        rows = csv.reader(src)
        header = None
        # Copy everything except the header into the scratch file.
        with open(tempname, "w", encoding="utf-8", newline='') as tmp:
            writer = csv.writer(tmp)
            for idx, row in enumerate(rows):
                if idx == 0:
                    header = row
                else:
                    writer.writerow(row)
        # Sort the body rows numerically on the requested column.
        with open(tempname, "r", encoding="utf-8", newline='') as tmp:
            body = sorted(csv.reader(tmp),
                          key=lambda r: int(r[whichlinesort]),
                          reverse=True)
        # Rewrite the original file: header first, then sorted rows.
        with open(filename, "w", encoding="utf-8", newline='') as dst:
            out = csv.writer(dst)
            out.writerow(header)
            for row in body:
                out.writerow(row)
    os.remove(tempname)
#--------------------------------------|#--------------------------------------|
#CODE 01: look for files in folders then call function to check by file name
#--------------------------------------|#--------------------------------------|
# First pass: count the data folders so the extractors know when the last one
# has been processed (they flush their JSON once their counter == folders).
for entry in os.scandir():
    if(entry.name == folderoutput or entry.name.endswith(".py")):
        continue
    else:
        folders +=1
# Second pass: dispatch each data folder to the extractors.
for entry in os.scandir():
    if(entry.name == folderoutput):
        print("Found Folder = " + entry.name)
    elif(entry.name.endswith(".py")):
        print("Found Routine - ignore file = " + entry.name)
    else:
        combinestart(entry.name,entry.path)
#--------------------------------------|#--------------------------------------|
#CODE 02: convert JSON into CSV
#--------------------------------------|#--------------------------------------|
for file in os.scandir(folderoutput):
    # Only process the JSON files in the output folder.
    if(file.name.endswith(".json")):
        # JSON to CSV for the keyword (KS) data.
        if(file.name == dictallfiles["0"] + ".json"):
            with open(file.path) as jsonf:
                tempjson = json.load(jsonf)
                with open(folderoutput + "/" + dictallfiles["0"] + ".csv","w",encoding="utf-8", newline='') as apri_csv:
                    nomecampi = ['Keyword', 'Volume' , 'SEO Difficulty']
                    oggetto_csv = csv.DictWriter(apri_csv, fieldnames=nomecampi)
                    oggetto_csv.writeheader()
                    for key in tempjson.keys():
                        oggetto_csv.writerow({'Keyword' : key,
                                              'Volume' : tempjson[key]["Volume"],
                                              'SEO Difficulty' : tempjson[key]["SEO Difficulty"]})
            # The JSON is no longer needed; remove it.
            os.remove(file)
            # Re-open the CSV to sort it (column 1 = Volume, descending).
            sorterone(str(folderoutput+"/"+str(file.name).replace(".json","")+".csv"),1)
        # JSON to CSV for the backlink (BL) data.
        elif(file.name == dictallfiles["1"] + ".json"):
            with open(file.path) as jsonf:
                tempjson = json.load(jsonf)
                with open(folderoutput + "/" + dictallfiles["1"] + ".csv","w",encoding="utf-8", newline='') as apri_csv:
                    nomecampi = ['Source Link','Source Page','Target URL','Anchor Text' , 'Domain Auth', 'Page Auth']
                    oggetto_csv = csv.DictWriter(apri_csv, fieldnames=nomecampi)
                    oggetto_csv.writeheader()
                    for key in tempjson.keys():
                        oggetto_csv.writerow({'Source Link' : key,
                                              'Source Page' : tempjson[key]["Source Page Title"],
                                              'Target URL' : tempjson[key]["Target URL"],
                                              'Anchor Text' : tempjson[key]["Anchor Text"],
                                              'Domain Auth' : tempjson[key]["Domain Auth"],
                                              'Page Auth' : tempjson[key]["Page Auth"]})
            # The JSON is no longer needed; remove it.
            os.remove(file)
            # Re-open the CSV to sort it (column 4 = Domain Auth, descending).
            sorterone(str(folderoutput+"/"+str(file.name).replace(".json","")+".csv"),4)
        # JSON to CSV for the top-pages (TG) data.
        elif(file.name == dictallfiles["2"] + ".json"):
            with open(file.path) as jsonf:
                tempjson = json.load(jsonf)
                with open(folderoutput + "/" + dictallfiles["2"] + ".csv","w",encoding="utf-8", newline='') as apri_csv:
                    nomecampi = ['Pagina', 'Titolo' , 'Est. Visite', 'Backlinks','Facebook Shares', 'Pinterest Shares']
                    oggetto_csv = csv.DictWriter(apri_csv, fieldnames=nomecampi)
                    oggetto_csv.writeheader()
                    for key in tempjson.keys():
                        oggetto_csv.writerow({'Pagina' : key,
                                              'Titolo' : tempjson[key]["Title"],
                                              'Est. Visite' : int(tempjson[key]["Est. Visits"]),
                                              'Backlinks' : tempjson[key]["Backlinks"],
                                              'Facebook Shares' : tempjson[key]["Facebook Shares"],
                                              'Pinterest Shares' : tempjson[key]["Pinterest Shares"]})
                    # Close the file handle (no-op attribute access, kept as-is).
                    apri_csv.closed
            # The JSON is no longer needed; remove it.
            os.remove(file)
            # Re-open the CSV to sort it (column 2 = Est. Visite, descending).
            sorterone(str(folderoutput+"/"+str(file.name).replace(".json","")+".csv"),2)
    else:
        print("Il file non deve essere analizzato")
print("Processo completato")
|
import os
import struct
BUFFER_SIZE = 8388608

# Reads one buffer-sized chunk from the open binary file and unpacks it into
# a list of 4-byte ints.
def create_list_of_nums(bin_file):
    """Return ("not done", nums) with the ints decoded from the next chunk,
    or ("done", []) once the file is exhausted."""
    buf = bin_file.read(BUFFER_SIZE)
    if not buf:
        return "done", []
    fmt = '%si' % (len(buf) // 4)
    return "not done", list(struct.unpack(fmt, buf))
#creates a tuples list form a regular list
def list_to_couples_of_tuples(l):
    """Pair up consecutive elements: [a, b, c, d] -> [(a, b), (c, d)].

    Raises IndexError for an odd-length list (same as the original).
    """
    return [(l[i], l[i + 1]) for i in range(0, len(l), 2)]
#this will write buffer_sized data to small files in sorted way
def write_to_file_in_bin(dir, file_name, list, to_sort):
    """Append the numbers in `list` to <dir><file_name>.bin as packed ints,
    treating consecutive elements as (a, b) tuples and optionally sorting
    those tuples first."""
    # Pair consecutive elements into tuples.
    pairs = [(list[i], list[i + 1]) for i in range(0, len(list), 2)]
    if to_sort:
        pairs.sort()
    # Flatten the tuples back into one number sequence.
    flat = [n for pair in pairs for n in pair]
    with open(dir + file_name + '.bin', 'ab') as out:
        out.write(struct.pack('%si' % len(flat), *flat))
#gets list of tuples from file
def get_tupples_from_file(file):
    """Read the next buffer-sized chunk of `file` and return it as a list of
    (a, b) tuples; prints "error" and returns [] at end of file."""
    status, nums = create_list_of_nums(file)
    if status == "done":
        print("error")
        return []
    return list_to_couples_of_tuples(nums)
#merge two sorted tuples lists into one sorted list of tuples
def merge(res_file,file_left,file_right,num_of_part):
    """Merge two sorted part files (each made of `num_of_part` buffer-sized
    chunks) into `res_file`.

    Chunks are streamed: when one side's in-memory chunk is exhausted and
    chunks remain, the next chunk is loaded from its file. The result list
    is flushed to `res_file` whenever it exceeds BUFFER_SIZE*2 entries; the
    final partial buffer is returned to the caller, which writes it out.

    NOTE(review): size_left/size_right are not refreshed after a new chunk
    is loaded — this assumes all chunks within a part have equal length;
    confirm for the last (possibly short) chunk.
    """
    pos_left = 0
    pos_right = 0
    tupples_res=[]
    tupples_left = get_tupples_from_file(file_left)
    tupples_right= get_tupples_from_file(file_right)
    # Number of chunks of each input consumed so far.
    left_part_read = 1
    right_part_read = 1
    size_left = len(tupples_left)
    size_right = len(tupples_right)
    while left_part_read <= num_of_part and right_part_read <= num_of_part:
        if len(tupples_res) > BUFFER_SIZE*2: # write to res file only if buffer size is reached
            tupples_res = [x for item in tupples_res for x in item]
            buf = struct.pack('%si' % len(tupples_res), *tupples_res)
            res_file.write(buf)
            tupples_res = []
        # Both inputs fully consumed: stop.
        if pos_left == size_left and pos_right == size_right and left_part_read == num_of_part and right_part_read == num_of_part:
            break
        if pos_left == size_left:
            # Left chunk exhausted: drain the right side, or load the next
            # left chunk when one remains.
            if left_part_read == num_of_part:
                tupples_res.append(tupples_right[pos_right])
                pos_right += 1
            else:
                tupples_left = get_tupples_from_file(file_left)
                pos_left = 0
                left_part_read+=1
        elif pos_right == size_right:
            # Right chunk exhausted: mirror image of the branch above.
            if right_part_read == num_of_part:
                tupples_res.append(tupples_left[pos_left])
                pos_left += 1
            else:
                tupples_right = get_tupples_from_file(file_right)
                pos_right = 0
                right_part_read+=1
        else:
            # Normal merge step: emit the smaller head tuple.
            if tupples_left[pos_left] < tupples_right[pos_right]:
                tupples_res.append(tupples_left[pos_left])
                pos_left+=1
            else:
                tupples_res.append(tupples_right[pos_right])
                pos_right+=1
    return tupples_res
# breaks the unsorted tuples file into buffer sized sorted files
def create_buffer_sized_files(dir, bin_file):
    """Split the unsorted input file into buffer-sized, individually sorted
    part files named 0.bin, 1.bin, ...; returns how many were written."""
    part = 0
    while True:
        status, nums = create_list_of_nums(bin_file)
        if status == "done":
            return part
        write_to_file_in_bin(dir, str(part), nums, True)
        part += 1
#main function that generates the sorted tuples file
def generate_sorted_tuples(dir,num_of_files):
    """External merge-sort driver: repeatedly merge pairs of numbered part
    files in *dir* until a single sorted file remains.

    Each pass merges files i and i+1 into temp.bin, renames it to the next
    output index, halves the file count and doubles num_of_parts (the
    number of buffer-sized chunks per file).

    NOTE(review): assumes num_of_files is a power of two — with an odd
    count the open() of file i+1 would fail. Confirm with the caller.
    """
    num_of_parts = 1
    next_file_name = 0
    while num_of_files != 1:
        for i in range(num_of_files):
            if i % 2 == 0:
                left_file = open(dir+str(i) + ".bin", "rb")
                right_file = open(dir+str(i + 1) + ".bin", "rb")
                # merge() appends flushed output to temp.bin as it goes.
                res_file = open(dir+"temp.bin", "ab")
                buf_res = merge(res_file,left_file, right_file, num_of_parts)
                left_file.close()
                right_file.close()
                # Inputs are fully consumed; reclaim their disk space.
                os.remove(dir+str(i) + ".bin")
                os.remove(dir+str(i + 1) + ".bin")
                buf_res = [x for item in buf_res for x in item] # from tuples to list
                # Flush the unwritten tail returned by merge().
                if len(buf_res) != 0:
                    buf = struct.pack('%si' % len(buf_res), *buf_res)
                    res_file.write(buf)
                res_file.close()
                os.rename(dir + "temp.bin", dir + str(next_file_name)+".bin")
                next_file_name += 1
        next_file_name = 0
        num_of_files = int(num_of_files / 2)
        num_of_parts *= 2
|
import math, os, pickle, re
class Bayes_Classifier:
#positive files = 11129
#positive frequency = 631382
#percentage positve = 0.8027264858626659
#percentage of frequencies that are positive = 0.825
#negative files = 2735
#negative frequency = 134120
#percentage negative = 0.1972735141373341
#percentage of frequencies that are negative = 0.175
def __init__(self):
"""This method initializes and trains the Naive Bayes Sentiment Classifier. If a
cache of a trained classifier has been stored, it loads this cache. Otherwise,
the system will proceed through training. After running this method, the classifier
is ready to classify input text."""
if os.path.isfile("Positive") and os.path.isfile("Negative"):
self.positive = self.load("Positive")
self.negative = self.load("Negative")
else:
self.train
def train(self):
"""Trains the Naive Bayes Sentiment Classifier."""
IFileList = []
positive_dict = {}
negative_dict = {}
for fFileObj in os.walk("reviews/"):
IFileList = fFileObj[2]
break
for review in IFileList:
count = 0
sText = self.loadFile("reviews/" + review)
word_list = self.tokenize(sText)
if review[7] == '1':
for word in word_list:
if negative_dict.has_key(word):
negative_dict[word] = negative_dict[word] + 1
else:
negative_dict[word] = 1
else:
for word in word_list:
if positive_dict.has_key(word):
positive_dict[word] = positive_dict[word] + 1
else:
positive_dict[word] = 1
self.save(positive_dict, "Positive")
self.save(negative_dict, "Negative")
def classify_bayes(self, sText):
"""Given a target string sText, this function returns the most likely document
class to which the target string belongs (i.e., positive, negative or neutral).
"""
word_list = self.tokenize(sText)
positive = self.probabilityPositive(word_list)
negative = self.probabilityNegative(word_list)
difference = abs(positive-negative)
print "positive: ", positive
print "negative: ", negative
print "difference: ", difference
if difference < 1:
return "neutral"
elif positive > negative:
return "positive"
else:
return "negative"
def probabilityPositive(self,lst):
"""Determines the probability of a text being positive given a list of the words
contained in the text given as reference the training document Positive"""
probability_positive = 0
positive_features = 0.825
for word in lst:
if self.positive.has_key(word):
probability_positive += math.log((self.positive[word]+1)/float(positive_features))
else:
probability_positive += math.log(1/float(positive_features))
return abs(probability_positive)
def probabilityNegative(self,lst):
"""Determines the probability of a text being positive given a list of the words
contained in the text given as reference the training document Positive"""
probability_negative = 0
negative_features = 0.175
for word in lst:
if self.negative.has_key(word):
probability_negative += math.log((self.negative[word]+1)/float(negative_features))
else:
probability_negative += math.log(1/float(negative_features))
return abs(probability_negative)
def loadFile(self, sFilename):
"""Given a file name, return the contents of the file as a string."""
f = open(sFilename, "r")
sTxt = f.read()
f.close()
return sTxt
def save(self, dObj, sFilename):
"""Given an object and a file name, write the object to the file using pickle."""
f = open(sFilename, "w")
p = pickle.Pickler(f)
p.dump(dObj)
f.close()
def load(self, sFilename):
"""Given a file name, load and return the object stored in the file."""
f = open(sFilename, "r")
u = pickle.Unpickler(f)
dObj = u.load()
f.close()
return dObj
def tokenize(self, sText):
"""Given a string of text sText, returns a list of the individual tokens that
occur in that string (in order)."""
lTokens = []
sToken = ""
for c in sText:
if re.match("[a-zA-Z0-9]", str(c)) != None or c == "\"" or c == "_" or c == "-":
sToken += c
else:
if sToken != "":
lTokens.append(sToken)
sToken = ""
if c.strip() != "":
lTokens.append(str(c.strip()))
if sToken != "":
lTokens.append(sToken)
return lTokens
|
# Throwaway practice script for the basic git workflow (edit/commit/push).
print('hello world')
def hellogit():
    # Greeting used to verify that a committed edit shows up after push/pull.
    print('hello git!')
hellogit()
print('the files changes')
print('maybe merge conflict')
print('try to modify the file and commit again')
print('what is the problem')
"""
clone , add, commit,
push , pull, reset
this is how we use git.
"""
print('edit from cloud')
def push():
    # Named after the git command being practiced; prints a reminder only.
    print('push at 10 pm')
|
# Break and Continue in Loops
# break -- leaves the loop entirely
i = 0
while i < 10:
    if i == 4:
        print('Breaking at 4')
        break
    i += 1
# continue -- skips the remainder of the current iteration
i = 0
while True:
    if i >= 10:
        break
    print(i)
    i += 1
    if i == 4:
        print('Skipping 4')
        continue
    print('next condition')
|
#!/usr/bin/env python
'''
Author: Eli Moss elimoss@stanford.edu, Prag Batra prag@stanford.edu
Purpose:
Modify the output of bedtools' intersectbed utility in order to contain only one instance of each genomic locus.
Explanation:
The left outer join functionality of intersectbed will output one line per match between its two inputs, whereas we really want only one line
per genomic locus. The purpose of this script then is to take the output of intersectbed and, when a locus appears more than once
with multiple annotations, produce one line for that locus with the annotations joined with commas.
Example:
1 241320 rs79974410 A G 3.22 PASS DP 70 13_Heterochrom/lo
1 241320 rs79974410 A G 3.22 PASS DP 70 11_Weak_Txn
becomes
1 241320 rs79974410 A G 3.22 PASS DP 70 13_Heterochrom/lo,11_Weak_Txn
The script does so in a streaming fashion from stdin.
'''
from __future__ import print_function
import sys
import argparse
import yaml_keys
import yaml_utils
parser = argparse.ArgumentParser()
parser.add_argument('--modules', help='modules yaml config file (to get BED delimiter info)')
args = parser.parse_args()
# Delimiter used to join multiple annotations for the same locus, read from
# the modules YAML configuration.
modules = yaml_utils.parse_yaml(args.modules)
bed_multimatch_internal_delimiter = modules[yaml_keys.kAnnotation][yaml_keys.kABedMultimatchInternalDelimiter]
annotations = []
firstTime = True
# Stream intersectbed output from stdin. Input is assumed sorted so that all
# lines for a given locus are adjacent.
for line in sys.stdin:
    if(line.startswith('#')): # header
        print (line.rstrip("\n"))
        continue
    #else
    s = line.rstrip('\n').split("\t")
    if firstTime:
        # First data line: just remember it; comparisons start next line.
        prevLine = s
        firstTime = False
        continue
    #else
    # 3 cases:
    # 1) duplicate annotation (same chr, start, stop, annotation)
    # 2) additional annotation (same chr, start, stop (aka same variant), different annotation)
    # 3) different annotation (different chr, start, and/or stop, aka different variant)
    # NOTE(review): firstTime is always False here (the branch above ends in
    # `continue`), so this guard is redundant.
    if(not firstTime):
        # case 1: duplicate annotation
        if(s[0] == prevLine[0] and s[1] == prevLine[1] and s[2] == prevLine[2] and s[3] == prevLine[3]): # chr, start, stop, annotation match
            continue # do nothing -- we already have this annotation in the annotations list
        # case 2: additional annotation
        elif(s[0] == prevLine[0] and s[1] == prevLine[1] and s[2] == prevLine[2] and s[3] != prevLine[3]): # chr, start, stop match, but not annotation (additional annotation)
            # add to list of annotations for this variant
            annotations.append(s[-1])
        # case 3: different annotation (annotation for a different variant)
        # trigger output + reset annotation list
        else:
            print ('\t'.join(prevLine) + '\t' + bed_multimatch_internal_delimiter.join(annotations)) # for now, print out entire VCF line + annotation (TODO print out just annotation in future)
            annotations = []
            # NOTE(review): the new line's annotation is already its own last
            # field; seeding `annotations` with it may double-print it when
            # the variant has no further matches — confirm expected output.
            annotations.append(s[-1])
        prevLine = s
# print out any remaining annotations
if(len(annotations) > 0):
    print ('\t'.join(prevLine) + '\t' + bed_multimatch_internal_delimiter.join(annotations)) # for now, print out entire VCF line + annotation (TODO print out just annotation in future)
|
#https://www.dataquest.io/blog/python-api-tutorial/
import requests
import json
from time import sleep
from datetime import datetime
# #test 404 error code, this api doesnt exist!!!
# response = requests.get("http://api.open-notify.org/this-api-doesnt-exist")
# print(response.status_code)
# #tests success status code
# response = requests.get("http://api.open-notify.org/astros.json")
# print(response.status_code)
# #test json response
# response = requests.get("http://api.open-notify.org/astros.json")
# print(response.json())
# #test jprint function
# jprint(response.json())
# #object for retrieving specific data from the api
# parameters = {
# "lat": 40.71,
# "lon": -74
# }
# #test api with parameters
# response = requests.get("http://api.open-notify.org/iss-pass.json", params=parameters)
# jprint(response.json())
# #understanding response data based on documentation
# pass_times = response.json()['response']
# jprint(pass_times)
# #use a loop to extract just the five risetime values
# risetimes = []
# for d in pass_times:
# time = d['risetime']
# risetimes.append(time)
# print(risetimes)
# #change the format on the times to something that makes sense
# times = []
# for rt in risetimes:
# time = datetime.fromtimestamp(rt)
# times.append(time)
# print(time)
# parameters2 = {
# "current_user_url":
# }
# response = requests.get("https://api.github.com/repos/omxhealth/t-k-interview")
# print(response.status_code)
# jprint(response.json())
# parameters = {
# "username": "tknick2",
# "password": "GitIsGr8"
# }
# requests.get("https://api.github.com/tknick2", auth=requests.auth.HTTPBasicAuth("tknick2", "GitIsGr8"))
# response = requests.get("https://api.github.com/repos/omxhealth/t-k-interview/issues")
# print(response.status_code)
# jprint(response.json())
# response = requests.get("https://api.github.com/repos/omxhealth/t-k-interview/issues/events", params={
# "number": 0
# })
# print(response.status_code)
# jprint(response.json())
# response = requests.get("https://api.github.com/repos/omxhealth/t-k-interview/issues/comments", params={
# "number": 1
# })
# print(response.status_code)
# jprint(response.json())
# test = response.json()
# timestamp = test[0]['updated_at']
# print(timestamp)
# parameters2 = {
# "since": timestamp,
# "state": "all"
# }
# responseEvents = requests.get(eventsURL)
# events = responseEvents.json()
# # print(response.status_code)
# print("Events...")
# print(str(len(events)))
# jprint(events)
# closedIssues = 0
# parameters = {
# "state": "all",
# "user": "tknick2",
# }
# #creates a string from a python object
def jprint(obj):
    """Print a short human-readable summary (title and id) of an issue dict."""
    title_line = "Issue Title: " + obj['title']
    id_line = "Issue ID: " + str(obj['id'])
    print(title_line)
    print(id_line)
#prints a new issue, and the current number of existing open issues and closed issues to the terminal
def ProcessIssue(issue, issues):
    """Announce *issue* if it was opened or closed, then print issue totals."""
    state = issue['state']
    if state == "open":
        print("New Issue!!!!")
        jprint(issue)
    elif state == "closed":
        print('Closed Issue!!!!')
        jprint(issue)
    # Recount totals over the full issue list on every call.
    total = len(issues)
    closed = sum(1 for item in issues if item['state'] == "closed")
    print("Total Issues: " + str(total))
    print("Closed Issues: " + str(closed))
#moves any new events into the eventsToProcess collection
def FindNewEvents(events, eventHandled):
    """Return the events in *events* that have not been handled yet.

    Args:
        events: list of event dicts (each with an 'id' key) just fetched.
        eventHandled: list of event dicts that were already processed.

    Returns:
        list of event dicts whose 'id' is absent from eventHandled, in
        their original order.
    """
    # BUG FIX: the original iterated the module-level global `eventsHandled`
    # (reusing the `eventHandled` parameter as its loop variable), so the
    # argument passed in was silently ignored.
    # Build the handled-id set once: O(n + m) instead of a nested scan.
    handled_ids = {handled['id'] for handled in eventHandled}
    return [event for event in events if event['id'] not in handled_ids]
#collection of handled event ids
eventsHandled = []
#collection of unhandled event ids in case more than 1 new event comes back
eventsToProcess = []
#git API URL for the repo of interest
issuesURL = "https://api.github.com/repos/omxhealth/t-k-interview/issues"
eventsURL = "https://api.github.com/repos/omxhealth/t-k-interview/events"
allIssues = 0
closedIssues = 0
# Poll the GitHub events feed once per second forever, announcing issues
# that were opened or closed since the previous poll.
while True:
    sleep(1)
    #retrieve all current events and issues
    # SECURITY NOTE(review): credentials are hard-coded; move them to
    # environment variables or an untracked config file.
    response = requests.get(eventsURL, auth=requests.auth.HTTPBasicAuth("tknick2", "GitIsGr8"))
    events = response.json()
    response = requests.get(issuesURL, {"state": "all"}, auth=requests.auth.HTTPBasicAuth("tknick2", "GitIsGr8"))
    issues = response.json()
    print("Checking for Issues...")
    #fill the processing buffer
    eventsToProcess = FindNewEvents(events, eventsHandled)
    # Match each unhandled opened/closed IssuesEvent to its issue, report it,
    # and remember the event so it is not reported again.
    for event in eventsToProcess:
        for issue in issues:
            if 'action' in event['payload'] and event['type'] == "IssuesEvent" and (event['payload']['action'] == "closed" or event['payload']['action'] == "opened") and issue['id'] == event['payload']['issue']['id']:
                ProcessIssue(issue, issues)
                eventsHandled.append(event)
                break
    # NOTE(review): closedIssues is never updated in this loop, so this
    # always prints 0; ProcessIssue prints the real counts.
    print("Closed Issues: " + str(closedIssues))
# print(json.dumps(title)) |
#-*- coding:utf-8 -*-
import os
import json
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import numpy as np
from config import config
# import torch
def main():
    """Rasterize TuSimple-style lane labels into white-on-black target images.

    Reads every JSON-lines label file under <Datapath>/train_set/labels and,
    for each labelled frame, draws its lane points into a 720x1280 image
    saved as <Datapath>/train_set/targets/<count>.jpg.
    """
    traindatapath = os.path.join(config['Datapath'], 'train_set')
    # train_path = os.path.join(traindatapath, 'clips')
    train_label = os.path.join(traindatapath, 'labels')
    count = 0
    for l in os.listdir(train_label):
        l_path = os.path.join(train_label, l)
        with open(l_path) as json_file:
            # Each line of the label file is one standalone JSON object.
            for line_json in json_file:
                print(count)
                line_json = line_json.strip()
                j_content = json.loads(line_json) # json.loads converts the string to a dict; JSON keys must use double quotes, single quotes are invalid
                # j_content = eval(line_json)  # eval could replace json.loads here (but is unsafe)
                data_name = j_content['raw_file']
                data_name = data_name.split('/')
                # Rewrite the relative path with Windows-style separators.
                data_name = '\\'.join(data_name)
                widths = j_content['lanes']
                height = j_content['h_samples']
                label_dot = []
                # Collect [row, col] pairs for every valid lane sample
                # (-2 marks "no lane at this h_sample").
                for i in range(len(widths)):
                    line = []
                    for j in range(len(height)):
                        if widths[i][j] != -2:
                            line.append([height[j],widths[i][j]])
                    if line:
                        label_dot.append(line)
                # data_path = os.path.join(traindatapath, data_name)
                # pic = Image.open(data_path)
                # datas_out_path = os.path.join(traindatapath, 'datas', str(count)+'.jpg')
                # pic.save(datas_out_path)
                # Paint each lane point white on a black 720x1280 canvas.
                img = np.zeros((720, 1280, 3))
                for i in range(len(label_dot)):
                    for j in label_dot[i]:
                        img[j[0]][j[1]]= np.array([255,255,255])
                img = Image.fromarray(np.uint8(img))
                # img.show()
                targets_out_path = os.path.join(traindatapath, 'targets', str(count)+'.jpg')
                img.save(targets_out_path)
                count += 1
                # for lane in label_dot:
                #     lane = list(zip(*lane))
                #     plt.scatter(lane[1], lane[0])
                # plt.imshow(pic)
                # plt.show()
def create_datapath_file():
    """Append "data target" path pairs for every training image to namelists.txt.

    Pairs each file in train_set/datas with its same-named counterpart in
    train_set/targets, one pair per line.
    """
    traindatapath = os.path.join(config['Datapath'], 'train_set')
    train_datas_path = os.path.join(traindatapath, 'datas')
    train_targets_path = os.path.join(traindatapath, 'targets')
    name_file = os.path.join(traindatapath, 'namelists.txt')
    # Context manager guarantees the file is closed even if listdir/write
    # raises (the original leaked the handle on any exception before close).
    with open(name_file, 'a') as filename:
        for f in os.listdir(train_datas_path):
            data_path = os.path.join(train_datas_path, f)
            target_path = os.path.join(train_targets_path, f)
            # NOTE(review): '\r' (bare CR) as line terminator is unusual —
            # presumably a newline was intended; confirm downstream reader.
            filename.write(data_path+' '+target_path+'\r')
if __name__ == '__main__':
    # Build the target mask images; run create_datapath_file() separately
    # afterwards to regenerate the data/target path listing.
    main()
    # create_datapath_file()
|
class Phone():
    """Simple phone with a brand name and a color."""
    def __init__(self,name,color):
        self.name = name
        self.color = color
    def call(self):
        # "打电话" = "make a call"; output text kept unchanged.
        print('打电话')
# BUG FIX: the subclass was originally declared `meizi` while instantiated
# below as `meizu(...)`, which raised NameError; the class is renamed to
# match the brand and the call site.
class meizu(Phone):
    pass
class huawei(Phone):
    pass
mz = meizu('魅族','白色')
print(mz.name)
print(mz.color)
# BUG FIX: was `mz.call(()` — an unbalanced parenthesis (SyntaxError).
mz.call()
hw = huawei('华为','黑色')
print(hw.name)
print(hw.color)
hw.call()
|
# Generated by Django 3.1.1 on 2020-09-23 17:59
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the wallet app: Account, Transfer and Movement.

    Auto-generated by Django; all three models share created_at/updated_at
    audit columns and a nullable FK to the project user model (SET_NULL so
    rows survive user deletion).
    """
    initial = True
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]
    operations = [
        # Account: named money container with a running total.
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Creado en')),
                ('updated_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Actualizado en')),
                ('name', models.CharField(max_length=255)),
                ('total', models.DecimalField(decimal_places=2, default=0.0, max_digits=14)),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Transfer: moves an amount between two accounts.
        migrations.CreateModel(
            name='Transfer',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Creado en')),
                ('updated_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Actualizado en')),
                ('amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=14)),
                ('from_account', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='from_transfers', to='wallet.account')),
                ('to_account', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='to_transfers', to='wallet.account')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
        # Movement: a single income (INC) or expense (EXP) on one account.
        migrations.CreateModel(
            name='Movement',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('created_at', models.DateTimeField(auto_now_add=True, db_index=True, verbose_name='Creado en')),
                ('updated_at', models.DateTimeField(auto_now=True, db_index=True, verbose_name='Actualizado en')),
                ('amount', models.DecimalField(decimal_places=2, default=0.0, max_digits=14)),
                ('movement_type', models.CharField(choices=[('INC', 'Income'), ('EXP', 'Expense')], default='INC', max_length=4)),
                ('account', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to='wallet.account')),
                ('user', models.ForeignKey(null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
|
# Twitter API credential placeholders.
# SECURITY: fill these in locally only — never commit real values; prefer
# loading them from environment variables or an untracked config file.
consumer_key = '<consumer_key>'
consumer_secret = '<consumer_secret>'
access_token = '<access_token>'
access_secret = '<access_secret>'
|
import functools
from collections import Counter
from sympy import *
from sympy.physics.quantum import TensorProduct, tensor_product_simp, Ket
import numpy as np
import measurements
import gates
import utils
class Barrier(object):
    """A barrier placed in the circuit.

    Prevents operations on either side from being optimized across it.
    """
    def __init__(self):
        # OpenQASM snippet emitted for this operation by Circuit.compile().
        self.template = 'barrier q'
class Circuit(object):
    """Class for building a quantum circuit.

    The state vector starts as |0...0>; applying a gate multiplies the state
    by the gate's tensor-product operator. Every operation is recorded so
    the circuit can later be compiled to OpenQASM.
    """
    def __init__(self, num_qubits, num_bits):
        """Create `num_qubits` qubits in |0> and `num_bits` classical bits (0).

        Raises:
            ValueError: if num_qubits < 1.
        """
        if num_qubits < 1:
            raise ValueError('You must have at least one qubit.')
        self.num_qubits = num_qubits
        self.num_bits = num_bits
        qubits = [Matrix([1,0])] * num_qubits
        self.qubits = utils.tensorproducts(qubits)
        self.bits = [0 for i in range(num_bits)]
        self._measured_bits = None
        self._all_operations = []
    def _is_qubit_available(self, qubit_index):
        """Raise ValueError unless qubit_index addresses an existing qubit.

        BUG FIX: qubit indices are 0-based, so num_qubits itself is out of
        range; the original `>` comparison accepted it and the later
        operator[qubit_index] assignment crashed with IndexError instead of
        this clear ValueError.
        """
        if qubit_index >= self.num_qubits:
            raise ValueError('Not an available qubit.')
    def _apply_single_gate(self, gate, qubit_index):
        """Apply a single gate by forming the tensor operation.
        If qubit_index is unspecified, apply the gate to all qubits.
        Otherwise, apply the gate to the qubit index.
        """
        if qubit_index is None:
            operator = [gate()] * self.num_qubits
        else:
            self._is_qubit_available(qubit_index)
            # Identity on every other qubit, the requested gate on one.
            operator = [gates.ID_gate()()] * self.num_qubits
            operator[qubit_index] = gate()
        operator = utils.tensorproducts(operator)
        self.qubits = operator * self.qubits
        self._all_operations.append(gate)
    def H(self, qubit_index=None):
        """Apply the Hadamard gate to the qubits."""
        self._apply_single_gate(gates.H_gate(qubit_index), qubit_index)
    def X(self, qubit_index=None):
        """Apply the NOT operation to the qubits."""
        self._apply_single_gate(gates.X_gate(qubit_index), qubit_index)
    def Y(self, qubit_index=None):
        """Apply the Pauli Y operation to the qubits."""
        self._apply_single_gate(gates.Y_gate(qubit_index), qubit_index)
    def Z(self, qubit_index=None):
        """Apply the Pauli Z operation to the qubits."""
        self._apply_single_gate(gates.Z_gate(qubit_index), qubit_index)
    def ID(self, qubit_index=None):
        """Apply the identity operation to the qubits."""
        self._apply_single_gate(gates.ID_gate(qubit_index), qubit_index)
    def RX(self, theta, qubit_index=None):
        """Apply a rotation gate around the x-axis of the Bloch sphere."""
        self._apply_single_gate(gates.RX_gate(theta, qubit_index), qubit_index)
    def RY(self, theta, qubit_index=None):
        """Apply a rotation gate around the y-axis of the Bloch sphere."""
        self._apply_single_gate(gates.RY_gate(theta, qubit_index), qubit_index)
    def RZ(self, theta, qubit_index=None):
        """Apply a rotation gate around the z-axis of the Bloch sphere."""
        self._apply_single_gate(gates.RZ_gate(theta, qubit_index), qubit_index)
    def U3(self, phi, theta, lambda_, qubit_index=None):
        """Apply the universal gate."""
        self._apply_single_gate(
            gates.U3_gate(phi, theta, lambda_, qubit_index),
            qubit_index)
    def U2(self, phi, lambda_, qubit_index=None):
        """Apply the U2 gate."""
        self._apply_single_gate(
            gates.U2_gate(phi, lambda_, qubit_index),
            qubit_index)
    def U1(self, lambda_, qubit_index=None):
        """Apply the universal gate."""
        self._apply_single_gate(
            gates.U1_gate(lambda_, qubit_index),
            qubit_index)
    def CX(self, control_index, target_index):
        """Flip the target qubit if the control qubit is 1."""
        self._is_qubit_available(control_index)
        self._is_qubit_available(target_index)
        if target_index != control_index + 1:
            raise ValueError('Right now, we can only apply the controlled-X \
                when the target is next to the control qubit.')
        # The 2-qubit CX block replaces two single-qubit slots, hence one
        # fewer operator entry than num_qubits.
        operator = [gates.ID_gate()()] * (self.num_qubits - 1)
        cx_gate = gates.CX_gate([control_index, target_index])
        operator[control_index] = cx_gate()
        operator = utils.tensorproducts(operator)
        self.qubits = operator * self.qubits
        self._all_operations.append(cx_gate)
    def CCX(self, control_index1, control_index2, target_index):
        """Toffoli gate: flip the target if both control qubits are 1."""
        self._is_qubit_available(control_index1)
        self._is_qubit_available(control_index2)
        self._is_qubit_available(target_index)
        # Both controls must be the two qubits immediately above the target.
        if (target_index != control_index1 + 2) or \
            (target_index != control_index2 + 1):
            raise ValueError('Right now, we can only apply the CCX \
                when the target is next to the control qubits.')
        # The 3-qubit CCX block replaces three single-qubit slots.
        operator = [gates.ID_gate()()] * (self.num_qubits - 2)
        cx_gate = gates.CCX_gate([control_index1, control_index2, target_index])
        operator[control_index1] = cx_gate()
        operator = utils.tensorproducts(operator)
        self.qubits = operator * self.qubits
        self._all_operations.append(cx_gate)
    def barrier(self):
        """Record a barrier (no effect on the simulated state)."""
        self._all_operations.append(Barrier())
    def reset_qubit(self, qubit_index):
        """Reset a qubit to |0>"""
        raise NotImplementedError
    def execute(self, num_instances):
        """Execute the built circuit a certain number of times.
        Args:
            num_instances (int): number of times to run the circuit
        Returns:
            (dict) mapping possible measured results to number of occurences.
        """
        results = [measurements.measure(self.qubits) for i in range(num_instances)]
        return Counter(results)
    def conditional_gate(self):
        """Apply a gate conditioned on a classical bit
        """
        raise NotImplementedError
    def compile(self, outfile=None):
        """Compile the built circuit into OpenQASM code.

        Writes the program to `outfile` too, when given, and returns it.
        """
        with open('templates/header.txt', 'r') as f:
            QASM_file = f.read()
        QASM_file += '\n\nqreg q[{}]\ncreg c[{}]\n\n'.format(
            self.num_qubits,
            self.num_bits)
        for op in self._all_operations:
            QASM_file += op.template + ';\n'
        if outfile is not None:
            with open(outfile, 'w+') as f:
                f.write(QASM_file)
        return QASM_file
    def get_state(self, output='bracket'):
        """Returns the resulting qubit state.
        output can be one of:
            `bracket`: linear combination of the computational basis
            `tensor`: tensor product of the state
        Args:
            output (str): output type.
        Returns:
            sympy Matrix
        """
        if output == 'bracket':
            lin_combin = 0
            qubits = self.qubits
            size = qubits.shape[0]
            num_qubits = int(np.log2(size))
            # Sum amplitude * |binary basis label> over every basis state.
            for i in range(size):
                lin_combin += (qubits[i] * Ket(utils.int_to_binary(i, num_qubits)))
            return lin_combin
        elif output == 'tensor':
            return self.qubits
        else:
            raise ValueError('Not a correct output type.')
    def measure(self, qubit_index, bit_index, basis='z'):
        """Perform a measurement under a certain basis.
        Args:
            qubit_index (int): index of measured qubit
            bit_index (int): index of bit to record measured qubit
            basis (str): measurement basis
        Returns:
            None
        """
        # The full register is measured once and cached; subsequent calls
        # just read other bits out of the same collapsed result.
        if self._measured_bits is None:
            measured_bits = measurements.measure(self.qubits, basis)
            self._measured_bits = measured_bits
            # Collapse the state vector to the measured basis state.
            new_state = list(map(
                lambda x: utils.bit_to_matrix(x),
                list(measured_bits)))
            self.qubits = utils.tensorproducts(new_state)
        # Bit string is ordered with qubit 0 rightmost, hence the reversal.
        self.bits[bit_index] = self._measured_bits[-1 * qubit_index - 1]
        self._all_operations.append(measurements.Measure(qubit_index, bit_index))
    def bit_list_to_str(self):
        """Convert the list of bits into a string.
        """
        bits = [str(b) for b in self.bits]
        self.bits = ''.join(bits)
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.common.exceptions import NoSuchElementException
from selenium.webdriver.support.ui import Select
from django.test.testcases import LiveServerTestCase
from django.test.testcases import TestCase
from users.models import My_custom_user
from .base import Test_base
import time
from unittest import skip
class Test_challenges(Test_base):
    def auto_create_a_challenge(self):
        """Helper: log in as the default user and create a 'new challenge'
        water_points challenge (2018-03-28 to 2050-03-30), inviting the
        auto-created second user."""
        # create a second user so they can be invited
        self.auto_create_user()
        self.auto_login()
        #create a challenge
        self.browser.find_element_by_id('challenges').click()
        self.browser.find_element_by_id('create_challenge').click()
        title_box = self.browser.find_element_by_name('title')
        start_date_box = self.browser.find_element_by_name('start_date')
        end_date_box = self.browser.find_element_by_name('end_date')
        title_box.send_keys('new challenge')
        start_date_box.clear()
        start_date_box.send_keys('2018-03-28')
        end_date_box.clear()
        end_date_box.send_keys('2050-03-30')
        select = Select(self.browser.find_element_by_name('challenge_health_field'))
        select.select_by_value('water_points')
        time.sleep(.5)
        self.browser.find_element_by_id('id_invitees_1').click() # id of auto created user in invitees list
        time.sleep(.5)
        # user hits enter — submitting from the last field avoids having to
        # locate the submit button
        end_date_box.send_keys(Keys.ENTER)
        time.sleep(.3)
    def test_create_a_challenge(self):
        """End-to-end: create a challenge, verify its details on the
        accepted-challenges page, then verify the leaderboard shows the
        creator with their 10 water points."""
        # create a second user so they can be invited
        self.auto_create_user()
        # user logs in
        self.auto_login()
        # user inputs relevant date input data which will later be seen in the leaderboard
        self.auto_input_data('2018-03-29','0','0','0','0') # water points are added automatically
        # user goes to the create challenge page
        self.browser.find_element_by_id('challenges').click()
        self.browser.find_element_by_id('create_challenge').click()
        #user is sent to the new_challenge page
        self.assertIn('challenges/new_challenge', self.browser.current_url)
        # user fills out the challenge form
        title_box = self.browser.find_element_by_name('title')
        start_date_box = self.browser.find_element_by_name('start_date')
        end_date_box = self.browser.find_element_by_name('end_date')
        title_box.send_keys('new challenge')
        start_date_box.clear()
        start_date_box.send_keys('2018-03-28')
        end_date_box.clear()
        end_date_box.send_keys('2050-03-30')
        select = Select(self.browser.find_element_by_name('challenge_health_field'))
        select.select_by_value('water_points')
        time.sleep(.5)
        self.browser.find_element_by_id('id_invitees_1').click() # id of auto created user in invitees list
        time.sleep(.5)
        #user hits enter
        end_date_box.send_keys(Keys.ENTER)
        time.sleep(.3)
        # user gets sent to the Accepted_challenges_list page
        self.assertIn('Accepted_challenges_list', self.browser.current_url)
        #user checks that the data of the challenge is correct
        end_date = self.browser.find_element_by_id('end_date').text
        start_date = self.browser.find_element_by_id('start_date').text
        challenge_title = self.browser.find_element_by_id('challenge_title').text
        challenge_field = self.browser.find_element_by_id('challenge_field').text
        creator = self.browser.find_element_by_id('creator').text
        challenge_leader_board = self.browser.find_element_by_id('challenge_leaderboard')
        self.assertEqual(end_date, 'March 30, 2050')
        self.assertEqual(start_date, 'March 28, 2018')
        self.assertEqual(challenge_title, 'new challenge')
        self.assertEqual(challenge_field, 'water_points')
        self.assertEqual(creator, 'admin')
        self.assertEqual(challenge_leader_board.text, 'LeaderBoard')
        # the user then clicks on the LeaderBoard link to see the leaderboard page
        challenge_leader_board.click()
        self.assertIn('challenges/challenge_leaderboard/', self.browser.current_url)
        # the user sees that they have 10 water_points and their username
        title = self.browser.find_element_by_id('challenge_title').text
        username = self.browser.find_element_by_id('challenge_username').text
        points = self.browser.find_element_by_id('challenge_points').text
        health_field = self.browser.find_element_by_id('challenge_health_field').text
        self.assertEqual(title, 'Leader Board: new challenge')
        self.assertEqual(username, 'admin')
        self.assertEqual(points, '10')
        self.assertEqual(health_field, 'water_points')
    def test_accept_a_challenge(self):
        """End-to-end: admin creates a challenge inviting admin2; admin2
        logs in, verifies the invitation details, accepts it, and confirms
        it appears in their accepted-challenges list."""
        #forced_login user created a challenge
        #inviting admin2
        self.auto_create_a_challenge()
        #forced login user logs out
        self.auto_log_out()
        #admin2 logs in
        self.auto_login(username='admin2', password='adminadmin2')
        user_name_label = self.browser.find_element_by_id('user_name_label').text
        self.assertEqual(user_name_label, 'admin2')
        #user goes to the accept a challenge page
        self.browser.find_element_by_id('challenges').click()
        self.browser.find_element_by_id('pending_invitations').click()
        self.assertIn('challenges/pending_invitations', self.browser.current_url)
        # user sees the current invitation
        invitation_title = self.browser.find_element_by_id('invitation_title').text
        invitation_start_date = self.browser.find_element_by_id('invitation_start_date').text
        invitation_end_date = self.browser.find_element_by_id('invitation_end_date').text
        invitation_category = self.browser.find_element_by_id('invitation_challenge_category').text
        creator =self.browser.find_element_by_id('invitation_creator').text
        accept_deny_link = self.browser.find_element_by_id('accept_deny_invitation')
        self.assertEqual(invitation_title, 'new challenge')
        self.assertEqual(invitation_start_date,'March 28, 2018' )
        self.assertEqual(invitation_end_date, 'March 30, 2050')
        self.assertEqual(invitation_category, 'water_points')
        self.assertEqual(creator, 'admin')
        self.assertEqual(accept_deny_link.text, 'Accept or reject')
        #user clicks on the accept or reject link
        accept_deny_link.click()
        # user is taken to a new page to update invitation status
        self.assertIn('update_invitation_status', self.browser.current_url)
        # user selects to accept the invitation
        select = Select(self.browser.find_element_by_name('status'))
        select.select_by_value('accepted')
        point_on_page = self.browser.find_element_by_id('id_status')
        point_on_page.send_keys(Keys.ENTER)
        #user gets sent back to pending invitations page
        self.assertIn('pending_invitations', self.browser.current_url)
        #user goes to accepted_challenges page to make sure it was accepted
        self.browser.find_element_by_id('challenges').click()
        self.browser.find_element_by_id('current_future_challenges').click()
        self.assertIn('challenges/Accepted_challenges_list', self.browser.current_url)
        end_date = self.browser.find_element_by_id('end_date').text
        start_date = self.browser.find_element_by_id('start_date').text
        self.assertEqual(start_date,'March 28, 2018')
        self.assertEqual(end_date,'March 30, 2050')
def test_reject_challenge_invitation(self):
    """End-to-end: a second user rejects a challenge invitation and then
    verifies the challenge does NOT appear in their accepted list."""
    self.auto_create_a_challenge()
    self.auto_log_out()
    self.auto_login(username='admin2', password='adminadmin2')
    user_name_label = self.browser.find_element_by_id('user_name_label').text
    # go to the challenge pending-invitations page
    self.browser.find_element_by_id('challenges').click()
    self.browser.find_element_by_id('pending_invitations').click()
    # go to the accept/deny page
    self.browser.find_element_by_id('accept_deny_invitation').click()
    # user rejects the invitation and submits with the Enter key
    select = Select(self.browser.find_element_by_name('status'))
    select.select_by_value('rejected')
    point_on_page = self.browser.find_element_by_id('id_status')
    point_on_page.send_keys(Keys.ENTER)
    # user gets sent back to the pending-invitations page
    self.assertIn('pending_invitations', self.browser.current_url)
    # user goes to the accepted-challenges page to make sure it wasn't accepted
    self.browser.find_element_by_id('challenges').click()
    self.browser.find_element_by_id('current_future_challenges').click()
    self.assertIn('challenges/Accepted_challenges_list', self.browser.current_url)
    # the rejected challenge must not be rendered at all
    with self.assertRaises(NoSuchElementException):
        end_date = self.browser.find_element_by_id('end_date').text
|
# -*- coding: utf-8 -*-
"""
create by caijinxu on 2019/6/4
"""
from app.forms.base import BaseForm, DataRequired
from wtforms import StringField, IntegerField, SubmitField, BooleanField
from wtforms.validators import Length, NumberRange
__author__ = 'caijinxu'
class SearchForm(BaseForm):
    """Search query form: keyword plus optional time window, paging
    (Start/Size) and output-field toggles (Title/Time/Url).

    NOTE: field names are intentionally CamelCase — they map to external
    query parameter names and must not be renamed.
    """
    # Required search keyword, 2-20 characters (messages/placeholders are
    # user-facing Chinese strings and are kept verbatim).
    KeyWord = StringField(
        validators=[
            DataRequired(message='关键字不能为空'),
            Length(2, 20, message='关键字长度2到20')
        ],
        render_kw={
            "placeholder": "搜索关键字,长度2-20"
        }
    )
    # Optional ISO-8601 start of the time window.
    StartTime = StringField(
        render_kw={
            "placeholder": "开始时间:2019-05-30T00:00:00"
        }
    )
    # Optional ISO-8601 end of the time window.
    StopTime = StringField(
        render_kw={
            "placeholder": "结束时间:2019-05-30T13:00:00"
        }
    )
    # Result offset for paging (>= 0).
    Start = IntegerField(
        label='起始位置',
        default=0,
        validators=[
            NumberRange(min=0)
        ]
    )
    # Page size, 1-100.
    Size = IntegerField(
        label='数量',
        default=5,
        validators=[
            DataRequired(),
            NumberRange(min=1, max=100)
        ]
    )
    # Which result fields to include in the output.
    Title = BooleanField(
        label='标题',
        default=True
    )
    Time = BooleanField(
        label='时间',
        default=True
    )
    Url = BooleanField(
        label='url',
        default=True
    )
    submit = SubmitField('提交')
from appkernel import AppKernelEngine
from checkout import InventoryService
# Name the app after the service it exposes.  __name__ is already a str,
# so the previous f-string wrapper was redundant.
app_id = InventoryService.__name__
kernel = AppKernelEngine(app_id)

if __name__ == '__main__':
    # Instantiate the service against the engine and start serving.
    inventory_service = InventoryService(kernel)
    kernel.run()
|
import numpy as np
from fbm import *
from variables import *
class Sender(object):
    """Encodes a lowercase-ASCII message as fractional Brownian motion
    (fBm) series — three series per character.

    Each letter (plus space) maps to a 3-digit base-3 code; each digit
    selects one of three Hurst exponents (hurst1..hurst3, from the
    `variables` module), and one fBm series is generated per digit.
    """

    def __init__(self, debug=False):
        # When True, print the digit code chosen for each character.
        self.debug = debug
        # Digit -> Hurst exponent (plain dict literal; the old
        # dict({...}) wrapper was redundant).
        self.digit_to_hurst = {"0": hurst1, "1": hurst2, "2": hurst3}
        # Letter/space -> 3-digit base-3 code (26 letters + space = 27 codes).
        self.letter_to_digits = {
            "a": "000", "b": "001", "c": "002",
            "d": "010", "e": "011", "f": "012",
            "g": "020", "h": "021", "i": "022",
            "j": "100", "k": "101", "l": "102",
            "m": "110", "n": "111", "o": "112",
            "p": "120", "q": "121", "r": "122",
            "s": "200", "t": "201", "u": "202",
            "v": "210", "w": "211", "x": "212",
            "y": "220", "z": "221", " ": "222",
        }

    def gen_data_series_for_input(self, input_str):
        """Return a list of fBm series encoding *input_str*.

        Three series are appended per character.  Raises KeyError for
        characters outside [a-z ] (space included).
        """
        series = []
        for c in input_str:
            digits = self.letter_to_digits[c]
            if self.debug:  # idiomatic truth test instead of `== True`
                print('letter {} corresponds to digits {}'.format(c, digits))
            for d in digits:
                hurst = self.digit_to_hurst[d]
                serie = fbm(n=seq_len - 1, hurst=hurst, length=length, method=fbm_method)
                series.append(serie)
        return series
import os
import sys
from loguru import logger as log
from botleague_helpers.db import get_db
from box import Box
import utils
from problem_constants.constants import JOB_STATUS_FINISHED, \
JOB_STATUS_ASSIGNED, JOB_TYPE_EVAL, JOB_TYPE_SIM_BUILD, \
JOB_TYPE_DEEPDRIVE_BUILD
from common import get_worker_instances_db
from worker import Worker
def test_build_sim():
    """Run a sim-build job for a fixed commit through the worker loop."""
    job = get_test_job(JOB_TYPE_SIM_BUILD)
    job.branch = 'v3'
    job.commit = 'ee7c19d95e6b419ce70ffd8dda2acd661c1a4e3e'  # pinned commit
    job.build_id = utils.generate_rand_alphanumeric(32)
    run_test_job(job)
def test_build_deepdrive():
    """Run a deepdrive-build job for a fixed commit through the worker loop."""
    job = get_test_job(JOB_TYPE_DEEPDRIVE_BUILD)
    job.branch = 'v3'
    job.commit = '9607fe0ec36642f0a8df34665b646fed29e57229'  # pinned commit
    job.build_id = utils.generate_rand_alphanumeric(32)
    run_test_job(job)
def get_test_job(job_type) -> Box:
    """Build a minimal assigned job of *job_type* and register a fake
    worker instance as available.

    Side effects: sets FORCE_FIRESTORE_DB and INSTANCE_ID in os.environ.
    """
    os.environ['FORCE_FIRESTORE_DB'] = '1'
    instance_id = '9999999999999999999'  # obviously-fake instance id
    inst_db = get_worker_instances_db()
    Worker.make_instance_available(instances_db=inst_db,
                                   instance_id=instance_id)
    os.environ['INSTANCE_ID'] = instance_id
    job_id = 'TEST_JOB_' + utils.generate_rand_alphanumeric(32)
    test_job = Box({
        'botleague_liaison_host': 'https://liaison.botleague.io',
        'status': JOB_STATUS_ASSIGNED,
        'id': job_id,
        'instance_id': instance_id,
        'job_type': job_type, })
    return test_job
def run_test_job(job, run_problem_only=False):
    """Run *job* through one Worker loop iteration against an isolated
    Firestore test collection and assert it finishes cleanly.

    The collection and the FORCE_FIRESTORE_DB env var are cleaned up even
    when an assertion fails — previously the env var leaked on failure
    and could poison later tests in the same process.
    """
    test_id = utils.generate_rand_alphanumeric(32)
    test_jobs_collection = 'test_jobs_' + test_id
    jobs_db = get_db(test_jobs_collection, use_boxes=True,
                     force_firestore_db=True)
    try:
        jobs_db.set(job.id, job)
        worker = Worker(jobs_db=jobs_db, run_problem_only=run_problem_only)
        job = worker.loop(max_iters=1)
        assert job
        assert job.results
        assert job.results.logs
        assert not job.results.errors
        assert job.status.lower() == JOB_STATUS_FINISHED
        assert not utils.dbox(job).coordinator_error
        del os.environ['FORCE_FIRESTORE_DB']
        assert 'FORCE_FIRESTORE_DB' not in os.environ
    finally:
        # No-op on the success path (already deleted above); guarantees
        # cleanup when any assert fails.
        os.environ.pop('FORCE_FIRESTORE_DB', None)
        jobs_db.delete_all_test_data()
def run_problem_eval(
        problem='problem-worker-test',
        bot_tag='deepdriveio/deepdrive:problem_problem-worker-test',
        run_problem_only=False,
        problem_def=None):
    """Build an eval job with a fake eval spec and run it via run_test_job.

    Parameters
        problem          : problem name the worker should evaluate
        bot_tag          : docker tag of the bot container
        run_problem_only : skip the bot, run only the problem container
        problem_def      : optional problem definition dict
    """
    job = get_test_job(JOB_TYPE_EVAL)
    job.eval_spec = {
        'docker_tag': bot_tag,
        'eval_id': job.id,
        'eval_key': 'fake_eval_key',  # liaison auth is faked for tests
        'seed': 1,
        'problem': problem,
        'pull_request': None,
        'problem_def': problem_def or {},
        'full_eval_request': {
            'problem_id': 'deepdrive/dummy_test_problem',
            'botname': 'dummy',
            'username': 'internal_test'
        }, }
    run_test_job(job, run_problem_only)
def test_stop_old_jobs():
    """Smoke test: the worker can stop leftover containers without error."""
    worker = Worker()
    worker.stop_old_containers_if_running()
def test_domain_randomization():
    """Run the domain-randomization problem with its dedicated bot image."""
    run_problem_eval('domain_randomization',
                     bot_tag='deepdriveio/deepdrive:bot_domain_randomization',
                     problem_def={'container_postfix': '_0'})
def test_dummy_container():
    """Run only the test problem container (no bot)."""
    run_problem_eval(problem='problem-worker-test', run_problem_only=True)
def run_all(current_module):
    """Run every `test_*` callable on *current_module*; return how many ran."""
    log.info('Running all tests')
    count = 0
    test_names = [name for name in dir(current_module)
                  if name.startswith('test_')]
    for name in test_names:
        count += 1
        log.info('Running ' + name)
        getattr(current_module, name)()
        log.success(f'Test: {name} ran successfully')
    return count
def main():
    """Run the test named in argv[1], or all tests when no name is given."""
    test_module = sys.modules[__name__]
    if len(sys.argv) <= 1:
        num = run_all(test_module)
    else:
        test_case = sys.argv[1]
        log.info('Running ' + test_case)
        getattr(test_module, test_case)()
        num = 1
        log.success(f'{test_case} ran successfully!')
    log.success(f'{num} tests ran successfully!')

if __name__ == '__main__':
    main()
|
import os
import os.path
from wikimetrics.exceptions import PublicReportIOError
# TODO ultils imports flask response -> fix
from wikimetrics.utils import ensure_dir
class PublicReportFileManager():
    """
    Encapsulates access to filesystem and application level
    operations related to public reports.

    Stateless, safer as a singleton or application scoped object.
    There is an instance of this class that already lives on
    flask application globals.  You can access this global via g:

        from flask import g
        file_manager = g.file_manager

    Please do not add static methods as we want to be able to
    mock this class easily.
    """

    def __init__(self, logger, root_dir):
        """
        Parameters
            logger   : logger used to record IO failures
            root_dir : absolute path under which we want to create reports
        """
        self.logger = logger
        self.root_dir = root_dir

    def get_public_report_path(self, report_id):
        """
        Return the absolute path of the report's JSON file.

        Careful: if the directory doesn't exist it will try to create it!
        This is only meaningful in a local setup; in any other setup puppet
        should have created this directory.

        Parameters
            report_id : unique identifier for the report, a string
        """
        # os.path.join is the portable way to build paths; the previous
        # os.sep.join produced the same string but breaks when components
        # already carry separators.
        report_dir = os.path.join('static', 'public')
        ensure_dir(self.root_dir, report_dir)
        return os.path.join(self.root_dir, report_dir,
                            '{}.json'.format(report_id))

    def write_data(self, file_path, data):
        """
        Write *data* to *file_path*.

        Parameters
            file_path : the path to which we are writing the public report
            data      : string content to write

        Raises
            PublicReportIOError : if an IOError was raised while writing
        """
        try:
            with open(file_path, 'w') as saved_report:
                saved_report.write(data)
        except IOError:
            msg = 'Could not create public report at: {0}'.format(file_path)
            self.logger.exception(msg)
            raise PublicReportIOError(msg)

    def remove_file(self, file_path):
        """
        Delete the report file at *file_path*.

        Parameters
            file_path : the path to the file to be deleted

        Raises
            PublicReportIOError : if the file does not exist or an IOError
            was raised while deleting it
        """
        try:
            if os.path.isfile(file_path):
                os.remove(file_path)
            else:
                raise PublicReportIOError('Could not remove public report at: '
                                          '{0} as it does not exist'.format(file_path))
        except IOError:
            msg = 'Could not remove public report at: {0}'.format(file_path)
            self.logger.exception(msg)
            raise PublicReportIOError(msg)
|
# Author: zhangshulin
# Email: zhangslwork@yeah.net
# Date: 2018-04-18 10:41:10
# Last Modified by: zhangshulin
# Last Modified Time: 2018-04-18 10:41:10
import tensorflow as tf
import numpy as np
import helper
class CoupletsDataGenerator:
    """Yields (X, Y) training batches for a couplets seq2seq model.

    Y is a batch of token-id sequences; X is Y shifted right by one
    position with a leading zero column (teacher-forcing input).
    """

    def __init__(self, set_array, shuffle=True, buffer_size=10000):
        self._data = tf.data.Dataset.from_tensor_slices(set_array)
        if shuffle:
            self._data = self._data.shuffle(buffer_size=buffer_size)

    def get_batch(self, session, batch_size, epochs):
        """Generator over (X, Y) numpy batches for *epochs* passes."""
        batch_data = self._data.batch(batch_size)
        repeat_data = batch_data.repeat(epochs)
        iterator = repeat_data.make_one_shot_iterator()
        # Renamed from `next`, which shadowed the builtin of the same name.
        next_element = iterator.get_next()
        while True:
            try:
                data_set = session.run(next_element)
                Y = data_set
                fore_zeros = np.zeros((data_set.shape[0], 1), dtype=np.int32)
                # Shift right: X[:, t] = Y[:, t-1], with X[:, 0] = 0.
                X = np.concatenate((fore_zeros, Y[:, : -1]), axis=1)
                yield X, Y
            except tf.errors.OutOfRangeError:
                break
def test():
    """Smoke test: stream one epoch of batches from the test split."""
    import helper  # re-imported locally (also imported at module level)
    _, _, _, _, _, test_set = helper.process_dataset()
    data_g = CoupletsDataGenerator(test_set)
    sess = tf.Session()
    batch_g = data_g.get_batch(sess, 128, 1)
    step = 0
    for X, Y in batch_g:
        print('step: ', step)
        print('X: ', X.shape)
        print('Y: ', Y.shape)
        step += 1

if __name__ == '__main__':
    test()
|
import itertools
from functable import FunctionTableProperty
from twisted.internet import defer
from thinserve.api.referenceable import Referenceable
from thinserve.api.remerr import RemoteError
from thinserve.proto.shuttle import Shuttle
from thinserve.proto.error import InternalError
class Session (object):
    """One client session: routes call/reply messages between a remote
    peer and a local @Referenceable root object.

    Outgoing messages are buffered in a Shuttle; incoming messages are
    dispatched to the _receive_* handlers registered below.
    """

    def __init__(self, rootobj):
        assert Referenceable._check(rootobj), \
            'The root object must be @Referenceable.'
        self._rootobj = rootobj
        # call-id -> Deferred awaiting the peer's reply
        self._pendingcalls = {}
        self._idgen = itertools.count(0)
        self._shuttle = Shuttle()

    def gather_outgoing_messages(self):
        """Return a Deferred that fires with buffered outgoing messages."""
        d = defer.Deferred()
        self._shuttle.gather_messages(d)
        return d

    def receive_message(self, msg):
        # Dispatch on the message variant ('call' or 'reply').
        msg.apply_variant_struct(**self._receivers)

    # Maps variant names to the _receive_* methods registered below.
    _receivers = FunctionTableProperty('_receive_')

    @_receivers.register
    def _receive_call(self, id, target, method):
        id = id.parse_type(int)
        obj = self._resolve_sref(target.unwrap())
        methods = Referenceable._get_bound_methods(obj)
        d = defer.maybeDeferred(method.apply_variant_struct, **methods)
        d.addCallback(lambda r: ['data', r])
        # Unexpected failures are coerced so the peer sees a protocol error.
        d.addErrback(InternalError.coerce_unexpected_failure)
        d.addErrback(lambda f: ['error', f.value.as_proto_object()])
        d.addCallback(lambda reply: self._send_reply(id, reply))

    @_receivers.register
    def _receive_reply(self, id, result):
        id = id.parse_type(int)
        d = self._pendingcalls.pop(id)
        result.apply_variant(
            data=d.callback,
            error=lambda lp: d.errback(RemoteError(lp)))

    def _send_call(self, target, method, params):
        # NOTE(review): `.next()` is Python 2 only; on Python 3 this must
        # be `next(self._idgen)` — presumably this codebase targets py2.
        callid = self._idgen.next()
        self._shuttle.send_message(
            ['call',
             {'id': callid,
              'target': target,
              'method': [method, params]}])
        d = defer.Deferred()
        self._pendingcalls[callid] = d
        return d

    def _send_reply(self, id, result):
        self._shuttle.send_message(
            ['reply',
             {'id': id, 'result': result}])

    def _resolve_sref(self, sref):
        # None refers to the session's root object; other refs unsupported.
        if sref is None:
            return self._rootobj
        else:
            raise NotImplementedError(sref)
|
#!/usr/bin/env python
# grw-wrangle
# Takes the CSV file from Google Drive and creates the required JSON data.
#
# Author: Benjamin Bengfort <benjamin@bengfort.com>
# Created: Fri May 29 15:28:50 2015 -0400
#
# Copyright (C) 2015 Bengfort.com
# For license information, see LICENSE.txt
#
# ID: grw-wrangle.py [] benjamin@bengfort.com $
"""
Takes the CSV file from Google Drive and creates the required JSON data.
"""
##########################################################################
## Imports
##########################################################################
import os
import re
import json
import unicodecsv as csv
from operator import itemgetter
from unicodedata import normalize
from collections import defaultdict
from itertools import groupby, combinations
##########################################################################
## Important Paths
##########################################################################
# Project root is one level up from this script's directory.
PROJECT = os.path.normpath(os.path.join(os.path.dirname(__file__), ".."))
FIXTURES = os.path.join(PROJECT, "fixtures")
APPDIR = os.path.join(PROJECT, "app", "data")

##########################################################################
## Helper Functions
##########################################################################

# Punctuation/whitespace characters treated as word separators by slugify.
_punct_re = re.compile(r'[\t !"#$%&\'()*\-/<=>?@\[\\\]^_`{|},.]+')
def slugify(text, delim=u'_'):
    """
    Returns a URL safe slug of the given text.

    NOTE(review): relies on the `unicode` builtin, so this module is
    Python 2 only.
    """
    result = []
    for word in _punct_re.split(text):
        # Strip accents, then drop any remaining non-ASCII bytes.
        word = normalize('NFKD', word).encode('ascii', 'ignore')
        if word:
            result.append(word)
    return unicode(delim.join(result))
def dotify(parts, delim=u'.'):
    """Slugify each part and join the results with *delim* (default '.')."""
    slugged = (slugify(part) for part in parts)
    return delim.join(slugged)
def read_csv(path, key=None, delimiter=","):
    """
    Yield the rows of a CSV file under FIXTURES as dicts.

    Parameters
        path      : file name relative to the FIXTURES directory
        key       : optional column name; when given, all rows are read
                    first and yielded sorted by that column
        delimiter : CSV field delimiter
    """
    path = os.path.join(FIXTURES, path)
    with open(path, 'r') as f:
        reader = csv.DictReader(f, delimiter=delimiter)
        if key is not None:
            reader = sorted([row for row in reader], key=itemgetter(key))
        for row in reader:
            yield row
##########################################################################
## Graph Constructors
##########################################################################
def graph_people(path="people.csv"):
    """
    Spit out connections between people by University and also produce
    the root identifiers for individual people.
    """
    data = defaultdict(dict)
    jkey = u'Institution'  # sort key so people group by institution
    for row in read_csv(path, key=jkey):
        # Node id: root.<institution_slug>.<name_slug>
        uid = dotify([u'root', row[u'Institution'], row[u'Name']])
        data[uid] = {
            'name': uid,
            'imports': [],
        }
    return data
def graph_papers(path="papers.csv"):
    """
    Spit out the connections between people by papers: every unordered
    pair of co-authors on a paper gets mutual 'imports' edges.
    """
    data = defaultdict(dict)
    jkey = u'Paper'
    for gkey, group in groupby(read_csv(path, key=jkey), itemgetter(jkey)):
        for pair in combinations(group, 2):
            for idx, row in enumerate(pair):
                uid = dotify([row[u'Name']])
                if uid not in data:
                    data[uid] = {
                        'name': uid,
                        'imports': [],
                    }
                # cpart is the other member of the pair.
                cpart = pair[0] if idx == 1 else pair[1]
                data[uid]['imports'].append(dotify([cpart[u'Name']]))
    return data
def create_graph():
    """
    Merge the papers graph into the people graph (using defaults) and
    yield one node dict per person.
    """
    papers = graph_papers()
    people = graph_people()
    # Map bare name slug -> fully qualified 'root.institution.name' id.
    lookup = dict((p.split(".")[-1], p) for p in people.keys())
    for paper in papers.values():
        name = lookup[paper["name"]]
        # Renamed the generator variable (was `name`, shadowing the outer
        # binding above).
        people[name]["imports"] = list(lookup[n] for n in paper["imports"])
    # Fixed: the loop variable used to be named `people`, clobbering the
    # dict it was iterating over.
    for person in people.values():
        yield person
##########################################################################
## Main Method
##########################################################################
if __name__ == '__main__':
    # Wrangle the fixture CSVs into the JSON graph the app consumes.
    print ("Looks for CSV files in %s, and dumps the data into %s"
           % (FIXTURES, APPDIR))
    with open(os.path.join(APPDIR, "grw-papers.json"), 'w') as f:
        json.dump(list(create_graph()), f, indent=2)
|
from django import forms
class ProfileForm(forms.Form):
    """User profile edit form."""
    # Using the `label` parameter you can manually change the displayed
    # name to another language.
    first_name = forms.CharField(max_length=100, required=True)
    last_name = forms.CharField(max_length=100, required=True)
    bibliography = forms.CharField(max_length=500, required=True)
    birthdate = forms.DateField(required=False)
    picture = forms.ImageField(required=False)
|
import pytest
from works import tasks
# Every test in this module needs database access.
pytestmark = pytest.mark.django_db

@pytest.fixture(autouse=True)
def change_settings(settings):
    """Enable email notifications for all tests in this module."""
    settings.DISABLE_NOTIFICATIONS = False
    settings.EMAIL_ENABLED = True
    settings.OUR_EMAIL = 'stepik@stepik.stepik'
@pytest.fixture
def email(mailoutbox):
    """Return a callable fetching the first captured outgoing message."""
    def first_message():
        return mailoutbox[0]
    return first_message
def test_should_send_one_email(work, mailoutbox):
    # Exactly one message goes out per notification.
    work.notify_user_by_email()
    assert len(mailoutbox) == 1
def test_should_send_email_with_correct_data(work, email):
    # Recipient comes from the work's user; sender/reply-to from settings.
    work.notify_user_by_email()
    assert email().to == ['i@will.worry']
    assert email().from_email == 'stepik@stepik.stepik'
    assert 'stepik@stepik.stepik' in email().reply_to
def test_should_send_with_correct_content(work, email):
    # The (Russian) body must mention the approved work by primary key.
    work.notify_user_by_email()
    assert f'Ваша работа №{work.pk} была одобрена модератором.' in email().body
def test_should_send_using_task(work, mailoutbox):
    # The async-task path sends the same single email.
    tasks.notify_user_by_email.delay(work.pk, template='user_notification.txt')
    assert len(mailoutbox) == 1
def test_should_not_send_when_notifications_disabled(work, settings, mailoutbox):
    # The DISABLE_NOTIFICATIONS kill switch suppresses sending entirely.
    settings.DISABLE_NOTIFICATIONS = True
    tasks.notify_user_by_email.delay(work.pk, template='user_notification.txt')
    assert len(mailoutbox) == 0
|
from ortools.constraint_solver import routing_enums_pb2
from ortools.constraint_solver import pywrapcp
import dynet as dy
import dynet_modules as dm
import numpy as np
import random
from utils import *
from data import flatten
from time import time
from modules.seq_encoder import SeqEncoder
from modules.bag_encoder import BagEncoder
from modules.tree_encoder import TreeEncoder
class TSPDecoder(Decoder):
    """Linearizes dependency-tree tokens by casting word ordering as a
    Travelling Salesman Problem: a biaffine scorer produces pairwise
    transition scores and an or-tools TSP solver picks the order.

    With full=True the whole sentence is ordered in one shot; otherwise
    each head's subtree domain is ordered bottom-up and then flattened.
    """

    def __init__(self, args, model, full = False):
        super().__init__(args, model)
        self.train_input_key = 'input_tokens'
        self.train_output_key = 'gold_linearized_tokens'
        self.pred_input_key = 'input_tokens'
        self.pred_output_key = 'linearized_tokens'
        self.vec_key = 'tsp_vec'
        # Optional context encoders, enabled via args.tree_vecs.
        if 'seq' in self.args.tree_vecs:
            self.seq_encoder = SeqEncoder(self.args, self.model, 'tsp_seq')
        if 'bag' in self.args.tree_vecs:
            self.bag_encoder = BagEncoder(self.args, self.model, 'tsp_bag')
        if 'tree' in self.args.tree_vecs:
            self.tree_encoder = TreeEncoder(self.args, self.model, 'tsp_tree')
        self.full = full
        # Two learned vectors act as virtual start/end (depot) nodes.
        self.special = self.model.add_lookup_parameters((2, self.args.token_dim))
        self.biaffine = dm.BiaffineAttention(self.model, self.args.token_dim, self.args.hid_dim)
        if not full:
            # LSTMs summarize each ordered subtree into its head's vector.
            self.f_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
            self.b_lstm = dy.VanillaLSTMBuilder(1, self.args.token_dim, self.args.token_dim, model)
        self.log(f'Initialized <{self.__class__.__name__}>, params = {self.model.parameter_count()}')

    def encode(self, sent):
        # Run the configured encoders, then sum their vectors into vec_key.
        if 'seq' in self.args.tree_vecs:
            self.seq_encoder.encode(sent, 'linearized_tokens' if self.args.pred_seq else 'gold_linearized_tokens')
        if 'bag' in self.args.tree_vecs:
            self.bag_encoder.encode(sent)
        if 'tree' in self.args.tree_vecs:
            self.tree_encoder.encode(sent, self.args.pred_tree)
        sum_vecs(sent, self.vec_key, ['feat', 'tsp_seq', 'tsp_bag', 'tsp_tree'])
        # print([t['lemma'] for t in sent['gold_linearized_tokens']])
        # print([t['lemma'] for t in sent.tokens])
        # exit()

    def decode(self, tokens, constraints=[], train_mode=False):
        """Score pairwise transitions and solve the TSP over *tokens*.

        Returns {'loss': hinge loss (train mode only), 'seq': ordered
        tokens}.  NOTE(review): the mutable default `constraints=[]` is
        only read, never mutated, so it is harmless here.
        """
        loss = 0
        errs = []
        # Index 0 is the virtual depot; token i sits at position i+1.
        fr_vecs = [self.special[0]] + [t.vecs[self.vec_key] for t in tokens]
        to_vecs = [self.special[1]] + [t.vecs[self.vec_key] for t in tokens]
        score_mat = self.biaffine.attend(fr_vecs, to_vecs)
        scores = score_mat.npvalue()
        if train_mode:
            # The gold tour visits tokens in original order, returning to 0.
            oids = [0] + [t['original_id'] for t in tokens]
            gold_path = np.argsort(oids).tolist() + [0]
            trans_mat = dy.transpose(score_mat)
            for i, j in zip(gold_path, gold_path[1:]):
                errs.append(dy.hinge(score_mat[i], j))
                errs.append(dy.hinge(trans_mat[j], i))
        if errs:
            loss = dy.average(errs)
        # Convert similarities into non-negative integer costs for or-tools.
        costs = (1000 * (scores.max() - scores)).astype(int).tolist()
        solution = solve_tsp(costs, constraints, self.args.guided_local_search) # first is best
        if not solution:
            # self.log('no solution, remove constraints')
            solution = solve_tsp(costs, [], self.args.guided_local_search)
        assert solution != []
        # Strip the depot from both ends and map indices back to tokens.
        seq = [tokens[i-1] for i in solution[1:-1]]
        return {'loss': loss,
                'seq': seq}

    def get_subtree_constraints(self, head):
        # Pairwise precedence constraints within one head's domain.
        lin_order = [head['domain'].index(t)+1 for t in head['order']]
        constraints = list(zip(lin_order, lin_order[1:]))
        return constraints

    def get_tree_constraints(self, sent):
        # Sentence-level precedence constraints from every token's order.
        constraints = []
        tokens = sent[self.pred_input_key]
        for token in tokens:
            lin_order = [tokens.index(t)+1 for t in token['order']]
            constraints += list(zip(lin_order, lin_order[1:]))
        return constraints

    def predict(self, sent, pipeline=False):
        """Order tokens for *sent*, storing them in 'linearized_tokens'."""
        self.encode(sent)
        if self.full:
            constraints = [] if self.args.no_lin_constraint else self.get_tree_constraints(sent)
            res = self.decode(sent[self.pred_input_key], constraints)
            sent['linearized_tokens'] = res['seq']
        else:
            for token in traverse_bottomup(sent.root):
                domain = ([token] + token['pdeps']) if self.args.pred_tree else token['domain']
                if len(domain) > 1:
                    constraints = [] if self.args.no_lin_constraint else self.get_subtree_constraints(token)
                    res = self.decode(domain, constraints)
                    token['linearized_domain'] = res['seq']
                    # add predicted sequential information
                    f_vec = self.f_lstm.initial_state().transduce([t.vecs[self.vec_key] for t in res['seq']])[-1]
                    b_vec = self.b_lstm.initial_state().transduce([t.vecs[self.vec_key] for t in res['seq'][::-1]])[-1]
                    token.vecs[self.vec_key] += (f_vec + b_vec)
                else:
                    token['linearized_domain'] = [token]
            # After the bottom-up pass, `token` is the root; flatten it.
            sent['linearized_tokens'] = flatten(token, 'linearized_domain')

    def train_one_step(self, sent):
        """One training step; returns timing/loss/accuracy statistics."""
        total = correct = loss = 0
        t0 = time()
        self.encode(sent)
        if self.full:
            constraints = [] if self.args.no_lin_constraint else self.get_tree_constraints(sent)
            res = self.decode(sent[self.train_input_key], constraints, True)
            loss = res['loss']
            total += 1
            sent['linearized_tokens'] = res['seq']
            correct += int(sent['linearized_tokens'] == sent['gold_linearized_tokens'] )
        else:
            for token in traverse_bottomup(sent.root):
                domain = ([token] + token['pdeps']) if self.args.pred_tree else token['domain']
                if len(domain) > 1:
                    constraints = [] if self.args.no_lin_constraint else self.get_subtree_constraints(token)
                    res = self.decode(domain, constraints, True)
                    token['linearized_domain'] = res['seq']
                    loss += res['loss']
                    total += 1
                    correct += int(token['linearized_domain'] == token['gold_linearized_domain'])
                    # add predicted sequential information
                    f_vec = self.f_lstm.initial_state().transduce([t.vecs[self.vec_key] for t in res['seq']])[-1]
                    b_vec = self.b_lstm.initial_state().transduce([t.vecs[self.vec_key] for t in res['seq'][::-1]])[-1]
                    token.vecs[self.vec_key] += (f_vec + b_vec)
                else:
                    token['linearized_domain'] = [token]
            # After the bottom-up pass, `token` is the root; flatten it.
            sent['linearized_tokens'] = flatten(token, 'linearized_domain')
        loss_value = loss.value() if loss else 0
        return {'time': time()-t0,
                'loss': loss_value,
                'loss_expr': loss,
                'total': total,
                'correct': correct
                }

    def evaluate(self, sents):
        """Return BLEU of predicted vs gold linearizations over *sents*."""
        gold_seqs = [sent[self.train_output_key] for sent in sents]
        pred_seqs = [sent[self.pred_output_key] for sent in sents]
        pred_bleu = eval_all(gold_seqs, pred_seqs)
        print([t['lemma'] for t in gold_seqs[0]])
        return pred_bleu
def solve_tsp(costs, constraints=None, beam_size=1, gls=False):
    """Solve a single-vehicle TSP over the integer *costs* matrix with the
    or-tools routing solver, starting and ending at node 0.

    Parameters
        costs       : square matrix of non-negative integer arc costs
        constraints : optional (i, j) pairs forcing node i before node j
        beam_size   : unused.  NOTE(review): callers in this file pass
                      `guided_local_search` as the third positional
                      argument, so it lands here and `gls` stays False —
                      confirm intent before reordering the signature.
        gls         : enable the guided-local-search metaheuristic

    Returns the visiting order as a node list (first == last == 0), or []
    when no assignment was found.
    """
    # Fixed: the default used to be the mutable `constraints=[]`.
    if constraints is None:
        constraints = []
    manager = pywrapcp.RoutingIndexManager(len(costs), 1, 0)
    routing = pywrapcp.RoutingModel(manager)

    def distance_callback(from_index, to_index):
        """Returns the distance between the two nodes."""
        # Convert from routing variable Index to distance matrix NodeIndex.
        from_node = manager.IndexToNode(from_index)
        to_node = manager.IndexToNode(to_index)
        return costs[from_node][to_node]

    transit_callback_index = routing.RegisterTransitCallback(distance_callback)
    # Define cost of each arc.
    routing.SetArcCostEvaluatorOfAllVehicles(transit_callback_index)
    solver = routing.solver()
    # Linear order constraints: a unit 'Order' dimension counts visit rank.
    if constraints:
        order_callback_index = routing.RegisterUnaryTransitCallback(lambda x: 1) # always add 1
        routing.AddDimension(order_callback_index, 0, len(costs)+1, True, 'Order')
        order = routing.GetDimensionOrDie('Order')
        for i, j in constraints:
            solver.Add(order.CumulVar(i) < order.CumulVar(j))
    # Setting first solution heuristic.
    search_parameters = pywrapcp.DefaultRoutingSearchParameters()
    search_parameters.first_solution_strategy = routing_enums_pb2.FirstSolutionStrategy.GLOBAL_CHEAPEST_ARC
    search_parameters.time_limit.seconds = 1
    search_parameters.solution_limit = 100
    search_parameters.log_search = False
    if gls:
        search_parameters.local_search_metaheuristic = routing_enums_pb2.LocalSearchMetaheuristic.GUIDED_LOCAL_SEARCH
    # Solve the problem and walk the resulting route.
    assignment = routing.SolveWithParameters(search_parameters)
    if assignment:
        out = []
        index = routing.Start(0)
        while not routing.IsEnd(index):
            out.append(manager.IndexToNode(index))
            index = assignment.Value(routing.NextVar(index))
        out.append(manager.IndexToNode(index))
        return out
    else:
        return []
|
# Create your views here.
from django.http import HttpResponse
from django.contrib.auth.decorators import login_required
from book.models import Book
from book.forms import addBookForm
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.http import HttpResponse, HttpResponseBadRequest
def pdf_view(request, title):
    """Stream the current user's book titled *title* inline as a PDF."""
    book = Book.objects.filter(user=request.user).get(title=title)
    # Fixed: open in binary mode — PDFs are binary and text mode can
    # corrupt them.  Also removed the unreachable `pdf.closed` statement
    # that followed the return.
    with open(book.book.path, 'rb') as pdf:
        # NOTE(review): `mimetype` is the pre-Django-1.7 keyword, kept for
        # consistency with this codebase; modern Django uses `content_type`.
        response = HttpResponse(pdf.read(), mimetype='application/pdf')
        response['Content-Disposition'] = 'inline;filename=some_file.pdf'
        return response
@login_required
def add_book_view(request):
    """Show and process the add-book form for the logged-in user.

    On a valid POST the Book is created and a success message is shown;
    on an invalid POST the bound form is re-rendered with its errors.
    """
    info = ""
    info_send = True
    if request.method == 'POST':
        form = addBookForm(request.POST, request.FILES)
        if form.is_valid():
            # The dead pre-initializers (book = "", user = "", ...) were
            # removed; cleaned data feeds the ORM create directly.
            book = Book.objects.create(
                book=form.cleaned_data['book'],
                user=request.user,
                year=form.cleaned_data['year'],
                gender=form.cleaned_data['gender'],
                title=form.cleaned_data['title'])
            info = "Guardado exitosamente, puedes agregar otro "
            info_send = True
        else:
            info = "No se ha guardado"
    else:
        form = addBookForm()
    # Single render path (was duplicated across all three branches).
    ctx = {'form': form, 'info': info, 'info_send': info_send}
    return render_to_response('book/add_book.html', ctx,
                              context_instance=RequestContext(request))
@login_required
def my_books_view(request):
    """List the logged-in user's books."""
    context = {'my_books': Book.objects.filter(user=request.user)}
    return render_to_response('book/my_books.html', context,
                              context_instance=RequestContext(request))
|
def persistenciaAditiva(numero):
    """Return the additive persistence of *numero*: the number of times
    its digits must be summed before a single-digit value remains."""
    pasos = 0
    while numero >= 10:
        suma = 0
        while numero:
            numero, digito = divmod(numero, 10)
            suma += digito
        numero = suma
        pasos += 1
    return pasos
# Demo: print the additive persistence of two sample numbers.
print(persistenciaAditiva(7865))
print(persistenciaAditiva(55))
|
import numpy as np
MAX_NUM=0x7fffffff
class Merge(object):
    """Stone-pile merging solver driven by stdin input.

    NOTE(review): the window-search merging strategy below has not been
    verified for correctness; treat the algorithm as-is.
    """
    @staticmethod
    def show():
        # Number of stone piles.
        n = int(input())
        N = 41000
        # Column-vector work area; MAX_NUM sentinels at both ends keep the
        # neighbour scans below from running off the data.
        v = np.array(np.arange(N).reshape(N,1))
        ans = 0
        v[0] = MAX_NUM
        v[n + 1] = MAX_NUM
        # Stones in each pile.
        for i in range(1, n + 1) :
            v[i] = int(input())
        while n > 1:
            n -= 1
            # Find the first k whose left neighbour is smaller than its
            # right neighbour (sentinels guarantee a break).
            for k in range(1, (n + 1) + 1):
                if v[k - 1] < v[k + 1]:
                    break
            # Merged pile size (note: `sum` shadows the builtin here).
            sum = v[k] + v[k - 1]
            # Scan left for the insertion point of the merged pile.
            for j in range(k - 1, -1, -1):
                if v[j] > v[k] + v[k - 1]:
                    break
            # Remove the two merged piles and insert their sum after j.
            v = np.delete(v, k - 1, 0)
            v = np.delete(v, k - 1, 0)
            v = np.insert(v, j + 1, sum, 0)
            ans += sum
        # Output the accumulated merge cost.
        print(ans[0])
def main():
    """Entry point: run the interactive stone-merge solver."""
    Merge().show()

if __name__ == '__main__':
    main()
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
import config
# Shared SQLAlchemy handle; bound to the app inside create_app().
db = SQLAlchemy()

def create_app():
    """Application factory: build, configure and return the Flask app."""
    app = Flask(__name__)
    config.config_app(app)
    # see: https://stackoverflow.com/questions/33241050/trailing-slash-triggers-404-in-flask-path-rule
    app.url_map.strict_slashes = False
    db.init_app(app)
    # Because there is no global Flask app instance,
    # in order for views to access the current_app,
    # we need to import and register them using the
    # created app's context.
    with app.app_context():
        from .views import users
        app.register_blueprint(users.blueprint)
    return app
|
import os
# Get absolute path of this file and derive the project directories.
current_file_path = os.path.abspath(__file__)
BASE_DIR = os.path.dirname(current_file_path)
ROOT_PROJECT_DIR = os.path.dirname(BASE_DIR)  # not used below
# Join BASE_DIR with the templates folder and filename.
email_text = os.path.join(BASE_DIR, 'templates', 'email.txt')
content = ''
with open(email_text, 'r') as f:
    content = f.read()
# The template carries a {name} placeholder filled in here.
print(content.format(name='Zlatan'))
|
import copy
import h5py
import numpy
import pickle
import os
import torch
import platform
import logging
import nmtpytorch
from nmtpytorch import logger
from nmtpytorch import models
from nmtpytorch.mainloop import MainLoop
from nmtpytorch.config import Options, TRAIN_DEFAULTS
from nmtpytorch.utils.misc import setup_experiment, fix_seed
from nmtpytorch.utils.device import DeviceManager
from nmtpytorch.utils.beat import beat_separate_train_valid
from pathlib import Path
import ipdb
beat_logger = logging.getLogger('beat_lifelong_mt')
class Algorithm:
# initialise fields to store cross-input data (e.g. machines, aggregations, etc.)
def __init__(self):
    """Initialise fields that persist across process() calls."""
    # Train/dev splits, filled in by process().
    self.data_dict_train = None
    self.data_dict_dev = None
def setup(self, parameters):
    """Translate BEAT platform *parameters* into nmtpytorch option
    sections ('train' and 'model').  Returns True on success."""
    self.params={}
    # NOTE(review): TRAIN_DEFAULTS is assigned without copying, so the
    # shared module-level dict is mutated below — confirm this is intended.
    self.params['train']=TRAIN_DEFAULTS
    self.params['model']={}
    self.params['train']['seed']=int(parameters['seed'])
    self.params['train']['model_type']=parameters['model_type']
    self.params['train']['patience']=int(parameters['patience'])
    self.params['train']['max_epochs']=int(parameters['max_epochs'])
    self.params['train']['eval_freq']=int(parameters['eval_freq'])
    self.params['train']['eval_metrics']=parameters['eval_metrics']
    self.params['train']['eval_filters']=parameters['eval_filters']
    self.params['train']['eval_beam']=int(parameters['eval_beam'])
    self.params['train']['eval_batch_size']=int(parameters['eval_batch_size'])
    self.params['train']['save_best_metrics']=parameters['save_best_metrics']
    self.params['train']['eval_max_len']=int(parameters['eval_max_len'])
    self.params['train']['checkpoint_freq']=int(parameters['checkpoint_freq'])
    #self.params['train']['n_checkpoints']=parameters['n_checkpoints']
    self.params['train']['l2_reg']=int(parameters['l2_reg'])
    self.params['train']['lr_decay']=parameters['lr_decay']
    self.params['train']['lr_decay_revert']=parameters['lr_decay_revert']
    self.params['train']['lr_decay_factor']=parameters['lr_decay_factor']
    self.params['train']['lr_decay_patience']=int(parameters['lr_decay_patience'])
    self.params['train']['gclip']=int(parameters['gclip'])
    self.params['train']['optimizer']=parameters['optimizer']
    self.params['train']['lr']=parameters['lr']
    self.params['train']['batch_size']=int(parameters['batch_size'])
    self.params['train']['save_optim_state']=False
    # Placeholder path: nothing is saved to disk on the BEAT platform.
    self.params['train']['save_path']=Path("/not/used/because/beat_platform")
    #self.params['train']['tensorboard_dir']="/lium/users/barrault/llmt/tensorboard"
    self.params['model']['att_type']=parameters['att_type']
    self.params['model']['att_bottleneck']=parameters['att_bottleneck']
    self.params['model']['enc_dim']=int(parameters['enc_dim'])
    self.params['model']['dec_dim']=int(parameters['dec_dim'])
    self.params['model']['emb_dim']=int(parameters['emb_dim'])
    self.params['model']['dropout_emb']=parameters['dropout_emb']
    self.params['model']['dropout_ctx']=parameters['dropout_ctx']
    self.params['model']['dropout_out']=parameters['dropout_out']
    self.params['model']['n_encoders']=int(parameters['n_encoders'])
    self.params['model']['tied_emb']=parameters['tied_emb']
    self.params['model']['dec_init']=parameters['dec_init']
    self.params['model']['bucket_by']="src"
    # 'None' (string) means no length limit; otherwise parse as int.
    if parameters['max_len']=="None":
        self.params['model']['max_len']=None
    else:
        self.params['model']['max_len']=int(parameters['max_len'])
    self.params['model']['direction']="src:Text -> trg:Text"
    return True
# this will be called each time the sync'd input has more data available to be processed
def process(self, data_loaders, outputs):
beat_logger.debug("############### mt_train_initial_model")
dl = data_loaders[0]
(data, _,end_data_index) = dl[0]
# separate train and dev data
data_dict = pickle.loads(data["train_data"].text.encode("latin1"))
self.data_dict_train, self.data_dict_dev = beat_separate_train_valid(data_dict)
self.params['data']={}
self.params['data']['train_set']={}
self.params['data']['train_set']['src']=self.data_dict_train['src']
self.params['data']['train_set']['trg']=self.data_dict_train['trg']
self.params['data']['val_set']={}
self.params['data']['val_set']['src']=self.data_dict_dev['src']
self.params['data']['val_set']['trg']=self.data_dict_dev['trg']
self.params['vocabulary']={}
self.params['vocabulary']['src']=data['source_vocabulary'].text
self.params['vocabulary']['trg']=data['target_vocabulary'].text
self.params['filename']='/not/needed/beat_platform'
self.params['sections']=['train', 'model', 'data', 'vocabulary']
opts = Options.from_dict(self.params,{})
setup_experiment(opts, beat_platform=True)
dev_mgr = DeviceManager("gpu")
# If given, seed that; if not generate a random seed and print it
if opts.train['seed'] > 0:
seed = fix_seed(opts.train['seed'])
else:
opts.train['seed'] = fix_seed()
# Instantiate the model object
model = getattr(models, opts.train['model_type'])(opts=opts, beat_platform=True)
beat_logger.info("Python {} -- torch {} with CUDA {} (on machine '{}')".format(
platform.python_version(), torch.__version__,
torch.version.cuda, platform.node()))
beat_logger.info("nmtpytorch {}".format(nmtpytorch.__version__))
beat_logger.info(dev_mgr)
beat_logger.info("Seed for further reproducibility: {}".format(opts.train['seed']))
loop = MainLoop(model, opts.train, dev_mgr, beat_platform = True)
model = loop()
# The model is Pickled with torch.save() and converted into a 1D-array of uint8
# Pass the model to the next block
outputs['model'].write({'value': model}, end_data_index)
beat_logger.debug("############## End of mt_train_model ############")
return True
|
import os
import sys
import setuptools
from setuptools import setup, find_packages

here = os.path.abspath(os.path.dirname(__file__))
# Read the long-description sources with context managers so the file
# handles are closed deterministically (the originals were never closed).
with open(os.path.join(here, 'README.md')) as readme_file:
    README = readme_file.read()
with open(os.path.join(here, 'CHANGES.txt')) as changes_file:
    CHANGES = changes_file.read()

requires = [
    'simplejson',
    'psycopg2',
    'pyparsing',
]

#
# eggs that you need if you're running a version of python lower than 2.7
#
if sys.version_info[:2] < (2, 7):
    requires.extend(['argparse>=1.2.1', 'unittest2>=0.5.1'])

#
# eggs you need for development, but not production
#
dev_extras = (
    'zc.buildout',
    'coverage>=3.5.2',
    'fabric>=1.4.3',
    'zest.releaser>=3.37',
    'nose'
)

setup(
    name='ott.osm',
    version='0.1.0',
    description='Open Transit Tools - OSM Tools (Python)',
    long_description=README + '\n\n' + CHANGES,
    classifiers=[
        "Programming Language :: Python",
    ],
    author="Open Transit Tools",
    author_email="info@opentransittools.com",
    dependency_links=('http://opentransittools.com',),
    license="Mozilla-derived (http://opentransittools.com)",
    url='http://opentransittools.com',
    keywords='ott, otp, view, transit, osm',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    install_requires=requires,
    extras_require=dict(dev=dev_extras),
    tests_require=requires,
    test_suite="ott.osm",
    entry_points="""\
    [paste.app_factory]
    main = ott.view.pyramid.config:main
    """,
)
|
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from subscriptions.views import get_user_membership, get_user_subscription
# Create your views here.
def index(request):
    """Render the site landing page."""
    template_name = 'index.html'
    return render(request, template_name)
def strange(request):
    """Render the landing template (same page as index)."""
    template_name = 'index.html'
    return render(request, template_name)
# @login_required
def profile(request):
    """Render the profile page with the user's membership and subscription."""
    context = {
        'user_membership': get_user_membership(request),
        'user_subscription': get_user_subscription(request),
    }
    return render(request, 'profile.html', context)
def pay(request):
    """Render the payment page."""
    template_name = 'pay.html'
    return render(request, template_name)
# def Video(request):
# return render(request,'video.html') |
import pygame
class ButtonSet:
    """A named collection of buttons drawn onto a single window surface."""

    def __init__(self, window):
        self.buttons = {}      # name -> button
        self.window = window   # surface exposing blit()
        self.hidden = False    # when True, draw() is a no-op

    def add_button(self, name, button):
        """Register *button* under *name*."""
        self.buttons[name] = button

    def check_pressed(self, mouse_x, mouse_y):
        """Refresh every button's pressed state for the given mouse position."""
        for btn in self.buttons.values():
            btn.check_pressed(mouse_x, mouse_y)

    def release(self):
        """Mark every button as not pressed."""
        for btn in self.buttons.values():
            btn.is_pressed = False

    def draw(self):
        """Blit each button's current image at its position (unless hidden)."""
        if self.hidden:
            return
        for btn in self.buttons.values():
            self.window.blit(btn.image(), (btn.x, btn.y))

    def remove_button(self, button):
        """Drop the button registered under key *button*."""
        del self.buttons[button]

    def __getitem__(self, name):
        return self.buttons[name]

    def __call__(self):
        """Invoke every button; pressed ones fire their event."""
        for btn in self.buttons.values():
            btn()

    def hide(self):
        self.hidden = True

    def show(self):
        self.hidden = False
class Button:
    """A two-state (pressed/unpressed) clickable image button.

    Fixes: `from_object` was attached by module-level monkey-patching
    (`Button.from_object = from_object; del from_object`); it is now a
    proper @staticmethod with identical `Button.from_object(...)` calls.
    The size-mismatch error is now ValueError (a subclass of Exception,
    so existing `except Exception` handlers still catch it).
    """

    def __init__(self, x, y, unpressed, pressed):
        self.x = x
        self.y = y
        up_size = unpressed.get_size()
        p_size = pressed.get_size()
        if p_size != up_size:
            raise ValueError("Sizes of pressed and unpressed images for button don't match")
        self.size = up_size
        self.unpressed = unpressed
        self.pressed = pressed
        self.is_pressed = False
        # Default no-op click handler; replaced via set_event().
        def default_event():
            pass
        self.event = default_event

    def image(self):
        """Return the image matching the current pressed state."""
        return self.pressed if self.is_pressed else self.unpressed

    def check_pressed(self, mouse_x, mouse_y):
        """Update and return whether (mouse_x, mouse_y) lies inside the button."""
        in_x = mouse_x >= self.x and mouse_x < (self.x + self.size[0])
        in_y = mouse_y >= self.y and mouse_y < (self.y + self.size[1])
        self.is_pressed = in_x and in_y
        return self.is_pressed

    def set_event(self, event):
        """Install the zero-argument callable fired when the button is clicked."""
        self.event = event

    def __call__(self):
        """Fire the click handler if the button is currently pressed."""
        if self.is_pressed:
            self.event()

    def change_images(self, images):
        """Swap both images from an object exposing .pressed/.unpressed."""
        self.pressed = images.pressed
        self.unpressed = images.unpressed

    @staticmethod
    def from_object(x, y, images):
        """Alternate constructor from an object exposing .unpressed/.pressed."""
        return Button(x, y, images.unpressed, images.pressed)
import nltk
from nltk.sentiment import SentimentAnalyzer
from nltk.classify import NaiveBayesClassifier
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.corpus import subjectivity
from nltk.sentiment.util import *
from nltk.corpus import stopwords
from flask import Flask
from flask import request
import requests.auth
import requests
import re
try:
# For Python 3.0 and later
from urllib.request import urlopen
except ImportError:
# Fall back to Python 2's urllib2
from urllib2 import urlopen
from bs4 import BeautifulSoup
app = Flask(__name__)
@app.route('/')
def index():
    """Root endpoint; simple liveness response."""
    greeting = "Hello, World!"
    return greeting
@app.route('/getNER',methods=['GET'])
def getStory():
    """Tokenize, POS-tag and NER-chunk the submitted story.

    NOTE(review): the chunk tree is computed but never returned — the
    endpoint always answers "Hello, World!"; presumably unfinished.
    """
    story = request.form['story']
    tokens = nltk.word_tokenize(story)
    tagged = nltk.pos_tag(tokens)
    entities = nltk.chunk.ne_chunk(tagged)
    return "Hello, World!"
@app.route('/postStory',methods=['POST'])
def postStory():
    """Run sentiment analysis on the JSON request body (Python 2 handler)."""
    print '*********'
    story = request.json
    print 'STORY' , story
    return getSentimentAna(story)
@app.route('/postLyrics',methods=['POST'])
def postLyrics():
    """Scrape the AZLyrics page for a hard-coded song and return its sentiment."""
    artist = 'kanyewest'
    song_title = 'goodmorning'
    # AZLyrics URLs use lowercase alphanumerics only.
    artist = re.sub('[^A-Za-z0-9]+', "", artist.lower())
    song_title = re.sub('[^A-Za-z0-9]+', "", song_title.lower())
    # remove starting 'the' from artist e.g. the who -> who
    if artist.startswith("the"):
        artist = artist[3:]
    url = "http://azlyrics.com/lyrics/" + artist + "/" + song_title + ".html"
    try:
        page = urlopen(url)
        markup = BeautifulSoup(page, 'html.parser')
        return getSentimentAna(str(markup))
    except Exception as e:
        return "Exception occurred \n" + str(e)
def features(sentence):
    """Return a bag-of-words feature dict: 'contains(word)' -> True.

    Fixes the original, which returned inside the loop after the first word
    and called dict() on a single ('contains(w)', True) tuple — a form that
    raises ValueError. Now every lower-cased word contributes one feature.
    """
    words = sentence.lower().split()
    return dict(('contains(%s)' % w, True) for w in words)
def getRedditStores():
    """Fetch this week's top r/writingprompts posts and forward them to a
    local titles service; returns the raw Reddit JSON payload.

    SECURITY NOTE(review): OAuth client credentials and a bearer token are
    hard-coded below — move them to configuration/secret storage.
    """
    client_auth = requests.auth.HTTPBasicAuth('xPfqMy9KUhrOvQ', 'lhJZGKDpwGosU1Ed87ps63KMrW0')
    headers = {"User-Agent": "Mozilla/5.0 AppleWebKit/537.36"}
    # response = requests.post("https://www.reddit.com/api/v1/access_token", auth=client_auth, data=post_data, headers=headers)
    # print response.json().access_token
    credJson = "bearer " + "8UTUaRcvtmnPqkdNZNdi0pA1Ygc"
    headers = {"Authorization": credJson, "User-Agent": "Mozilla/5.0 AppleWebKit/537.36"}
    response = requests.get("https://oauth.reddit.com/r/writingprompts/top/?sort=top&t=week", headers=headers)
    data = response.json()
    # print(data)
    # result = []
    # for tupl in data:
    #     comments = request.get("https://oauth.reddit.com/"+tupl.permalink,headers=header)
    #     result.append((tupl,list(comments.json()[1].data.children)))
    requests.post("http://localhost:3000/sendTitles",data=response.json())
    return response.json()
def getSentimentAna(lyrics):
    """Classify *lyrics* (raw HTML/text) as 'pos' or 'neg'.

    Strips markup, trains a subjectivity NaiveBayes model on 100 NLTK
    subjectivity sentences, then sums VADER polarity scores over all lines
    and returns whichever of neg/pos dominates.

    NOTE(review): the classifier is retrained on every call — expensive;
    indentation below was reconstructed (the decision block is read as
    following the per-sentence loop) — confirm against the original file.
    """
    try:
        print 'here'
        # lyrics lies between up_partition and down_partition
        # up_partition = '<!-- Usage of azlyrics.com content by any third-party lyrics provider is prohibited by our licensing agreement. Sorry about that. -->'
        # down_partition = '<!-- MxM banner -->'
        # lyrics = lyrics.split(up_partition)[1]
        # lyrics = lyrics.split(down_partition)[0]
        lyrics = lyrics.replace('<br>','').replace('<','').replace('br','').replace('/>','').replace('i>','').replace('</div>','').replace('/','').strip().lower()
        lyrics = re.sub(r'\[(.*?)\]','',lyrics)
        def filterStopWord(word):
            # True for words worth keeping (not English stopwords).
            if word in stopwords.words('english'):
                return False
            else:
                return True
        arr = lyrics.splitlines()
        print(arr)
        tokens_arr = list(map(nltk.word_tokenize,arr))
        def ripOutEmpty(arr):
            # Drop empty token lists.
            return arr and len(arr) > 0
        tokens_arr = filter(ripOutEmpty, tokens_arr)
        def appendSubj(arr):
            # Label every line 'obj' so apply_features accepts it.
            return (arr,'obj')
        tokens_arr = map(appendSubj,tokens_arr)
        # Train a small subjectivity classifier on 100 corpus sentences.
        n_instances = 100
        subj_docs = [(sent, 'subj') for sent in subjectivity.sents(categories='subj')[:n_instances]]
        obj_docs = [(sent, 'obj') for sent in subjectivity.sents(categories='obj')[:n_instances]]
        len(subj_docs), len(obj_docs)
        train_subj_docs = subj_docs[:80]
        test_subj_docs = subj_docs[80:100]
        train_obj_docs = obj_docs[:80]
        test_obj_docs = obj_docs[80:100]
        training_docs = train_subj_docs+train_obj_docs
        testing_docs = test_subj_docs+test_obj_docs
        sentim_analyzer = SentimentAnalyzer()
        all_words_neg = sentim_analyzer.all_words([mark_negation(doc) for doc in training_docs])
        unigram_feats = sentim_analyzer.unigram_word_feats(all_words_neg, min_freq=4)
        len(unigram_feats)
        sentim_analyzer.add_feat_extractor(extract_unigram_feats, unigrams=unigram_feats)
        training_set = sentim_analyzer.apply_features(training_docs)
        test_set = sentim_analyzer.apply_features(testing_docs)
        trainer = NaiveBayesClassifier.train
        classifier = sentim_analyzer.train(trainer, training_set)
        fake = ['0']*len(arr)
        feature = sentim_analyzer.apply_features(tokens_arr, True)
        for key,value in sorted(sentim_analyzer.evaluate(feature).items()):
            print('{0}: {1}'.format(key, value))
        # Accumulate VADER neg/neu/pos scores over all (stopword-filtered) lines.
        sim = SentimentIntensityAnalyzer()
        sentence_neg = 0
        sentence_pos = 0
        sentence_neu = 0
        for sentence in arr:
            print 'sentence',sentence
            sentence = " ".join(filter(filterStopWord,sentence.replace(',','').split()))
            print sentence
            ss = sim.polarity_scores(sentence)
            for k in sorted(ss):
                if k == 'neg':
                    sentence_neg = sentence_neg + ss[k]
                elif k == 'neu':
                    sentence_neu = sentence_neu + ss[k]
                else:
                    sentence_pos = sentence_pos + ss[k]
                print k, ss[k]
        if sentence_neg > sentence_pos and sentence_neg > sentence_neu:
            print(sentence_neg)
            return 'neg'
        else:
            print(sentence_pos)
            return 'pos'
    except Exception as e:
        return "Exception occurred \n" +str(e)
if __name__ == '__main__':
    # NOTE(review): postLyrics() is invoked once at startup, before the
    # server runs — presumably a leftover manual test; confirm.
    postLyrics()
    app.run(debug=True)
|
import os
import click
import gin
import tensorflow as tf
import numpy as np
from mlagents_envs.environment import UnityEnvironment
from tf_agents.agents import PPOAgent
from tf_agents.drivers import driver
from tf_agents.drivers.dynamic_step_driver import DynamicStepDriver, is_bandit_env
from tf_agents.environments.tf_py_environment import TFPyEnvironment
from tf_agents.metrics import tf_metrics
from tf_agents.networks.actor_distribution_network import ActorDistributionNetwork
from tf_agents.networks.value_network import ValueNetwork
from tf_agents.policies.policy_saver import PolicySaver
from tf_agents.replay_buffers.tf_uniform_replay_buffer import TFUniformReplayBuffer
from tf_agents.trajectories import time_step as ts, trajectory
from tf_agents.trajectories.trajectory import Trajectory
from tf_agents.utils import nest_utils, common
from tf_agents.utils.common import Checkpointer
from tf_agents.eval.metric_utils import log_metrics, MetricsGroup
from ai_playground.selfdrive.haar_ppo_agent import HaarPPOAgent
from ai_playground.selfdrive.environments import UnityEnv, HighLvlEnv, LowLvlEnv
from ai_playground.selfdrive.optimizers import get_optimizer
from ai_playground.selfdrive.train_drivers import HaarLowLvlDriver
from ai_playground.utils.exp_data import create_exp_local, init_neptune
from ai_playground.utils.logger import get_logger
import matplotlib.pyplot as plt
import matplotlib.animation as animation
from matplotlib import style
style.use('fivethirtyeight')
logger = get_logger()
class SelfDriveAgent:
    """Hierarchical (HAAR-style) two-level PPO training harness over a Unity env.

    Builds a high-level and a low-level PPO agent over the same Unity
    simulation, with replay buffers, checkpointing, policy savers and
    summary writers. train() runs the HAAR loop: low-level rewards are
    rewritten with the high-level advantage before training the low level.
    """

    def __init__(self, ctx: click.Context):
        """Construct envs, both agents, buffers and checkpointers from ctx config."""
        self.ctx = ctx
        # Resume an existing experiment directory or create a fresh one.
        if self.ctx.obj['config']['utils']['load_exp']:
            self.exp_dir = self.ctx.obj['config']['utils']['load_exp']
            logger.info("Loading experiment from: " + self.exp_dir)
        else:
            self.exp_dir = create_exp_local(
                experiments_dir=self.ctx.obj['config']['utils']['experiments_dir'],
                exp_name=self.ctx.obj['exp_name']
            )
        if ctx.obj['log2neptune']:
            self.neptune_exp = init_neptune(ctx)
            logger.info("This experiment is logged to neptune.ai")
        # One shared Unity simulation; both TF environments wrap it.
        self.unity_env: UnityEnvironment = UnityEnv(ctx).get_env()
        self.unity_env.reset()
        self.high_lvl_env: TFPyEnvironment = TFPyEnvironment(HighLvlEnv(self.ctx, self.unity_env))
        self.low_lvl_env: TFPyEnvironment = TFPyEnvironment(LowLvlEnv(self.ctx, self.unity_env))
        self.haar_config = self.ctx.obj['config']['algorithm']['params']['haar']
        # Optimizer section selected by the haar 'policy_optimizer' key.
        self.optim_config = self.ctx.obj['config']['algorithm']['params'][
            self.ctx.obj['config']['algorithm']['params']['haar']['policy_optimizer']]
        self.high_lvl_agent: PPOAgent = PPOAgent(
            time_step_spec=self.high_lvl_env.time_step_spec(),
            action_spec=self.high_lvl_env.action_spec(),
            optimizer=get_optimizer(self.optim_config['optimizer'],
                                    self.optim_config['optimizer_params'][self.optim_config['optimizer']]),
            actor_net=create_network(self.optim_config['ppo_actor_net'], self.high_lvl_env.observation_spec(),
                                     self.high_lvl_env.action_spec()),
            value_net=create_network(self.optim_config['ppo_value_net'], self.high_lvl_env.observation_spec(),
                                     self.high_lvl_env.action_spec()),
            importance_ratio_clipping=self.optim_config['importance_ratio_clipping'],
            discount_factor=self.haar_config['discount_factor_high_lvl'],
            entropy_regularization=self.optim_config['entropy_regularization'],
            num_epochs=self.optim_config['num_epochs'],
            use_gae=self.optim_config['use_gae'],
            use_td_lambda_return=self.optim_config['use_td_lambda_return'],
            gradient_clipping=self.optim_config['gradient_clipping'],
            debug_summaries=True,
            summarize_grads_and_vars=True,
            train_step_counter=tf.Variable(0)
        )
        self.low_lvl_agent: PPOAgent = PPOAgent(
            time_step_spec=self.low_lvl_env.time_step_spec(),
            action_spec=self.low_lvl_env.action_spec(),
            optimizer=get_optimizer(self.optim_config['optimizer'],
                                    self.optim_config['optimizer_params'][self.optim_config['optimizer']]),
            actor_net=create_network(self.optim_config['ppo_actor_net'], self.low_lvl_env.observation_spec(),
                                     self.low_lvl_env.action_spec()),
            value_net=create_network(self.optim_config['ppo_value_net'], self.low_lvl_env.observation_spec(),
                                     self.low_lvl_env.action_spec()),
            importance_ratio_clipping=self.optim_config['importance_ratio_clipping'],
            discount_factor=self.haar_config['discount_factor_low_lvl'],
            entropy_regularization=self.optim_config['entropy_regularization'],
            num_epochs=self.optim_config['num_epochs'],
            use_gae=self.optim_config['use_gae'],
            use_td_lambda_return=self.optim_config['use_td_lambda_return'],
            gradient_clipping=self.optim_config['gradient_clipping'],
            debug_summaries=True,
            summarize_grads_and_vars=True,
            train_step_counter=tf.Variable(0)
        )
        self.high_lvl_agent.train_step_counter.assign(0)
        self.low_lvl_agent.train_step_counter.assign(0)
        self.high_lvl_agent.initialize()
        self.low_lvl_agent.initialize()
        self.low_lvl_replay_buffer = TFUniformReplayBuffer(
            data_spec=self.low_lvl_agent.collect_data_spec,
            batch_size=1,
            max_length=1000
        )
        self.high_lvl_replay_buffer = TFUniformReplayBuffer(
            data_spec=self.high_lvl_agent.collect_data_spec,
            batch_size=1,
            max_length=1000
        )
        # Holds low-level transitions whose reward was replaced by the
        # HAAR advantage (see train()/modify_reward()).
        self.modified_low_lvl_rep_buffer = TFUniformReplayBuffer(
            data_spec=self.low_lvl_agent.collect_data_spec,
            batch_size=1,
            max_length=1000
        )
        train_dir = os.path.join(self.exp_dir, 'train')
        eval_dir = os.path.join(self.exp_dir, 'eval')
        self.train_summary_writer = tf.compat.v2.summary.create_file_writer(
            train_dir, flush_millis=self.ctx.obj['config']['train_session']['summaries_flush_secs'] * 1000)
        # self.train_summary_writer.set_as_default()
        #
        self.eval_summary_writer = tf.compat.v2.summary.create_file_writer(
            eval_dir, flush_millis=self.ctx.obj['config']['train_session']['summaries_flush_secs'] * 1000)
        self.h_eval_metrics = [
            tf_metrics.ChosenActionHistogram()
        ]
        self.l_eval_metrics = [
            tf_metrics.AverageReturnMetric(buffer_size=self.ctx.obj['config']['train_session']['num_eval_episodes']),
            tf_metrics.AverageEpisodeLengthMetric(buffer_size=self.ctx.obj['config']['train_session']['num_eval_episodes'])
        ]
        self.h_step_observers = [
            tf_metrics.EnvironmentSteps(),
            tf_metrics.AverageReturnMetric(batch_size=1)
        ]
        self.l_step_observers = [
            tf_metrics.EnvironmentSteps(),
            tf_metrics.AverageReturnMetric(batch_size=1)
        ]
        self.h_checkpoint_dir = os.path.join(self.exp_dir, 'h_checkpoints')
        self.h_checkpointer = Checkpointer(
            ckpt_dir=self.h_checkpoint_dir,
            max_to_keep=1,
            agent=self.high_lvl_agent,
            policy=self.high_lvl_agent.policy,
            replay_buffer=self.high_lvl_replay_buffer,
            global_step=self.high_lvl_agent.train_step_counter,
            metrics=MetricsGroup(self.h_step_observers, 'high_lvl_train_metrics')
        )
        self.h_tf_policy_saver_dir = os.path.join(self.exp_dir, 'h_policy')
        self.h_tf_policy_saver = PolicySaver(self.high_lvl_agent.policy)
        self.l_checkpoint_dir = os.path.join(self.exp_dir, 'l_checkpoints')
        self.l_checkpointer = Checkpointer(
            ckpt_dir=self.l_checkpoint_dir,
            max_to_keep=1,
            agent=self.low_lvl_agent,
            policy=self.low_lvl_agent.policy,
            replay_buffer=self.modified_low_lvl_rep_buffer,
            global_step=self.low_lvl_agent.train_step_counter,
            metrics=MetricsGroup(self.l_step_observers, 'low_lvl_train_metrics')
        )
        self.l_tf_policy_saver_dir = os.path.join(self.exp_dir, 'l_policy')
        self.l_tf_policy_saver = PolicySaver(self.low_lvl_agent.policy)
        self.h_checkpointer.initialize_or_restore()
        self.l_checkpointer.initialize_or_restore()
        # HAAR hyper-parameters: presumably k_0 low-level steps per
        # high-level action, j high-level steps per training round — confirm.
        self.k_0 = self.haar_config['k_0']
        self.k_s = self.haar_config['k_s']
        self.j = self.haar_config['j']

    def train(self):
        """Run the HAAR training loop.

        Each episode: one high-level step drives k_0 low-level steps; the
        high-level agent trains every j episodes; the low-level agent trains
        every episode on transitions whose reward is replaced by the
        high-level advantage.
        """
        high_lvl_driver = DynamicStepDriver(
            env=self.high_lvl_env,
            policy=self.high_lvl_agent.collect_policy,
            observers=[self.high_lvl_replay_buffer.add_batch] + self.h_step_observers,
            num_steps=1
        )
        low_lvl_driver = HaarLowLvlDriver(
            env=self.low_lvl_env,
            policy=self.low_lvl_agent.collect_policy,
            observers=[self.low_lvl_replay_buffer.add_batch] + self.l_step_observers,
            num_steps=1
        )
        # Bootstrap: collect one high-level transition before the loop.
        high_lvl_driver._num_steps = 1
        high_lvl_driver.run()
        high_lvl_dataset = self.high_lvl_replay_buffer.as_dataset(sample_batch_size=1)
        high_lvl_iter = iter(high_lvl_dataset)
        h_exp_prev, _ = next(high_lvl_iter)
        for ep_count in range(1, self.haar_config['num_eps'] + 1):
            # Tell the Unity side-channel how many low-level steps follow.
            sds = list(self.unity_env.side_channels.values())
            sds[1].set_float_parameter("num_steps", self.k_0 * self.j)
            high_lvl_action = h_exp_prev.action
            low_lvl_driver._num_steps = self.k_0
            low_lvl_driver.run(high_lvl_action=high_lvl_action)
            if ep_count % self.j == 0:
                # Periodically train/checkpoint the high level.
                exp = self.high_lvl_replay_buffer.gather_all()
                self.high_lvl_agent.train(exp)
                self.h_checkpointer.save(self.high_lvl_agent.train_step_counter)
                self.h_tf_policy_saver.save(self.h_tf_policy_saver_dir)
                self.l_checkpointer.save(self.low_lvl_agent.train_step_counter)
                self.l_tf_policy_saver.save(self.l_tf_policy_saver_dir)
                logger.info("Saved artifacts at: " + self.exp_dir)
                self.high_lvl_env.reset()
                self.unity_env.reset()
                self.high_lvl_replay_buffer.clear()
                print("High level trained.")
            # modified_rewards = tf.math.scalar_mul(1 / self.k_0, advs)
            # sliced_mod_rewards = []
            # for i in range(0, int(self.k_0) - 1):
            #     slice = tf.slice(modified_rewards, [0, i], [-1, 1])
            #     sliced_mod_rewards.append(slice)
            # new_low_lvl_dataset: MapDataset = MapDataset()
            # for el in low_lvl_dataset:
            #     new_trajectory = Trajectory(el[0].step_type, el[0].observation, el[0].action, el[0].policy_info,
            #                                 el[0].next_step_type, tf.math.scalar_mul(1/k_0, el[0].reward), el[0].discount)
            #     new_buff_info = el[1]
            #     new_dataset_el = (new_trajectory, new_buff_info)
            #     print(new_dataset_el)
            # low_lvl_dataset = low_lvl_dataset.map(lambda traj_info: (tf.math.scalar_mul(1/k_0, traj_info[0].reward), traj_info[1]))
            # for el in low_lvl_dataset: #todo, not same nest structure
            #     values_batched = tf.nest.map_structure(lambda t: tf.stack([t] * 1), el)
            #     new_low_lvl_rep_buffer.add_batch(values_batched)
            # mod_reward = None
            # for i in range(0, self.haar_config['num_low_lvl_steps']):
            #     l_exp, _ = next(low_lvl_iter)
            #     if i % k_0 == 0:
            #         if i / k_0 < len(sliced_mod_rewards):
            #             mod_reward = sliced_mod_rewards[int(i / k_0)]
            #     else:
            #         mod_reward = l_exp.reward
            #     rew = mod_reward.numpy()
            #     rew = np.asscalar(rew)
            #
            #     indices = tf.constant([[1]])
            #     updates = tf.constant([rew])
            #     tf.tensor_scatter_nd_update(l_exp.reward, indices, updates)
            low_lvl_dataset = self.low_lvl_replay_buffer.as_dataset(sample_batch_size=1)
            iterator = iter(low_lvl_dataset)
            # calculate r_t^h: sum the k_0 low-level rewards of this episode
            low_lvl_cumulative_reward = 0
            for _ in range(self.k_0):
                traj, _ = next(iterator)
                low_lvl_cumulative_reward += traj.reward.numpy()
            high_lvl_driver._num_steps = 1
            high_lvl_driver.run()
            high_lvl_dataset = self.high_lvl_replay_buffer.as_dataset(sample_batch_size=1)
            high_lvl_iter = iter(high_lvl_dataset)
            h_exp_current, _ = next(high_lvl_iter)
            # Advantage used by modify_reward() when remapping the dataset.
            self.advantage = self.calculate_advantage(low_lvl_cumulative_reward, h_exp_current, h_exp_prev)
            low_lvl_dataset = low_lvl_dataset.map(self.modify_reward)
            iterator = iter(low_lvl_dataset)
            for _ in range(self.k_0):
                transition, _ = next(iterator)
                values_batched = tf.nest.map_structure(lambda t: tf.stack([t] * 1), transition)
                self.modified_low_lvl_rep_buffer.add_batch(values_batched)
            low_lvl_exp = self.modified_low_lvl_rep_buffer.gather_all()
            loss = self.low_lvl_agent.train(low_lvl_exp)
            if ep_count % 5 == 0:
                avg_return = self.haar_compute_avg_reward(5)
                print("Average return: " + str(avg_return))
                print("Low lvl Loss: " + str(loss.loss.numpy()))
                # print(self.l_step_observers[1].result().numpy())
                # print(self.h_step_observers[1].result().numpy())
                if self.ctx.obj['log2neptune']:
                    pass
            h_exp_prev = h_exp_current
            self.low_lvl_env.reset()
            self.low_lvl_replay_buffer.clear()
            self.modified_low_lvl_rep_buffer.clear()

    def modify_reward(self, traj, buff):
        """Dataset map fn: copy *traj* with its reward replaced by the stored advantage."""
        new_trajectory = Trajectory(traj.step_type, traj.observation, traj.action, traj.policy_info,
                                    traj.next_step_type, self.advantage, traj.discount)
        return (new_trajectory, buff)

    def calculate_advantage(self, cumulative_reward, h_experience_current, h_experience_prev):
        """Return (1/k_0) * (r + gamma * V(s_current) - V(s_prev)) using the
        high-level value network for both state-value estimates."""
        h_value_estimate_current, _ = self.high_lvl_agent.collect_policy.apply_value_network(
            h_experience_current.observation, h_experience_current.step_type, value_state=(),
            training=False)
        h_value_estimate_prev, _ = self.high_lvl_agent.collect_policy.apply_value_network(
            h_experience_prev.observation, h_experience_prev.step_type, value_state=(),
            training=False)
        adv = (1/self.k_0) * (cumulative_reward + self.haar_config['discount_factor_high_lvl'] * h_value_estimate_current.numpy() - h_value_estimate_prev.numpy())
        return adv

    def haar_compute_avg_reward(self, num_eps):
        """Evaluate: run *num_eps* episodes with the greedy policies and
        return the average episode return as a Python scalar."""
        total_return = 0.0
        for _ in range(num_eps):
            time_step_h = self.high_lvl_env.reset()
            time_step_l = self.low_lvl_env.reset()
            self.unity_env.reset()
            ep_return = 0.0
            h_action = self.high_lvl_agent.policy.action(time_step_h)
            for i in range(self.k_0):
                # Low-level observation combines the image with the
                # high-level action as the 'vector' entry.
                low_lvl_obs = {'image': time_step_l.observation['image'], 'vector': h_action.action}
                time_step_l = time_step_l._replace(
                    observation=low_lvl_obs
                )
                l_action = self.low_lvl_agent.policy.action(time_step_l)
                time_step_l = self.low_lvl_env.step(l_action)
                ep_return += time_step_l.reward
            self.low_lvl_env.reset()
            self.high_lvl_env.reset()
            self.unity_env.reset()
            total_return += ep_return
        avg_return = total_return / num_eps
        return avg_return.numpy()[0]
def create_network(name: str, input_spec, output_spec):
    """Build an actor or value network with image+vector preprocessing.

    Args:
        name: "actor_preproc" or "value_preproc".
        input_spec: observation spec (dict with 'image' and 'vector' entries,
            matching the preprocessing_layers keys below).
        output_spec: action spec (used by the actor network only).

    Returns:
        ActorDistributionNetwork or ValueNetwork.

    Raises:
        ValueError: for an unknown *name* (the original silently returned
        None, which surfaced later as an opaque failure inside PPOAgent).
    """
    preprocessing_layers = {
        'image': tf.keras.models.Sequential([tf.keras.layers.Conv2D(4, (3, 3)),
                                             tf.keras.layers.Conv2D(8, (3, 3)),
                                             tf.keras.layers.Flatten()]),
        'vector': tf.keras.layers.Dense(4)
    }
    # Concatenate flattened image features with the vector features.
    preprocessing_combiner = tf.keras.models.Sequential(
        [tf.keras.layers.Concatenate(axis=-1)])
    if name == "actor_preproc":
        return ActorDistributionNetwork(
            input_tensor_spec=input_spec,
            output_tensor_spec=output_spec,
            preprocessing_layers=preprocessing_layers,
            preprocessing_combiner=preprocessing_combiner,
            # conv_layer_params=[(16, 8, 4), (32, 4, 2)],
            fc_layer_params=(128, 75),
            activation_fn=tf.nn.elu
        )
    if name == "value_preproc":
        return ValueNetwork(
            input_tensor_spec=input_spec,
            preprocessing_layers=preprocessing_layers,
            preprocessing_combiner=preprocessing_combiner,
            # conv_layer_params=[(16, 8, 4), (32, 4, 2)],
            fc_layer_params=(75, 40),
            activation_fn=tf.nn.elu
        )
    raise ValueError("unknown network name: " + name)
|
#!/usr/bin/env python
# Mapping from run number to the fbset name configured for that run.
runToFbset = {
    273158 : "fb_all_withuTCA_consolidated3_no1240_TOTEM",
    275832 : "fb_all_withuTCA_with_CTPPS_TOT",
    273301 : "fb_all_withuTCA_consolidated3_no1240_TOTEM",
    276870 : "fb_all",
    282092 : "/daq2/eq_160913_01/fb_all_with1240_withCASTOR",
    283171 : "/daq2/eq_160913_01/fb_all_with1240_withCASTOR_w582_583",
    296702 : "/daq2/eq_170531/fb_all",
    301694 : "/daq2/eq_170622/fb_all",
}

def getFbsetFromRun(run):
    """Return the fbset name for *run*; raise for unknown runs.

    Fixed: dict.has_key() was removed in Python 3 — the `in` operator
    works identically on both Python 2 and 3.
    """
    if run not in runToFbset:
        raise Exception("don't know fbset for run " + str(run))
    return runToFbset[run]
|
# Network-wide configuration constants (presumably for a blockchain demo —
# confirm against the modules that import them).
NUM_OF_NODES = 10
BLOCK_SIZE = 10
MINING_DIFFICULTY = 4
# Key length in bits — presumably RSA; TODO confirm.
KEY_LEN = 2048
BOOTSTRAP_IP = '127.0.0.1' #For local use
#BOOTSTRAP_IP = '192.168.0.1' #For okeanos use
BOOTSTRAP_PORT = '5000'
|
import enum
from collections import namedtuple
class Room(object):
    """A grid of rows; supports both room[y] and room[y, x] indexing."""

    def __init__(self, rows, slots):
        self.rows = [Row(self, y, slots) for y in range(rows)]

    def __getitem__(self, pos):
        """room[y] -> Row; room[y, x] -> single slot."""
        if isinstance(pos, tuple):
            y, x = pos
            return self.rows[y][x]
        return self.rows[pos]

    def __setitem__(self, pos, item):
        """room[y, x] = item delegates to the row's item placement."""
        y, x = pos
        self.rows[y][x] = item

    def __len__(self):
        return len(self.rows)

    def __iter__(self):
        return iter(self.rows)

    def dump(self):
        # NOTE(review): currently a stub that always yields '' — confirm.
        return ''
class Row(object):
    """One row of slots; places items that may span several adjacent slots."""

    # A run of slots reported as a unit: the anchoring slot plus the number
    # of consecutive slots it covers (occupied width or merged free run).
    MultiSlot = namedtuple('MultiSlot', ['slot', 'size'])

    def __init__(self, room, y, size):
        self.room = room
        self.y = y
        self.slots = [Slot(self.room, self.y, x) for x in range(size)]

    def find_slot(self, item):
        """Return the first free slot whose merged free run fits item.size, else None."""
        _filter = lambda slot: not slot.slot.occupied
        for slot in filter(_filter, self.slots_iter()):
            if slot.size >= item.size:
                return slot.slot
        return None

    def add(self, item):
        """Place *item* into the first fitting free run; True on success."""
        slot = self.find_slot(item)
        if slot is not None:
            slot.put(item)
            return True
        return False

    def slots_iter(self, merged=True):
        """Yield MultiSlot runs left-to-right.

        Occupied slots advance by their content's size; consecutive free
        slots are merged into one run when merged=True.
        """
        i = 0
        while i < len(self.slots):
            slot = self.slots[i]
            if slot.occupied:
                size = slot.content.size
                yield Row.MultiSlot(slot, size)
            else:
                size = 1
                if merged:
                    # Extend the run over the following free slots.
                    for j in range(i + 1, len(self.slots)):
                        other = self.slots[j]
                        if other.occupied:
                            break
                        size += 1
                yield Row.MultiSlot(slot, size)
            i += size

    @property
    def capacity(self):
        """Total capacity of all machines placed in this row."""
        _filter = lambda slot: slot.slot.occupied and isinstance(slot.slot.content, Machine)
        slots = [slot.slot for slot in filter(_filter, self.slots_iter())]
        return sum(slot.content.capacity for slot in slots)

    def __getitem__(self, pos):
        return self.slots[pos]

    def __setitem__(self, pos, item):
        # Stamp the item into item.size consecutive slots and record the
        # anchoring (left-most) slot on the item itself.
        start = pos
        end = pos + item.size
        slots = self.slots[start:end]
        for slot in slots:
            slot.content = item
        item.slot = slots[0]

    def __len__(self):
        # NOTE(review): counts FREE slots, not total slots — surprising for
        # __len__ but preserved as callers may rely on it.
        return sum(1 for slot in self.slots if not slot.occupied)

    def __iter__(self):
        return iter(self.slots)
class Slot(object):
    """One cell of a Row; may hold a Content item (None means free)."""

    def __init__(self, room, y, x):
        self.room = room
        self.y = y
        self.x = x
        self.content = None

    def put(self, item):
        """Place *item* here via the owning row (so multi-size items span slots)."""
        self.row[self.x] = item

    @property
    def pos(self):
        """(y, x) coordinates of this slot."""
        return (self.y, self.x)

    @property
    def row(self):
        """The Row containing this slot, resolved through the room."""
        return self.room[self.y]

    @property
    def occupied(self):
        """True when something is stored in this slot."""
        return self.content is not None

    def __repr__(self):
        return '<Slot (%d, %d) = %r>' % (self.y, self.x, self.content)
class Content(object):
    # Base class for anything placeable in a Slot; remembers its anchor slot.
    def __init__(self, slot=None):
        self.slot = slot
class UnknownContent(Content):
    # Placeholder occupant that always takes exactly one slot.
    size = 1
class Machine(Content):
    """A placed machine; geometry (size) and capacity come from its spec."""

    def __init__(self, spec, slot=None):
        super().__init__(slot)
        self.spec = spec
        # Pool id; assigned externally after placement (dump() requires it).
        self.pool = None

    def dump(self):
        """Serialize as 'y x pool'; 'x' marks an unplaced machine."""
        if self.slot is None:
            return 'x'
        assert self.pool is not None
        return '%d %d %d' % (self.slot.y, self.slot.x, self.pool)

    @property
    def size(self):
        # Number of consecutive slots this machine occupies.
        return self.spec.size

    @property
    def capacity(self):
        return self.spec.capacity

    def __repr__(self):
        return '<%r %r>' % (self.spec, self.pool)
|
# !/usr/bin/env python
# encoding: utf-8
import sys
import pandas as pd
def get_login_information():
    """
    Read the stored username/password pairs from the space-separated
    'basic_information' table.
    :return: dict mapping username -> list of passwords
    """
    get_data = pd.read_csv("basic_information", sep=" ")
    deal_data = get_data.groupby("username")["password"].apply(list).to_dict()
    return deal_data
def locking(username):
    """
    Append *username* to the locking_user file, marking the account locked.
    :param username: account name to lock
    :return: None
    """
    with open("locking_user", mode='a+', encoding="utf-8") as handle:
        handle.write("\n" + username)
    return None
def get_locking_name(username):
    """
    Check whether *username* appears in the locking_user file.

    Fixed: the file handle was opened without ever being closed; it is now
    read inside a context manager. The linear flag loop is replaced by an
    equivalent membership test.
    :param username: account name to look up
    :return: True when the user is locked, else False
    """
    with open("locking_user", mode='r', encoding="utf-8") as handle:
        locked_names = handle.read().split("\n")
    return username in locked_names
def login_judge_locked(locked_sige, username, password):
    """
    Re-prompt for credentials while the entered user is locked.
    :param locked_sige: True when *username* is currently locked
    :return: the first (username, password) pair whose user is not locked
    """
    while locked_sige:
        print("该用户已锁定")
        username = input("请重新输入用户名:")
        password = input("请重新输入密码:")
        locked_sige = get_locking_name(username)
    return username, password
def login_judge_username(username, password):
    """
    Prompt until an existing username is entered.

    NOTE(review): reads the module-level `data` dict (username -> passwords)
    created in the __main__ block.
    :return: (flag, username, password); flag is True once username exists
    """
    flag = False
    data_name = data.keys()
    while not flag:
        for i in data_name:
            if username == i:
                flag = True
        if not flag:
            username = input("无此用户,请重新输入用户名:")
            password = input("请重新输入密码:")
    return flag, username, password
def login_judge_password(flag, username, password):
    """
    Check the password; after 3 wrong attempts lock the account and restart login.

    NOTE(review): only the first stored password (data[username][0]) is
    checked, and a correct password terminates the whole program via
    sys.exit() — confirm both are intended.
    :return: (flag, username, password)
    """
    if flag and data[username][0] == password:
        print("欢迎登陆")
        sys.exit()
    else:
        # Allow up to three retries before locking the account.
        for i in range(3):
            password = input("密码错误,请重新输入:")
            if data[username][0] == password:
                print("欢迎登陆")
                sys.exit()
            else:
                continue
        locking(username)
        username = input("密码多次错误,该用户被锁定,请重新输入用户名:")
        password = input("请输入密码:")
        # Restart the whole login flow with the freshly entered credentials.
        login(username, password)
    return flag, username, password
def login(username, password):
    """
    Validate the entered username/password, handling locked users,
    unknown usernames and password retries in sequence.
    :param username: candidate username
    :param password: candidate password
    :return: None
    """
    locked_sige = get_locking_name(username)
    username, password = login_judge_locked(locked_sige, username, password)
    flag, username, password = login_judge_username(username, password)
    login_judge_password(flag, username, password)
    return None
if __name__ == '__main__':
    # Load the credential table once; the judge helpers read this global.
    data = get_login_information()
    get_name = input("请输入用户名;")
    get_password = input("请输入密码:")
    login(get_name, get_password)
|
'''
Given a N X N matrix Matrix[N][N] of positive integers. There are only three possible moves from a cell Matrix[r][c].
1. Matrix[r+1][c]
2. Matrix[r+1][c-1]
3. Matrix[r+1][c+1]
Starting from any column in row 0, return the largest sum of any of the paths up to row N-1.
Input:
The first line of the input contains an integer T denoting the number of test cases. The description of T test cases follows.
The first line of each test case contains a single integer N denoting the order of matrix. Next line contains N*N integers denoting the elements of the matrix in row-major form.
Output:
Output the largest sum of any of the paths starting from any cell of row 0 to any cell of row N-1. Print the output of each test case in a new line.
'''
def Path(m):
dp = [[0 for i in range(N)]for i in range(N)]
for j in range(N):
dp[0][j] = m[0][j]
for i in range(1,N):
for j in range(N):
if j == 0:
dp[i][j] = m[i][j]+max(dp[i-1][j],dp[i-1][j+1])
elif j == N-1:
dp[i][j] = m[i][j]+max(dp[i-1][j],dp[i-1][j-1])
else:
x = max(dp[i-1][j],dp[i-1][j+1])
dp[i][j] = m[i][j] + max(x,dp[i-1][j-1])
return max(dp[N-1])
# Driver: first input line is the number of test cases T; each case is
# the matrix order N followed by N*N integers in row-major order.
for _ in range(int(input())):
    N = int(input())
    arr = [int(x) for x in input().split()]
    # Reshape the flat row-major list into an N x N matrix.
    temp=[]
    k=0
    for i in range(N):
        a=[]
        for j in range(N):
            a.append(arr[k])
            k+=1
        temp.append(a)
    print(Path(temp))
|
# -*- coding: utf-8 -*-
import requests
from bs4 import BeautifulSoup
from time import strftime
import csv
bloggerList = []  # ids of bloggers already scraped (reloaded from data.csv)
class r(object):
    """Lightweight record holding up to five scraped values (r0..r4)."""

    def __init__(self, r0=0, r1=0, r2=0, r3=0, r4=0):
        self.r0 = r0
        self.r1 = r1
        self.r2 = r2
        self.r3 = r3
        self.r4 = r4
def getHref(user_info):
    """Return the user-id path segment from the first <a> href in user_info."""
    href = user_info.select('a')[0].get('href')
    return href.split('/')[2]
def getSoup(site):
    """Fetch *site* (TLS verification disabled) and parse it as HTML."""
    response = requests.get(site, verify=False)
    return BeautifulSoup(response.text, "html.parser")
def getNowTime():
    """Return the current local time as 'YYYY-MM-DD HH:MM:SS'."""
    return strftime('%Y-%m-%d %H:%M:%S')
def getNickname(soup):
    """Return the text of the first .nickname element, or None if absent."""
    matches = soup.select('.nickname')
    if matches:
        return matches[0].text
    return None
def getProfileRight(soup):
    """Collect the four .mbd-aut-right profile fields into an r record.

    Returns an r of 'none' placeholders when fewer than four fields are
    present.  Fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to ``except Exception``.
    """
    profile = soup.select('.mbd-aut-right')
    try:
        return r(profile[0].text, profile[1].text, profile[2].text, profile[3].text, 'none')
    except Exception:
        return r('none', 'none', 'none', 'none')
def getProfileRightH(soup):
    """Collect the five .mbd-aut-right-h profile fields into an r record.

    Returns an r of 'error' placeholders on any scraping failure.
    Fix: the bare ``except:`` also swallowed SystemExit and
    KeyboardInterrupt; narrowed to ``except Exception``.
    """
    profile = soup.select('.mbd-aut-right-h')
    try:
        return r(profile[0].text, profile[1].text, profile[2].text, profile[3].text, profile[4].text)
    except Exception:
        return r('error', 'error', 'error',' error', 'error')
def getPost(soup):
    """Summarize the review-stats block into an r record.

    On success returns r(a[1].text, s) where s is the newline-joined
    text of the remaining anchors.  NOTE(review): a[1] is read *after*
    the first two anchors were deleted — confirm the index against the
    live markup.  Fix: bare ``except:`` narrowed to ``except Exception``
    so SystemExit/KeyboardInterrupt are no longer swallowed.
    """
    opinion = soup.select('.beauty-diary-opinion-stat')
    try:
        a = opinion[0].select('a')
        del a[0:2]
        s = ''
        for i in a:
            s = s + i.text + '\n'
        return r(a[1].text, s)
    except Exception:
        return r('error', 'error')
def getBuy(soup):
    """Return the counts from the two "menu-item" tabs as r(buy, fire).

    Each tab's text looks like 'label(123)'; the number inside the
    parentheses is extracted.  Fix: bare ``except:`` narrowed to
    ``except Exception`` so SystemExit/KeyboardInterrupt pass through.
    """
    try:
        div = soup.find_all("div", class_="menu-item v-align-middle ")
        return r(div[0].text.split('(')[1].split(')')[0], div[1].text.split('(')[1].split(')')[0])
    except Exception:
        return r('error', 'error')
def checkBlogger(blogger):
    """True if *blogger* was already scraped, or is the skipped id 438359.

    Fixes: the original loop had unreachable ``break`` statements after
    ``return``, and the u'438359' check lived in an ``elif`` inside the
    loop, so it was silently skipped whenever bloggerList was empty.
    """
    return blogger in bloggerList or blogger == u'438359'
def checkUpdate(bloggerName, L):
    """Return True if *bloggerName* occurs in the container L.

    Idiom fix: ``if x in L: return True / else: return False`` collapsed
    to a direct membership test.
    """
    return bloggerName in L
if __name__ == '__main__':
    # NOTE(review): this script is Python 2 (raw_input below, csv files
    # opened in binary mode) — it will not run unmodified on Python 3.
    global bloggerList  # no-op at module level; bloggerList is already global
    # Reload ids of bloggers scraped in earlier runs; a missing data.csv
    # just means a fresh start (best-effort, deliberately swallowed).
    try:
        updatefile = open('data.csv', 'rb')
        upfileCsv = csv.reader(updatefile, dialect='excel', quoting=csv.QUOTE_MINIMAL)
        for i in upfileCsv:
            bloggerList = bloggerList + i
        updatefile.close()
    except:
        p=1
    urlBlogger = 'https://www.urcosme.com/new-reviews'
    blogger = open('blogger.csv', 'a')
    bloggerCsv = csv.writer(blogger, dialect='excel', quoting=csv.QUOTE_MINIMAL)
    # Output CSV header (column labels are runtime output — kept verbatim).
    title = ['建立時間 ', '更新時間', '來源網站', '部落客名稱', '網站Profile連結', '性別', '膚質',
             '居住地', '年齡', '星座', '關於我', '個人特質', '常逛的地方', '我的Blog', '喜歡的美容流行雜誌',
             '全部心得數', '心得分類', '買過數', '升火數']
    bloggerCsv.writerow(title)
    # Walk the paginated review listing until a page comes back (nearly)
    # empty — the < 40000 byte length check is the stop heuristic.
    for page in range(1, 1000):
        url = urlBlogger + '?page=' + str(page)
        print ('------------'), (page), ('-------------')
        res = requests.get(url, verify=False)
        if len(res.text) < 40000:
            break
        soup = BeautifulSoup(res.text, "html.parser")
        for user_info in soup.select('.user-info'):
            href = getHref(user_info)
            if checkBlogger(href):
                continue  # already scraped in this or a previous run
            reviewsHref = 'https://www.urcosme.com/beauty_diary/' + href + '/reviews'
            profileHref = 'https://www.urcosme.com/beauty_diary/' + href + '/profile'
            reviewsSoup = getSoup(reviewsHref)
            profileSoup = getSoup(profileHref)
            createTime = getNowTime()
            updateTime = 'none'
            sourceSite = reviewsHref
            bloggerName = getNickname(reviewsSoup)
            bloggerList.append(href)
            print (href)
            siteProfileLink = profileHref
            # Scrape the profile/review widgets into r records.
            profileRight = getProfileRight(profileSoup)
            profileRightH = getProfileRightH(profileSoup)
            post = getPost(reviewsSoup)
            num = getBuy(reviewsSoup)
            sex = profileRight.r0
            skin = profileRight.r1
            location = 'tempNone'  # NOTE(review): placeholder — never scraped
            age = profileRight.r2
            constellation = profileRight.r3
            aboutMe = profileRightH.r0
            personalStyle = profileRightH.r1
            placeToGo = profileRightH.r2
            myBlog = profileRightH.r3
            magazine = profileRightH.r4
            allPostNum = post.r0
            postCategory = post.r1
            buyNum = num.r0
            fireNum = num.r1
            # One row per blogger, UTF-8 encoded for the Python 2 csv writer.
            csvList = [createTime.encode('utf8'), updateTime.encode('utf8'), sourceSite.encode('utf8'), bloggerName.encode('utf8'), siteProfileLink.encode('utf8'), sex.encode('utf8'), skin.encode('utf8'),
                       location.encode('utf8'), age.encode('utf8'), constellation.encode('utf8'), aboutMe.encode('utf8'), personalStyle.encode('utf8'), placeToGo.encode('utf8'), myBlog.encode('utf8'),
                       magazine.encode('utf8'), allPostNum.encode('utf8'), postCategory.encode('utf8'), buyNum.encode('utf8'), fireNum.encode('utf8')]
            bloggerCsv.writerow(csvList)
    # Persist the full list of scraped ids for the next run.
    f = open('data.csv', 'wb')
    c = csv.writer(f, dialect='excel', quoting=csv.QUOTE_MINIMAL)
    c.writerow(bloggerList)
    f.close()
    blogger.close()
    print ('---------finish---------')
    x = raw_input()  # keep the console window open until Enter is pressed
pass |
MAX = 100 + 1  # NOTE(review): unused below — presumably a leftover bound
NEG_INF = -10**9  # sentinel for "unreachable from the source" in Bellman-Ford
T = int(input())  # number of test cases
def bellmanFord(n, edges):
    """Return True iff a positive-weight cycle is reachable from node 0.

    Runs n-1 maximizing Bellman-Ford relaxation passes from node 0; if a
    further pass can still improve some distance, a positive cycle is
    reachable.

    Fix: the NEG_INF sentinel was a module-level global; it is now a
    local with the same value, making the function self-contained.

    :param n: number of nodes (node 0 is the source).
    :param edges: iterable of (v, u, w) directed weighted edges.
    :return: True when a reachable positive cycle exists.
    """
    NEG_INF = -10**9  # "not yet reachable" marker
    dist = [NEG_INF] * n
    dist[0] = 0
    for _ in range(n - 1):
        for v, u, w in edges:
            if dist[v] == NEG_INF:
                continue  # cannot relax out of an unreached node
            if dist[u] < dist[v] + w:
                dist[u] = dist[v] + w
    # One extra pass: any further improvement proves a positive cycle.
    for v, u, w in edges:
        if dist[v] == NEG_INF:
            continue
        if dist[u] < dist[v] + w:
            return True
    return False
for _ in range(T):
    # n nodes and m edges; edges are given 1-based and stored 0-based.
    n, m = map(int, input().split())
    edges = []
    for k in range(m):
        x, y, t = map(int,input().split())
        edges.append((x-1,y-1,t))
print(["No", "Yes"][bellmanFord(n, edges)]) |
import json
import socket
print('Server started.')
NUM_OF_TURNS = 10  # NOTE(review): unused in this script — confirm before removing
# Alternative victory-condition sets kept around for quick experimentation:
# VICTORIES_CONDITIONS = ['Max', 'Min', 'Linear', 'Quadratic', 'ZeroM', 'SumNeg', 'SumPos']
# VICTORIES_CONDITIONS = ['Max', 'Min']
# VICTORIES_CONDITIONS = ['ZeroM']
VICTORIES_CONDITIONS1 = ['Min']
VICTORIES_CONDITIONS2 = ['Min']
# VICTORIES_CONDITIONS2 = ['SumNeg', 'SumPos']
# VICTORIES_CONDITIONS1 = ['Linear']
# VICTORIES_CONDITIONS2 = ['Min']
result_table = {}  # (p1 tag, p2 tag) -> match result, filled by send_to_server
# Player scripts whose source is uploaded to the judge server.
p1_name = 'assign_stupid2_bkp.py'
p2_name = 'player_new_two.py'
def send_to_server(js):
    """Open socket and send the json string js to server with EOM appended, and wait
    for a newline-terminated reply.

    The parsed reply is printed and recorded in the module-level
    result_table keyed by the two player tags.

    Fixes: use sendall() — plain send() may transmit only part of the
    payload — and close the socket in a finally block so it is not
    leaked when recv/parsing raises.

    js - json object to send to server
    """
    clientsocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        clientsocket.connect(('128.250.106.25', 5002))
        clientsocket.sendall("""{}EOM""".format(js).encode('utf-8'))
        data = ''
        while data == '' or data[-1] != "\n":
            data += clientsocket.recv(1024).decode('utf-8')
        # print(data)
        # The reply carries an 8-character prefix before the JSON payload.
        parsed = json.loads(data[8:])
        result_table[(parsed[1][1], parsed[2][1])] = parsed[-2]
        print(json.dumps(parsed, indent=4, sort_keys=True))
    finally:
        clientsocket.close()
# NOTE(review): these file handles are never closed (leaked until exit).
p1 = open(p1_name, 'r').read()
p2 = open(p2_name, 'r').read()
print('Tested Player goes first')
for vt1 in VICTORIES_CONDITIONS1:
    for vt2 in VICTORIES_CONDITIONS2:
        send_to_server(json.dumps({"cmd": "TEST", "syn": 12, "name": "The Last Jedi", "data": p1,
                                   "data2": p2, "vt1": vt1, "vt2": vt2}))
print(str(result_table))
result_table = {}
print('Tested Player goes second')
# Same matchups with roles swapped: p2 is uploaded as the first player.
for vt1 in VICTORIES_CONDITIONS1:
    for vt2 in VICTORIES_CONDITIONS2:
        send_to_server(json.dumps({"cmd": "TEST", "syn": 12, "name": "The Last Jedi", "data": p2,
                                   "data2": p1, "vt1": vt2, "vt2": vt1}))
print(str(result_table))
print('Simulation finished')
|
import numpy as np
from task import Task
from collections import defaultdict, deque
import sys
class Quadcop_Policy():
    """A simple linear policy plus a tabular SARSA trainer for a Task.

    The environment object is expected to expose state_size,
    action_size, action_low, action_high, reset() and step(action)
    (see task.Task — assumed interface, confirm against that module).

    Fix: ``np.int`` (removed in NumPy 1.24) replaced with the builtin
    ``int``; behavior is otherwise unchanged.
    """

    def __init__(self, task):
        # Task (environment) information
        self.task = task
        self.state_size = task.state_size
        self.action_size = task.action_size
        self.action_low = task.action_low
        self.action_high = task.action_high
        self.action_range = self.action_high - self.action_low
        # Weights for a simple linear policy (state_space x action_space),
        # scaled so initial actions land in a reasonable range.
        self.w = np.random.normal(
            size=(self.state_size, self.action_size),
            scale=(self.action_range / (2 * self.state_size)))
        # Episode variables
        self.reset_episode()

    def reset_episode(self):
        """Reset per-episode counters and the environment; return the start state."""
        self.total_reward = 0.0
        self.count = 0
        state = self.task.reset()
        return state

    def step(self, action):
        """Forward one environment step; returns (next_state, reward, done)."""
        next_state, reward, done = self.task.step(action)
        return next_state, reward, done

    def act(self, state):
        """Choose an action for *state* with the linear policy (state @ w)."""
        action = np.dot(state, self.w)  # simple linear policy
        return action

    def update_Q(self, Qsa, Qsa_next, reward, alpha, gamma):
        """One-step SARSA update of the action-value estimate.

        Returns Qsa moved toward the TD target reward + gamma * Qsa_next
        by step size alpha.
        """
        return Qsa + (alpha * (reward + (gamma * Qsa_next) - Qsa))

    def epsilon_greedy_probs(self, Q_s, i_episode, eps=None):
        """Action probabilities for an epsilon-greedy policy over Q_s.

        epsilon decays as 1/i_episode unless a fixed eps is supplied.
        """
        epsilon = 1.0 / i_episode
        if eps is not None:
            epsilon = eps
        nA = self.action_size
        policy_s = np.ones(nA) * epsilon / nA
        policy_s[np.argmax(Q_s)] = 1 - epsilon + (epsilon / nA)
        return policy_s

    def sarsa(self, num_episodes, alpha, gamma=1.0):
        """Run SARSA for num_episodes and return the Q table.

        NOTE(review): this method is known to be incomplete — see the
        author's comment block below about the action shape, and the
        tuple-indexed Q arrays; treat the returned Q as experimental.
        """
        # initialize action-value function (empty dictionary of arrays)
        nA = self.action_size
        Q = defaultdict(lambda: np.zeros(nA))
        # initialize performance monitor
        plot_every = 100
        tmp_scores = deque(maxlen=plot_every)
        scores = deque(maxlen=num_episodes)
        # loop over episodes
        for i_episode in range(1, num_episodes + 1):
            # monitor progress
            if i_episode % 100 == 0:
                print("\rEpisode {}/{}".format(i_episode, num_episodes), end="")
                sys.stdout.flush()
            # initialize score
            score = 0
            # begin an episode, observe S
            state = self.reset_episode()
            nS = self.state_size
            # get epsilon-greedy action probabilities
            # NOTE(review): Q[nS] keys the table by the state *size*, not
            # the state — looks like a bug, preserved as-is.
            policy_s = self.epsilon_greedy_probs(Q[nS], i_episode)
            # pick action
            #action = np.random.choice(np.arange(nA), p=policy_s)
            action_all = np.random.uniform(self.action_low, self.action_high) * policy_s
            action = int(action_all[0])  # builtin int: np.int was removed in NumPy 1.24
            action = [action, action, action, action]
            # --------------------------------------------------------
            # Getting stuck here. "action" is of size 1 (between 0 and 3)
            # But what I need is a 4x1 vector ranging between
            # task.action_low and task.action_high to run:
            # "next_state, reward, done = self.task.step(action)"
            # --------------------------------------------------------
            # limit number of time steps per episode
            for t_step in np.arange(300):
                # take action A, observe R, S'
                next_state, reward, done = self.task.step(action)
                #next_state, reward, done = self.step(action)
                # add reward to score
                score += reward
                if not done:
                    # Convert to tuple to avoid errors (tuples are hashable dict keys)
                    state = tuple(state)
                    next_state = tuple(next_state)
                    # get epsilon-greedy action probabilities
                    policy_s = self.epsilon_greedy_probs(Q[next_state], i_episode)
                    # pick next action A'
                    next_action = np.random.uniform(self.action_low, self.action_high) * policy_s
                    # Convert to tuple to avoid errors
                    action = tuple(action)
                    next_action = tuple(next_action)
                    # update TD estimate of Q
                    Q[state][action] = self.update_Q(Q[state][action], Q[next_state][next_action],
                                                     reward, alpha, gamma)
                    # S <- S'
                    state = next_state
                    # A <- A'
                    action = next_action
                if done:
                    # update TD estimate of Q
                    Q[state][action] = self.update_Q(Q[state][action], 0, reward, alpha, gamma)
                    # append score
                    tmp_scores.append(score)
                    break
            if (i_episode % plot_every == 0):
                scores.append(np.mean(tmp_scores))
        # plot performance
        #plt.plot(np.linspace(0, num_episodes, len(scores), endpoint=False), np.asarray(scores))
        #plt.xlabel('Episode Number')
        #plt.ylabel('Average Reward (Over Next %d Episodes)' % plot_every)
        #plt.show()
        # print best 100-episode performance
        print(('Best Average Reward over %d Episodes: ' % plot_every), np.max(scores))
        return Q
|
# -*- coding: utf-8 -*-
"""Parser has another main Curly's function, :py:func:`parse`.
The main idea of parsing is to take a stream of
tokens and convert it into `abstract syntax tree
<https://en.wikipedia.org/wiki/Abstract_syntax_tree>`_. Each node in the
tree is present by :py:class:`Node` instances and each instance has a
list of nodes (therefore - tree).
:py:func:`parse` produces such tree and it is possible to render it with
method :py:meth:`Node.process`.
Example:
.. code-block:: python3
>>> from curly.lexer import tokenize
>>> from curly.parser import parse
>>> text = '''\\
... Hello! My name is {{ name }}.\\
... {% if likes %}And I like these things: {% loop likes %}\\
... {{ item }},{% /loop %}{% /if %}'''
>>> tokens = tokenize(text)
>>> print(repr(parse(tokens)))
[{'done': True,
'nodes': [],
'raw_string': "<LiteralToken(raw=' Hello! My name is ', contents={\
'text': "
"' Hello! My name is '})>",
'text': ' Hello! My name is ',
'type': 'LiteralNode'},
{'done': True,
'expression': ['name'],
'nodes': [],
'raw_string': "<PrintToken(raw='{{ name }}', contents={'expression': "
"['name']})>",
'type': 'PrintNode'},
{'done': True,
'nodes': [],
'raw_string': "<LiteralToken(raw='.', contents={'text': '.'})>",
'text': '.',
'type': 'LiteralNode'},
{'done': True,
'else': {},
'expression': ['likes'],
'nodes': [{'done': True,
'nodes': [],
'raw_string': "<LiteralToken(raw='And I like these things: ', "
"contents={'text': 'And I like these things: '})>",
'text': 'And I like these things: ',
'type': 'LiteralNode'},
{'done': True,
'expression': ['likes'],
'nodes': [{'done': True,
'expression': ['item'],
'nodes': [],
'raw_string': "<PrintToken(raw='{{ item }}', "
"contents={'expression': ['item']})>",
'type': 'PrintNode'},
{'done': True,
'nodes': [],
'raw_string': "<LiteralToken(raw=',', contents=\
{'text': "
"','})>",
'text': ',',
'type': 'LiteralNode'}],
'raw_string': "<StartBlockToken(raw='{% loop likes %}', "
"contents={'expression': ['likes'], 'function': "
"'loop'})>",
'type': 'LoopNode'}],
'raw_string': "<StartBlockToken(raw='{% if likes %}', contents=\
{'expression': "
"['likes'], 'function': 'if'})>",
'type': 'IfNode'}]
"""
import collections
import pprint
import subprocess
from curly import exceptions
from curly import lexer
from curly import utils
class ExpressionMixin:
    """Mixin adding expression access/evaluation to :py:class:`Node` types."""

    @property
    def expression(self):
        """*expression* list stored in the underlying token."""
        return self.token.contents["expression"]

    def evaluate_expression(self, context):
        """Resolve this node's expression against *context*.

        :param dict context: Variables for template rendering.
        :return: Evaluated expression.
        """
        name = subprocess.list2cmdline(self.expression)
        return utils.resolve_variable(name, context)
class Node(collections.UserList):
    """A single node of the template AST.

    Rendering is exposed two ways: :py:meth:`emit` yields rendered
    chunks of text, and :py:meth:`process` joins them into one string.
    They always agree, so subclasses normally override only
    :py:meth:`emit`; use :py:meth:`process` when you want the final
    rendered string.

    :param token: Token which produced that node.
    :type token: :py:class:`curly.lexer.Token`
    """

    def __init__(self, token):
        super().__init__()
        self.token = token
        self.done = False

    def __str__(self):
        template = ("<{0.__class__.__name__}(done={0.done}, token={0.token!r}, "
                    "data={0.data!r})>")
        return template.format(self)

    def __repr__(self):
        return pprint.pformat(self._repr_rec())

    def _repr_rec(self):
        # Recursive dict form used by __repr__ on this and all children.
        return {
            "raw_string": repr(self.token) if self.token else "",
            "type": self.__class__.__name__,
            "done": self.done,
            "nodes": [child._repr_rec() for child in self.data]}

    @property
    def raw_string(self):
        """Raw text of the related token (e.g. literally ``{{ var }}``)."""
        return self.token.raw_string

    def process(self, context):
        """Render this node into a single string.

        :param dict context: Dictionary with context variables.
        :return: Rendered template.
        :rtype: str
        """
        return "".join(self.emit(context))

    def emit(self, context):
        """Yield rendered text chunks from this node's children.

        Axiom: ``"".join(self.emit(context)) == self.process(context)``.

        :param dict context: Dictionary with context variables.
        :rtype: Generator[str]
        """
        for child in self:
            yield from child.emit(context)
class RootNode(Node):
    """Top-level AST node holding the entire parsed template.

    :param list[Node] nodes: Child nodes of the root.
    """

    def __init__(self, nodes):
        super().__init__(None)
        self.data = nodes
        self.done = True

    def __repr__(self):
        return pprint.pformat(self.data)
class LiteralNode(Node):
    """AST node for a chunk of literal template text.

    One-to-one representation of :py:class:`curly.lexer.LiteralToken`.

    :param token: Token which produced that node.
    :type token: :py:class:`curly.lexer.LiteralToken`
    """

    def __init__(self, token):
        super().__init__(token)
        self.done = True  # literals need no further squashing

    def _repr_rec(self):
        description = super()._repr_rec()
        description["text"] = self.text
        return description

    @property
    def text(self):
        """The literal text to render."""
        return self.token.contents["text"]

    def emit(self, _):
        yield self.text
class PrintNode(ExpressionMixin, Node):
    """AST node for a print token such as ``{{ var }}``.

    One-to-one representation of :py:class:`curly.lexer.PrintToken`.

    :param token: Token which produced that node.
    :type token: :py:class:`curly.lexer.PrintToken`
    """

    def __init__(self, token):
        super().__init__(token)
        self.done = True  # print tags enclose nothing, so always complete

    def _repr_rec(self):
        description = super()._repr_rec()
        description["expression"] = self.expression
        return description

    def emit(self, context):
        yield str(self.evaluate_expression(context))
class BlockTagNode(ExpressionMixin, Node):
    """AST node for a block tag token like ``{% if something %}``.

    One-to-one representation of
    :py:class:`curly.lexer.StartBlockToken`.
    """

    @property
    def function(self):
        """*function* name stored in the underlying token."""
        return self.token.contents["function"]
class ConditionalNode(BlockTagNode):
    """Placeholder node grouping one whole if/elif/else flow.

    It is not a real node of the final AST: it only marks where a
    conditional group begins so that nested conditions attach their
    :py:class:`IfNode`/:py:class:`ElseNode` chains to the correct
    parent; on closing it delegates rendering to the chain head.

    :param token: First token of the ``if`` block.
    :type token: :py:class:`curly.lexer.BlockTagNode`
    """

    def __init__(self, token):
        super().__init__(token)
        self.ifnode = None  # head of the if/elif/else chain, set on close

    def _repr_rec(self):
        description = super()._repr_rec()
        description["if"] = self.ifnode._repr_rec() if self.ifnode else {}
        return description

    def emit(self, context):
        return self.ifnode.emit(context)
class IfNode(BlockTagNode):
    """AST node for an ``if`` (or ``elif``) statement.

    Conditions form a chain: when this node's expression is falsy,
    rendering is delegated to ``elsenode`` — the next ``elif`` branch or
    the final ``else`` — so one node type covers the whole
    if/elif/.../else ladder inside a :py:class:`ConditionalNode`.
    """

    def __init__(self, token):
        super().__init__(token)
        self.elsenode = None  # next branch of the chain, if any

    def _repr_rec(self):
        description = super()._repr_rec()
        description["else"] = self.elsenode._repr_rec() if self.elsenode else {}
        description["expression"] = self.expression
        return description

    def emit(self, context):
        if self.evaluate_expression(context):
            yield from super().emit(context)
        elif self.elsenode:
            yield from self.elsenode.emit(context)
class ElseNode(BlockTagNode):
    """AST node for an ``else`` statement.

    It is the unconditional tail of an if/elif chain; see
    :py:class:`IfNode` for how the chain is rendered.
    """
class LoopNode(BlockTagNode):
    """AST node for a ``loop`` statement.

    Renders its children once per element of the evaluated expression,
    injecting an ``item`` variable into a copy of the context (the
    incoming context is left untouched).  Dicts are walked in sorted
    key order and yield ``{"key": k, "value": v}`` items; every other
    iterable yields its elements as-is.
    """

    def _repr_rec(self):
        description = super()._repr_rec()
        description["expression"] = self.expression
        return description

    def emit(self, context):
        items = self.evaluate_expression(context)
        scoped = context.copy()
        if isinstance(items, dict):
            for key in sorted(items):
                scoped["item"] = {"key": key, "value": items[key]}
                yield from super().emit(scoped)
        else:
            for element in items:
                scoped["item"] = element
                yield from super().emit(scoped)
def parse(tokens):
    """One of the main functions (see also :py:func:`curly.lexer.tokenize`).

    The idea of parsing is simple: we have a flow of well defined tokens
    taken from :py:func:`curly.lexer.tokenize` and now we need to build
    `AST tree <https://en.wikipedia.org/wiki/Abstract_syntax_tree>`_
    from them.

    Curly does that maintaining a single stack. There could be different
    implementations, some of them more efficient, but we are using
    single stack implementation because it is most obvious way of
    representing and idea on current scale of the template language. If
    you decide to fork one day, please consider other options.

    Please read following stuff before (at least Wikipedia articles):

    * https://en.wikipedia.org/wiki/Shift-reduce_parser
    * https://en.wikipedia.org/wiki/LR_parser
    * https://en.wikipedia.org/wiki/Shunting-yard_algorithm
    * https://en.wikipedia.org/wiki/Operator-precedence_parser
    * http://blog.reverberate.org/2013/09/\
ll-and-lr-in-context-why-parsing-tools.html
    * http://blog.reverberate.org/2013/07/ll-and-lr-parsing-demystified.html

    Current implementation is *LR(0)* parser. Feel free to compose
    formal grammar if you want (:py:class:`curly.lexer.LiteralToken`
    is terminal, everything except of it - non terminal). I am going
    to describe just a main idea in a simple words pretending that no
    theory was created before.

    Now, algorithm.

    #. Read from the Left (look, ma! *L* from *LR*!) of stream,
       without returning back. This allow us to use ``tokens`` as
       an iterator.
    #. For any token, check its class and call corresponding function
       which manages it.

       .. list-table::
          :header-rows: 1

          * - Token type
            - Parsing function
          * - :py:class:`curly.lexer.LiteralToken`
            - :py:func:`parse_literal_token`
          * - :py:class:`curly.lexer.PrintToken`
            - :py:func:`parse_print_token`
          * - :py:class:`curly.lexer.StartBlockToken`
            - :py:func:`parse_start_block_token`
          * - :py:class:`curly.lexer.EndBlockToken`
            - :py:func:`parse_end_block_token`

    #. After all tokens are consumed, check that all nodes in the
       stack are done (``done`` attribute) and build resulting
       :py:class:`RootNode` instance.

    The main idea is to maintain stack. Stack is a list of the
    children for the root node. We read a token by token and put
    corresponding nodes into stack. Each node has 2 states: done or
    not done. Done means that node is ready and processed, not
    done means that further squashing will be performed when
    corresponding terminating token will come to the parser.

    So, let's assume that we have a following list of tokens (stack on
    the left, incoming tokens on the right. Top of the token stream is
    the same one which is going to be consumed).

    Some notation: exclamation mark before a node means that node is
    finished; it means that it is ready to participate into rendering,
    finalized.

    ::

        |                      |    | LiteralToken          |
        |                      |    | StackBlockToken(if)   |
        |                      |    | PrintToken            |
        |                      |    | StartBlockToken(elif) |
        |                      |    | StartBlockToken(loop) |
        |                      |    | PrintToken            |
        |                      |    | EndBlockToken(loop)   |
        |                      |    | EndBlockToken(if)     |

    Read ``LiteralToken``. It is fine as is, so wrap it into
    :py:class:`LiteralNode` and put it into stack.

    ::

        |                      |    |                       |
        |                      |    | StackBlockToken(if)   |
        |                      |    | PrintToken            |
        |                      |    | StartBlockToken(elif) |
        |                      |    | StartBlockToken(loop) |
        |                      |    | PrintToken            |
        |                      |    | EndBlockToken(loop)   |
        | !LiteralNode         |    | EndBlockToken(if)     |

    And now it is a time for :py:class:`curly.lexer.StartBlockToken`.
    A kind remember, this is a start tag of ``{% function expression
    %}...{% /function %}`` construction. The story about such tag is
    that it has another tokens it encloses. So other tokens has to
    be subnodes of related node. This would be done of reduce phase
    described in a few paragraphs below but right now pay attention to
    ``done`` attribute of the node: if it is ``False`` it means that we
    still try to collect all contents of this block subnodes. ``True``
    means that node is finished.

    Function of this token is ``if`` so we need to add
    :py:class:`ConditionalNode` as a marker of the closure and the first
    :py:class:`IfNode` in this enclosement.

    ::

        |                      |    |                       |
        |                      |    |                       |
        |                      |    | PrintToken            |
        |                      |    | StartBlockToken(elif) |
        |                      |    | StartBlockToken(loop) |
        | IfNode               |    | PrintToken            |
        | ConditionalNode      |    | EndBlockToken(loop)   |
        | !LiteralNode         |    | EndBlockToken(if)     |

    The upcoming :py:class:`curly.lexer.PrintToken` is a single
    functional node: to emit rendered template, we need to resolve
    its *expression* in given context. This is one finished node
    :py:class:`PrintNode`.

    ::

        |                      |    |                       |
        |                      |    |                       |
        |                      |    |                       |
        |                      |    | StartBlockToken(elif) |
        | !PrintNode           |    | StartBlockToken(loop) |
        | IfNode               |    | PrintToken            |
        | ConditionalNode      |    | EndBlockToken(loop)   |
        | !LiteralNode         |    | EndBlockToken(if)     |

    Now it is a time for next :py:class:`curly.lexer.StartBlockToken`
    which is responsible for ``{% elif %}``. It means, that
    scope of first, initial ``if`` is completed, but not for
    corresponding :py:class:`ConditionalNode`! Anyway, we can
    safely add :py:class:`PrintNode` from the top of the stack to
    nodelist of :py:class:`IfNode`. To do so, we pop stack till that
    :py:class:`IfNode` and add popped content to the nodelist. After
    that, we can finally mark :py:class:`IfNode` as done.

    ::

        |                      |    |                       |
        |                      |    |                       |
        |                      |    |                       |
        |                      |    | StartBlockToken(elif) |
        |                      |    | StartBlockToken(loop) |
        | !IfNode(!PrintNode)  |    | PrintToken            |
        | ConditionalNode      |    | EndBlockToken(loop)   |
        | !LiteralNode         |    | EndBlockToken(if)     |

    Stack was rewinded and we can add new :py:class:`IfNode` to
    condition.

    ::

        |                      |    |                       |
        |                      |    |                       |
        |                      |    |                       |
        |                      |    |                       |
        | IfNode               |    | StartBlockToken(loop) |
        | !IfNode(!PrintNode)  |    | PrintToken            |
        | ConditionalNode      |    | EndBlockToken(loop)   |
        | !LiteralNode         |    | EndBlockToken(if)     |

    Next token is a loop (``{% loop items %}``). The same story as with
    :py:class:`IfNode`: emit :py:class:`LoopNode` to the top of the
    stack.

    ::

        |                      |    |                       |
        |                      |    |                       |
        |                      |    |                       |
        | LoopNode             |    |                       |
        | IfNode               |    |                       |
        | !IfNode(!PrintNode)  |    | PrintToken            |
        | ConditionalNode      |    | EndBlockToken(loop)   |
        | !LiteralNode         |    | EndBlockToken(if)     |

    Add :py:class:`curly.lexer.PrintToken` as a :py:class:`PrintNode`.

    ::

        |                      |    |                       |
        |                      |    |                       |
        | PrintToken           |    |                       |
        | LoopNode             |    |                       |
        | IfNode               |    |                       |
        | !IfNode(!PrintNode)  |    |                       |
        | ConditionalNode      |    | EndBlockToken(loop)   |
        | !LiteralNode         |    | EndBlockToken(if)     |

    Next token is :py:class:`curly.lexer.EndBlockToken` for the loop
    ( ``{% /loop %}``). So we can rewind the stack to the loop node,
    putting all popped nodes as a nodelist for :py:class:`LoopNode`.

    ::

        |                        |    |                     |
        |                        |    |                     |
        |                        |    |                     |
        | !LoopNode(!PrintNode)  |    |                     |
        | IfNode                 |    |                     |
        | !IfNode(!PrintNode)    |    |                     |
        | ConditionalNode        |    |                     |
        | !LiteralNode           |    | EndBlockToken(if)   |

    And it is a time for :py:class:`curly.lexer.EndBlockToken` for
    ``if`` (``{% /if %}``). Now we need to rewind stack twice. First
    rewind is to complete :py:class:`IfNode` which is almost on the top
    of the stack.

    ::

        |                          |    |                   |
        |                          |    |                   |
        |                          |    |                   |
        | !LoopNode(!PrintNode)    |    |                   |
        | !IfNode(!LoopNode(...))  |    |                   |
        | !IfNode(!PrintNode)      |    |                   |
        | ConditionalNode          |    |                   |
        | !LiteralNode             |    | EndBlockToken(if) |

    And the second rewind is to finish nearest
    :py:class:`ConditionalNode`.

    ::

        |                                   |    |          |
        |                                   |    |          |
        |                                   |    |          |
        |                                   |    |          |
        |                                   |    |          |
        |                                   |    |          |
        | !ConditionalNode(!IfNode,!IfNode) |    |          |
        | !LiteralNode                      |    |          |

    And that is all. Token list is empty, so it is a time to compose
    relevant :py:class:`RootNode` with the contents of the stack.

    ::

        !RootNode(!LiteralNode, !ConditionalNode(!IfNode, !IfNode))

    We've just made AST tree.

    :param token: A stream with tokens.
    :type token: Iterator[:py:class:`curly.lexer.Token`]
    :return: Parsed AST tree.
    :rtype: :py:class:`RootNode`
    :raises:
        :py:exc:`curly.exceptions.CurlyParserError`: if token is unknown.
    """
    stack = []
    # Single-pass dispatch on the concrete token class; every handler
    # returns the (possibly rewound) stack.
    for token in tokens:
        if isinstance(token, lexer.LiteralToken):
            stack = parse_literal_token(stack, token)
        elif isinstance(token, lexer.PrintToken):
            stack = parse_print_token(stack, token)
        elif isinstance(token, lexer.StartBlockToken):
            stack = parse_start_block_token(stack, token)
        elif isinstance(token, lexer.EndBlockToken):
            stack = parse_end_block_token(stack, token)
        else:
            raise exceptions.CurlyParserUnknownTokenError(token)
    # An unfinished node here means an unclosed {% ... %} block.
    root = RootNode(stack)
    validate_for_all_nodes_done(root)
    return root
def parse_literal_token(stack, token):
    """Handle a :py:class:`curly.lexer.LiteralToken`.

    Literals need no further processing: a :py:class:`LiteralNode` is
    simply pushed on top of the stack.

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.LiteralToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    """
    node = LiteralNode(token)
    stack.append(node)
    return stack
def parse_print_token(stack, token):
    """Handle a :py:class:`curly.lexer.PrintToken`.

    Print tags are always complete on their own: a
    :py:class:`PrintNode` is simply pushed on top of the stack.

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.PrintToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    """
    node = PrintNode(token)
    stack.append(node)
    return stack
def parse_start_block_token(stack, token):
    """Handle a :py:class:`curly.lexer.StartBlockToken`.

    Dispatches on the token *function* to the matching handler:

    .. list-table::
       :header-rows: 1

       * - Token function
         - Parsing function
       * - if
         - :py:func:`parse_start_if_token`
       * - elif
         - :py:func:`parse_start_elif_token`
       * - else
         - :py:func:`parse_start_else_token`
       * - loop
         - :py:func:`parse_start_loop_token`

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.StartBlockToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    :raises:
        :py:exc:`curly.exceptions.CurlyParserUnknownStartBlockError`: if
        token function is unknown.
    """
    handlers = {
        "if": parse_start_if_token,
        "elif": parse_start_elif_token,
        "else": parse_start_else_token,
        "loop": parse_start_loop_token,
    }
    handler = handlers.get(token.contents["function"])
    if handler is None:
        raise exceptions.CurlyParserUnknownStartBlockError(token)
    return handler(stack, token)
def parse_end_block_token(stack, token):
    """Handle a :py:class:`curly.lexer.EndBlockToken`.

    Dispatches on the token *function* to the matching handler:

    .. list-table::
       :header-rows: 1

       * - Token function
         - Parsing function
       * - if
         - :py:func:`parse_end_if_token`
       * - loop
         - :py:func:`parse_end_loop_token`

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.EndBlockToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    :raises:
        :py:exc:`curly.exceptions.CurlyParserUnknownEndBlockError`: if
        function of end block is unknown.
    """
    handlers = {
        "if": parse_end_if_token,
        "loop": parse_end_loop_token,
    }
    handler = handlers.get(token.contents["function"])
    if handler is None:
        raise exceptions.CurlyParserUnknownEndBlockError(token)
    return handler(stack, token)
def parse_start_if_token(stack, token):
    """Handle the token for ``{% if function expression %}``.

    Pushes two nodes: a :py:class:`ConditionalNode` marking the whole
    conditional group, then the first :py:class:`IfNode` of the chain.
    See :py:func:`parse` for the rationale.

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.StartBlockToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    """
    stack.extend([ConditionalNode(token), IfNode(token)])
    return stack
def parse_start_elif_token(stack, token):
    """Parse the ``{% elif function expression %}`` start token.

    First rewinds the stack (via :py:func:`rewind_stack_for`) up to the
    previous :py:class:`IfNode`, then opens a fresh :py:class:`IfNode`
    for the new branch. See :py:func:`parse` for the rationale.

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.StartBlockToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    """
    rewound = rewind_stack_for(stack, search_for=IfNode)
    rewound.append(IfNode(token))
    return rewound
def parse_start_else_token(stack, token):
    """Parse the ``{% else %}`` token.

    Rewinds the stack (via :py:func:`rewind_stack_for`) to the previous
    :py:class:`IfNode` and then opens an :py:class:`ElseNode`.
    See :py:func:`parse` for the rationale.

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.StartBlockToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    """
    rewound = rewind_stack_for(stack, search_for=IfNode)
    rewound.append(ElseNode(token))
    return rewound
def parse_end_if_token(stack, token):
    """Parsing of token for ``{% /if %}``.
    Check :py:func:`parse` for details. Also, it pops out redundant
    :py:class:`ConditionalNode` and make chaining of :py:class:`IfNode`
    and :py:class:`ElseNode` verifying that there is only one possible
    else and it placed at the end.
    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.EndBlockToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    :raises ValueError: if the conditional contains more than one else
        (or any branch after the else).
    """
    # Close the branch that is currently open (last If/Else), then close
    # the enclosing ConditionalNode that groups all branches.
    stack = rewind_stack_for(stack, search_for=(IfNode, ElseNode))
    stack = rewind_stack_for(stack, search_for=ConditionalNode)
    cond = stack.pop()
    # Iterating `cond` yields its branch nodes in document order:
    # the initial IfNode, any elif IfNodes, then optionally one ElseNode.
    previous_node, *rest_nodes = cond
    for next_node in rest_nodes:
        # Anything following an ElseNode means a second else (or an elif
        # after else) -- reject the template.
        if isinstance(previous_node, ElseNode):
            raise ValueError(
                "If statement {0} has multiple elses".format(
                    cond[0].raw_string))
        # Chain each branch to its successor so evaluation can fall
        # through when a branch's condition is false.
        previous_node.elsenode = next_node
        previous_node = next_node
    # Only the head of the chain (the first IfNode) stays on the stack.
    stack.append(cond[0])
    return stack
def parse_start_loop_token(stack, token):
    """Parse the ``{% loop iterable %}`` start token.

    Opens a :py:class:`LoopNode` on the stack; see :py:func:`parse`
    for details.

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.StartBlockToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    """
    stack += [LoopNode(token)]
    return stack
def parse_end_loop_token(stack, token):
    """Parse the ``{% /loop %}`` token.

    Closes the open :py:class:`LoopNode` by rewinding the stack with
    :py:func:`rewind_stack_for`; see :py:func:`parse` for details.

    :param stack: Stack of the parser.
    :param token: Token to process.
    :type stack: list[:py:class:`Node`]
    :type token: :py:class:`curly.lexer.EndBlockToken`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    """
    updated = rewind_stack_for(stack, search_for=LoopNode)
    return updated
def rewind_stack_for(stack, *, search_for):
    """Stack rewinding till some node found.
    This function performes stack reducing on parsing. Idea is quite
    simple: we pop out nodes until some not done is found. If it has a
    type of node we are looking for, we are good: it basically means
    that we've found the node which should have popped results as
    subnodes. Otherwise: exception.
    At the end of the procedure updated node is placed on the top of the
    stack.
    :param stack: Stack of the parser.
    :param search_for: Type of the node we are searching for.
    :type stack: list[:py:class:`Node`]
    :type search_for: :py:class:`Node`
    :return: Updated stack.
    :rtype: list[:py:class:`Node`]
    :raises:
        :py:exc:`curly.exceptions.CurlyParserNoUnfinishedNodeError`: if
        not possible to find open start statement.
        :py:exc:`curly.exceptions.CurlyParserUnexpectedUnfinishedNodeError`: if
        we expected to find one open statement but found another.
    """
    nodes = []
    node = None
    # Pop finished nodes; the first unfinished node is the open statement
    # whose body we have just collected.
    while stack:
        node = stack.pop()
        if not node.done:
            break
        nodes.append(node)
    else:
        # while/else: the stack ran dry without `break`, i.e. there was
        # no matching open statement at all.
        raise exceptions.CurlyParserNoUnfinishedNodeError()
    if not isinstance(node, search_for):
        raise exceptions.CurlyParserUnexpectedUnfinishedNodeError(
            search_for, node)
    node.done = True
    # Popping reversed the order; restore document order for the body.
    node.data = nodes[::-1]
    stack.append(node)
    return stack
def validate_for_all_nodes_done(root):
    """Verify that every node in the given AST tree is marked as done.

    Performs an in-order traversal: first checks the ``done`` flag on
    each direct child, then recurses into each child's subtree.

    :param root: Root of the tree.
    :type root: :py:class:`RootNode`
    :raises:
        :py:exc:`curly.exceptions.CurlyParserFoundNotDoneError`: if a
        node which is not closed is found.
    """
    for child in root:
        if not child.done:
            raise exceptions.CurlyParserFoundNotDoneError(child)
    for child in root:
        validate_for_all_nodes_done(child)
|
from flask import Flask,render_template,redirect,request,url_for,flash
from flask_sqlalchemy import SQLAlchemy
app=Flask(__name__)
# SECRET_KEY is required for flash() session messages; 'dev' is a
# placeholder -- use a real secret in production.
app.config['SECRET_KEY'] = 'dev'
# SQLite database file (mydb.db) managed by Flask-SQLAlchemy.
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///mydb.db'
db = SQLAlchemy(app)
class Post(db.Model):
    # Blog post model: auto-increment key, short title, free-text body.
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.String(120))
    body = db.Column(db.Text)
@app.route('/home',methods=['GET','POST'])
def index():
    """Render the post form; on POST, persist the post and redirect.

    GET  -> renders index.html with the creation form.
    POST -> reads the 'title' and 'body' form fields, stores a Post
            row, flashes a confirmation, and redirects to the display
            page.
    """
    if request.method=='POST':
        title=request.form.get('title')
        body=request.form.get('body')
        post = Post(title=title, body=body)
        db.session.add(post)
        db.session.commit()
        flash('Your post has been created!')
        # Build the URL from the endpoint name instead of a hard-coded
        # relative path, so the redirect survives route changes
        # (url_for was imported but previously unused).
        return redirect(url_for('display'))
    return render_template('index.html')
@app.route('/display')
def display():
    """List every stored post on the display page."""
    all_posts = Post.query.all()
    return render_template('display.html', posts=all_posts)
if __name__=='__main__':
    # Development server only; run behind a real WSGI server in production.
    app.run(debug=True)
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
This is a basic calculator that can do four operations; addition, subtraction, summation and division.
"""
def addition(a, b):
    """Return the sum of *a* and *b*."""
    result = a + b
    return result
def subtraction(a, b):
    """Return *a* minus *b*."""
    difference = a - b
    return difference
def summation(a, b):
    """Return the product of *a* and *b*.

    NOTE: despite its name, this operator multiplies its arguments
    (the previous docstring claimed "Summation"). The name is kept
    for backward compatibility with existing callers.
    """
    return a * b
def division(a, b):
    """Return *a* divided by *b* (true division).

    :raises ValueError: if *b* is zero.
    """
    if b == 0:
        # Fixed typo in the error message ("Divison" -> "Division").
        raise ValueError("Division by Zero!")
    return a / b
|
'''
Write ts.data.new file, deleting transition states that connect a minimum with ID greater than the argument
'''
import sys

max_min_id = int(sys.argv[1])

# Context managers guarantee both files are flushed and closed, even if
# a line fails to parse (the original left ts.data.new unclosed on error).
with open("ts.data", "r") as td_f, open("ts.data.new", "w") as tdnew_f:
    # Iterate lazily instead of materialising the whole file in memory.
    for line in td_f:
        fields = line.split()
        # Columns 3 and 4 hold the IDs of the two connected minima; keep
        # the transition state only when both are within range.
        if int(fields[3]) <= max_min_id and int(fields[4]) <= max_min_id:
            tdnew_f.write("%7.10f %7.10f %1i %6i %6i %6.10f %6.10f %6.10f\n" %
                          (float(fields[0]), float(fields[1]), int(fields[2]),
                           int(fields[3]), int(fields[4]),
                           float(fields[5]), float(fields[6]), float(fields[7])))
|
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 17 00:52:23 2018
PURPOSE:
1 - Counts of killed people by gender (F/M)
2 - Death counts by weapon type
3 - Counts of people younger / older than 25
4 - Death counts by race
These counts are then visualized.
"""
#%% GENERAL LIBRARIES AND THE CSV
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# NOTE(review): encoding suggests the CSV was exported from Windows.
killData=pd.read_csv('csv/PoliceKillingsUS.csv',encoding="windows-1252")
killData.head()
killData.info()
killData.gender.value_counts()
# M=2428 F=107 -- visualize this split
sns.countplot(killData.gender)
plt.title('gender',color='r',fontsize=5)
killData.armed.value_counts()
#=> many different values -- let's visualize them
#%% Death counts by weapon type
# NOTE(review): figsize=(101,7) means 101 inches wide -- looks like a
# typo for (10,7) as used further below; confirm before changing.
plt.figure(figsize=(101,7))
#plt.xticks(rotation=90)
#sns.countplot(killData.armed) #=> this was not very readable
armed=killData.armed.value_counts()
# Only the 7 most common weapon types.
sns.barplot(x=armed[:7].index,y=armed[:7].values)
plt.ylabel('Number of Death')
plt.xlabel('Wapon Types')
plt.title('Kill Weapon',color='b',fontsize=15)
#%% Counts of people aged 25+ vs under 25
filte1=['above25' if i>=25 else 'below25' for i in killData.age]
dataframe1=pd.DataFrame({'age':filte1})
sns.countplot(x=dataframe1.age)
plt.ylabel('Number of Killed People')
plt.xlabel('Age of Killed People',color='blue',fontsize=15)
plt.show()
#%% Death counts by race
sns.countplot(data=killData,x='race')
#%% MOST DANGEROUS CITIES
city=killData.city.value_counts()
plt.figure(figsize=(10,7))
sns.barplot(x=city[:12].index,y=city[:12].values) #=> take only the first 12 rows
plt.xticks(rotation=45)
plt.title('Most dangerous cities',color='r',fontsize=15)
|
#!/usr/bin/python3
"""
Something is weird about this library or maybe I'm not getting something.
It seems from the old code that we do the following:
a) make the PICC_REQIDL request
b) ignore its return value
c) do the anti-collision routine
d) extract a UID and ignore other statuses...
Going to start with this for now but it doesnt seem right...
"""
import MFRC522
from time import sleep
from typing import Callable
MIFAREReader = MFRC522.MFRC522()
def handle_MFRC522_blocking(uid_callback: Callable[[str], None], poll_delay: int) -> None:
    """Poll the MFRC522 reader forever, invoking *uid_callback* with each
    tag UID (lowercase hex string, checksum byte stripped).

    The PICC_REQIDL status is deliberately ignored: per the module
    comment, errors are observed here even when the anticollision step
    still yields a valid UID.
    """
    while True:
        status, _ = MIFAREReader.MFRC522_Request(MIFAREReader.PICC_REQIDL)
        # `status` intentionally unused -- see docstring.
        status, data = MIFAREReader.MFRC522_Anticoll()
        if status == MIFAREReader.MI_OK:
            # Zero-padded lowercase hex, last (checksum) byte dropped.
            uid = ''.join('%02x' % byte for byte in data[:-1])
            uid_callback(uid)
        # Non-OK anticollision status is silently skipped; just poll again.
        sleep(poll_delay)
|
import pygame
import cfg
import os
images = []
# Load the 12 piece sprites (sprite/piece_0.png .. piece_11.png).
# Indices 0-5 are the white pieces, 6-11 the black ones -- see the
# per-class constructors below for the exact mapping.
for i in range(0, 12):
    images.append(pygame.image.load(os.path.join('sprite', 'piece_'+str(i)+'.png')))
class Piece:
    """Base class for all chess pieces.

    Coordinates are board squares (x, y) in 0-7, not pixels:
    (1, 2) rather than (100, 200).
    """
    def __init__(self, x, y, white, img, moved = False, pickedUp = False):
        self.x = x
        self.y = y
        self.isWhite = white
        # Scale the sprite once, to the configured square size.
        self.img = pygame.transform.scale(img, (cfg.SIZE, cfg.SIZE))
        self.moved = moved
        self.pickedUp = pickedUp

    def draw(self):
        """Blit the piece at its board square; enlarge it while picked up."""
        boardPos = (self.x*cfg.SIZE, self.y*cfg.SIZE)
        if self.pickedUp:
            imgScaled = pygame.transform.scale(self.img, (int(cfg.SIZE*1.25), int(cfg.SIZE*1.25)))
            cfg.screen.blit(imgScaled, boardPos)
        else:
            cfg.screen.blit(self.img, boardPos)

    def allValidMoves(self, board):
        """Fallback move generator: every square on the board, in
        (0,0), (0,1), ..., (7,7) order. Subclasses override this with
        the real piece rules.
        """
        # Comprehension replaces the original 64-tuple literal; same
        # contents and ordering.
        return [(x, y) for x in range(8) for y in range(8)]

    def removeOutBounds(self, moves):
        """Return *moves* filtered to squares inside the 8x8 board."""
        return [m for m in moves if 0 <= m[0] <= 7 and 0 <= m[1] <= 7]

    # Defines whether or not there are pieces in the way of movement to
    # target (x, y).
    def pieceInBetween(self, x, y, board):
        """Return True if any piece sits strictly between self and (x, y).

        Steps one square at a time towards the target along the rank,
        file or diagonal; the target square itself is not checked.
        """
        if (x-self.x) > 0: x_vector = 1
        elif (x-self.x) < 0: x_vector = -1
        else: x_vector = 0
        if (y-self.y) > 0: y_vector = 1
        elif (y-self.y) < 0: y_vector = -1
        else: y_vector = 0
        targetX = self.x + x_vector
        targetY = self.y + y_vector
        while not(targetX == x and targetY == y):
            if board.getPiece(targetX, targetY) is not None:
                return True
            targetX += x_vector
            targetY += y_vector
        return False

    # Moves self to position (x, y) and removes the piece at (x, y) if
    # there is one.
    def move(self, x, y, board):
        """Capture anything on (x, y) and relocate this piece there."""
        board.removePiece(x, y)
        self.x = x
        self.y = y
class King(Piece):
    """The king: moves one square in any of the eight directions."""
    def __init__(self, x, y, white):
        sprite = images[0] if white else images[6]
        super().__init__(x, y, white, sprite)

    def allValidMoves(self, board):
        """Return the in-bounds one-step moves not blocked by a friendly piece."""
        # Offsets listed in the same order the original enumerated t1..t8.
        offsets = ((-1, 0), (-1, -1), (0, -1), (1, -1),
                   (1, 0), (1, 1), (0, 1), (-1, 1))
        moves = []
        for dx, dy in offsets:
            square = (self.x + dx, self.y + dy)
            occupant = board.getPiece(square[0], square[1])
            if occupant is None or occupant.isWhite != self.isWhite:
                moves.append(square)
        return self.removeOutBounds(moves)
class Queen(Piece):
    """The queen: slides any distance along ranks, files and diagonals."""
    def __init__(self, x, y, white):
        sprite = images[1] if white else images[7]
        super().__init__(x, y, white, sprite)

    def allValidMoves(self, board):
        """Return every rank/file/diagonal square reachable without
        jumping over another piece."""
        # Candidate squares grouped by movement line, each skipping the
        # queen's own square; ordering matches the original loops.
        lines = [
            [(i, self.y) for i in range(8) if i != self.x],                # rank
            [(self.x, j) for j in range(8) if j != self.y],                # file
            [(i, self.y - self.x + i) for i in range(8) if i != self.x],   # one diagonal
            [(self.x + self.y - j, j) for j in range(8) if j != self.y],   # other diagonal
        ]
        moves = []
        for line in lines:
            for tx, ty in line:
                occupant = board.getPiece(tx, ty)
                if occupant is None or occupant.isWhite != self.isWhite:
                    if not self.pieceInBetween(tx, ty, board):
                        moves.append((tx, ty))
        return self.removeOutBounds(moves)
class Bishop(Piece):
    """The bishop: slides any distance along the two diagonals."""
    def __init__(self, x, y, white):
        sprite = images[2] if white else images[8]
        super().__init__(x, y, white, sprite)

    def allValidMoves(self, board):
        """Return every diagonal square reachable without jumping over a piece."""
        # Each diagonal enumerated in the same order as the original loops.
        lines = [
            [(i, self.y - self.x + i) for i in range(8) if i != self.x],
            [(self.x + self.y - j, j) for j in range(8) if j != self.y],
        ]
        moves = []
        for line in lines:
            for tx, ty in line:
                occupant = board.getPiece(tx, ty)
                if occupant is None or occupant.isWhite != self.isWhite:
                    if not self.pieceInBetween(tx, ty, board):
                        moves.append((tx, ty))
        return self.removeOutBounds(moves)
class Knight(Piece):
    """The knight: L-shaped jumps; may hop over intervening pieces."""
    def __init__(self, x, y, white):
        sprite = images[3] if white else images[9]
        super().__init__(x, y, white, sprite)

    def allValidMoves(self, board):
        """Return the (up to eight) knight jumps not landing on a friendly piece."""
        # Jump offsets in the same order the original enumerated t1..t8.
        jumps = ((-1, -2), (1, -2), (-2, -1), (2, -1),
                 (-2, 1), (2, 1), (-1, 2), (1, 2))
        moves = []
        for dx, dy in jumps:
            square = (self.x + dx, self.y + dy)
            occupant = board.getPiece(square[0], square[1])
            if occupant is None or occupant.isWhite != self.isWhite:
                moves.append(square)
        return self.removeOutBounds(moves)
class Rook(Piece):
    """The rook: slides any distance along its rank or file."""
    def __init__(self, x, y, white):
        sprite = images[4] if white else images[10]
        super().__init__(x, y, white, sprite)

    def allValidMoves(self, board):
        """Return every rank/file square reachable without jumping over a piece."""
        lines = [
            [(i, self.y) for i in range(8) if i != self.x],   # along the rank
            [(self.x, j) for j in range(8) if j != self.y],   # along the file
        ]
        moves = []
        for line in lines:
            for tx, ty in line:
                occupant = board.getPiece(tx, ty)
                if occupant is None or occupant.isWhite != self.isWhite:
                    if not self.pieceInBetween(tx, ty, board):
                        moves.append((tx, ty))
        return self.removeOutBounds(moves)
class Pawn(Piece):
    """The pawn: single step forward, double step from its start row,
    and diagonal captures. (No en passant or promotion handling.)"""
    def __init__(self, x, y, white):
        sprite = images[5] if white else images[11]
        super().__init__(x, y, white, sprite)

    def allValidMoves(self, board):
        """Return forward pushes and diagonal captures for this pawn."""
        # White pawns advance towards y=0, black pawns towards y=7.
        step = -1 if self.isWhite else 1
        start_row = 6 if self.isWhite else 1
        moves = []
        # Double push from the starting row, if both squares are clear.
        if self.y == start_row:
            double = (self.x, self.y + 2 * step)
            if board.getPiece(double[0], double[1]) is None:
                if not self.pieceInBetween(double[0], double[1], board):
                    moves.append(double)
        # Single push into an empty square.
        if board.getPiece(self.x, self.y + step) is None:
            moves.append((self.x, self.y + step))
        # Diagonal captures: left first, then right (original order).
        for dx in (-1, 1):
            occupant = board.getPiece(self.x + dx, self.y + step)
            if occupant is not None and occupant.isWhite != self.isWhite:
                moves.append((self.x + dx, self.y + step))
        return self.removeOutBounds(moves)
#coding=utf-8
import requests
from lxml import etree
def getMovieData():
    """Scrape the Douban Top-250 movie list and return it sorted by rating.

    Returns a list of dicts with keys name/info/star/quote.
    NOTE(review): name, star and quote are assigned the raw XPath result
    LISTS (not strings), and the final sort keys on that list -- confirm
    callers expect lists here before changing.
    """
    movies = []
    url = r'https://movie.douban.com/top250'
    parameter = r'?start=0&filter='
    # Observed site behaviour: every page URL is `url` plus the parameter,
    # and the parameter can be taken from each page's "next page" <a> tag.
    # The last page carries no such parameter, hence the loop condition.
    try:
        while str(parameter[0]):
            # Build the page link.
            urls = url + str(parameter[0])
            r = requests.get(urls)
            # Fetch the page source.
            sourceCode = r.text
            # Convert the source into a form XPath can match against.
            html = etree.HTML(sourceCode)
            # Extract the next-page link parameter.
            # parameter = html.xpath('/*/span[@class="next"]/a/@href')
            parameter = html.xpath('//*[@id="content"]/div/div[1]/div[2]/span[3]/a/@href')
            items = html.xpath('//ol/li/div[@class="item"]')
            for item in items:
                movie = {
                    "name": '',
                    "info": '',
                    "star": '',
                    "quote": ''
                }
                name, info, star, quote = "", "", "", ""
                try:
                    name = item.xpath('./div[@class="info"]/div[@class="hd"]/a//span/text()')
                    infos = item.xpath('./div[@class="info"]/div[@class="bd"]/p[@class=""]//text()')
                    star = item.xpath('./div[@class="info"]/div[@class="bd"]/div[@class="star"]/span[@class="rating_num"]/text()')
                    quote = item.xpath('./div[@class="info"]/div[@class="bd"]/p[@class="quote"]/span/text()')
                    # The description comes back as a list, usually with two
                    # elements, so the pieces are concatenated; the joined
                    # string contains lots of whitespace, which is stripped.
                    info = (infos[0] + infos[1]).replace(" ", '').replace('\n', '').replace('\xa0', '')
                    movie["name"] = name
                    movie["info"] = info
                    movie["star"] = star
                    # Some movies have no quote, so guard with an if.
                    if quote:
                        movie["quote"] = quote
                    else:
                        movie["quote"] = " "
                    movies.append(movie)
                except Exception as e:
                    # Per-item failures are logged and skipped.
                    print("加载失败1")
                    # raise e
    except Exception as e:
        # Page-level failure (network, parse) ends the crawl.
        print('加载失败2')
        # raise e
    # Sort by rating, highest first.
    movies = sorted(movies, key=lambda x: x['star'], reverse=True)
    return movies
|
import motor.motor_asyncio
async def init_pg(app):
    """Attach a MongoDB client to the aiohttp app.

    Despite the historical name (this used to create an aiopg/PostgreSQL
    engine -- see VCS history), the backing store is now Motor/MongoDB.
    Stores the client under ``app['cmdb_con']`` and the ``test`` database
    under ``app['cmdb']``.
    """
    client = motor.motor_asyncio.AsyncIOMotorClient("127.0.0.1", 27017)
    app['cmdb_con'] = client
    app['cmdb'] = client["test"]
import asyncio


async def close_pg(app):
    """Close the MongoDB client stored on the app.

    Motor's ``close()`` is synchronous, so nothing is awaited here.
    """
    connection = app['cmdb_con']
    connection.close()
|
# coding: utf-8
import json
import itertools
from django.core.exceptions import ValidationError
from django.core.validators import validate_ipv46_address
from django.utils.encoding import force_text
def pretty_data(request):
    """Render the request payload as "key: value" lines.

    JSON bodies are decoded; multipart posts merge POST fields with
    uploaded files; any other content type yields an empty string.
    """
    if request.META.get('CONTENT_TYPE', None) == 'application/json':
        # JSON-encoded request body.
        try:
            payload = json.loads(force_text(request.body))
        except Exception:  # this should never fail
            payload = {}
    elif request.META.get('CONTENT_TYPE', '').startswith('multipart'):
        # Regular form post: fields plus uploaded files.
        payload = dict(itertools.chain(request.POST.items(), request.FILES.items()))
    else:
        # Anything else: nothing to show.
        payload = {}
    return u'\n'.join('%s: %s' % (key, value)
                      for key, value in payload.items())
def pretty_headers_request(request):
    """Render every ``request.META`` entry as a "key: value" line."""
    lines = ['%s: %s' % pair for pair in request.META.items()]
    return u'\n'.join(lines)
def pretty_headers_response(response):
    """Render response headers as "key: value" lines.

    NOTE(review): relies on the private ``_headers`` attribute, which
    newer Django versions replaced -- verify against the Django version
    in use.
    """
    pairs = list(response._headers.values())
    return u'\n'.join('%s: %s' % (name, value) for name, value in pairs)
def get_client_ip(request):
    """Best-effort client IP: the last X-Forwarded-For hop when present,
    otherwise REMOTE_ADDR.

    Returns None when the candidate is not a valid IPv4/IPv6 address.
    """
    forwarded = request.META.get('HTTP_X_FORWARDED_FOR')
    if forwarded:
        candidate = forwarded.split(',')[-1].strip()
    else:
        candidate = request.META.get('REMOTE_ADDR')
    try:
        validate_ipv46_address(candidate)
    except ValidationError:
        return None
    return candidate
|
import boto3
import logging
from configparser import ConfigParser
from botocore.exceptions import ClientError
from tweepy import StreamListener, Stream, OAuthHandler
class TweetListener(StreamListener):
    """
    Streams the recent tweets related to the query to AWS Kinesis
    (via a Firehose delivery stream).
    """
    def __init__(self, config):
        # NOTE(review): super(StreamListener, self) skips StreamListener's
        # own __init__ and calls its parent's instead; the conventional
        # form is super(TweetListener, self).__init__() -- confirm intent.
        super(StreamListener, self).__init__()
        # Set up AWS configurations
        self.config = config
        self.aws_access_key_id = self.config.get("AWS", "ACCESS_KEY_ID")
        self.aws_secret_access_key = self.config.get("AWS", "SECRET_ACCESS_KEY")
        self.delivery_stream = self.config.get("KINESIS", "DELIVERY_STREAM")
        self.firehose_client = boto3.client("firehose",
                                            aws_access_key_id=self.aws_access_key_id,
                                            aws_secret_access_key=self.aws_secret_access_key)
    def on_data(self, tweet):
        """
        Pushes tweets to AWS Kinesis.
        :param tweet: Tweet data (dictionary)
        :return: True on success; None when the put fails (error is logged).
        """
        try:
            # Start logging
            logging.info("Streaming tweet data to AWS Kinesis")
            # Push the record
            response = self.firehose_client.put_record(DeliveryStreamName=self.delivery_stream,
                                                       Record={"Data": tweet})
            logging.debug(response)
            return True
        except ClientError as ex:
            # In case of client error log the error (stream keeps running).
            logging.exception(f"Failed to stream tweet data to AWS Kinesis: {ex}.")
    def on_error(self, status_code):
        """
        Handle errors and exceptions
        :param status_code: HTTP status code
        :return: False on HTTP 420 (rate limited) to stop the stream;
            None otherwise.
        """
        # Log the error status code
        logging.error(status_code)
        # Rate limit status is not considered as error
        if status_code == 420:
            return False
def main():
    """Configure logging and Twitter auth, then stream matching tweets
    to AWS Kinesis via :class:`TweetListener`."""
    # Read config settings.
    # BUG FIX: ConfigParser has no getlist() method by default; register
    # a "list" converter so the config.getlist(...) calls below work
    # (values are comma-separated in the config file).
    config = ConfigParser(
        converters={"list": lambda value: [item.strip() for item in value.split(",")]})
    config.read('../config.cfg')
    # Start logging
    logging.basicConfig(level=config.get("LOGGING", "LEVEL"), format="%(asctime)s - %(levelname)s - %(message)s")
    logging.info("Started logging")
    # Authenticating twitter API
    logging.info("Authenticating Twitter API")
    auth = OAuthHandler(config.get("TWITTER", "CONSUMER_KEY"), config.get("TWITTER", "CONSUMER_SECRET"))
    auth.set_access_token(config.get("TWITTER", "ACCESS_TOKEN"), config.get("TWITTER", "ACCESS_TOKEN_SECRET"))
    # Start streaming to AWS Kinesis
    logging.info(f"Start streaming tweets matching: {config.getlist('TWEEPY', 'TRACK_TOPICS')}")
    twitter_stream = Stream(auth, TweetListener(config))
    twitter_stream.filter(track=config.getlist("TWEEPY", "TRACK_TOPICS"),
                          languages=config.getlist("TWEEPY", "TRACK_LANGS"))
if __name__ == "__main__":
main()
|
# Enter your code here. Read input from STDIN. Print output to STDOUT
import sys
# Read the mat dimensions N and M (HackerRank's Designer Door Mat
# guarantees M = 3*N with N odd).
n,m = map(int,sys.stdin.readline().split())
mid =int(n/2)
mid2=int(m/2)
# Top half: odd-indexed rows print a centered run of ".|." groups
# flanked by dashes.
# NOTE(review): looping to m//2 while comparing i == n looks suspicious
# for general inputs -- verify the output against the expected pattern.
for i in range(0,mid2):
    if(i%2!=0):
        if (i == n):
            break
        else:
            print("---"*mid+".|."*(i) + "---"*mid)
            mid=mid-1
# Middle row: the word WELCOME centered with dashes.
print("WELCOME".center(m,"-"))
n=n-2
j=1
# Bottom half: mirror of the top, shrinking the ".|." run each odd row.
for i in range(n,0,-1):
    if(i%2!=0):
        print("---"*j +".|."*n+"---"*j)
        n=n-2
        j=j+1
|
#!/usr/bin/env python3
# Sparse spiral memory: maps (x, y) -> stored value; absent cells are 0.
grid = {}


def sum_neighboors(x, y):
    """Sum the 3x3 neighbourhood of (x, y) in the global grid.

    Includes the value at (x, y) itself; missing cells count as 0.
    """
    return sum(grid.get((x + dx, y + dy), 0)
               for dx in range(-1, 2)
               for dy in range(-1, 2))
def print_grid(grid):
    """Dump the (-10..9) x (-10..9) window of *grid* to stdout,
    one row per line, 0 for empty cells, framed by "---" lines."""
    print("---")
    for row in range(-10, 10):
        # Each cell is followed by a space (matching the original output,
        # which leaves a trailing space per row).
        line = "".join("%s " % grid.get((row, col), 0) for col in range(-10, 10))
        print(line)
    print("---")
def move_right(x, y):
    """Step one cell right (y grows)."""
    return x, y + 1


def move_down(x, y):
    """Step one cell "down" in this grid's orientation (x shrinks)."""
    return x - 1, y


def move_up(x, y):
    """Step one cell "up" in this grid's orientation (x grows)."""
    return x + 1, y


def move_left(x, y):
    """Step one cell left (y shrinks)."""
    return x, y - 1


def change_move(move):
    """Return the next direction in the spiral's turning cycle
    right -> down -> left -> up -> right. Unknown inputs yield None,
    matching the original fall-through behaviour."""
    successor = {
        move_right: move_down,
        move_down: move_left,
        move_left: move_up,
        move_up: move_right,
    }
    return successor.get(move)
# Puzzle input: we search for the first spiral value larger than this
# (presumably Advent of Code 2017 day 3 part 2 -- confirm).
value = 289326
# Starting point
x = 0
y = 0
grid[(x, y)] = 1
steps = 1
next_steps = 1
move = move_right
index_loop = 0
# Walk the spiral: after each arm, turn; the arm length grows by one
# every second turn (index_loop % 2).
while grid[(x, y)] < value:
    print("Looped: %s times" % index_loop)
    while steps > 0:
        print("Moving: %s for %s steps" % (move, steps))
        x, y = move(x, y)
        # Each new cell stores the sum of its already-filled neighbours.
        grid[(x, y)] = sum_neighboors(x, y)
        if grid[(x, y)] > value:
            break # Value found, exit early
        steps = steps - 1
    steps = next_steps
    index_loop = index_loop + 1
    next_steps = next_steps + (index_loop % 2)
    move = change_move(move)
print_grid(grid)
print(grid[(x, y)])
|
import numpy as np
import matplotlib.pyplot as plt
def generateMVNRandData(Npts, mu, sigma):
    """Draw *Npts* samples from N(mu, sigma * I) -- an isotropic
    multivariate normal with mean *mu* and scaled identity covariance."""
    covariance = sigma * np.eye(len(mu))
    samples = np.random.multivariate_normal(mu, covariance, Npts)
    return samples
def plotLine(weights, range):
    """Plot the decision line implied by *weights* over the given range.

    NOTE(review): the second parameter shadows the builtin ``range``
    (name kept for interface compatibility); the axes are deliberately
    swapped in the plt.plot call, matching the original.
    """
    xs = np.array(range)
    ys = -(weights[0] / weights[1]) - (weights[2] / weights[1]) * xs
    plt.plot(ys, xs)
    plt.pause(2)
def perceptronLearningAlg(data, labels, eta, nIterations):
    """Train a perceptron with learning rate *eta*, animating each update.

    :param data: (nPts, nFeatures) array; column 0 is expected to be a
        bias column of ones (the scatter plot uses columns 1 and 2).
    :param labels: array of {-1, +1} class labels.
    :param eta: learning rate.
    :param nIterations: hard cap on training epochs.
    :return: learned weight vector.
    """
    nPts = data.shape[0]
    weights = np.random.rand(data.shape[1])
    print('Initial weights:', weights)
    error = 1
    iter = 0
    # BUG FIX: the original condition `error > 0 & iter < nIterations`
    # parsed as the chained comparison `error > (0 & iter) < nIterations`
    # because bitwise '&' binds tighter than comparisons, so the
    # iteration cap was never enforced. Use logical 'and'.
    while error > 0 and iter < nIterations:
        print('Iteration: ', iter, '; Error: ', error)
        error = 0
        iter += 1
        for i in range(nPts):
            activation = data[i, :] @ weights
            activation = (activation > 0)  # threshold: True for the +1 side
            if activation != True:
                activation = -1  # map the "False" side to the -1 label
            if (activation - labels[i]) != 0:  # misclassified sample
                plt.cla()
                weights -= eta * data[i, :] * (activation - labels[i])
                error += 1
                plt.scatter(data[:, 1], data[:, 2], c=labels, linewidth=0)
                plotLine(weights, [-2, 2])
    print('Final Iteration: ', iter, '; Final Error: ', error)
    return weights
#%%
if __name__ == '__main__':
    Npts = 100
    mu1 = [2,2]   # mean of the +1 class cluster
    mu2 = [0,0]   # mean of the -1 class cluster
    var = .1      # NOTE(review): unused -- the calls below hard-code .1
    eta = 10
    nIterations = 10;
    plt.ion()     # interactive mode so the training animation updates live
    fig = plt.figure()
    # Each class: Npts Gaussian points with a bias column of ones prepended.
    data1 = np.array(generateMVNRandData(Npts, mu1, .1))
    data1 = np.hstack((np.ones((Npts,1)),data1))
    data2 = np.array(generateMVNRandData(Npts, mu2, .1))
    data2 = np.hstack((np.ones((Npts,1)),data2))
    data = np.vstack(( data1, data2))
    labels= np.hstack((1*np.ones(Npts), -1*(np.ones(Npts)))) # for {-1,1} use activation 1
    plt.scatter(data[:,1],data[:,2], c=labels, linewidth=0)
    plt.pause(2)
    perceptronLearningAlg(data,labels,eta,nIterations)
import RPi.GPIO as GPIO
class LED:
    """Minimal driver for a single LED on a Raspberry Pi GPIO pin."""
    def __init__(self, pin):
        # Pin number; BCM vs BOARD numbering depends on the caller's
        # GPIO.setmode() choice -- not set here.
        self.__pin = pin
    def on(self):
        """Configure the pin as an output and drive it high."""
        GPIO.setup(self.__pin, GPIO.OUT)
        GPIO.output(self.__pin, GPIO.HIGH)
    def off(self):
        """Configure the pin as an output and drive it low."""
        GPIO.setup(self.__pin, GPIO.OUT)
        GPIO.output(self.__pin, GPIO.LOW)
|
import random
import math
import operator
def objective(chromosome, target):
    """Return the squared-error fitness of *chromosome* against *target*.

    Lower is better; 0 means an exact match.
    """
    score = 0
    for i, gene in enumerate(chromosome):
        score += math.pow(gene - target[i], 2)
    return score
def crossover(chromosome1, chromosome2):
    """Swap the second halves of the two chromosomes in place.

    Returns the (mutated) pair.
    BUG FIX: the midpoint now uses floor division so it is an int under
    both Python 2 and Python 3 -- plain '/' yields a float on Python 3
    and range(float, ...) raises TypeError.
    """
    midpoint = len(chromosome1) // 2
    for i in range(midpoint, len(chromosome1)):
        chromosome1[i], chromosome2[i] = chromosome2[i], chromosome1[i]
    return chromosome1, chromosome2
def mutate(chromosome, probability):
    """With the given probability, set one random gene to a random 0-10 value.

    Mutation happens in place; the chromosome is also returned.
    BUG FIX: the original drew the mutation POSITION with
    random.choice(chromosome), i.e. it used a gene VALUE as an index,
    which raises IndexError whenever the chosen value >= len(chromosome).
    Draw a valid index with randrange instead.
    """
    dice = random.uniform(0, 1)
    idx = random.randrange(len(chromosome))
    new_value = random.randint(0, 10)
    if dice < probability:
        chromosome[idx] = new_value
    return chromosome
def create_population(size, length):
    """Return *size* random chromosomes, each *length* genes in 0..10."""
    # Nested comprehension draws genes in the same RNG order as the
    # original nested loops.
    return [[random.randint(0, 10) for _ in range(length)]
            for _ in range(size)]
def create_table_by_fitness(population, target_value):
    """Pair each chromosome with its objective() score:
    returns ``[[score, chromosome], ...]`` in population order."""
    return [[objective(chromosome, target_value), chromosome]
            for chromosome in population]
def sort_table(table, col):
    """Return the rows of *table* sorted ascending by column *col*
    (used to rank chromosomes by their fitness-score column)."""
    return sorted(table, key=lambda row: row[col])
# NOTE(review): this self-test section uses Python 2 print statements and
# will not parse under Python 3.
print
# Unit tests for score
print "Unit tests for score"
print objective([1,2,3,4,5,6], [1,2,3,4,5,6]) # should be zero
print objective([1,2,4,4,5,6], [1,2,3,4,5,6]) # should be non-zero
print
# Unit tests for crossover
print "Unit tests for crossover"
print crossover([0,1,2,3,4,5], [9,8,7,6,5,4])
print
# Unit tests for mutate
print "Unit tests for mutate"
print mutate([0,1,2,3,4,5], 0.5)
print
# Smoke test: build a random population and rank it by fitness.
population1 = create_population(10, 6)
print population1
print
print "Population table with score"
ranking1 = create_table_by_fitness(population1, [1,2,3,4,5,6])
print ranking1
print
#Unit tests for sort_by_fitness
print "Unit tests for sort_by_fitness"
print sort_table(ranking1, 0)
|
题目类似于一个正整数可以拆成其他正整数的和,求这些正整数的最大连乘积。
将绳子 以相等的长度等分为多段 ,得到的乘积最大。
设每段长为 x,则乘积 y = x^(n/x)(n 为常数)。对 y = x^(1/x) 求导可知在 x = e ≈ 2.718 处取得极大值。
因此 x 取最接近 e 的整数 3。
x对3取余得到b。对3整除得到a。
当b=0 时,直接返回 3^a,
当b=1 时,要将一个1+3 转换为2+2,因此返回 3^(a−1)×4,
当b=2 时,返回3^a×2。
class Solution:
    def cuttingRope(self, n: int) -> int:
        """Cut a rope of length *n* (n >= 2) into >= 2 integer pieces so
        that the product of the piece lengths is maximal; return the product.

        Strategy: use as many 3s as possible; a remainder of 1 is better
        spent as 2+2 (trade one 3 for a 4).
        """
        if n <= 3:
            # Must cut at least once, so lengths 2 and 3 give n - 1.
            return n - 1
        a, b = divmod(n, 3)
        # Integer exponentiation is exact and removes the original's
        # dependency on math.pow (which was used without importing math
        # and loses precision for large n).
        if b == 0:
            return 3 ** a
        if b == 1:
            return 3 ** (a - 1) * 4
        return 3 ** a * 2
执行用时:44 ms, 在所有 Python3 提交中击败了54.23%的用户
内存消耗:14.9 MB, 在所有 Python3 提交中击败了5.22%的用户
|
__all__ = ('GameWorld',)
from appuifw import Canvas, EEventKey, EEventKeyUp, app, popup_menu
from e32 import ao_yield, ao_sleep
from graphics import Image
from sysinfo import free_ram, total_ram
from pyboom.colors import BLACK, WHITE
from pyboom.types import SingletonType
# Target frame period in seconds (~60 FPS).
FPS_DEFAULT = (1.0 / 60.0) # ~60FPS
COLOR_MODE = "RGB12" # 4096 colors @ 12bits/pixel
# Default clear color for the off-screen buffer.
BG_COLOR = BLACK
class GameWorld(object):
"""Game world class
"""
__metaclass__ = SingletonType
def __init__(
self, title=None, screen_mode=None, fps=None,
color_mode=None, bg_color=None,
):
app.screen = screen_mode or "full"
app.title = title or u"PyS60 Game"
self._game_obj = None
self._is_running = 0
if fps is None:
self.fps = FPS_DEFAULT
else:
assert isinstance(fps, int) or isinstance(fps, float)
self.fps = fps
self.color_mode = color_mode or COLOR_MODE
self.bg_color = bg_color or BG_COLOR
self.canvas = Canvas(
redraw_callback=self.handle_redraw(),
event_callback=self.handle_event()
)
app.body = self.canvas
app.exit_key_handler = self.handle_exit()
self.buffer = Image.new(self.canvas.size, mode=self.color_mode)
self.buffer.clear(self.bg_color)
self.handle_redraw()(None)
self.key_click = None
self._key_click_release = 1
self.key_down = None
self._key_down_release = 1
def _get_is_running(self): return self._is_running
def _set_is_running(self, v): self._is_running = v
is_running = property(_get_is_running, _set_is_running)
def _get_key_click_release(self): return self._key_click_release
def _set_key_click_release(self, v): self._key_click_release = v
key_click_release = property(
_get_key_click_release, _set_key_click_release)
def _get_key_down_release(self): return self._key_down_release
def _set_key_down_release(self, v): self._key_down_release = v
key_down_release = property(
_get_key_down_release, _set_key_down_release)
def _get_game_obj(self): return self._game_obj
def _set_game_obj(self, v): self._game_obj = v
game_obj = property(_get_game_obj, _set_game_obj)
def main(self, game_obj):
self.game_obj = game_obj
# Run the game initializer and
# pass in a reference to to the game world here.
game_obj.initialize(self)
# Start the game loop
assert hasattr(game_obj, 'run'), "Game object needs a run method."
if not self.is_running:
self.is_running = 1
if not hasattr(game_obj, 'handle_timers'):
game_obj.handle_timers = 0
handle_timers = game_obj.handle_timers
run_fn = game_obj.run
while self.is_running:
ao_yield()
run_fn()
if handle_timers:
continue
else:
ao_sleep(self.fps)
# Run any termination/cleanup sequence here if any.
if hasattr(game_obj, 'terminate'):
game_obj.terminate()
def show_ram_usage(self):
x1 = 0
y1 = 0
x2 = x1 + 130
y2 = y1 + 15
x3 = x1 + 1
y3 = y1 + 12
conv = 1024.0
txt = u"RAM: %.2f / %.2f MB" % (
(free_ram() / conv / conv),
(total_ram() / conv / conv)
)
self.buffer.rectangle((
x1, y1, x2, y2
), fill=WHITE)
self.buffer.text((x3, y3), txt, fill=BLACK)
def handle_exit(self):
# The game should handle setting the `is_running`
# flag to 0 from here.
handled = 0
handle_exit = None
if hasattr(self.game_obj, 'handle_exit'):
handled = 1
handle_exit = self.game_obj.handle_exit
def call(*args, **kwargs):
if handled:
handle_exit()
else:
ans = popup_menu([u"Yes", u"No"], u"Confirm Exit?")
if ans == 0:
self.is_running = 0
return call
def handle_redraw(self):
# Takes as its argument a four-element tuple that contains the
# top-left and the bottom-right corner of the area
# that needs to be redrawn.
has_buffer = hasattr(self, 'buffer')
has_canvas = hasattr(self, 'canvas')
if has_canvas and has_buffer:
buffer = self.buffer
canvas = self.canvas
def call(*args, **kwargs):
rect = args[0]
canvas.blit(buffer)
return call
    def handle_event(self):
        """Return an event callback that tracks keypad click/hold state.

        The callback distinguishes a short click (a single key event
        followed by key-up) from a hold (repeated key events before the
        key-up), exposes the result via ``key_click``/``key_down`` and
        the ``*_release`` flags, and forwards every event to the game
        object's handle_event().
        """
        def call(*args, **kwargs):
            event = args[0]
            # Track keypad short clicks ("pressed") vs holds ("down")
            # and their releases.
            if event["type"] == EEventKey:
                if self.key_down:
                    # A second (or later) key event for a key we are
                    # already tracking: the user is holding it down.
                    self.key_down = (event["keycode"], "down")
                    self.key_down_release = 0
                else:
                    # First key event: provisionally a short click.
                    self.key_down = (event["keycode"], "pressed")
                    self.key_click_release = 0
            elif event["type"] == EEventKeyUp and self.key_down:
                code, mode = self.key_down
                if mode == "pressed":
                    # Released before any repeat: a completed short click.
                    self.key_click = code
                    self.key_click_release = 1
                if mode == "down":
                    self.key_down_release = 1
                self.key_down = None
            # Always forward the raw event to the game object.
            self.game_obj.handle_event(event)
        return call
def is_key_clicked(self, code):
if code == self.key_click:
self.key_click = None
return 1
return 0
def is_key_held(self, code):
if self.key_down and self.key_down == (code, "down"):
return 1
return 0
|
# encoding:utf-8
import re
from utils.fileUtil import FileUtil
# from fileUtil import FileUtil
class ReportUtil(object):
    """Scan a crash/monitor log file and tally well-known Java exceptions.

    The per-exception name constants and ``*Counter`` attributes are kept
    as class attributes for backward compatibility with existing callers;
    counters are shadowed onto the instance on first update.
    """
    # Exception type 1: null pointer
    NullPointerException="java.lang.NullPointerException"
    NullPointerExceptionCounter=0
    # 2: array index out of bounds
    ArrayIndexOutOfBoundsException="java.lang.ArrayIndexOutOfBoundsException"
    ArrayIndexOutOfBoundsExceptionCounter=0
    # 3: class not found
    ClassNotFoundException="java.lang.ClassNotFoundException"
    ClassNotFoundExceptionCounter=0
    # 4: arithmetic error
    ArithmeticException="java.lang.ArithmeticException"
    ArithmeticExceptionCounter=0
    # 5: illegal method argument
    IllegalArgumentException="java.lang.IllegalArgumentException"
    IllegalArgumentExceptionCounter=0
    # 6: file not found
    FileNotFoundException="java.lang.FileNotFoundException"
    FileNotFoundExceptionCounter=0
    # 7: number format error
    NumberFormatException="java.lang.NumberFormatException"
    NumberFormatExceptionCounter=0
    # 8: stack overflow
    StackOverflowError="java.lang.StackOverflowError"
    StackOverflowErrorCounter=0
    # 9: out of memory
    OutOfMemoryError="java.lang.OutOfMemoryError"
    OutOfMemoryErrorCounter=0

    # (attribute name, human-readable description) in report order; the
    # runtime description strings are kept verbatim from the original.
    _EXCEPTION_SPECS = [
        ("NullPointerException", "空指针异常"),
        ("ArrayIndexOutOfBoundsException", "数组溢出"),
        ("ClassNotFoundException", "类不存在"),
        ("ArithmeticException", "数学运算异常"),
        ("IllegalArgumentException", "方法参数异常"),
        ("FileNotFoundException", "文件未找到"),
        ("NumberFormatException", "数值转化异常"),
        ("StackOverflowError", "堆栈异常错误"),
        ("OutOfMemoryError", "内存溢出错误"),
    ]

    def __init__(self, filePath):
        self.filePath = filePath
        # Report rows of [exception name, count, description] built by
        # analysisLog().
        self.currentData = []

    def analysisLog(self, flag):
        """Parse the log file and return the accumulated report rows.

        When ``flag`` is falsy the file is not read and the current
        (possibly empty) report is returned unchanged.
        """
        if flag:
            fileUtil = FileUtil(self.filePath, "")
            allDataRows = fileUtil.readDataFromTXT()
            for name, description in self._EXCEPTION_SPECS:
                pattern = getattr(self, name)
                counter_name = name + "Counter"
                count = getattr(self, counter_name)
                for row in allDataRows:
                    # Plain substring test. Bug fix: the original used
                    # re.findall with the unescaped class name, where each
                    # '.' matched any character.
                    if pattern in row:
                        count += 1
                setattr(self, counter_name, count)
                self.currentData.append([pattern, count, description])
        return self.currentData
if __name__ == "__main__":
    # Ad-hoc smoke test against a sample monitor log (path unchanged,
    # including the original "ouput" directory spelling).
    report = ReportUtil("E:\\pythonWorkSpace\\androidSourceMonitor\\ouput\\2017-05-17-10-49-19.txt")
    print(report.analysisLog(True))
|
# Generated by Django 2.2.6 on 2021-04-09 10:35
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated migration: set a custom permission on newsarticle."""

    dependencies = [
        ('home', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='newsarticle',
            # Replaces the model's Meta.permissions with a single custom
            # 'add_article' ("Can publish articles") permission.
            options={'permissions': (('add_article', 'Can publish articles'),)},
        ),
    ]
|
from datetime import datetime
from rest_framework import serializers
# from rest_framework_cache.serializers import CachedSerializerMixin
# from rest_framework_cache.registry import cache_registry
from .models import Task
class TaskSerializer(serializers.ModelSerializer):
    """Read serializer for Task; ``owner`` is rendered via str(owner)."""
    # Read-only by nature: StringRelatedField serializes the related
    # object using its __str__.
    owner = serializers.StringRelatedField()
    class Meta:
        model = Task
        fields = ('id', 'name', 'content', 'finished', 'start', 'owner', )
        read_only_fields = ('owner', 'id',)
# cache_registry.register(TaskSerializer)
class TaskCreateSerializer(serializers.ModelSerializer):
    """Create serializer for Task that rejects a start datetime in the past."""
    # Rendered read-only as str(owner); the owner is set by the view,
    # not by the client payload.
    owner = serializers.StringRelatedField()

    class Meta:
        model = Task
        fields = ('name', 'content', 'start', 'owner', 'finished')
        read_only_fields = ('owner', 'finished',)

    def validate(self, attrs):
        """Reject tasks whose start lies in the past.

        Robustness fix: use ``attrs.get`` so a missing ``start`` no longer
        raises KeyError (an unhandled 500) inside validation.
        """
        start = attrs.get('start')
        # NOTE(review): the comparison strips tzinfo and compares against
        # naive local time -- presumably start is treated as local time;
        # confirm against the project's USE_TZ setting.
        if start is not None and start.replace(tzinfo=None) < datetime.now():
            raise serializers.ValidationError('Bad date of start task', 400)
        return attrs
|
# -*- coding: utf-8 -*-
"""
Created on Sat Jul 15 11:26:51 2017
@author: Administrator
"""
# Import modules
from OCC.gp import *
from OCC.GC import *
from OCC.BRep import *
from OCC.BRepAlgoAPI import *
from OCC.BRepBuilderAPI import *
from OCC.BRepFilletAPI import *
from OCC.BRepPrimAPI import *
from OCC.TopAbs import *
from OCC.TopoDS import *
from OCC.TopExp import *
from OCC.TopLoc import *
# Dimensions of the three prisms (Parts A, B and C) fused into the body.
aLength, aWidth, aHeight = 200., 15., 115.   # Part A
bLength, bWidth, bHeight = 200., 50., 25.    # Part B
cLength, cWidth, cHeight = 200., 10., 115.   # Part C
# Create Part A: build half of the profile outline, mirror it about the
# X axis, extrude the closed profile into a prism, then shift it into place.
aPnt1 = gp_Pnt(-aWidth / 2.0, 0, 0)
aPnt2 = gp_Pnt(-aWidth / 2.0, -aLength / 2.0, 0)
aPnt3 = gp_Pnt(aWidth / 2.0, -aLength / 2.0, 0)
aPnt4 = gp_Pnt(aWidth / 2.0, 0, 0)
# Three straight segments forming the lower half of the outline.
aSegment1 = GC_MakeSegment(aPnt1, aPnt2)
aSegment2 = GC_MakeSegment(aPnt2, aPnt3)
aSegment3 = GC_MakeSegment(aPnt3, aPnt4)
aEdge1 = BRepBuilderAPI_MakeEdge(aSegment1.Value()).Edge()
aEdge2 = BRepBuilderAPI_MakeEdge(aSegment2.Value()).Edge()
aEdge3 = BRepBuilderAPI_MakeEdge(aSegment3.Value()).Edge()
aWire = BRepBuilderAPI_MakeWire(aEdge1, aEdge2, aEdge3).Wire()
# Mirror the half-wire about the X axis to complete the closed outline.
aOrigin = gp_Pnt(0, 0, 0)
xDir = gp_Dir(1, 0, 0)
xAxis = gp_Ax1(aOrigin, xDir)
aTrsf = gp_Trsf()
aTrsf.SetMirror(xAxis)
aMirroredShape = BRepBuilderAPI_Transform(aWire, aTrsf).Shape()
aMirroredWire = topods.Wire(aMirroredShape)
akWire = BRepBuilderAPI_MakeWire()
akWire.Add(aWire)
akWire.Add(aMirroredWire)
aWireProfile = akWire.Wire()
# Face from the closed profile, extruded along +Z by aHeight.
aFaceProfile = BRepBuilderAPI_MakeFace(aWireProfile).Face()
aPrismVec = gp_Vec(0, 0, aHeight)
aBody = BRepPrimAPI_MakePrism(aFaceProfile, aPrismVec).Shape()
# Translate Part A into its assembly position.
aTrsf = gp_Trsf()
aTrsf.SetTranslationPart(gp_Vec(-bWidth+aWidth/2.0+cWidth/2.0,0,25))
alocation = TopLoc_Location(aTrsf)
aBody.Location (alocation)
# Create Part B: same half-profile / mirror / extrude pattern as Part A.
bPnt1 = gp_Pnt(-bWidth / 2.0, 0, 0)
bPnt2 = gp_Pnt(-bWidth / 2.0, -bLength / 2.0, 0)
bPnt3 = gp_Pnt(bWidth / 2.0, -bLength / 2.0, 0)
bPnt4 = gp_Pnt(bWidth / 2.0, 0, 0)
bSegment1 = GC_MakeSegment(bPnt1, bPnt2)
bSegment2 = GC_MakeSegment(bPnt2, bPnt3)
bSegment3 = GC_MakeSegment(bPnt3, bPnt4)
bEdge1 = BRepBuilderAPI_MakeEdge(bSegment1.Value()).Edge()
bEdge2 = BRepBuilderAPI_MakeEdge(bSegment2.Value()).Edge()
bEdge3 = BRepBuilderAPI_MakeEdge(bSegment3.Value()).Edge()
bWire = BRepBuilderAPI_MakeWire(bEdge1, bEdge2, bEdge3).Wire()
# Mirror about the X axis to close the outline.
bOrigin = gp_Pnt(0, 0, 0)
bxDir = gp_Dir(1, 0, 0)
bAxis = gp_Ax1(bOrigin, bxDir)
bTrsf = gp_Trsf()
bTrsf.SetMirror(bAxis)
bMirroredShape = BRepBuilderAPI_Transform(bWire, bTrsf).Shape()
bMirroredWire = topods.Wire(bMirroredShape)
bkWire = BRepBuilderAPI_MakeWire()
bkWire.Add(bWire)
bkWire.Add(bMirroredWire)
bWireProfile = bkWire.Wire()
# Face from the closed profile, extruded along +Z by bHeight.
bFaceProfile = BRepBuilderAPI_MakeFace(bWireProfile).Face()
bPrismVec = gp_Vec(0, 0, bHeight)
bBody = BRepPrimAPI_MakePrism(bFaceProfile, bPrismVec).Shape()
# Translate Part B into its assembly position.
bTrsf = gp_Trsf()
bTrsf.SetTranslationPart(gp_Vec(-bWidth/2.0+cWidth/2.0,0,0))
blocation = TopLoc_Location(bTrsf)
bBody.Location (blocation)
# Create Part C: same half-profile / mirror / extrude pattern as A and B,
# then cut a rectangular window out of the resulting plate.
cPnt1 = gp_Pnt(-cWidth / 2.0, 0, 0)
cPnt2 = gp_Pnt(-cWidth / 2.0, -cLength / 2.0, 0)
cPnt3 = gp_Pnt(cWidth / 2.0, -cLength / 2.0, 0)
cPnt4 = gp_Pnt(cWidth / 2.0, 0, 0)
cSegment1 = GC_MakeSegment(cPnt1, cPnt2)
cSegment2 = GC_MakeSegment(cPnt2, cPnt3)
cSegment3 = GC_MakeSegment(cPnt3, cPnt4)
cEdge1 = BRepBuilderAPI_MakeEdge(cSegment1.Value()).Edge()
cEdge2 = BRepBuilderAPI_MakeEdge(cSegment2.Value()).Edge()
cEdge3 = BRepBuilderAPI_MakeEdge(cSegment3.Value()).Edge()
cWire = BRepBuilderAPI_MakeWire(cEdge1, cEdge2, cEdge3).Wire()
# Mirror about the X axis to close the outline.
cOrigin = gp_Pnt(0, 0, 0)
cxDir = gp_Dir(1, 0, 0)
cAxis = gp_Ax1(cOrigin, cxDir)
cTrsf = gp_Trsf()
cTrsf.SetMirror(cAxis)
cMirroredShape = BRepBuilderAPI_Transform(cWire, cTrsf).Shape()
cMirroredWire = topods.Wire(cMirroredShape)
ckWire = BRepBuilderAPI_MakeWire()
ckWire.Add(cWire)
ckWire.Add(cMirroredWire)
cWireProfile = ckWire.Wire()
cFaceProfile = BRepBuilderAPI_MakeFace(cWireProfile).Face()
cPrismVec = gp_Vec(0, 0, cHeight)
cBody = BRepPrimAPI_MakePrism(cFaceProfile, cPrismVec).Shape()
# Rectangular window: position a 30x190x100 box and subtract it.
cCutBody = BRepPrimAPI_MakeBox(30,190,100).Shape()
cCutTrsf = gp_Trsf()
cCutTrsf.SetTranslationPart(gp_Vec(-15,-95,5))
clocation = TopLoc_Location(cCutTrsf)
cCutBody.Location (clocation)
cBody = BRepAlgoAPI_Cut(cBody, cCutBody).Shape()
# Translate Part C up by 25 along Z into its assembly position.
cTrsf = gp_Trsf()
cTrsf.SetTranslationPart(gp_Vec(0,0,25))
clocation = TopLoc_Location(cTrsf)
cBody.Location (clocation)
# Combine Parts A, B and C into one solid.
myBody = BRepAlgoAPI_Fuse(aBody,bBody).Shape()
myBody = BRepAlgoAPI_Fuse(myBody,cBody).Shape()
# Hollow the body: build three slightly inset inner boxes, fuse them,
# and subtract the result from the combined solid.
transform = gp_Trsf()
aCut = BRepPrimAPI_MakeBox(aWidth-4.0, aLength-4.0, (aHeight+25.)-1.0).Shape()
transform.SetTranslationPart(gp_Vec(-aWidth*0.5-bWidth+aWidth/2.0+cWidth/2.0+2.0, -aLength/2.0+2.0, -1.0))
alocation = TopLoc_Location(transform)
aCut.Location (alocation)
transform = gp_Trsf()
bCut = BRepPrimAPI_MakeBox(bWidth-4.0, bLength-4.0, bHeight-1.0).Shape()
transform.SetTranslationPart(gp_Vec(-bWidth+6.5,-bLength*0.5+2.0,-1.0))
blocation = TopLoc_Location(transform)
bCut.Location (blocation)
transform = gp_Trsf()
cCut = BRepPrimAPI_MakeBox(12.0, 50.0, 10.0).Shape()
transform.SetTranslationPart(gp_Vec(-bWidth+4.5,-bLength*0.5+10.0,5.0))
clocation = TopLoc_Location(transform)
cCut.Location (clocation)
myCut = BRepAlgoAPI_Fuse(aCut, bCut).Shape()
myCut = BRepAlgoAPI_Fuse(myCut, cCut).Shape()
myBody = BRepAlgoAPI_Cut(myBody, myCut).Shape()
# Display the result in the OCC viewer (zeeko wrapper around pythonOCC).
from zeeko.occ.opencascade import *
occ, start_display = occviewer()
occ.Draw(myBody)
occ.FitAll()
# Export the final solid to a STEP file.
# NOTE(review): '\P' and '\m' are not escape sequences so the path works,
# but a raw string or forward slashes would be safer on Python 3.
occexport('C:\Python27\myBody.step', [myBody])
|
# -*- coding: utf-8 -*-
"""
Created on Thu Aug 22 10:50:05 2019
Example for reading fire output
@author: Mika Peace
"""
import matplotlib
matplotlib.use('Agg')
import matplotlib as mpl
import time
from datetime import datetime, timedelta
import numpy as np
from matplotlib.backends.backend_pdf import PdfPages
import matplotlib.pyplot as plt
from netCDF4 import Dataset
from scipy import interpolate
from numpy import ma
with PdfPages('multipage_pdf.pdf') as pdf:
    # NOTE(review): `pdf` is never written to below -- every frame is saved
    # to its own PNG via plt.savefig, so the PdfPages wrapper looks unused.
    datadir = '/short/en0/hxy548/tmp/waroona/0p3/'
    # Timestep indices to plot: every 5th frame of the first 780.
    txr=range(0,780,5)
    # txr=range(1000,1440,5)
    for tx in txr:
        t0=0
        # Terrain height (static field; re-read on every iteration).
        ncfile = Dataset(datadir+'f1_terrain_height.CSIRO_24h.nc', 'r')
        lon = ncfile.variables['lon'][:]
        lat = ncfile.variables['lat'][:]
        terrain = ncfile.variables['terrain_height'][t0,:,:].transpose()
        ncfile.close()
        # Sensible heat flux at timestep tx.
        ncfile = Dataset(datadir+'sensible_heat.CSIRO_24h.20160107T0300Z.nc', 'r')
        sheat = ncfile.variables['SHEAT_2'][tx,:,:].transpose()
        # NOTE(review): rebinding `time` shadows the `time` module imported
        # at the top of the file (the module is not used after this point).
        time = ncfile.variables['time'][:]
        ncfile.close()
        # Fire spread rate (read but not plotted below).
        ncfile = Dataset(datadir+'fire_speed.CSIRO_24h.20160107T0300Z.nc', 'r')
        firespeed = ncfile.variables['fire_speed'][tx,:,:].transpose()
        time = ncfile.variables['time'][:]
        ncfile.close()
        # Fire front field; its zero contour marks the fire perimeter.
        ncfile = Dataset(datadir+'firefront.CSIRO_24h.20160107T0300Z.nc', 'r')
        ff = ncfile.variables['firefront'][tx,:,:].transpose()
        # Scalar time offset of this timestep (overwrites the arrays above).
        time = ncfile.variables['time'][tx]
        ncfile.close()
        # 10 m wind components; u1/v1 keep every 8th point (unused below).
        ncfile = Dataset(datadir+'10m_uwind.CSIRO_24h.20160107T0300Z.nc', 'r')
        u = ncfile.variables['UWIND_2'][tx,:,:].transpose()
        u1 = u[::8,::8]
        ncfile.close()
        ncfile = Dataset(datadir+'10m_vwind.CSIRO_24h.20160107T0300Z.nc', 'r')
        v = ncfile.variables['VWIND_2'][tx,:,:].transpose()
        v1 = v[::8,::8]
        ncfile.close()
        wspd=np.hypot(u,v)#wind speed
        # Convert u/v components to meteorological wind direction (degrees).
        wdir=(270-(np.arctan2(v,u)*180/np.pi))%360
        # NOTE: the zero-prefixed int literals below are Python 2 only syntax.
        ts=datetime(2016,01,07,03,00,00)#start time from netcdf file
        mytime=(ts+timedelta(seconds=time))#start time plus seconds from the .nc file
        print(mytime)
        lon1 = lon[::8]
        lat1 = lat[::8]
        lons,lats = np.meshgrid(lon,lat)
        lons1,lats1 = np.meshgrid(lon1,lat1)
        #fig = plt.figure()
        plt.figure()
        plt.clf()
        # Panel 1: topography with the fire perimeter overlaid in red.
        plt.subplot(221,aspect=1.0)
        clevs= np.linspace(-150,550,29,endpoint=True)#linspace seems to work better than arange in colorbar
        cmaptr=plt.cm.get_cmap("terrain")
        plt.contourf(lons,lats,terrain,clevs,cmap=cmaptr)
        cb=plt.colorbar(ticks=clevs, fraction=0.045, pad=0.01)
        cb.set_label(' ', size=20)
        cb.ax.tick_params(labelsize=6)
        plt.contour(lons,lats,ff,np.array([0.0]), colors='red')
        ## Landmark annotations.
        plt.annotate('Fire ignition', xy=(116.2,-32.9), xytext=(116.10,-32.92), fontsize=6)
        plt.plot([116.17],[-32.89], 'ro', ms=3)
        plt.annotate('Waroona', xy=(116.2,-32.8), xytext=(115.9,-32.82), fontsize=6)
        plt.plot([115.93],[-32.84], 'ko', ms=3)
        plt.annotate('Yarloop', xy=(116.1,-33.1), xytext=(115.85,-32.99), fontsize=6)
        plt.plot([115.90],[-32.96], 'ko', ms=3)
        ## Axis limits and tick increments.
        plt.xlim([115.6,116.2])# was [115.8,116.2]
        plt.ylim([-33.1,-32.7])# was [-33.1,32.7]
        lonlab = (np.arange(115.6,116.2,0.1)) #was [115.8,116.2,0.1]
        latlab = (np.arange(-33.1,-32.7,0.1)) #was [33.1-32.7,0.1]
        plt.xticks(lonlab,fontsize=6)
        plt.yticks(latlab,fontsize=6)
        plt.ticklabel_format(useOffset=False)#keeps the tick labels from switching to scientific format
        plt.title('Topography and fire perimeter', fontsize=8)
        # Panel 2: sensible heat flux, log10-scaled (+1 avoids log10(0)).
        plt.subplot(222,aspect=1.0)
        clevs= np.linspace(0,5.5,12, endpoint=True)#linspace seems to work better than arange in colorbar
        cmap2=plt.cm.get_cmap("Oranges")
        plt.contourf(lons,lats,np.log10(sheat+1),clevs,cmap=cmap2)
        cb=plt.colorbar(ticks=clevs, fraction=0.045, pad=0.01)
        cb.set_label(' ', size=20)
        cb.ax.tick_params(labelsize=6)
        cmap2=plt.cm.get_cmap("YlOrRd")
        ## Landmark annotations.
        plt.annotate('Fire ignition', xy=(116.2,-32.9), xytext=(116.10,-32.92), fontsize=6)
        plt.plot([116.17],[-32.89], 'ro', ms=3)
        plt.annotate('Waroona', xy=(116.2,-32.8), xytext=(115.9,-32.82), fontsize=6)
        plt.plot([115.93],[-32.84], 'ko', ms=3)
        plt.annotate('Yarloop', xy=(116.1,-33.1), xytext=(115.85,-32.99), fontsize=6)
        plt.plot([115.90],[-32.96], 'ko', ms=3)
        ## Axis limits and tick increments.
        plt.xlim([115.6,116.2])# was [115.8,116.2]
        plt.ylim([-33.1,-32.7])# was [-33.1,32.7]
        lonlab = (np.arange(115.6,116.2,0.1)) #was [115.8,116.2,0.1]
        latlab = (np.arange(-33.1,-32.7,0.1)) #was [33.1-32.7,0.1]
        plt.xticks(lonlab,fontsize=6)
        plt.yticks(latlab,fontsize=6)
        plt.ticklabel_format(useOffset=False)#keeps the tick labels from switching to scientific format
        plt.title('log(Sens heat+1)', fontsize=8)
        # Panel 3: 10 m wind speed.
        plt.subplot(223,aspect=1.0)
        #contour levels for colorbar
        clevs= np.linspace(0,20,11, endpoint=True)#linspace seems to work better than arange in colorbar
        cmap1=plt.cm.get_cmap("PuBuGn") # was "summer"
        plt.contourf(lons,lats,wspd,clevs,cmap=cmap1)
        cb=plt.colorbar(ticks=clevs, fraction=0.045, pad=0.01)
        cb.set_label(' ', size=20)
        cb.ax.tick_params(labelsize=6)
        ## Landmark annotations.
        plt.annotate('Fire ignition', xy=(116.2,-32.9), xytext=(116.10,-32.92), fontsize=6)
        plt.plot([116.17],[-32.89], 'ro', ms=3)
        plt.annotate('Waroona', xy=(116.2,-32.8), xytext=(115.9,-32.82), fontsize=6)
        plt.plot([115.93],[-32.84], 'ko', ms=3)
        plt.annotate('Yarloop', xy=(116.1,-33.1), xytext=(115.85,-32.99), fontsize=6)
        plt.plot([115.90],[-32.96], 'ko', ms=3)
        ## Axis limits and tick increments.
        plt.xlim([115.6,116.2])# was [115.8,116.2]
        plt.ylim([-33.1,-32.7])# was [-33.1,32.7]
        lonlab = (np.arange(115.6,116.2,0.1)) #was [115.8,116.2,0.1]
        latlab = (np.arange(-33.1,-32.7,0.1)) #was [33.1-32.7,0.1]
        plt.xticks(lonlab,fontsize=6)
        plt.yticks(latlab,fontsize=6)
        plt.ticklabel_format(useOffset=False)#keeps the tick labels from switching to scientific format
        plt.title('Wind speed (m/s)', fontsize=8)
        # Panel 4: wind direction with the fire perimeter overlaid.
        plt.subplot(224,aspect=1.0)
        clevs= np.linspace(0,360,25, endpoint=True)#linspace seems to work better than arange in colorbar
        cmap2=plt.cm.get_cmap("rainbow")
        plt.contourf(lons,lats,wdir,clevs,cmap=cmap2)
        cb=plt.colorbar(ticks=clevs, fraction=0.045, pad=0.01)
        cb.set_label(' ', size=20)
        cb.ax.tick_params(labelsize=6)
        plt.contour(lons,lats,ff,np.array([0.0]), colors='red')
        ## Landmark annotations.
        plt.annotate('Fire ignition', xy=(116.2,-32.9), xytext=(116.10,-32.92), fontsize=6)
        plt.plot([116.17],[-32.89], 'ro', ms=3)
        plt.annotate('Waroona', xy=(116.2,-32.8), xytext=(115.9,-32.82), fontsize=6)
        plt.plot([115.93],[-32.84], 'ko', ms=3)
        plt.annotate('Yarloop', xy=(116.1,-33.1), xytext=(115.85,-32.99), fontsize=6)
        plt.plot([115.90],[-32.96], 'ko', ms=3)
        ## Axis limits and tick increments.
        plt.xlim([115.6,116.2])# was [115.8,116.2]
        plt.ylim([-33.1,-32.7])# was [-33.1,32.7]
        lonlab = (np.arange(115.6,116.2,0.1)) #was [115.8,116.2,0.1]
        latlab = (np.arange(-33.1,-32.7,0.1)) #was [33.1-32.7,0.1]
        plt.xticks(lonlab,fontsize=6)
        plt.yticks(latlab,fontsize=6)
        plt.ticklabel_format(useOffset=False)#keeps the tick labels from switching to scientific format
        plt.title('Wind direction (degN)', fontsize=8)
        # One PNG per timestep, titled and named by its valid time.
        plt.suptitle(str(mytime))
        # plt.savefig(str(mytime)+'wind.png')
        plt.savefig(str(mytime)+'wind.png', dpi=600)
        plt.close()
        plt.cla()
|
from django import forms
from django.forms import PasswordInput
from .models import *
class UserForm(forms.ModelForm):
    """ModelForm for userInfo exposing only the email and password fields."""

    class Meta:
        model = userInfo
        fields = ['email', 'password']
        widgets = {
            'email': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Email'}),
            'password': forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}),
        }
        labels = {
            'email': 'Email',
            'password': 'Password',
        }
class AppForm(forms.ModelForm):
    """ModelForm for addapps: visible appname/webaddress plus hidden ids."""

    class Meta:
        model = addapps
        fields = ['app_id', 'appname', 'webaddress', 'user_id']
        widgets = {
            'app_id': forms.HiddenInput(),
            'appname': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'AppName'}),
            'webaddress': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'WebAddress'}),
            'user_id': forms.HiddenInput(),
        }
        labels = {
            'appname': 'AppName',
            'webaddress': 'WebAddress',
            'user_id': 'UserId',
        }
class CompanyForm(forms.ModelForm):
    """ModelForm for company: a hidden id plus email and password fields."""

    class Meta:
        model = company
        fields = ['company_id', 'email', 'password']
        widgets = {
            'company_id': forms.HiddenInput(),
            'email': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Email'}),
            'password': forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}),
        }
        labels = {
            'email': 'Email',
            'password': 'Password',
        }
class NewAppForm(forms.ModelForm):
    """ModelForm for APP: visible appname/webaddress plus hidden ids.

    Bug fix: the labels dict referenced 'user_id', which is not a field on
    this form (the hidden id field is 'company_id'), so that label was
    silently ignored.
    """

    class Meta:
        model = APP
        fields = ['app_id', 'appname', 'webaddress', 'company_id']
        widgets = {
            'app_id': forms.HiddenInput(),
            'appname': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'AppName'}),
            'webaddress': forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'WebAddress'}),
            'company_id': forms.HiddenInput(),
        }
        labels = {
            'appname': 'AppName',
            'webaddress': 'WebAddress',
            'company_id': 'CompanyId',
        }
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.