text stringlengths 38 1.54M |
|---|
# -*- coding: utf-8 -*-
"""Regenerate the Homebrew formula for *poseur*.

Installs the latest poseur from PyPI to discover its version, downloads the
matching GitHub tarball to compute its sha256, renders the Ruby formula (with
``poet``-generated resource stanzas for the dependencies) and writes it into
the tap's Formula directory.  Finally uninstalls poseur again.
"""
import hashlib
import os
import re
import subprocess # nosec: B404
from typing import TYPE_CHECKING
import requests
# Historical approach: read __version__ straight from a local checkout.
# with open(os.path.expanduser('~/GitHub/poseur/poseur.py'), 'r') as file:
#     for line in file:
#         match = re.match(r"^__version__ = '(.*)'", line)
#         if match is None:
#             continue
#         VERSION = match.groups()[0]
#         break
# # print(VERSION)
if TYPE_CHECKING:
    VERSION: str
# Install poseur, then scrape its version number out of `pip freeze`.
subprocess.check_call(['pip', 'install', 'poseur']) # nosec: B603 B607
for line in subprocess.check_output(['pip', 'freeze']).decode().splitlines(): # nosec: B603 B607
    match = re.match(r"poseur==(.*)", line, re.IGNORECASE)
    if match is not None:
        VERSION = match.groups()[0]
# Source tarball for the discovered version, and its checksum for the formula.
POSEUR_URL = f'https://github.com/pybpc/poseur/archive/v{VERSION}.tar.gz'
POSEUR_SHA = hashlib.sha256(requests.get(POSEUR_URL).content).hexdigest()
# print(POSEUR_URL)
# print(POSEUR_SHA)
# `poet` emits ready-made Homebrew `resource` stanzas for each dependency.
PARSO = subprocess.check_output(['poet', 'parso']).decode().strip() # nosec: B603 B607
TBTRIM = subprocess.check_output(['poet', 'tbtrim']).decode().strip() # nosec: B603 B607
BPC_UTILS = subprocess.check_output(['poet', 'bpc-utils']).decode().strip() # nosec: B603 B607
# print(PARSO)
# print(TBTRIM)
# PyPI post-releases (X.Y.Z.postN) are mapped to Homebrew revision letters.
match = re.match(r'([0-9.]+)\.post([0-9])', VERSION)
if match is None:
    POSEUR = (f'url "{POSEUR_URL}"\n'
              f' sha256 "{POSEUR_SHA}"')
else:
    version, subversion = match.groups()
    revision = chr(96 + int(subversion)) # ord('a') -> 97
    POSEUR = (f'url "{POSEUR_URL}"\n'
              f' version "{version}{revision}"\n'
              f' sha256 "{POSEUR_SHA}"')
# Rendered Ruby formula.  The `test do` block compiles a 3.8 positional-only
# sample and checks poseur's decorator-based backport output.
FORMULA = f'''\
class Poseur < Formula
  include Language::Python::Virtualenv

  desc "Backport compiler for Python 3.8 positional-only parameters syntax"
  homepage "https://github.com/pybpc/poseur#poseur"
  {POSEUR}
  head "https://github.com/pybpc/poseur.git", branch: "master"

  depends_on "homebrew/core/python@3.9"

  {BPC_UTILS}
  {PARSO}
  {TBTRIM}

  def install
    rm "setup.py"
    cp "scripts/setup.pypi.py", "setup.py"
    virtualenv_install_with_resources
    man1.install "share/poseur.1"
    bash_completion.install "share/poseur.bash-completion"
  end

  test do
    (testpath/"test.py").write <<~EOS
      def func_g(p_a, p_b=lambda p_a=1, /: p_a):
          pass
    EOS
    std_output = <<~EOS
      def _poseur_decorator(*poseur):
          """Positional-only arguments runtime checker.

          Args:
              *poseur (List[str]): Name of positional-only arguments.

          See Also:
              https://mail.python.org/pipermail/python-ideas/2017-February/044888.html

          """
          import functools
          def caller(func):
              @functools.wraps(func)
              def wrapper(*args, **kwargs):
                  for poseur_args in poseur:
                      if poseur_args in kwargs:
                          raise TypeError('%s() got an unexpected keyword argument %r' % (func.__name__, poseur_args))
                  return func(*args, **kwargs)
              return wrapper
          return caller

      def func_g(p_a, p_b=_poseur_decorator('p_a')(lambda p_a=1: p_a)):
          pass
    EOS
    system bin/"poseur", "--no-archive", "test.py"
    assert_match std_output, shell_output("cat test.py")
  end
end
'''
# Write the rendered formula into the tap checkout: either the tap found via
# `brew --repository` (when run as setup-formula.py) or a path relative to
# this script named after the script itself.
if os.path.basename(__file__) == 'setup-formula.py':
    repo_root = subprocess.check_output(['brew', '--repository', 'jarryshaw/tap'], # nosec: B603 B607
                                        encoding='utf-8').strip()
    formula = os.path.join(repo_root, 'Formula', 'poseur.rb')
else:
    formula = os.path.join(os.path.dirname(os.path.realpath(__file__)), '..', '..', 'Formula',
                           f'{os.path.splitext(os.path.basename(__file__))[0]}.rb')
with open(formula, 'w', encoding='utf-8') as file:
    file.write(FORMULA)
# Clean up: remove poseur (and its orphaned dependencies) again.
subprocess.check_call(['pip-autoremove', '-y', 'poseur']) # nosec: B603 B607
|
"""15-1. Cubes: A number raised to the third power is a cube. Plot the first five
cubic numbers, and then plot the first 5000 cubic numbers.
15-2. Colored Cubes: Apply a colormap to your cubes plot."""
import matplotlib.pyplot as plt
x_values = range(1, 5001)
y_values = [x**3 for x in x_values]
plt.style.use('seaborn')
fig, ax = plt.subplots()
ax.scatter(x_values, y_values, c=y_values, cmap=plt.cm.Blues, s=3)
# Set title and labels
ax.set_title('Cubic Numbers', fontsize=14)
ax.set_xlabel('Values', fontsize=14)
ax.set_ylabel('Cubic Values', fontsize=14)
# Set range for each axis
ax.axis([0, 5100, 0, 140_000_000_000])
plt.show()
|
#!/usr/bin/python3
import fabric
def do_pack():
    """Pack the local ``web_static`` directory into a timestamped .tgz archive.

    The archive is stored under ``./versions`` (created if missing; a plain
    file shadowing the ``versions`` name is replaced by a directory).

    Returns:
        str: absolute path of the created archive, or None if it was not
        created.
    """
    from fabric.operations import local
    import os
    import datetime

    # Ensure ./versions exists and is a directory.
    if os.path.isfile('./versions'):
        # A regular file is shadowing the target directory name.
        local("rm versions && mkdir versions")
    elif not os.path.exists('./versions'):
        local("mkdir versions")

    # BUG FIX: the original format string was "web_static_%Y%-m%d%H%M%S.tgz";
    # "%-" is not a valid strftime directive and corrupted the file name.
    file_name = datetime.datetime.now().strftime("web_static_%Y%m%d%H%M%S.tgz")
    local("tar -cavf " + file_name + " web_static")
    local("mv " + file_name + " versions")

    archive_path = './versions/' + file_name
    if os.path.exists(archive_path):
        return os.path.abspath(archive_path)
    return None
|
from clean_read_data import read_data
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pprint import pprint
import os
## Imports for Tweet Text Cleaning Pipeline
import preprocessor as p
import re
import nltk
from nltk.stem.wordnet import WordNetLemmatizer
from nltk.stem.snowball import SnowballStemmer
import string
### WordCloud
from wordcloud import WordCloud
# LDA Implementation by Gensim
import gensim
import gensim.corpora as corpora
from gensim.models import CoherenceModel
# LDA visualizations
import pyLDAvis.gensim
import warnings
warnings.filterwarnings("ignore",category=DeprecationWarning)
####################################################
#####DATA CLEANING AND HANDLING#####################
####################################################
############################################################
############### LDA MODEL ##################################
############################################################
def lda_model(bow):
    '''
    Train a 7-topic LDA model on a bag-of-words corpus and report its quality.

    INPUT:
        LST: bag of words (output of read_data.py) — list of token lists.
    OUTPUT:
        The trained gensim LdaModel. (BUG FIX: the original ended with
        ``return (pprint(...)),`` which returned the useless tuple ``(None,)``;
        the sole caller ignores the return value, so returning the model is
        backward-compatible and strictly more useful.)
    '''
    # Map each token to an integer id.
    # <gensim.corpora.dictionary.Dictionary at 0x1a550e2518>
    id2word = corpora.Dictionary(bow)
    # Term-document frequency: each doc becomes [(token_id, count), ...].
    corpus = [id2word.doc2bow(text) for text in bow]
    # Renamed from `lda_model` — the original local shadowed the function name.
    model = gensim.models.ldamodel.LdaModel(corpus=corpus,
                                            id2word=id2word,
                                            num_topics=7,
                                            random_state=100,
                                            update_every=1,
                                            chunksize=100,
                                            passes=10,
                                            alpha='auto',
                                            per_word_topics=True)
    ### Computing Coherence Score for the Model ###
    coherence_model_lda = CoherenceModel(model=model, texts=bow,
                                         dictionary=id2word, coherence='c_v')
    coherence_lda = coherence_model_lda.get_coherence()
    print('\nPerplexity: ', model.log_perplexity(corpus)) # a measure of how good the model is. lower the better.
    print('\nCoherence Score: ', coherence_lda)
    pprint(model.print_topics())
    return model
##################################################################################################
############## Computing Coherence Values to determine optimal numebr of topics ##################
##################################################################################################
def compute_coherence_values(dictionary, corpus, texts, limit = 10, start=2, step=2):
    """
    Compute c_v coherence for various number of topics
    Parameters:
    ----------
    dictionary : Gensim dictionary
    corpus : Gensim corpus
    texts : List of input texts
    limit : Max num of topics (exclusive upper bound of the range)
    start, step : range of topic counts to try
    Returns:
    -------
    model_list : List of LDA topic models
    coherence_values : Coherence values corresponding to the LDA model with respective number of topics
    """
    coherence_values = []
    model_list = []
    for num_topics in range(start, limit, step):
        # BUG FIX: the original passed ``id2word=id2word`` but no ``id2word``
        # exists in this scope (NameError at runtime) — the ``dictionary``
        # parameter is what was meant.
        model = gensim.models.ldamodel.LdaModel(corpus=corpus,
                                                id2word=dictionary,
                                                num_topics=num_topics,
                                                random_state=100,
                                                update_every=1,
                                                chunksize=100,
                                                passes=10,
                                                alpha='auto',
                                                per_word_topics=True)
        model_list.append(model)
        coherencemodel = CoherenceModel(model=model, texts=texts, dictionary=dictionary, coherence='c_v')
        coherence_values.append(coherencemodel.get_coherence())
    return model_list, coherence_values
def plot_coherent_scores(bow, limit =30, start = 1, step = 1, sv = False ):
    """Plot coherence score against number of topics for the given bag of words.

    BUG FIX: the original plotted ``x = range(start, limit, step)`` against
    coherence values computed with hard-coded ``start=2, step=3``, so the two
    sequences had different lengths and matplotlib raised a dimension
    mismatch. The same start/step now drive both.

    Set ``sv=True`` to also save the figure as coherence_scores.png.
    """
    id2word = corpora.Dictionary(bow)
    # Create Corpus
    texts = bow
    # Term Document Frequency
    corpus = [id2word.doc2bow(text) for text in texts]
    x = range(start, limit, step)
    plt.figure(figsize=(10,4))
    sns.set_style("darkgrid")
    coherence_values = compute_coherence_values(id2word, corpus, texts, limit, start=start, step=step)[1]
    plt.plot(x, coherence_values, c = 'limegreen')
    plt.xlabel("Num Topics", fontsize = 16)
    plt.ylabel("Coherence score", fontsize = 16)
    plt.xticks((5, 7, 10, 15, 20, 25, 30), fontsize = 15)
    plt.yticks(fontsize = 15)
    plt.grid(c = 'lightcyan')
    # Vertical marker at the 7-topic choice used by lda_model().
    plt.axvline(x=7, c = 'darkslateblue', linewidth = 3.0)
    plt.title('Coherence Scores to Determine Optimal No. of Topics ', fontsize = 18)
    if sv == True:
        plt.savefig('coherence_scores.png')
    plt.show()
def plot_word_cloud(cleaned_docs, sv = False):
    """Render a word cloud from every cleaned document; optionally save a PNG."""
    # A single joined string feeds the cloud; frequencies are computed inside.
    all_text = ' '.join([text for text in cleaned_docs])
    cloud = WordCloud(width=800, height=500, random_state=21, max_font_size=200, stopwords = ['hey', 'lol', '[][]', 'cc', 'anyon', 'say', 'etc']).generate(all_text)
    plt.figure(figsize=(16, 16))
    plt.imshow(cloud, interpolation="bilinear", cmap = 'inferno')
    plt.axis('off')
    plt.title('Unfiltered Words')
    if sv == True:
        plt.savefig('word_cloud.png')
    plt.show()
######################################################################
############### CLEAN TWEET TEXT PIPELINE ############################
######################################################################
def clean_tweet_text(text_column):
    '''
    Clean raw tweet text into whitespace-joined documents and per-tweet
    bags of words.

    INPUT:
        Pandas dataframe column w/tweets text:
    OUTPUT:
        Cleaned Docs, bag of words (bow)
        symbol_set charachters removed,
        specified stop words removed
        punctuation removed
        words stemmed and lemmatized
        non-english words removed
        words <3 chars removed
    '''
    # Punctuation to strip.  '@' and '#' are kept so mentions/hashtags survive
    # until the tweet-preprocessor pass; flag/emoji characters are added.
    punct = set(string.punctuation)
    punct.remove('@')
    punct.remove('#')
    punct.add('🇺🇸')
    punct.add('🇯🇵')
    punct.add('🇰🇷')
    punct.add('🇩🇪')
    punct.add('🇨🇳')
    punct.add('🇫🇷')
    punct.add('🇪🇸')
    punct.add('🇮🇹')
    punct.add('🇷🇺')
    punct.add('🇬🇧')
    punct.add('🤗')
    ### English stop words from NLTK
    stop_words = set(nltk.corpus.stopwords.words('english'))
    ### Custom stop words (twitter noise, contractions stripped of apostrophes).
    added_stop_words = {'rt', 'via', 'new', 'time', 'today', 'one', 'say', 'get', 'go', 'im', 'know', 'need', 'made', 'https', 'http', 'that', 'would', 'take', 'your', 'two', 'yes', 'back', 'look', 'see', 'amp', 'tell', 'give', 'httpst', 'htt', 'use', 'dont', 'thing', 'man', 'thank', 'lol', 'cc', 'didnt', 'hey', 'like', 'ask', 'let', 'even', 'also', 'ok', 'etc', 'thank', 'ive', 'hi', 'wasnt'}
    ### stemmer and lemmer
    lemmer = WordNetLemmatizer()
    stemmer = SnowballStemmer('english')
    # converting from pd to list
    corpus = text_column.values.tolist()
    #Removing all HTTPs
    docs_no_http = [ re.sub(r'https?:\/\/.*\/\w*', '', doc) for doc in corpus ]
    #First ---> tokenize docs
    tokenized_docs = [doc.split() for doc in docs_no_http]
    # Lower case words in doc
    tokenized_docs_lowered = [[word.lower() for word in doc]
                              for doc in tokenized_docs]
    # Removing punctuation from docs
    docs_no_punct = [[remove_symbols(word, punct) for word in doc]
                     for doc in tokenized_docs_lowered]
    # Removing added stop words
    docs_no_stops1 = [[word for word in doc if word not in added_stop_words]
                      for doc in docs_no_punct]
    # Removing nltkstop words
    docs_no_stops = [[word for word in doc if word not in stop_words ]
                     for doc in docs_no_stops1]
    # Lemmatizing words in docs
    docs_lemmatized = [[lemmer.lemmatize(word) for word in doc]
                       for doc in docs_no_stops]
    # Stemming words in docs
    docs_stemmed = [[stemmer.stem(word) for word in doc]
                    for doc in docs_lemmatized]
    # Removes mentions, emotions, hashtags and emojies
    docs_no_mentions = [preprocessing_text(' '.join(doc)) for doc in docs_stemmed]
    # Removes all non-english charachters and any other different charachters
    docs_english_only = [re.sub(r'[^a-zA-Z]', " ", doc) for doc in docs_no_mentions]
    # keeping words that are more than 2 chars long
    cleaned_docs = []
    for doc in docs_english_only:
        cleaned_docs.append(' '.join(word for word in doc.split() if len(word)>2))
    # converting cleaned docs into list of lists i.e bag of words (bow) per each doc
    bow = [list(tweet.split(' ')) for tweet in cleaned_docs]
    return cleaned_docs, bow
def preprocessing_text(text):
    '''
    INPUT: str
    OUTPUT: str w/ emojies, urls, hashtags and mentions removed
    '''
    # tweet-preprocessor is imported lazily so the module loads without it.
    import preprocessor as p
    p.set_options(p.OPT.EMOJI, p.OPT.URL, p.OPT.HASHTAG, p.OPT.MENTION, p.OPT.NUMBER,
                  p.OPT.RESERVED, p.OPT.SMILEY)
    return p.clean(text)
def remove_symbols(word, symbol_set):
    '''
    INPUT: STR, symbol set (i.e. punctuation)
    OUTPUT: word w/ every character found in symbol_set removed
    '''
    kept = [ch for ch in word if ch not in symbol_set]
    return ''.join(kept)
######################################################################
##### Main function with lots of commented out code for workflow:#####
######################################################################
if __name__ == "__main__":
CURRENT_DIR = os.path.dirname('~/galvanize/Emotion_Detection_Elections/data')
file_path = os.path.join(CURRENT_DIR, 'data/russia_201901_1_tweets_csv_hashed.csv')
print('Read Data:')
users_df = read_data(file_path, kind = 'tweets', aggregated_by = 'users')
print(users_df.head(5))
print('Cleaned Data: ')
text_column = users_df['tweet_text']
print('Ready to Clean the Tweets!')
cleaned_docs, bow = clean_tweet_text(text_column)
print('The Tweets are Clean: ')
print('Model is ready! \nBelow you can see 7 topics and most frequent keywords: ')
lda_model(bow)
plot_word_cloud(cleaned_docs, sv = False)
print('Coherence Score Metric to decide on optimal number of topics: ')
plot_coherent_scores(bow, limit =30, start = 1, step = 1, sv = False )
|
from tkinter import *
import tkinter.font as tkFont
import time, socket, threading, json
# Module-level listening socket shared by the whole chat app:
# accept peer connections on every interface, port 8080.
tcp_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
tcp_socket.bind(('', 8080))
tcp_socket.listen(5)  # backlog of 5 pending connections
def send_Message(new_socket, inputtext, receive_text, user_name):
    """Send the input box's text to the peer and echo it locally."""
    # '1.0' = line 1, character 0 (Tk Text index for "start of text").
    mess = inputtext.get('1.0', END)
    # Wire format: JSON-encoded [sender_name, message_text].
    message = [user_name, mess]
    message = json.dumps(message)
    new_socket.send(message.encode('utf8'))
    theTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    # Echo our own message into the history box ('说' = "says").
    receive_text.insert(END, user_name + theTime + '说:\n')
    receive_text.insert(END, '' + mess + '')
    # Clear the input box for the next message.
    inputtext.delete(0.0, END)
def recv_Message(new_socket, receive_text, message, user_name):
    """Background reader: append the greeting and all later frames to the history box."""
    # Show the first message that arrived when the connection was accepted.
    theTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    receive_text.insert(END, message[0] + theTime + '说:\n')
    receive_text.insert(END, message[1])
    # Keep reading JSON [sender, text] frames until the peer disconnects.
    while True:
        try:
            mess = new_socket.recv(1024).decode('utf8')
            print(mess)
            if mess:
                res = json.loads(mess)
                theTime = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
                receive_text.insert(END, res[0] + theTime + '说:\n')
                receive_text.insert(END, res[1])
            else:
                # Empty recv => peer closed the connection.
                new_socket.close()
        except:
            # NOTE(review): bare except + re-entering tcp_server() here recurses
            # on every socket/JSON error and never leaves this loop — confirm
            # this is the intended reconnect behavior.
            tcp_server(user_name)
def close(tk):
    # Destroy the given chat window (used by the '关闭' button).
    tk.destroy()
def tcp_server(user_name):
    """Accept loop: every incoming connection gets its own chat window thread."""
    while True:
        try:
            new_socket, adder = tcp_socket.accept()
            # The first frame from the peer is the JSON [sender, text] greeting.
            message = new_socket.recv(1024).decode('utf8')
            message = json.loads(message)
            if message:
                threading.Thread(target=tcp_gui, args=(new_socket, message, user_name)).start()
        except:
            # NOTE(review): bare except swallows all errors; '取消' = "cancelled".
            print('取消')
def tcp_gui(new_socket, message, user_name):
    """Open a chat window for one accepted connection.

    Bug fixes vs. the original:
      * both scrollbar ``command`` options were assigned ``yview()`` — the
        *result* of a call — instead of the ``yview`` method, so dragging the
        scrollbar never scrolled anything;
      * the input box's scrollbar was wired to the receive box's view;
      * ``inputtext['yscrollcommand']`` was assigned the Scrollbar object
        itself instead of its ``set`` method;
      * the close button's ``lambda: close`` returned the function without
        calling it, so the button did nothing; it now calls ``close(tk)``.
    """
    tk = Tk()
    tk.title(user_name)
    tk.geometry('800x600')
    f = Frame(tk)
    f1 = Frame(tk)
    f2 = Frame(tk)
    f3 = Frame(tk)
    # Scrollbar for the message-history box.
    receive_scr = Scrollbar(f)
    receive_scr.pack(side=RIGHT)
    ft = tkFont.Font(family='Fixdsys', size=11)
    receive_text = Listbox(f, width=70, height=18, font=ft)
    receive_text['yscrollcommand'] = receive_scr.set
    receive_text.pack(expand=1, fill=BOTH)
    receive_scr['command'] = receive_text.yview  # bind the method, do not call it
    f.pack(expand=1, fill=BOTH)
    # Label row between the history and the input area.
    lab = Label(f1, text='asdf', height=2)
    lab.pack(fill=BOTH)
    f1.pack(expand=1, fill=BOTH)
    # Scrollbar for the input box.
    input_Scrollbar = Scrollbar(f2)
    input_Scrollbar.pack(side=RIGHT, fill=Y)
    ft1 = tkFont.Font(family='Fixdsys', size=11)
    inputtext = Text(f2, width=70, height=8, font=ft1)
    inputtext['yscrollcommand'] = input_Scrollbar.set
    inputtext.pack(expand=1, fill=BOTH)
    input_Scrollbar['command'] = inputtext.yview  # was wired to receive_text
    f2.pack(expand=1, fill=BOTH)
    # '发送' = send, '关闭' = close.
    sendButton = Button(f3, text='发送', width=10,
                        command=lambda: send_Message(new_socket, inputtext, receive_text, user_name))
    sendButton.pack(expand=1, side=BOTTOM and RIGHT, padx=15, pady=8)
    closeButton = Button(f3, text='关闭', width=10, command=lambda: close(tk))
    closeButton.pack(expand=1, side=RIGHT, padx=15, pady=8)
    f3.pack(expand=1, fill=BOTH)
    # Background reader appends incoming messages to the history box.
    t1 = threading.Thread(target=recv_Message, args=(new_socket, receive_text, message, user_name))
    t1.start()
    tk.mainloop()
if __name__ == '__main__':
    # Run the accept loop with a fixed demo user name.
    tcp_server('hhhh')
|
from base.mobileApp.lanxi.patientBase import patientBase
from Utils.appium_config import DriverClient as DC
import unittest
from Utils.public_action import skip_dependon
from Utils.public_action import pub_action
class patientList(unittest.TestCase):
    """Patient-management flows on the mobile app: add, edit, delete a patient.

    Each test asserts on the toast text shown after the action and always
    navigates back to the home screen so the next test starts clean.
    """

    @classmethod
    def setUpClass(cls):
        # One shared Appium driver for every test in this class.
        cls.driver = DC().getDriver()

    # @classmethod
    # def tearDownClass(cls):
    #     cls.driver.quit()

    @skip_dependon(depend="test_login")
    @unittest.skip("skip it")
    def test_01_add(self):
        # The no-op ``except Exception as e: raise e`` wrappers were removed;
        # try/finally alone guarantees back_home() while letting errors
        # propagate unchanged.
        try:
            patient = patientBase()
            patient.add_patient()
            message = "//*[contains(@text,'添加成功')]"
            toast = pub_action().get_toast(message, self.driver)
            print("获取到的toast信息为:{}".format(toast))
            # BUG FIX: assertEquals is a deprecated alias, removed in
            # Python 3.12 — use assertEqual.
            self.assertEqual("添加成功", toast)
        finally:
            patientBase().back_home()

    @skip_dependon(depend="test_login")
    # @unittest.skip("skip it")
    def test_02_mod(self):
        try:
            patient = patientBase()
            patient.mod_patient()
            message = "//*[@text='编辑成功']"
            toast = pub_action().get_toast(message, self.driver)
            print("获取到的toast信息为:{}".format(toast))
            self.assertEqual('编辑成功', toast)
        finally:
            patientBase().back_home()

    @skip_dependon(depend="test_login")
    # @unittest.skip("skip it")
    def test_03_dele(self):
        try:
            patient = patientBase()
            patient.dele_patient()
            message = "//*[@text='删除就诊人成功']"
            toast = pub_action().get_toast(message, self.driver)
            print("获取到的toast信息为:{}".format(toast))
            self.assertEqual('删除就诊人成功', toast)
        finally:
            patientBase().back_home()
|
def sayHello():
    """Print a greeting identifying this demo module."""
    print("你好,我是sayhello")


if __name__ == "__main__":
    # Show the module name, a credit line, then the greeting.
    print(__name__)
    print("小明开发的模块")
    sayHello()
# -*- coding: utf-8 -*-
import feedparser
import re
import json
def convertTime(time):
    """Convert a 24-hour "HH:MM" string to 12-hour "H:MM AM/PM".

    BUG FIX: the original only subtracted 12 for hours > 12, so "12:30" was
    labelled AM (noon is PM) and "00:05" came out as "0:05 AM" instead of
    "12:05 AM". Hours below 12 keep their original digits (e.g. "09" stays
    "09"), matching the original behavior.
    """
    parts = time.split(":")
    hour = int(parts[0])
    amOrPM = "PM" if hour >= 12 else "AM"
    if hour > 12:
        parts[0] = str(hour - 12)
    elif hour == 0:
        parts[0] = "12"  # midnight hour displays as 12 AM
    return parts[0] + ":" + parts[1] + " " + amOrPM
# Pull the Ole Miss athletics calendar feed and rewrite it as sports.json.
rssFeed = feedparser.parse('https://olemisssports.com/calendar.ashx/calendar.rss')
data = {}
data["Sports"] = []
for item in rssFeed.entries:
    # s:localstartdate looks like "YYYY-MM-DDTHH:MM:SS"; a 10-char value is a
    # date-only entry, i.e. the start time is still TBA.
    startdate = item.s_localstartdate
    date = startdate[5:7]+"/"+startdate[8:10]+"/"+startdate[2:4]
    if len(startdate) == 10:
        time = "TBA"
    else:
        time = startdate[11:16]
        time = convertTime(time)
    # The numeric event id is the tail of the feed's URL-style id field.
    idStr = item.id[43:]
    locationWhere = item.ev_location
    if locationWhere == "":
        locationWhere = 'TBA'
    summary = item.summary.split('\\n')
    summaryReturn = ""
    if len(summary) >= 3:
        # A leading W/L on the second summary line means a final score exists;
        # use it ("W,12-3") instead of the start time.
        if summary[1][:1] == 'W' or summary[1][:1] == 'L':
            summary[0] = summary[0][4:]
            summary[1] = summary[1][:1]+","+summary[1][1:]
            summaryReturn = summary[1]
    if summaryReturn == "":
        summaryReturn = time
    # First summary line reads "<sport> at <opponent>" or "<sport> vs <opponent>".
    event = re.split(" at | vs ",summary[0])
    print(idStr + " " +date+" "+event[1]+" "+ locationWhere + " " + event[0] + " "+summaryReturn)
    item = {'eventId': int(idStr), \
            'Date': date, \
            "SportType": event[0] , \
            "Event": event[1], \
            "Summary": summaryReturn, \
            "LocationWhere": locationWhere }
    item = {"Item": item}
    data["Sports"].append(item)
with open('sports.json', 'w') as f:
    json.dump(data, f, ensure_ascii=False)
|
from django.db import models
from django.utils import timezone
#from ..models import Loi
from ..models import Competence, Charge
from ..fonctions_base import *
class Maison(models.Model):
    """A noble house: prestige, senate influence and skill bonuses.

    ``pct_prestige``, ``influence`` and ``nb_voix_senat`` are denormalized
    fields recomputed elsewhere (see the commented-out code in ``save()``).
    """
    #user = models.ForeignKey(auth.User)
    active = models.BooleanField(default=True)
    priorite = models.SmallIntegerField(default=9)
    nom = models.CharField(max_length=40, unique=True)
    description = models.TextField(default='', blank=True, null=True)
    background = models.TextField(default='', blank=True, null=True)
    nom_info = models.CharField(max_length=20, blank=True, default='')
    institutions = models.TextField(blank=True, default='')
    dieu = models.CharField(max_length=40, blank=True, default='')
    embleme = models.CharField(max_length=40, blank=True, default='')
    suzerain = models.CharField(max_length=40, blank=True, default='')
    # related_name values were wrapped in redundant parentheses in the
    # original; ('x') is just the string 'x', so this is purely cosmetic.
    senateur = models.ForeignKey('Perso', blank=True, null=True, on_delete=models.SET_NULL, related_name='senateur')
    bonus_competence_categorie1 = models.ForeignKey('Categorie_competence', blank=True, null=True, on_delete=models.SET_NULL, related_name='bonus_competence_categorie1')
    bonus_competence1 = models.ForeignKey('Competence', blank=True, null=True, on_delete=models.SET_NULL, related_name='bonus_competence1')
    bonus_competence_categorie2 = models.ForeignKey('Categorie_competence', blank=True, null=True, on_delete=models.SET_NULL, related_name='bonus_competence_categorie2')
    bonus_competence2 = models.ForeignKey('Competence', blank=True, null=True, on_delete=models.SET_NULL, related_name='bonus_competence2')
    prestige = models.SmallIntegerField(default=100)
    pct_prestige = models.SmallIntegerField(default=0, verbose_name='pct_prestige : valeur temporaire et auto')
    influence = models.SmallIntegerField(default=0, verbose_name='influence : valeur temporaire et auto')
    nb_voix_senat = models.SmallIntegerField(default=0, verbose_name='nb_voix_senat : valeur temporaire et auto')

    def __str__(self):
        return self.nom

    def save(self, *args, **kwargs):
        """Synchronise the 'senateur' Charge with ``self.senateur``, then save.

        BUG FIX: the original signature was ``save(self)``, which broke every
        standard Django call such as ``save(update_fields=...)`` or
        ``save(using=...)``; extra arguments are now accepted and forwarded.
        """
        #self.influence = self.get_influence()
        #self.nb_voix_senat = self.get_pct_influence()
        '''
        # sauvegarde des lois en cours pour les mettre a jour
        qst_lois_encours = Loi.objects.filter(active=True)
        for loi_encours in qst_lois_encours :
            if objet_ds_manytomany(self,loi_encours.maison_a_vote):
                loi_encours.save()
        '''
        # Keep exactly one senator per house: strip the senator charge from any
        # other member, and grant it to the designated senator if needed.
        charge_senateur = Charge.objects.get(nom_info='senateur')
        if self.senateur:
            qst_senateurs = charge_senateur.perso_charge.filter(maison=self).exclude(id=self.senateur.id).all()
            for senateur in qst_senateurs:
                senateur.charge = None
                senateur.save()
            if not self.senateur.charge == charge_senateur:
                self.senateur.charge = charge_senateur
                self.senateur.save()
        else:
            # No senator designated: clear the charge from every house member.
            qst_senateurs = charge_senateur.perso_charge.filter(maison=self).all()
            for senateur in qst_senateurs:
                print(senateur)
                senateur.charge = None
                senateur.save()
        super().save(*args, **kwargs)  # Call the "real" save() method.

    def get_pct_prestige(self):
        """Share (%) of total prestige among houses eligible for the senate."""
        resultat = 0
        if self.get_OK_pr_senat():
            prestige_total = 0
            for maison in Maison.objects.all():
                if maison.get_OK_pr_senat():
                    prestige_total = prestige_total + maison.prestige
            # Guard added: avoid ZeroDivisionError when all prestiges are 0.
            if prestige_total:
                resultat = int(round(float(self.prestige * 100 / prestige_total)))
        return resultat

    def get_influence(self):
        """Senate influence: prestige share + aura and charge bonuses of members present at the senate."""
        resultat = 0
        if self.senateur and self.active:
            bonus_aura = 0
            bonus_charge = 0
            comp_aura = Competence.objects.get(nom_info='aura')
            # NOTE(review): assumes self.senateur.charge is set (save() grants
            # it); a senator without a charge would raise AttributeError here.
            lieu_senat = self.senateur.charge.lieu
            # Only alive (PV > 0), active, non-hosted members at the senate count.
            qst_persos = self.persos_maison.filter(active=True).filter(hote__isnull=True).filter(lieu=lieu_senat).filter(PV__gt=0).all()
            for perso in qst_persos:
                bonus_aura = bonus_aura + perso.valeur_competence(comp_aura)
                if perso.charge:
                    bonus_charge = bonus_charge + perso.charge.influence
            resultat = self.get_pct_prestige() + bonus_aura + bonus_charge
        return resultat

    def get_pct_influence(self):
        """Share (%) of total influence among houses eligible for the senate."""
        resultat = 0
        if self.get_OK_pr_senat():
            influence_total = 0
            for maison in Maison.objects.all():
                if maison.get_OK_pr_senat():
                    influence_total = influence_total + maison.get_influence()
            # Guard added: avoid ZeroDivisionError when total influence is 0.
            if influence_total:
                resultat = int(round(float(self.get_influence() * 100 / influence_total)))
        return resultat

    def get_OK_pr_senat(self):
        """An active house with a designated senator is eligible for the senate."""
        r = True
        if not self.active:
            r = False
        if not self.senateur:
            r = False
        return r
|
from .base import ScalarVariable
from .global_global_step import global_step
from .global_global_step import get_value as current_step
from .global_keep_prob import keep_prob
from .global_batch_size import batch_size
from .utils import create_global_scalars
|
from django.test.simple import *
import os
def run_tests(test_labels, verbosity=1, interactive=True, extra_tests=[]):
"""
worsk exactly as per normal test
but only creates the test_db if it doesn't yet exist
and does not destroy it when done
tables are flushed and fixtures loaded between tests as per usual
but if your schema has not changed then this saves significant amounts of time
and speeds up the test cycle
Run the unit tests for all the test labels in the provided list.
Labels must be of the form:
- app.TestClass.test_method
Run a single specific test method
- app.TestClass
Run all the test methods in a given class
- app
Search for doctests and unittests in the named application.
When looking for tests, the test runner will look in the models and
tests modules for the application.
A list of 'extra' tests may also be provided; these tests
will be added to the test suite.
Returns the number of tests that failed.
"""
setup_test_environment()
settings.DEBUG = False
suite = unittest.TestSuite()
if test_labels:
for label in test_labels:
if '.' in label:
suite.addTest(build_test(label))
else:
app = get_app(label)
suite.addTest(build_suite(app))
else:
for app in get_apps():
suite.addTest(build_suite(app))
for test in extra_tests:
suite.addTest(test)
suite = reorder_suite(suite, (TestCase,))
###Everything up to here is from django.test.simple
from django.db.backends import creation
from django.db import connections, DatabaseError
old_name = {}
for alias in connections:
connection = connections[alias]
old_name[alias] = settings.DATABASES[alias]['NAME']
if settings.DATABASES[alias]['TEST_NAME']:
settings.DATABASES[alias]['NAME'] = settings.DATABASES[alias]['TEST_NAME']
else:
settings.DATABASES[alias]['NAME'] = creation.TEST_DATABASE_PREFIX + settings.DATABASES[alias]['NAME']
connection.settings_dict["DATABASE_NAME"] = settings.DATABASES[alias]['NAME']
# does test db exist already ?
try:
if settings.DATABASES[alias]['ENGINE'] == 'sqlite3':
if not os.path.exists(settings.DATABASES[alias]['NAME']):
raise DatabaseError
connection.cursor()
except Exception:
print 'database %s does not exist. creating...' % alias
# db does not exist
# juggling ! create_test_db switches the DATABASE_NAME to the TEST_DATABASE_NAME
settings.DATABASES[alias]['NAME'] = old_name[alias]
connection.settings_dict["DATABASE_NAME"] = old_name[alias]
connection.creation.create_test_db(verbosity, autoclobber=True)
else:
connection.close()
settings.DATABASES[alias]['SUPPORTS_TRANSACTIONS'] = connection.creation._rollback_works()
result = unittest.TextTestRunner(verbosity=verbosity).run(suite)
for alias in settings.DATABASES:
#Since we don't call destory_test_db, we need to set the db name back.
settings.DATABASES[alias]['NAME'] = old_name[alias]
connection.settings_dict["DATABASE_NAME"] = old_name[alias]
teardown_test_environment()
return len(result.failures) + len(result.errors)
|
#!/usr/bin/env python3
"""
:Author: Anemone Xu
:Email: anemone95@qq.com
:copyright: (c) 2019 by Anemone Xu.
:license: Apache 2.0, see LICENSE for more details.
"""
import unittest
from tf import tf_lstm
class TestLSTMMethods(unittest.TestCase):
    """Smoke test for the tf_lstm data-loading helpers."""
    def test_load_json(self):
        # Load the label mapping, then parse one benchmark slice and print it.
        # There are no assertions: the test passes as long as nothing raises.
        label_dict = tf_lstm.load_label('../data/label')
        print(
            tf_lstm.load_json(tf_lstm.simple_text_processing, '../data/slice/benchmark/slice-5888683.json', label_dict))
if __name__ == '__main__':
    unittest.main()
|
from django.shortcuts import render
from Account.models import Users, ApiToken, UserDevice,PushNotification
from Account.serializers import UserSerializer,UserLoginSerializer
from rest_framework.response import Response
from rest_framework.views import APIView
class RegisterAPIView(APIView):
    """Create a user account plus its API token, device and push-notification rows."""

    def post(self, request):
        info = request.data['info']
        create = UserSerializer(data=info)
        # Reject duplicate registrations before validating the payload.
        user_exists = Users.objects.filter(email=info['email']).first()
        if user_exists:
            return Response({"Message": "User already exists"})
        # BUG FIX: the original's ``elif create.is_valid is False`` compared the
        # *bound method* object to False (never true), which made the
        # "Data not in required format" branch unreachable and routed invalid
        # payloads to a generic "signup failed".  Validate explicitly instead.
        if not create.is_valid():
            return Response({"error message": "Data not in required format"})
        create.save()
        user = Users.objects.filter(email=info['email']).first()
        # Mint an API token for the new user.
        ApiToken.objects.create(user=user)
        token = ApiToken.objects.filter(user=user).first()
        token.create_token()
        token.save()
        # Register the caller's device and push-notification ids.
        UserDevice.objects.create(user=user, device_id=request.data['device_id'])
        PushNotification.objects.create(user=user, push_notification_id=request.data['push_notification_id'])
        dic = {'user data': create.data, 'token': token.api_token}
        return Response(dic, status=200)
class LoginAPIView(APIView):
    """Authenticate by email/password and return the user's API token."""

    def post(self, request):
        info = request.data
        # NOTE(review): passwords appear to be stored and compared in plain
        # text — consider Django's hashed-password helpers.
        valid_user = Users.objects.filter(email=info['email'], password=info['password']).first()
        if valid_user is None:
            return Response({"error message": "Login Failed. Incorrect email or password"}, status=400)
        token = ApiToken.objects.filter(user=valid_user).first()
        if token is None:
            # First login ever: mint a token and a push-notification token.
            ApiToken.objects.create(user=valid_user)
            token = ApiToken.objects.filter(user=valid_user).first()
            token.create_token()
            token.is_valid = True
            token.save()
            push_notification = PushNotification.objects.filter(user=valid_user).first()
            # Guard added: the push-notification row may not exist yet.
            if push_notification is not None:
                push_notification.create_notification_token()
                push_notification.save()
            dic = {'user data': UserSerializer(instance=valid_user).data, 'token': token.api_token}
            return Response(dic, status=200)
        # BUG FIX: the original used ``token.is_valid is True`` / ``is False``
        # identity checks; any non-bool truthy value fell through every branch
        # and the view returned None.  Plain truthiness covers all cases.
        if token.is_valid:
            # Token already active: report the duplicate login.
            dic = {'user data': UserSerializer(instance=valid_user).data, 'token': token.api_token, "message": "already logged in"}
            return Response(dic, status=200)
        # Token exists but was invalidated (logged out): re-activate it.
        token.is_valid = True
        token.save()
        dic = {'user data': UserSerializer(instance=valid_user).data, 'token': token.api_token}
        return Response(dic, status=200)
class LogOutAPIView(APIView):
    """Invalidate the API token passed as the TOKEN query parameter."""

    def get(self, request):
        token = request.query_params["TOKEN"]
        obj = ApiToken.objects.filter(api_token=token).first()
        if obj is None:
            return Response({"message": "Invalid Token"}, status=400)
        # Capture the owner before deleting the token row.
        user = obj.user
        obj.delete()
        push_not = PushNotification.objects.filter(user=user).first()
        # Guard added: the push-notification row may not exist.
        if push_not is not None:
            push_not.push_notification_token = None
            # BUG FIX: the original never called save(), so clearing the
            # push-notification token was silently lost.
            push_not.save()
        return Response({"message": "successfully logged out"}, status=200)
|
# https://www.hackerrank.com/challenges/python-sort-sort/problem
# Read n rows of m integers, then print them sorted by column k (0-indexed).
n, m = map(int, input().split())
t = [tuple(map(int, input().split())) for _ in range(n)]
k = int(input())
# sorted() is stable, so rows with equal k-th values keep their input order.
for r in sorted(t, key=lambda x: x[k]):
    print(*r)
from flask import request,session,abort
from planner_project.common import api_response,custom_error
from planner_project.data_access import mysql
from planner_project.sql.backweb import user_sql,home_sql
def get_token():
    """Return the login token from the request cookies.

    Raises:
        CustomFlaskErr: code 600 ("请先登录" = please log in first) when no
        token cookie is present.
    """
    token = request.cookies.get("token")
    # Idiom fix: compare against None with ``is not`` rather than ``!=``.
    if token is not None:
        return token
    raise custom_error.CustomFlaskErr(status_code=600, message="请先登录")
# Get the currently logged-in user; returns None when not logged in.
def current_user():
    # The session caches the user object under the "user" key.
    user = session.get("user", None)
    return user
# Get the currently logged-in user; aborts the request with 600 if not logged in.
def current_user_mush_login():
    """Return the logged-in session user, or raise a 600 CustomFlaskErr.

    (The "mush" in the name is a typo for "must", kept because callers use it.)
    """
    user = current_user()
    # Idiom fix: ``is not None`` instead of ``!=``; ``any(user)`` additionally
    # requires a non-empty cached value, as in the original.
    if user is not None and any(user):
        return user
    # Delegate to the unified error handler (600 = not logged in).
    raise custom_error.CustomFlaskErr(status_code=600, message="请先登录")
# Refresh the cached session user from the database.
def set_session_login():
    """Look up the user for the current request's token and cache it in the session."""
    token = get_token()
    # BUG FIX: ``(token)`` is just a parenthesized string, not a tuple, so the
    # SQL parameter was passed as a bare string; ``(token,)`` passes the
    # intended one-element parameter tuple.
    session["user"] = mysql.get_object(home_sql.select_sysuser_login_info, (token,))
r"""
POSTORDER TRAVERSAL WITH PARENT LINK
Given the root to a binary tree, where nodes have a link to value, left, right, and parent, write a function that
prints an postorder traversal with O(1) space complexity. Without a link to the parent node, postorder traversal
requires O(h) space, where h is the height of the tree.
Consider the following binary tree:
3
/ \
1 5
/ \ /
0 2 4
Example:
Input = Node(3, Node(1, Node(0), Node(2)), Node(5, Node(4))) # or, the tree above
Output = 0 2 1 4 5 3
"""
# NOTE: O(1) space rules out all recursive solutions.
# Iterative Approach: If coming from parent and can go left, go left. Else if not coming from right and can go right,
# go right. Else, print, and go up.
# Time Complexity: O(n), where n is the number of nodes in the tree.
# Space Complexity: O(1).
def postorder_traversal_with_parent_link_iterative(root):
    """Print a postorder traversal (space-prefixed values) in O(1) extra space.

    Uses the parent links instead of a stack: the node we arrived from
    (``prev``) tells us which phase we are in — descending into the left
    subtree, descending into the right subtree, or emitting and ascending.
    Runs in O(n) time for n nodes.
    """
    if root is not None:
        prev = None  # node visited on the previous step; None before the first step
        curr = root
        while curr:
            if curr.left and prev is curr.parent:  # If coming from parent and can go left, go left.
                prev = curr
                curr = curr.left
            elif curr.right and prev is not curr.right:  # If not coming from right and can go right, go right.
                prev = curr
                curr = curr.right
            else:  # Else, print, and go up.
                print(f" {curr.value}", end="")
                prev = curr
                curr = curr.parent
class Node:
    """Binary-tree node; children passed to the constructor get their
    ``parent`` pointer wired back to this node automatically."""

    def __init__(self, value, left=None, right=None, parent=None):
        self.value = value
        self.left = left
        self.right = right
        self.parent = parent
        # Back-link the children so parent-based traversals work.
        for child in (left, right):
            if child:
                child.parent = self

    def __repr__(self):
        return repr(self.value)
def postorder_traversal_recursive(root):
    """Print a space-prefixed postorder traversal (left, right, node).

    Reference O(h)-space implementation used to validate the O(1) version.
    """
    if root is None:
        return
    postorder_traversal_recursive(root.left)
    postorder_traversal_recursive(root.right)
    print(f" {root.value}", end="")
def display(node):
    """Pretty-print a binary tree as ASCII art, one node value per column.

    Builds the picture bottom-up: for every subtree ``_display`` returns
    (lines, width, height, middle) where ``middle`` is the column of the
    subtree root within its own lines.
    """
    def _display(node):
        # Returns: list of text lines, total width, height, and the column
        # index of this node's value (used to draw the / and \ connectors).
        if node.right is None and node.left is None:  # No child.
            return [str(node.value)], len(str(node.value)), 1, len(str(node.value)) // 2
        if node.right is None:  # Only left child.
            lines, n, p, x = _display(node.left)
            u = len(str(node.value))
            # Underscores bridge from above the child's root to this value.
            first_line = (x + 1) * ' ' + (n - x - 1) * '_' + str(node.value)
            second_line = x * ' ' + '/' + (n - x - 1 + u) * ' '
            # Pad each child line on the right to the new total width.
            shifted_lines = [line + u * ' ' for line in lines]
            return [first_line, second_line] + shifted_lines, n + u, p + 2, n + u // 2
        if node.left is None:  # Only right child.
            lines, n, p, x = _display(node.right)
            u = len(str(node.value))
            first_line = str(node.value) + x * '_' + (n - x) * ' '
            second_line = (u + x) * ' ' + '\\' + (n - x - 1) * ' '
            shifted_lines = [u * ' ' + line for line in lines]
            return [first_line, second_line] + shifted_lines, n + u, p + 2, u // 2
        else:  # Two children.
            left, n, p, x = _display(node.left)
            right, m, q, y = _display(node.right)
            u = len(str(node.value))
            first_line = (x + 1) * ' ' + (n - x - 1) * '_' + str(node.value) + y * '_' + (m - y) * ' '
            second_line = x * ' ' + '/' + (n - x - 1 + u + y) * ' ' + '\\' + (m - y - 1) * ' '
            # Pad the shorter side with blank lines so both columns zip evenly.
            if p < q:
                left += [n * ' '] * (q - p)
            elif q < p:
                right += [m * ' '] * (p - q)
            lines = [first_line, second_line] + [a + u * ' ' + b for a, b in zip(left, right)]
            return lines, n + m + u, max(p, q) + 2, n + u // 2
    if node:
        lines, _, _, _ = _display(node)
        for line in lines:
            print(line)
# Test fixtures: a mix of shapes (full, left-only, right-only, duplicate
# values, deep/unbalanced) to exercise the traversal and display code.
trees = [Node(3, Node(1, Node(0), Node(2)), Node(5, Node(4))),
         Node(1, Node(3, Node(4), Node(6, Node(5), Node(0))), Node(2)),
         Node(3, Node(1, None, Node(2)), Node(5, Node(4))),
         Node(3, Node(1, Node(0)), Node(5, None, Node(6))),
         Node(3, Node(1, Node(0), Node(3)), Node(5, Node(3), Node(6))),
         Node(4, Node(1, Node(0), Node(3)), Node(2)),
         Node(4, Node(1, Node(0), Node(2)), Node(5, Node(3), Node(6))),
         Node(0, Node(1, Node(2))),
         Node(0, Node(1), Node(3, Node(2), Node(4))),
         Node(26, Node(5, Node(-37, Node(-74, Node(-86), Node(-51)), Node(-7, Node(-17))),
                       Node(17, Node(11, Node(5)), Node(26, Node(18)))),
              Node(74, Node(41, Node(34, Node(28)), Node(52, Node(47))),
                   Node(90, Node(88, Node(86)), Node(99, Node(95))))),
         Node(27, Node(2, None, Node(17, Node(11, Node(5)), Node(26, Node(18)))),
              Node(74, Node(41, Node(34, Node(28))),
                   Node(90, Node(88), Node(99, None, Node(105, None, Node(420)))))),
         Node(27, Node(74, Node(90, Node(99, Node(105, Node(420))), Node(88)),
                       Node(41, None, Node(34, None, Node(28)))),
              Node(2, Node(17, Node(26, None, Node(18)), Node(11, None, Node(5)))))]
# Demo driver: for every fixture tree, show the tree and compare the O(1)
# iterative traversal's output against the recursive reference.
fns = [postorder_traversal_with_parent_link_iterative]
for i, tree in enumerate(trees):
    print(f"trees[{i}]:")
    display(tree)
    # Reference output from the recursive implementation.
    print(f"(recursive postorder traversal:", end="")
    postorder_traversal_recursive(tree)
    print(")\n")
    for fn in fns:
        print(f"{fn.__name__}(tree):", end="")
        fn(tree)
        print()
    print()
|
# Below are the global variables
from datetime import datetime
from threading import Thread
from ibapi.client import *
from ibapi.wrapper import *
from message.chatbot import ChatBot
from traders.ma.raivo_trader import RaivoTrader
# Configure root logging once at import time; WARN level keeps routine IB
# callback chatter out of the log.
logging.basicConfig(format='%(asctime)s-%(levelname)s:%(message)s', level="WARN")
class IBWrapper(EWrapper):
    """EWrapper that routes Interactive Brokers callbacks to trader objects.

    Each trader is assigned a unique ``reqId`` (real-time bars) and
    ``tickDataId`` (tick-by-tick data) so incoming callbacks can be matched
    back to the trader that subscribed.
    """

    def __init__(self, traders, secondaryTrader=None):
        super().__init__()
        self.traders = traders
        self.nextorderId = None  # populated by nextValidId() after connecting
        tickId = 1
        reqId = 1
        for trader in traders:
            trader.reqId = reqId
            trader.tickDataId = tickId
            reqId += 1
            tickId += 1
            # Traders place orders through the secondary app when one is given.
            if secondaryTrader is None:
                trader.traderApp = self
            else:
                trader.traderApp = secondaryTrader

    def nextValidId(self, orderId: int):
        """Record the next valid order id supplied by TWS."""
        super().nextValidId(orderId)
        self.nextorderId = orderId
        logging.info('The next valid order id is: %s', self.nextorderId)

    def orderStatus(self, orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId,
                    whyHeld, mktCapPrice):
        """Log order status updates."""
        super().orderStatus(orderId, status, filled, remaining, avgFillPrice, permId, parentId, lastFillPrice, clientId,
                            whyHeld, mktCapPrice)
        logging.warning('Order status: %s. Id: %s. Filled: %s', status, orderId, filled)

    def execDetails(self, reqId, contract, execution):
        """Broadcast an execution to every trader (they filter by reqId)."""
        logging.warning('Order executed: %s %s %s', contract.symbol, str(execution.price), str(execution.shares))
        for t in self.traders:
            t.hasOrderUpdate(reqId, execution.price, execution.shares, execution)

    def realtimeBar(self,
                    reqId: TickerId,
                    response_time: int, open_: float, high: float, low: float, close: float,
                    volume: int, wap: float, count: int):
        """Forward a real-time bar to the trader whose reqId matches."""
        super().realtimeBar(reqId, response_time, open_, high, low, close, volume, wap, count)
        bar = RealTimeBar(response_time, -1, open_, high, low, close, volume, wap, count)
        for t in self.traders:
            # BUGFIX: compare request ids with == rather than `is`. `is` tests
            # object identity and only happens to work for small cached ints
            # in CPython; it silently drops bars for larger ids.
            if t.reqId == reqId:
                t.newBar(bar)
                break

    def tickByTickMidPoint(self, reqId: int, time: int, midPoint: float):
        """Forward a tick-by-tick midpoint to the trader whose tickDataId matches."""
        super().tickByTickMidPoint(reqId, time, midPoint)
        for t in self.traders:
            # BUGFIX: == instead of `is` (see realtimeBar).
            if t.tickDataId == reqId:
                t.newMidpoint(midPoint)
                break
class IBClient(EClient):
    """Thin EClient subclass; exists so IBApp can mix wrapper and client."""
    def __init__(self, wrapper):
        # Explicit base-class call (not super()) because this class takes part
        # in IBApp's multiple inheritance and must initialize EClient directly.
        EClient.__init__(self, wrapper)
class IBApp(IBWrapper, IBClient):
    """Combined IB wrapper/client.

    Connects to TWS/IB Gateway, runs the EClient message loop on a background
    thread, and blocks until the handshake delivers a valid order id.
    """

    def __init__(self, ipaddress, portid, traders=None, secondaryTrader=None):
        # BUGFIX: the old `traders=[]` default was a shared mutable default
        # argument — every IBApp created without an explicit list would share
        # (and mutate) the same list object.
        if traders is None:
            traders = []
        IBWrapper.__init__(self, traders, secondaryTrader)
        IBClient.__init__(self, wrapper=self)
        self.connect(ipaddress, portid, 12)
        # Network message loop runs on its own thread.
        thread = Thread(target=self.run)
        thread.start()
        setattr(self, "_thread", thread)
        # nextValidId may arrive on the secondary app when one is configured.
        tradingApp = self
        if secondaryTrader is not None:
            tradingApp = secondaryTrader
        while True:
            # nextorderId is set by IBWrapper.nextValidId once connected.
            if isinstance(tradingApp.nextorderId, int):
                print('connected')
                print()
                break
            else:
                print('waiting for connection')
                time.sleep(1)
if __name__ == '__main__':
    # Shared chat-notification channel for all traders.
    chatbot = ChatBot()
    # One RaivoTrader per symbol, parameterized with recent 5-day price ranges.
    # (qcom_trader and nvda_trader are configured but not currently traded.)
    twlo_trader = RaivoTrader('TWLO', units=100, chatbot=chatbot, last_5_day_max=290, last_5_day_min=270, delta=0.4)
    fb_trader = RaivoTrader('FB', units=100, chatbot=chatbot, last_5_day_max=310, last_5_day_min=290, delta=0.4)
    nvda_trader = RaivoTrader('NVDA', units=100, chatbot=chatbot, last_5_day_max=560, last_5_day_min=520, delta=0.4)
    qcom_trader = RaivoTrader('QCOM', units=400, chatbot=chatbot, last_5_day_max=130, last_5_day_min=115, delta=0.4)
    # BMW trades on IBIS in EUR, so it needs an explicit contract object.
    bmw_contract = Contract()
    bmw_contract.symbol = 'BMW'
    bmw_contract.secType = 'STK'
    bmw_contract.exchange = 'IBIS'
    bmw_contract.currency = 'EUR'
    # Trading window 10:00-19:00 (presumably exchange local time — confirm).
    tradingHoursStart = datetime.now().replace(hour=10, minute=00, second=0)
    tradingHoursEnd = datetime.now().replace(hour=19, minute=00, second=0)
    bmw_trader = RaivoTrader(None,
                             contract=bmw_contract,
                             units=365,
                             chatbot=chatbot,
                             last_5_day_max=59.70,
                             last_5_day_min=57.31,
                             tradingHoursStart=tradingHoursStart,
                             tradingHoursEnd=tradingHoursEnd)
    traderList = [
        bmw_trader,
        twlo_trader,
        fb_trader,
        # nvda_trader
    ]
    # Connect to the local TWS/IB Gateway, then subscribe each trader to
    # tick-by-tick midpoints and 30-second real-time TRADES bars.
    ib = IBApp("127.0.0.1", 7400, traders=traderList)
    time.sleep(5)
    for t in traderList:
        logging.warning("RealTimeBars requested for: %s", t.contract.symbol)
        ib.reqTickByTickData(t.tickDataId, t.contract, "MidPoint", 0, False)
        ib.reqRealTimeBars(t.reqId, t.contract, 30, "TRADES", False, [])
|
# encoding: utf-8
# Copyright 2013 maker
# License
from django.test.simple import DjangoTestSuiteRunner
from django.conf import settings
class CustomTestRunner(DjangoTestSuiteRunner):
    """
    Custom DjangoTestSuiteRunner to remove Django modules from tests
    """

    def run_tests(self, test_labels, **kwargs):
        # Restrict the suite to installed apps containing 'maker' with exactly
        # one dot in the dotted path, stripping the leading prefix.
        # NOTE(review): app[7:] drops the first 7 characters — verify this
        # matches the actual prefix length of the app paths.
        test_labels = [
            app[7:]
            for app in settings.INSTALLED_APPS
            if 'maker' in app and app.count('.') == 1
        ]
        return super(CustomTestRunner, self).run_tests(test_labels, **kwargs)
# ArgumentParser: used to record each option's name, type and default value
import argparse
def get_args():
    """Parse and return the command-line arguments for DIB-R.

    Each argument records a name, type, default value and (optionally) a
    short help string.
    """
    # Declare the parser, described as 'dib-r'.
    parser = argparse.ArgumentParser(description='dib-r')
    # Name of the file list (the dataset list).
    parser.add_argument('--filelist', type=str, default='test_list.txt', help='filelist name')
    # Number of worker processes PyTorch's DataLoader uses to load data.
    parser.add_argument('--thread', type=int, default=8, help='num of workers')
    # Save folder for experiment outputs.
    parser.add_argument('--svfolder', type=str, default='prediction', help='save folder for experiments ')
    # Path to the pretrained model checkpoint.
    parser.add_argument('--g_model_dir', type=str, default='checkpoints/g_model.pth', help='save path for pretrained model')
    # Dataset folder.
    parser.add_argument('--data_folder', type=str, default='dataset', help='data folder')
    # Iteration to start from.
    parser.add_argument('--iter', type=int, default=-1, help='start iteration')
    # Loss type.
    parser.add_argument('--loss', type=str, default='iou', help='loss type')
    # Camera mode ('per' presumably means perspective — TODO confirm).
    parser.add_argument('--camera', type=str, default='per',help='camera mode')
    # Number of camera views.
    parser.add_argument('--view', type=int, default=2,help='view number')
    # Image size.
    parser.add_argument('--img_dim', type=int, default=64,help='dim of image')
    # Number of image channels (4, i.e. RGBA).
    parser.add_argument('--img_channels', type=int, default=4,help='image channels')
    # Batch size.
    parser.add_argument('--batch_size', type=int, default=64,help='batch size')
    # Number of training epochs.
    parser.add_argument('--epoch', type=int, default=1000,help='training epoch')
    # Iterations per log entry.
    parser.add_argument('--iter_log', type=int, default=50,help='iterations per log')
    # Iterations per sample output.
    parser.add_argument('--iter_sample', type=int, default=1000,help='iterations per sample')
    # Iterations per model checkpoint.
    parser.add_argument('--iter_model', type=int, default=10000,help='iterations per model saving')
    # Weight for the "sil" (presumably silhouette) loss term — TODO confirm.
    parser.add_argument('--sil_lambda', type=float, default=1,help='hyperparamter for sil')
    args = parser.parse_args()
    return args
|
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
def plot_iterations(data, net_width):
    """Bar-plot iteration counts per (discretization, solver) for one net width."""
    subset = data.groupby('net_width').get_group(net_width)
    iterations = (subset.groupby(['discretization', 'solver'])
                  .count()
                  .unstack()
                  .iteration)
    ax = iterations.plot.bar(rot=0)
    ax.set_ylabel("iterations")
    ax.set_title(f"iterations by discretization and solver (h={net_width})")
def plot_error_over_iterations(data, net_width, discretization):
    """Plot relative error vs. iteration (log y-axis), one line per solver."""
    subset = (data.groupby(['net_width', 'discretization'])
              .get_group((net_width, discretization)))
    errors = subset.pivot(index='iteration', columns='solver', values='error')
    ax = errors.plot(logy=True)
    ax.set_ylabel("relative error")
    ax.set_title(f"relative error over iterations (h={net_width}, {discretization})")
def plot_run_time(data, net_width):
    """Bar-plot run time per (discretization, solver) for one net width."""
    subset = data.groupby('net_width').get_group(net_width)
    # nth(0) takes the first recorded row of each group.
    run_time = (subset.groupby(['discretization', 'solver'])
                .nth(0)
                .unstack()
                .run_time)
    ax = run_time.plot.bar(rot=0)
    ax.set_ylabel("run time (s)")
    ax.set_title(f"run time (s) by discretization and solver (h={net_width})")
# Load the benchmark results and render the three summary plots for h = 0.01.
data = pd.read_csv("errors.csv", index_col=0)
plot_iterations(data, net_width=0.01)
plot_run_time(data, net_width=0.01)
plot_error_over_iterations(
    data, net_width=0.01, discretization='HDG BDM 2')
plt.show()
|
# Shahriyar Mammadli
# Import required libraries
import pandas as pd
import helperFunctions as hf
from sklearn.model_selection import train_test_split
# Read the train and test data
trainDf = pd.read_csv('../Datasets/KaggleDigitRecognizer/train.csv')
predDf = pd.read_csv('../Datasets/KaggleDigitRecognizer/test.csv')
# Split into train/test; for simplicity this version skips a validation split.
# BUGFIX: pass axis as a keyword — the positional form drop(labels, 1) was
# deprecated and removed in pandas 2.0. Column 0 is the label column.
X_train, X_test, y_train, y_test = train_test_split(
    trainDf.drop(trainDf.columns[0], axis=1),
    trainDf[trainDf.columns[0]],
    test_size=0.1,
    random_state=123,
)
# Build a CNN model and write the predictions out as a submission file.
CNNSubmitDf = hf.buildCNNModel(X_train, X_test, y_train, y_test, X_train.shape[0], X_test.shape[0], predDf)
CNNSubmitDf.to_csv("submissionCNN.csv", header=True, index=False)
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
# Database connection settings for the SQLAlchemy URI built in create_app().
# NOTE(review): credentials are hard-coded — consider loading them from the
# environment or a config file instead of committing them to source.
DBUSER = 'lustri'
DBPASS = 'lustri'
DBHOST = 'postgres'
DBPORT = '5432'
DBNAME = 'db'
# Shared SQLAlchemy handle; bound to the app inside create_app().
db = SQLAlchemy()
def create_app():
    """Application factory: configure Flask, bind the database and routes."""
    app = Flask(__name__)
    app.config['SQLALCHEMY_DATABASE_URI'] = (
        f'postgresql+psycopg2://{DBUSER}:{DBPASS}@{DBHOST}:{DBPORT}/{DBNAME}'
    )
    app.config['DEBUG'] = True
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # NOTE(review): hard-coded secret key — should come from configuration.
    app.secret_key = 'lustri'
    db.init_app(app)

    # Imported here to avoid a circular import at module load time.
    from api.register.views import register, user_view
    app.register_blueprint(register)
    app.add_url_rule(
        '/users/', view_func=user_view, methods=['GET','POST']
    )
    app.add_url_rule(
        '/users/<int:id>', view_func=user_view, methods=['GET','PUT','DELETE']
    )
    return app
from keras.models import Sequential
from keras.layers import LSTM, Dense, GRU
from keras.callbacks import TensorBoard
from data_prep import gen_cosine_amp_for_supervised
batch_size = 128
sequence_length = 64
# Generate supervised (input, target) cosine data; xn presumably sets the
# number of sample points — confirm in data_prep.
cos, expected = gen_cosine_amp_for_supervised(xn=sequence_length*100)
# Reshape inputs to (samples, timesteps, features) for recurrent layers.
cos = cos.reshape((-1, 64, 1))
print(expected.shape)
# One scalar target per sequence.
expected = expected.reshape((-1, 1))
# tb_callback = TensorBoard(log_dir='./logs/cos/run1_lstm', histogram_freq=10, batch_size=10, write_graph=True, write_grads=True,
# write_images=True, embeddings_freq=0, embeddings_layer_names=None, embeddings_metadata=None)
#
# # expected input data shape: (batch_size, timesteps, data_dim)
# model = Sequential()
# model.add(LSTM(50,
# return_sequences=True,
# input_shape=(sequence_length, 1),
# #batch_input_shape=(batch_size, sequence_length, 1),
# stateful=False))
# model.add(LSTM(50, return_sequences=True, stateful=False))
# model.add(LSTM(50, stateful=False))
# model.add(Dense(50, activation='sigmoid'))
# model.add(Dense(1, activation='linear'))
#
# model.compile(loss='mean_squared_error',
# optimizer='adam',
# metrics=['accuracy'])
#
# model.fit(cos, expected, validation_split=0.1,
# batch_size=batch_size,
# epochs=200,
# shuffle=False,
# callbacks=[tb_callback]
# )
#
# predicted = model.predict(x=cos, batch_size=batch_size)
|
from typing import Optional
from pymodelextractor.learners.learner import Learner
from pymodelextractor.learners.learning_result import LearningResult
from pymodelextractor.learners.observation_table_learners.observation_table import (
epsilon, ObservationTable, TableInconsistency)
from pymodelextractor.learners.observation_table_learners.translators.fa_observation_table_translator import \
FAObservationTableTranslator
from pymodelextractor.teachers.teacher import Teacher
from pythautomata.automata.deterministic_finite_automaton import \
DeterministicFiniteAutomaton as DFA
from pythautomata.automata.symbolic_finite_automaton import \
SymbolicFiniteAutomaton as SFA
from pythautomata.base_types.alphabet import Alphabet
from pythautomata.base_types.sequence import Sequence
from pythautomata.base_types.symbol import Symbol
from pythautomata.boolean_algebra_learner.boolean_algebra_learner import \
BooleanAlgebraLearner
from pythautomata.boolean_algebra_learner.closed_discrete_interval_learner import \
ClosedDiscreteIntervalLearner as IntervalLearner
from pythautomata.utilities.automata_converter import AutomataConverter
import time
class LambdaStarLearner(Learner):
    """Lambda*-style learner that infers a symbolic finite automaton (SFA).

    Drives a teacher with membership/equivalence queries, maintaining an
    observation table over the symbols observed so far and delegating
    transition generalization to a boolean-algebra learner.
    """

    _algebra_learner: BooleanAlgebraLearner

    def __init__(self, boolean_algebra_learner: BooleanAlgebraLearner = IntervalLearner):
        self._algebra_learner = boolean_algebra_learner
        # BUGFIX: these were class-level attributes, so the observed-symbol
        # set was shared (and polluted) across all learner instances — the
        # original TODO flagged exactly this. They are now per-instance.
        self._observed_symbols: set[Symbol] = set()
        self._o_t_translator = FAObservationTableTranslator()

    def learn(self, teacher: Teacher) -> LearningResult:
        """Run the learning loop until the teacher accepts a hypothesis."""
        start_time = time.time()
        # Reset per-run state so the same learner instance can be reused.
        self._observed_symbols = set()
        answer: bool
        counter_example: Sequence
        observation_table: _ObservationTable = self._build_observation_table()
        self._initialize_observation_table(observation_table, teacher)
        model = self._build_model(observation_table)
        answer, counter_example = teacher.equivalence_query(model)
        # NOTE(review): the first counterexample is processed even when
        # answer is True — verify the teacher returns a usable value then.
        self._update_with_new_counterexample(
            observation_table, teacher, counter_example)
        self._make_consistent(observation_table, teacher)
        while not answer:
            model = self._build_model(observation_table)
            answer, counter_example = teacher.equivalence_query(model)
            if not answer:
                self._update_with_new_counterexample(
                    observation_table, teacher, counter_example)
                self._close_table(observation_table, teacher)
                self._make_consistent(observation_table, teacher)
        # TODO add states counter
        return LearningResult(model, len(model.states),
                              {'equivalence_queries_count': teacher.equivalence_queries_count,
                               'membership_queries_count': teacher.membership_queries_count,
                               'duration': time.time() - start_time})

    def _update_with_new_counterexample(self, observation_table: '_ObservationTable', teacher: Teacher,
                                        counter_example: Sequence) -> None:
        """Fold a counterexample into the table: new symbols first, then prefixes."""
        self._update_observed_symbols(
            counter_example, observation_table, teacher)
        self._update_observation_table_with_counterexample(
            observation_table, teacher, counter_example)

    def _make_consistent(self, observation_table: '_ObservationTable', teacher: Teacher):
        """Resolve inconsistencies until the table is consistent, re-closing after each."""
        alphabet = Alphabet(frozenset(self._observed_symbols))
        while True:
            inconsistency = observation_table.find_inconsistency(alphabet)
            if inconsistency is None:
                return
            self._resolve_inconsistency(
                observation_table, inconsistency, teacher)
            self._close_table(observation_table, teacher)

    def _fill_hole_for_sequence(self, observation_table: '_ObservationTable', sequence: Sequence, teacher: Teacher) -> None:
        """Fill the newest column (last suffix in exp) for one row via a membership query."""
        suffix = observation_table.exp[-1]
        observation_table[sequence].append(
            teacher.membership_query(sequence + suffix))

    def _resolve_inconsistency(self, observation_table: '_ObservationTable', inconsistency: TableInconsistency, teacher: Teacher) -> None:
        """Add the distinguishing suffix to exp and refill every row's new column."""
        symbol = inconsistency.symbol
        differenceSequence = inconsistency.differenceSequence
        observation_table.exp.append(symbol+differenceSequence)
        for sequence in observation_table.observations:
            self._fill_hole_for_sequence(observation_table, sequence, teacher)

    def _build_model(self, observation_table: '_ObservationTable') -> SFA:
        """Translate the table into a DFA over observed symbols, then lift it to an SFA."""
        # not truly a dfa as it might be missing transitions, but using a dfa with missing transitions is what we need
        evidence_automaton: DFA = self._o_t_translator.translate(
            observation_table, Alphabet(frozenset(self._observed_symbols)))
        return AutomataConverter.convert_dfa_to_sfa(evidence_automaton, self._algebra_learner)

    def _close_table(self, observation_table: '_ObservationTable', teacher: Teacher) -> None:
        """Promote blue rows without a red twin until the table is closed."""
        while True:
            blue_sequence = self._get_closedness_violation_sequence(
                observation_table)
            if blue_sequence is None:
                return
            observation_table.move_from_blue_to_red(blue_sequence)
            # Every one-symbol extension of the promoted row becomes blue.
            for symbol in self._observed_symbols:
                new_blue_sequence = blue_sequence + symbol
                self._add_to_blue(observation_table,
                                  teacher, new_blue_sequence)

    def _get_closedness_violation_sequence(self, observation_table: '_ObservationTable') -> Optional[Sequence]:
        """Return a blue sequence whose row has no red counterpart, or None."""
        return next(filter(lambda x: not observation_table.same_row_exists_in_red(x), observation_table.blue), None)

    def _update_observation_table_with_counterexample(self,
                                                      observation_table: '_ObservationTable', teacher: Teacher, counter_example: Sequence) -> None:
        """Add every prefix of the counterexample to red, and their extensions to blue."""
        # save it inside a set, if sequence is long enough, this will optimize the algorithm
        prefixes = set(counter_example.get_prefixes())
        for sequence in prefixes:
            self._add_to_red(observation_table, teacher, sequence)
            for symbol in self._observed_symbols:
                suffixed_sequence = sequence + symbol
                if suffixed_sequence not in prefixes:
                    self._add_to_blue(observation_table,
                                      teacher, suffixed_sequence)

    def _update_observed_symbols(self, sequence: Sequence, observation_table: '_ObservationTable', teacher: Teacher) -> None:
        """Register symbols never seen before and extend the table accordingly."""
        new_symbols = list(
            s for s in sequence if s not in self._observed_symbols)
        self._observed_symbols.update(new_symbols)
        for symbol in new_symbols:
            for red_seq in observation_table.red:
                new_seq = red_seq + symbol
                self._add_to_blue(observation_table, teacher, new_seq)
        if len(new_symbols) > 0:
            self._close_table(observation_table, teacher)
            self._make_consistent(observation_table, teacher)

    def _build_observation_table(self) -> '_ObservationTable':
        return _ObservationTable()

    def _initialize_observation_table(self, observation_table: '_ObservationTable', teacher: Teacher) -> None:
        """Seed the table with the empty sequence (red) and one-symbol rows (blue)."""
        observation_table.exp = [epsilon]
        self._add_to_red(observation_table, teacher, epsilon)
        for symbol in self._observed_symbols:
            self._add_to_blue(observation_table, teacher, Sequence((symbol,)))

    def _add_to_red(self, observation_table: '_ObservationTable', teacher: Teacher, sequence: Sequence) -> None:
        """Add a sequence to red (if new) with a fully-filled observation row."""
        if sequence not in observation_table.red:
            observation_table.red.add(sequence)
            observation_table[sequence] = self._get_filled_row_for(
                observation_table, teacher, sequence)

    def _add_to_blue(self, observation_table: '_ObservationTable', teacher: Teacher, sequence: Sequence) -> None:
        """Add a sequence to blue (if new) with a fully-filled observation row."""
        if sequence not in observation_table.blue:
            observation_table.blue.add(sequence)
            observation_table[sequence] = self._get_filled_row_for(
                observation_table, teacher, sequence)

    def _get_filled_row_for(self, observation_table: '_ObservationTable', teacher: Teacher, sequence: Sequence) -> list[bool]:
        """Answer a membership query per suffix in exp to build the row."""
        suffixes = observation_table.exp
        row: list[bool] = []
        for suffix in suffixes:
            result = teacher.membership_query(sequence + suffix)
            row.append(result)
        return row
class _ObservationTable(ObservationTable):
    """Observation table specialized for the lambda* learner."""
    def __init__(self):
        super().__init__()
    def is_closed(self) -> bool:
        # Closed: every blue row already appears among the red rows.
        return all(self.same_row_exists_in_red(s) for s in self.blue)
    def same_row_exists_in_red(self, blueSequence: Sequence) -> bool:
        # TODO check if this is what i need
        return any(self.observations[sequence] == self.observations[blueSequence]
                   for sequence in self.red)
    def find_inconsistency(self, alphabet: Alphabet) -> Optional[TableInconsistency]:
        """Return an inconsistency between two distinct red sequences whose
        observation rows are equal, or None when the table is consistent."""
        # TODO check if this is what i need
        redList = list(self.red)
        redListLength = len(redList)
        # Compare every unordered pair of red sequences.
        for i in range(redListLength):
            for j in range(i + 1, redListLength):
                red1 = redList[i]
                red2 = redList[j]
                if red1 != red2 and self.observations[red1] == self.observations[red2]:
                    inconsistency = self._inconsistency_between(
                        red1, red2, alphabet)
                    if inconsistency is not None:
                        return inconsistency
        return None
|
from moha import *
# Load the molecule geometry (water) and the STO-3G orbital basis.
mol,orbs = IOSystem.from_file('../data/water.xyz','sto-3g.nwchem')
# Build the chemical Hamiltonian for the system.
ham = ChemicalHamiltonian.build(mol,orbs)
# Hartree-Fock wavefunction; arguments presumably set electron/orbital counts
# and the alpha/beta spin split — confirm against moha's HFWaveFunction docs.
wfn = HFWaveFunction(10,7,{'alpha':5,'beta':5})
# Run the plain SCF solver.
hf_solver = PlainSCFSolver(ham,wfn)
hf_results = hf_solver.kernel()
# Mulliken and Lowdin population analyses on the converged wavefunction.
pa_m_results = PopulationAnalysisMulliken(mol,orbs,ham,wfn).kernel()
pa_l_results = PopulationAnalysisLowdin(mol,orbs,ham,wfn).kernel()
|
import click
from toapi import __version__
# Root command group for the toapi CLI. The docstring below doubles as the
# CLI help text (do not edit casually); -h/--help and -v/--version are wired
# up via the decorators.
@click.group(context_settings={"help_option_names": ["-h", "--help"]})
@click.version_option(__version__, "-v", "--version")
def cli():
    """
    Toapi - Every web site provides APIs.
    """
|
from django.contrib.auth.models import User
from import_export import resources, fields
from import_export.widgets import ForeignKeyWidget
from apps.users.models import ParentProfile, StudentProfile, TeacherProfile
from apps.school.models import Grade, Section
class ParentResource(resources.ModelResource):
    """Import/export resource for ParentProfile; users matched by username."""

    username = fields.Field(
        column_name='username',
        attribute='user',
        widget=ForeignKeyWidget(User, 'username'),
    )

    class Meta:
        model = ParentProfile
        exclude = ('user', 'id', 'image', )
class StudentResource(resources.ModelResource):
    """Import/export resource for StudentProfile.

    Related objects are matched by natural keys: user by username, grade and
    section by name, parent by first name.
    """

    username = fields.Field(
        column_name='username',
        attribute='user',
        widget=ForeignKeyWidget(User, 'username'),
    )
    grade = fields.Field(
        column_name='grade',
        attribute='grade',
        widget=ForeignKeyWidget(Grade, 'name'),
    )
    section = fields.Field(
        column_name='section',
        attribute='section',
        widget=ForeignKeyWidget(Section, 'name'),
    )
    parent = fields.Field(
        column_name='parent',
        attribute='parent',
        widget=ForeignKeyWidget(ParentProfile, 'first_name'),
    )

    class Meta:
        model = StudentProfile
        exclude = ('user', 'grade', 'section', 'parent', 'slug', 'id', 'file', )
class TeacherResource(resources.ModelResource):
    """Import/export resource for TeacherProfile; users matched by username."""

    username = fields.Field(
        column_name='username',
        attribute='user',
        widget=ForeignKeyWidget(User, 'username'),
    )

    class Meta:
        model = TeacherProfile
        exclude = ('user', 'id', 'slug', 'image', )
# Read a comma-separated string from the user (prompt means "enter any numbers").
a1 = input('输入任意数字')
# print(len(a1))
# Split on commas into a list of substrings and show it.
a2 = a1.split(',')
print(a2)
|
"""
We need to use the QC'd 24h 12z total to fix the 1h problems :(
"""
import Nio
import mx.DateTime
import Ngl
import numpy
import iemre
import os
import sys
import netCDF4
def merge(ts):
    """
    Process an hour's worth of stage4 data into the hourly RE

    Rescales the hourly IEMRE precipitation so that it sums to the QC'd
    stage IV 24-hour total ending at ``ts`` (a mx.DateTime at 12z).
    Python 2 code.
    """
    # Load up the 12z 24h total, this is what we base our deltas on
    fp = "/mesonet/ARCHIVE/data/%s/stage4/ST4.%s.24h.grib" % (
        ts.strftime("%Y/%m/%d"), ts.strftime("%Y%m%d%H") )
    grib = Nio.open_file(fp, 'r')
    # Rough subsample, since the whole enchillata is too much
    lats = numpy.ravel( grib.variables["g5_lat_0"][200:-100:5,300:900:5] )
    lons = numpy.ravel( grib.variables["g5_lon_1"][200:-100:5,300:900:5] )
    vals = numpy.ravel( grib.variables["A_PCP_GDS5_SFC_acc24h"][200:-100:5,300:900:5] )
    # Regrid the scattered samples onto the IEMRE analysis grid.
    res = Ngl.natgrid(lons, lats, vals, iemre.XAXIS, iemre.YAXIS)
    stage4 = res.transpose()
    # Prevent Large numbers, negative numbers
    stage4 = numpy.where( stage4 < 10000., stage4, 0.)
    stage4 = numpy.where( stage4 < 0., 0., stage4)
    # Open up our RE file
    nc = netCDF4.Dataset("/mesonet/data/iemre/%s_mw_hourly.nc" % (ts.year,),'a')
    # Hour offsets (since Jan 1) covering the 24 hours ending at ts.
    ts0 = ts + mx.DateTime.RelativeDateTime(days=-1)
    jan1 = mx.DateTime.DateTime(ts.year, 1, 1, 0, 0)
    offset0 = int(( ts0 - jan1).hours)
    offset1 = int(( ts - jan1).hours)
    if offset0 < 0:
        offset0 = 0
    # Sum the existing hourly precip; floor to a tiny value to avoid divide-by-zero.
    iemre2 = numpy.sum(nc.variables["p01m"][offset0:offset1,:,:], axis=0)
    iemre2 = numpy.where( iemre2 > 0., iemre2, 0.00024)
    iemre2 = numpy.where( iemre2 < 10000., iemre2, 0.00024)
    print "Stage IV 24h [Avg %5.2f Max %5.2f] IEMRE Hourly [Avg %5.2f Max: %5.2f]" % (
        numpy.average(stage4), numpy.max(stage4),
        numpy.average(iemre2), numpy.max(iemre2) )
    # Per-cell scaling factor that makes the hourly sum match the 24h QC total.
    multiplier = stage4 / iemre2
    print "Multiplier MIN: %5.2f AVG: %5.2f MAX: %5.2f" % (
        numpy.min(multiplier), numpy.average(multiplier),numpy.max(multiplier))
    for offset in range(offset0, offset1):
        data = nc.variables["p01m"][offset,:,:]
        # Keep data within reason
        data = numpy.where( data > 10000., 0., data)
        adjust = numpy.where( data > 0, data, 0.00001) * multiplier
        # Cap unreasonable hourly totals and zero out trace amounts.
        adjust = numpy.where( adjust > 250.0, 0, adjust)
        nc.variables["p01m"][offset,:,:] = numpy.where( adjust < 0.01, 0, adjust)
        ts = jan1 + mx.DateTime.RelativeDateTime(hours=offset)
        print "%s IEMRE %5.2f %5.2f Adjusted %5.2f %5.2f" % (ts.strftime("%Y-%m-%d %H"),
            numpy.average(data), numpy.max(data),
            numpy.average(nc.variables["p01m"][offset]),
            numpy.max(nc.variables["p01m"][offset]))
        nc.sync()
    # Recompute the sum after adjustment for a sanity-check printout.
    iemre2 = numpy.sum(nc.variables["p01m"][offset0:offset1,:,:], axis=0)
    print "Stage IV 24h [Avg %5.2f Max %5.2f] IEMRE Hourly [Avg %5.2f Max: %5.2f]" % (
        numpy.average(stage4), numpy.max(stage4),
        numpy.average(iemre2), numpy.max(iemre2) )
    nc.close()
if __name__ == "__main__":
    # Either take an explicit date (year month day) from the command line,
    # or default to yesterday at 12z.
    if len(sys.argv) == 4:
        ts = mx.DateTime.DateTime( int(sys.argv[1]),int(sys.argv[2]),
                        int(sys.argv[3]), 12 )
    else:
        ts = mx.DateTime.gmt() + mx.DateTime.RelativeDateTime(days=-1,hour=12,minute=0,second=0)
    merge(ts)
|
import re
# re.search returns the first run of exactly 5 consecutive letters.
print(re.search(r"[a-zA-Z]{5}","a ghost")) # matches "ghost"
print(re.search(r"[a-zA-Z]{5}","a ghost appeared")) # search() only returns
# the first match ("ghost"), even though "appea" would also qualify
# re.findall returns every non-overlapping run of 5 letters — these are
# substrings, not necessarily whole words ("appeared" yields "appea").
print(re.findall(r"[a-zA-Z]{5}","a scary ghost appeared")) # ['scary',
# 'ghost', 'appea', ...]
# \b anchors the start of each match to a word boundary, so every match
# begins at the start of a word (the end is still not anchored).
print(re.findall(r"\b[a-zA-Z]{5}","a scary ghost appeared"))
# {5,10}: runs of 5 to 10 word characters.
print(re.findall(r"\w{5,10}","I really like strawberries"))
print(re.findall(r"\w{5,10}","I really like strawberries"))
# {5,}: runs of 5 or more word characters.
print(re.findall(r"\w{5,}","I really like strawberries"))
print(re.search(r"s\w{,20}","I really like strawberries")) # an 's' followed
# by up to 20 word characters
|
seats = """
LLLLL.LLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLL.LLLLLLLLLL.LLLLLLLL.LLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLL
LLLLLLLLLL.LLL.LLLLL.LLLLLL.LLLLLLL.LLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL..LLLLL.LLLLLLLLLLLLLLLL
LL.LLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLLL.LLL.LLLLLLLLLLL.LLLLLLLL..LLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLL.LLL.LL.LLLL.LLL.LLLLLL.LLLLL.LLLLLLLLLLLL.LLLL.LLLLLLLLLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLL..LLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLL.LL.LLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
.......L...L.L...L......L..LLL..L.............L..L....L.LL.......LL......L..LL..L..L.L.LL..L.L....
LLLLL..LLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLL..LLLLLLLL.LLLLLLLL.LLLL.LLLLLL.LLLLLL.LLLLLLLLLLLL.LLL
LLLLLLLLLL.LLLLLLLLL.LLLLLL..LLLLLL.LLLLLLL.LLL..LL.LLLLLLLLLL.LLLL.LLLLLLLLLLLLL.LL.LLLLLL.LLLLLL
L.LLL.LLLLLLLLLLLLLLLLLLLLL.LLLLLL..LLLLLLL.LLLLLLLL..LLLLLLLL.LLLL.LLL.LL.LLLLLL.L.LLLLL.L.LLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLL.LLLLLLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLL.LLLLLL
LLLLLLLLLL.LLLLLLLLLLLLLLLL.LLL.LLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLL.L.LLLLLLL.L.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
.....LL..L....LL.L....L....L.....L.L...L......LLL..L.L....L..LLL...L.L..L.L..LL...L..L.L.....L.L.L
LLLLL.LLL..LLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLL.LLL.LLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLL.LL.L.LLLLLL.LLLLLLL.LLLLLLL.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLL.LLLLLLL.LLLLLL.LLL.LL.LLLLLLLLLLLLLLLL
LLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LL.LLLL.LLLLLL.LL.LLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLL
.LLLL.LLLL.LLLLLLLLL.L.LLL..LLLLLLL.LLLLLLLLLLLLL.LLL.LLLLLLLLLLLLL.LLLL.LLL.LLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLLLLLL.LLL...LLLLLL.LL.LLLLLL.LLL.LLLL.LLLLLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
.......L.L...L..L..L.LL.LL.L....L.L..LL..L.L.L.LL...LLL...LLLL..LLLL.......L...L....LL.....L.LL...
LLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLL.LLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLL
LLLLLLLLLL.LLLLLLLLL.LL.LLL.LLLLL.L.LLLLLLLLLLLLLLLLL.LLLLLLLL.LLLL.LLLLLLLLL.LLL.LLLLLLLLL.LLLLLL
LLLLL.LLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLL.LLL.LLLLLLLLL.LLLLLLLL.LL.LLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
LLLLL..LLL.LLLLLLLLLLLLLLLL.LLLLLLL.LLLLLLL.LLLLLL.LL.LLLLLLLLLLLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
L...LL.LL.L....LL..L.L..L..L.L.LL.L.....L..LL.......L..LLL..L...LL......LL..LL..L.L.LL..LL.L......
LLLLL.LLLL.LLLLLLLLLLLLLLLL.LLL.LLL.LLLLLLL.LLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL..LLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLLLLLLLLLLLL.LLLL.L.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLL..LLLLLL
.LLLL..L...L..LL...L...L.L.L..LLL.L....L..L..L.....L.L..............LL.L....L..L......LL..LL..L...
LLLLL.LLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLLLL.LL.LLLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLLLLLLLLL.LL..LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL.LLLLLLLL.LLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
LLLLL.LLLLLLLL.LLLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
LLLLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLL.LLLLLLL.LLLLL.LLL.LLLLLLLL.LLLL.L.LLLLLLLLLLL.LLLLLLLLLLLLLLLL
LLLLL.L.LLLLLLLLLLLLLLL.LLL.LLLLLLL.LLLLLLLLLLLLLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLL.LLLLLLLLLL.LLLLL
LLLLL.LLLL.LL.LLLLLL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL..LLLLLLLLL.L.LLLLLLLLLLLLLLL.LLLLLLL
L.LLL.LL.L.L...L.L....L.....L.....LL..........LL...L..L.L....L..L.L....L.L.LL.L.......LL.....L.L..
LLLLL.LLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLL.L.LLLLLLL.LLLLLLLL.LLLL.LLLLLL..LLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLLLLLLLLLLLLLL.LLLL.LLLLL.L.LLLLLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLL.LL.LLLLLL
LLLL..LLLL.LLLLLLLLL.LLLLLL.LLL.LLL.LLLLLL..LLLLLLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLLLLLLLLL.LL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLL.LL.LLLLLLLLLLLLLLL.LLLLLLL.LLLLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLL.L.LLLLL.LLLLLLL.LLLLLLLLL.LLLLLLLL.LLLL.LLLLLLL.LLLLL.LLLLLLLLL.LLLLLL
...L.......LL....L......L...L..L.L..L...L.L.....LLLL.L...L.....L...L.....LL...........L..L...L...L
LLLLLLLLLLLL.LLLLLLL.LLLLLL.LLL.LLL.LLLLLLL.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LLL
LLLLL.LLLL.LLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLL.LLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLLLLLLLLLLLL.LLLLLL.L.LLLLL.LLLLLLLLLLLLLL.LL.LLLLLLLL.LLLL.LLLLLL.LLLLLL.LLLLLLL.LLLLLLLL
LLLLL.LLLL.LLL.LLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLL
LLLL...LLL.LLLLLLLLL.LLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLL.LLLLLL.LLLL.L.LLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
.LLLL.LLLL.LLLLLLLLL.LLLLLL.LLLLL.L.LLLLLLLLLLLLLLLLL..LLLLLLL.LLLL.LLLLLLLLLLLLLLLLLLLLL.L.LLLLLL
........L.L..LL.....L..L.L...L..LL.LL..L.L..L...........L.L...LLLL.L..L..........L..L....L..LL...L
LLLLLLLLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLLL.LLLLLLLLLLLL.LLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLLLLLLLLLLLL.LLLLLL.LLLLL.L.LLLLLLLLLLLLLLLLL.LLLLLLLL.LLLL.LL.LLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLL.LL
LLLLL.LLLLLLLLLLLLLL.LLLLLL.LLLLLLL.LL.LLLL.LLLL...LLLLLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLL.LLLL.L
LLLLL.LLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLLL.LLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLLLLLLL.LLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLL.LLLLLLLLLL.LL.LL.LLLLLLLLLL.LLLLLLLLLLLLLLLL
.L.....LL......LLLLLL....L..LL...L.L..LLL.L..............L..LLL.L..L.L........LLLL....L..L..L.....
LLLLL.LLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLL.LL.LLL.LLLLLL.LLLLLLLLLLLLLLLL
LLLLL.LLLLLLLLLLLLLL.LLL.LL.LLLLLLL.LLLLLLL..LLLLLLLLLLLL.LLLL.LLLLLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLL.LLL.LLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLL.L.LLLL.LLLLLL.LLLLLLLLL.LLLL.L
LLLLL.LLLLLLLLLLLLLLLLLLLLL.LLLLLLL..LLLLLL.L.LLLLLLLLLLLLLLLL.LLLL.LL.LLLLLLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLL.L.LLLLLLLLLLLL..LLLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
...L.LLLL....LL....LL.L...LLLL.LL......L....L.L.L.L........LLLL.LL....L.L..L.L....LL..L....L......
LLLLL.LLLL.LLLLLLLL..LLLLLL.LLLLLLL.LLLLLLLL.LLLLLLLL.LLLLLLLLLLLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLL.LLL.LLLLL.LLL.LLL.LLLLLL.LLLLLLL.LLLLLLLLL.LLLLLLLL.LLLL.LLLLLL.LLLLLL.LLLLLLLLLLLLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLL.LLLLLLLLLLLL..LLLL.LL.LLLLLLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLL.LLLLLLLLLLL.LLLLL.LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
....LLL...LL.........L.....LL...L.L...LL......L...LL..L.L.....L..L....L.L.....L.L...L..LL...L.L...
LLLLLLLL.L.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLL.LLL.LLLLL.LLLLLLLL.LLLL.LLLLLLLLLLLLL.LLLLLL.LL..LLLLL
LLLLL.LLLLLLLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLL.LLLL.LL.LLL.LLLLL.LLLLLLLLLLLLLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLL.LLLLLLLL.LLLL.LLLLLLLLLL.LL.LLLLLLLLL.LLLLLL
LLLL..LLLL.L.LLLLL.L.L.LLLLLLLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLLLLLLLLLLLLLLLLL.LLLLLLLLLLLLLL.LLLLLL.L.LLLLLLLL.L.LLLLLLLLLLL.LLLLLL.LLLLLL.LLLLLLLLLLLLLL.L
LLLLLLLLL.LLLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLL.L.LLLLLLL.LLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLL..LLLLL
...LL...LL.L.....L.L.LL......L..L.L...L.L..L..L..........L...L.....L...L......L..LLL...L.L..LLL...
LLLLLLLLLLLLLLLLLLLLLLLLLLL.LLLLLLL.LLLLLLL.LLLLLLLLL.LLLLLLLL.LLLL.LLLLLL.LLLLLL.LLLLLLLLL.LLLLLL
LLLLL..LLL.LLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLL.LLLLLLLL..LLLLLLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLL..LLLLL
LLLLLLLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLLLLLL.L.LLLLLLLLL.LLLLLLLLLLLLL.LLLLLLLLL.LLL.LLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLL..LLLLLLL.LLLLLLLLLL.LLLL.LLLLLLLLL.LLLLLLLL.LLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLLLL
LLLLLLLLLLLLLLLLLLLL.LLLLL..LLLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLL.LLLL.LL.LLLLLLLLLL.LLLLLLLLL.LLLLLL
....L....L.....LLLLL...L.L...LL...L.LL.........L.L.L..L..LL.....L..LLL..L.L......LL.L.LLLL..L..LLL
LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLLLLLLLLLL.LLLLLL.LLLLLLLL.L.L.LLLLLLLLL.LLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLLLL.LLLLLLLLLL.LLLLLLLLL.LL.LLLLLLLLLLLLL.LLLLLLLL..LLLLLL
LLLLLLLLLLLLLLLLLLLL.LLLLLL.LLLLLLL.LLLLLLL.LLLLLLLLLLLLLLLLLL.LLLL.LLLLLL.LLLLLLLLLLLLLLLLLLLLLLL
LLLLL.LL.LLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLL.LLLLL.LLLLLL..LLLLLLLLLLLLLLLL..LLLLLL
LLLLL.L.LL.LLLL.LLLL.LLLLLL..LLLLLL.LLLLLLLLLLL.LLLLLLLLLLL.LLLLLLL.LLLLLL.LLLLLL.LLLLLLLLLLLLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLLLLLLLLLL.LLLLLLL.LLLLLLLL..LLLLLLLL.LLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLL
LLLLL.LLLL.LLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLL.LLLLLLLLL.LLLLLLL..LLLL.LLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLLLLLLLL.LLL.LLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLL.LLLLLL.LLLLLLLLLLLLLLLL.LLLLLL
LLLLL.LLLL.LLLLLLLLLLLLLLLL.LLLLLLLLLLLLLLL.L.LLLLLLL.LL.LLLLLLL.LL.LLLLLL.LLLLLL.LLLLLLLLL..LLLLL
LLL.L.LLLL.LLLLLLLLL.LLLLLL..LLLLLL.LLLLLLL.LLLLLLLLL.LLLLLLLL.LLLL.LLLLLLL.LLLLL.LLLLLLLLL.LLLLLL
L.LLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLL.LLLLLLLLLLLLLLLLLLLLL.LLLL.LLLL..LLLLLLLLLLLLLLLLLL.LLL.LLLLLL
"""
import itertools, time
def get_seat_type(seats, coords):
    """Return the grid character at coords == (x, y)."""
    x, y = coords[0], coords[1]
    return seats[y][x]
def get_surroundings(seats, coords):
    """Collect the first visible seat in each of the 8 directions from coords.

    Casts a ray from (x, y) = coords along every direction; the first
    non-floor ('.') character found inside the grid is recorded. Rays that
    leave the grid contribute nothing. This replaces the original's bare
    ``except``/negative-index probing with explicit bounds checks, which is
    output-equivalent (the original also discarded seats found via negative
    wrap-around indices).

    Args:
        seats: list of strings, the seat grid (rows of '.', 'L', '#').
        coords: (x, y) position to look from.

    Returns:
        List of seat characters ('L' or '#'), in the direction order of
        itertools.product([-1, 0, 1], [-1, 0, 1]) minus (0, 0).
    """
    surroundings = []
    height = len(seats)
    for dx, dy in itertools.product([-1, 0, 1], [-1, 0, 1]):
        if (dx, dy) == (0, 0):
            continue
        x, y = coords[0] + dx, coords[1] + dy
        # Walk outward until we fall off the grid or hit a seat.
        while 0 <= y < height and 0 <= x < len(seats[y]):
            seat = seats[y][x]
            if seat != '.':
                surroundings.append(seat)
                break
            x += dx
            y += dy
    return surroundings
def determine_new_seat(seat_type, surroundings):
    """Apply the seating rules: empty seats fill when no '#' is visible,
    occupied seats empty when 5 or more '#' are visible, else no change."""
    occupied = surroundings.count('#')
    if seat_type == 'L' and occupied == 0:
        return '#'
    if seat_type == '#' and occupied >= 5:
        return 'L'
    return seat_type
def update_seats(seats):
    """Advance the grid one round; every seat updates simultaneously.

    Floor ('.') cells never change; seats are updated from the visible
    neighbours in the *old* grid. Returns the new list of row strings.
    """
    width, height = len(seats[0]), len(seats)
    updated = []
    for y in range(height):
        row_chars = []
        for x in range(width):
            seat = get_seat_type(seats, [x, y])
            if seat == '.':
                row_chars.append('.')
            else:
                visible = get_surroundings(seats, [x, y])
                row_chars.append(determine_new_seat(seat, visible))
        updated.append(''.join(row_chars))
    return updated
def main(seats):
    """Iterate rounds until the grid stops changing; return the occupied count."""
    while True:
        updated = update_seats(seats)
        if updated == seats:
            # Fixed point reached: count '#' across the whole grid.
            return ''.join(updated).count('#')
        seats = updated
# Drop the first and last character of the grid literal (presumably the
# newlines surrounding it -- confirm against the string above), then split
# into one string per row.
seats = seats[1:][:-1].split('\n')
start = time.time()
print(main(seats))
end = time.time()
print('Runtime: ', end-start)
|
import datetime
import unittest
from unittest import mock
import freezegun
from tests.plugins import PluginTestCase
from plugins.conversion import get_currency_data
class GetCurrencyDataTest(unittest.TestCase):
    """Tests the caching behavior of plugins.conversion.get_currency_data."""

    def test_uses_cached_data_if_recent_enough(self):
        """Cached rates are reused while fresh; stale data triggers a refetch."""
        # The plugin stores its last fetch time under the "_timestamp" key.
        currency_data = {"_timestamp": datetime.datetime(2018, 9, 30, 15, 30)}
        get_patch = mock.patch("requests.get")
        data_patch = mock.patch.dict("plugins.conversion._currency_data", currency_data)
        # 30 minutes after the cached timestamp: no HTTP request expected.
        with freezegun.freeze_time(
            "2018-09-30 16:00:00"
        ), data_patch, get_patch as get_mock:
            assert get_currency_data() == currency_data
            get_mock.assert_not_called()
        # 90 minutes after: the cache is stale, so the ECB daily feed is
        # fetched and the timestamp is refreshed to "now".
        with freezegun.freeze_time(
            "2018-09-30 17:00:00"
        ), data_patch, get_patch as get_mock:
            get_mock.return_value = mock.Mock(text="")
            assert get_currency_data() == {
                "_timestamp": datetime.datetime(2018, 9, 30, 17, 0)
            }
            get_mock.assert_called_once_with(
                "http://www.ecb.europa.eu/stats/eurofxref/eurofxref-daily.xml",
                timeout=2,
            )
# Patch-target paths for the plugin's two data sources (DuckDuckGo answers
# and ECB currency rates), shared by the tests below.
ddg_f = "plugins.conversion.get_duckduckgo_data"
ecb_f = "plugins.conversion.get_currency_data"
class ConversionPluginTest(PluginTestCase):
    """End-to-end reply tests for the conversion chat plugin."""

    def create_plugin(self):
        """Hook for PluginTestCase: build the plugin under test."""
        from plugins.conversion import ConversionPlugin
        return ConversionPlugin(self.bot, self.channel)
    @mock.patch(ddg_f, return_value={"AnswerType": "conversions", "Answer": "TEST"})
    @mock.patch(ecb_f, return_value={"NOK": 8.00, "DKK": 6.00})
    def test_converts_currencies(self, currency_mock, convert_mock):
        """EUR-based rates convert locally; unknown currencies fall back to DDG."""
        self.assertEqual("10 eur = 80 nok", self.reply("10 eur into nok"))
        self.assertEqual("10 eur = 60 dkk", self.reply("10 eur into dkk"))
        self.assertEqual("10 nok = 1.25 eur", self.reply("10 nok into eur"))
        self.assertEqual("10 dkk = 1.67 eur", self.reply("10 dkk into eur"))
        # Cross rates go through EUR.
        self.assertEqual("10 nok = 7.50 dkk", self.reply("10 nok into dkk"))
        self.assertEqual("10 dkk = 13.33 nok", self.reply("10 dkk into nok"))
        self.assertEqual("10 dkk = 13.33 nok", self.reply("what is 10 dkk into nok?"))
        # "k" suffix and thousand separators.
        self.assertEqual("10,000 eur = 80,000 nok", self.reply("10k eur into nok"))
        self.assertEqual("1,100 eur = 8,800 nok", self.reply("1.1k eur into nok"))
        # CNY is not in the mocked ECB data, so DuckDuckGo answers ("TEST").
        self.assertEqual("TEST", self.reply("10 cny into eur"))
        self.assertEqual("TEST", self.reply("10 eur into cny"))
        self.assertEqual("TEST", self.reply("10 usd into cny"))
        # Multiple targets; unknown ones are silently dropped.
        self.assertEqual("10 eur = 80 nok, 60 dkk", self.reply("10 eur into nok,dkk"))
        self.assertEqual(
            "10 eur = 80 nok, 60 dkk", self.reply("10 eur into nok,dkk,cny")
        )
    def check_convert_reply(self, message, expected_qs, response="DEFAULT"):
        """Assert that *message* produces a DDG query *expected_qs* and echoes *response*."""
        if response == "DEFAULT":
            response = message + " reply"
        return_value = {"AnswerType": "conversions", "Answer": response}
        with mock.patch(ddg_f, return_value=return_value) as mf:
            self.assertEqual(response, self.reply(message))
            mf.assert_called_with(
                "https://api.duckduckgo.com",
                {"q": expected_qs, "format": "json", "no_html": 1},
            )
    @mock.patch(ecb_f, return_value={})
    def test_converts_units(self, currency_mock):
        """Unit conversions are normalized (case, spacing, separators) before DDG."""
        self.check_convert_reply("100kg into stones", "100 kg into stones")
        self.check_convert_reply("100 kg into stones", "100 kg into stones")
        self.check_convert_reply("100 kg in stones", "100 kg in stones")
        self.check_convert_reply("100 KG IN STONES", "100 kg in stones")
        self.check_convert_reply("asdf 100 kg in stones asdf", "100 kg in stones")
        self.check_convert_reply("what is 100 kg in stones?", "100 kg in stones")
        self.check_convert_reply(
            "100 square metres in acres", "100 square metres in acres"
        )
        self.check_convert_reply(
            "100 cubic metres in litres", "100 cubic metres in litres"
        )
        self.check_convert_reply("100 fl.oz in litres", "100 fl.oz in litres")
        self.check_convert_reply("100 000 kg in tons", "100000 kg in tons")
        self.check_convert_reply("100,000 kg in tons", "100000 kg in tons")
        self.check_convert_reply("100k kg in tons", "100000 kg in tons")
        self.check_convert_reply("123 456.78 kg in lbs", "123456.78 kg in lbs")
        self.check_convert_reply("0.5 kg in lbs", "0.5 kg in lbs")
        self.check_convert_reply(".5 kg in lbs", "0.5 kg in lbs")
|
import sys
# Reads a graph from in.txt and writes to out.txt the path from `inst` to
# `dest` that maximizes the PRODUCT of edge costs (Bellman-Ford-style
# relaxation with * instead of +), or "N" if dest is unreachable.
inp = open("in.txt", "r")
out = open("out.txt", 'w')
n = int(inp.readline())
# last[i] holds the nodes v that have an edge v -> i; costs[(v, i)] is the
# cost of that edge.
last = []
costs = {}
last.append([])  # index 0 unused; nodes are 1-based
for i in range(1, n + 1):
    # Line format: pairs "<source> <cost>" for edges into node i,
    # with a trailing token that is discarded below.
    str_t = inp.readline()
    l = str_t.split(" ")
    tmpL = l[:-1]
    if len(tmpL) == 0:
        last.append(tmpL)
    else:
        listNodes = []
        for k in range(0, len(tmpL) - 1, 2):
            v = int(tmpL[k])
            c = int(tmpL[k + 1])
            listNodes.append(v)
            costs[(v, i)] = c
        last.append(listNodes)
inst = int(inp.readline())  # start node
dest = int(inp.readline())  # destination node
inp.close()
# Sentinel for "unreached"; most negative machine int.
inf = -sys.maxsize-1
D = []     # D[i]: best product of edge costs from inst to i found so far
pred = []  # pred[i]: predecessor of i on that best path
for i in range(0, n + 1):
    D.append(inf)
    pred.append(inf)
# Initialize direct successors of the start node.
for i in range(1, n + 1):
    if (inst in last[i]):
        D[i] = costs[(inst, i)]
        pred[i] = inst
# Relax every edge repeatedly (Bellman-Ford shape, n-3 iterations).
for iter in range(2, n - 1, 1):
    for v in range(1, n + 1, 1):
        if v == inst:
            # NOTE(review): `pass` does NOT skip this node -- `continue` was
            # probably intended here; verify before changing.
            pass
        for edge in last[v]:
            if edge == inst:
                # NOTE(review): same as above -- this `pass` has no effect.
                pass
            # NOTE(review): when D[edge] is still `inf` (a huge negative),
            # multiplying by a negative cost would yield a huge positive
            # value; verify costs are non-negative in the input.
            if D[edge] * costs[(edge, v)] > D[v]:
                D[v] = D[edge] * costs[(edge, v)]
                pred[v] = edge
# Reconstruct the path by walking pred[] back from dest to inst.
stack = []
if (D[dest] != inf):
    out.write("Y\n")
    stack.append(dest)
    u = pred[dest]
    while (u != inst):
        stack.append(u)
        u = pred[u]
    stack.append(inst)
    stack.reverse()
    res = " ".join(str(x) for x in stack)
    out.write(res)
    out.write("\n")
    out.write(str(D[dest]))
else:
    out.write("N\n")
out.close()
|
import numpy as np
import mycsv
#** Intuition
# The parameter values that accumulate the best (lowest-numbered) rankings
# across all rows should form the best strategy.
def findStrategy(path,year,exchange):
    """Pick, for each parameter column, the value with the lowest summed ranking.

    Reads the first 7 columns of the file "<path><year>_<exchange>" (rows are
    assumed ordered best-first, so a row's index is its ranking), sums the
    rankings of each distinct value per column, and returns the value with the
    smallest total for every column.

    NOTE(review): Python 2 code -- relies on dict.values()/.keys() returning
    aligned lists, which holds in CPython 2 only because the dict is not
    mutated between the two calls.
    """
    data=mycsv.getCol(path+str(year)+"_"+exchange,range(7))
    #data=np.array([[1,2.2],[2,2.2],[1,2.2],[2,3.2],[1,2.2],[2,3.2],[1,3.2],[2,3.2],[1,3.2],[2,2.2]])
    data=np.asarray(data[1:])  # drop the header row
    bestparameters=[]
    for parameter in data.T:
        # value_freq maps str(value) -> sum of the rankings where it appears.
        value_freq={}
        for ranking,value in enumerate(parameter):
            try:
                value_freq[str(value)]+=ranking
            except KeyError:
                value_freq[str(value)]=ranking
        print value_freq
        # Lowest total ranking wins for this column.
        bestparametervalueindex=np.argmin(value_freq.values())
        bestparametervalue=value_freq.keys()[bestparametervalueindex]
        bestparameters.append(bestparametervalue)
    return bestparameters
if __name__=="__main__":
    # Scan years 2013 down to 2000 for each listed exchange and print the
    # best parameter values found in that year/exchange results file.
    for year in range(2013,1999,-1):#2013,1999,-1
        for exchange in ["OTCBB"]:#"NASDAQ","NYSE","OTCBB","AMEX"
            print year,exchange
            path="FinalResults/"
            print findStrategy(path,year,exchange)
            print ""
|
import os
import sys
import cv2
import json
import numpy as np
import skimage.draw
from imgaug import augmenters as iaa
ROOT_DIR = os.path.abspath("./")
print("ROOT_DIR", ROOT_DIR)
# How many augmented variants to generate per source image.
AUGMENT_SIZE_PER_IMAGE = 10
# Usage: python <script> <target-directory>
args = sys.argv
if len(args) != 2:
    print("Need option target directory")
    sys.exit()
target = args[1]
print("Augment target Directory :", target)
# Augmentation definition
# seq0: geometric transform applied to depth+image+mask together.
seq0 = iaa.Sequential([
    iaa.Sometimes(0.8, iaa.Affine(rotate=(-90, 90))),
])
# seq1: photometric noise applied to the RGB image only.
seq1 = iaa.Sequential([
    iaa.Sometimes(0.8, iaa.GaussianBlur(sigma=(0.0, 2.0))),
    iaa.Sometimes(0.8, iaa.LogContrast(gain=(0.8, 1.4))),
    iaa.Sometimes(0.4, iaa.AdditiveGaussianNoise(scale=(0, 20)))
])
# seq2: dropout applied to the depth channel only.
seq2 = iaa.Sequential([
    iaa.Sometimes(0.9, iaa.CoarseDropout((0.01, 0.04), size_percent=(0.01, 0.05))),
])
DATA_DIR = os.path.join(ROOT_DIR, target)
# Walk DATA_DIR; every directory containing a VIA-style label.json gets an
# "augmented" subdirectory with AUGMENT_SIZE_PER_IMAGE variants per image
# plus a regenerated label_augment.json.
ids = 0  # global counter making augmented label keys unique
for curDir, dirs, files in os.walk(DATA_DIR):
    if not curDir.endswith("augmented") and os.path.isfile(curDir+ "/label.json"):
        if not os.path.isdir(curDir + "/augmented"):
            os.makedirs(curDir + "/augmented")
        annotations = json.load(open(curDir + "/label.json"))
        annotations = list(annotations.values())
        # Keep only annotated images (those with at least one region).
        annotations = [a for a in annotations if a['regions']]
        print("Current Directory for Augmentation:", curDir)
        result = {}
        for a in annotations:
            polygons = [r['shape_attributes'] for r in a['regions']]
            names = [r['region_attributes'] for r in a['regions']]
            img = cv2.imread(curDir + "/" + a['filename'])
            img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            height, width = img.shape[:2]
            # Matching depth map saved as "<stem>_depth.npy".
            depth = np.load(curDir + "/" + a['filename'].split(".")[0] + "_depth.npy")
            # Add uniform noise in [-0.002, 0.002) to the depth values.
            noise = 0.004 * np.random.rand(depth.shape[0], depth.shape[1]) - 0.002
            depth += noise
            depth = np.reshape(depth, [depth.shape[0], depth.shape[1], 1])
            # One binary mask channel per annotated polygon.
            mask = np.zeros([height, width, len(polygons)], dtype=np.uint8)
            mask_attr = []  # class name per mask channel
            mask_size = []  # original pixel count per mask channel
            for i, (p, n) in enumerate(zip(polygons, names)):
                rr, cc = skimage.draw.polygon(p['all_points_y'], p['all_points_x'])
                mask[rr, cc, i] = 1
                mask_attr.append(n["name"])
                mask_size.append(np.count_nonzero(mask[:,:,i]))
            # Stack depth(1) + rgb(3) + masks(N) so seq0 transforms them all
            # with the same geometry.
            depth_and_img = np.append(depth, img, axis=2)
            img_and_mask = np.append(depth_and_img, mask, axis=2)
            for i in range(AUGMENT_SIZE_PER_IMAGE):
                filename = a['filename'].split(".")[0] + "_" + str(i) + ".png"
                depth_filename = a['filename'].split(".")[0] + "_" + str(i) + "_depth"
                # if os.path.isfile(curDir + "/augmented/" + filename):
                #     continue
                aug_all = seq0.augment_image(img_and_mask)
                # Split the stacked channels back apart.
                aug_depth = aug_all[:, :, 0]
                aug_img = aug_all[:, :, 1:4]
                aug_mask = aug_all[:, :, 4:]
                aug_img = aug_img.astype(np.uint8)
                aug_mask = aug_mask.astype(np.uint8)
                aug_img = seq1.augment_image(aug_img)
                aug_depth = seq2.augment_image(aug_depth)
                cv2.imwrite(curDir + "/augmented/" + filename, cv2.cvtColor(aug_img, cv2.COLOR_RGB2BGR))
                np.save(curDir + "/augmented/" + depth_filename, aug_depth)
                # Make augmented masked label data
                result[str(ids) + "_" + filename] = {"filename": filename, "file_attributes":{}, "size":0}
                regions = []
                for j in range(mask.shape[-1]):
                    attr = {}
                    attr["region_attributes"] = {"name": mask_attr[j]}
                    tmp = aug_mask[:,:,j]
                    ret, thresh = cv2.threshold(tmp, 0.5, 1.0, cv2.THRESH_BINARY)
                    # NOTE(review): findContours returns 3 values on OpenCV 3.x
                    # and 2 on 4.x -- this unpacking pins the OpenCV version.
                    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
                    if (len(contours) == 0) : continue
                    # Drop masks that lost more than 30% of their area.
                    if np.count_nonzero(tmp) < mask_size[j] * 0.7:
                        continue
                    # NOTE(review): this appears intended to select the contour
                    # with the most points, but it only compares neighbours, so
                    # it does not always find the global maximum -- verify.
                    contours_index = 0
                    for k in range(len(contours)):
                        if k != len(contours) - 1:
                            if len(contours[k]) < len(contours[k + 1]):
                                contours_index = k + 1
                    contours = contours[contours_index]
                    all_points_x = [int(contours[k][0][0]) for k in range(len(contours))]
                    all_points_y = [int(contours[k][0][1]) for k in range(len(contours))]
                    attr["shape_attributes"] = {"name": "polyline", "all_points_x": all_points_x, "all_points_y": all_points_y}
                    regions.append(attr)
                result[str(ids) + "_" + filename]["regions"] = regions
                ids += 1
        with open(curDir + "/augmented/label_augment.json", "w") as f:
            json.dump(result, f)
print("Finished")
|
import pymongo
# Module-level handle to the connected database; set by connect().
DB = None
def connect(host, database, auth=None, port=27017, ssl=True):
    """Connect to MongoDB and store the database handle in the module-global DB.

    Args:
        host: MongoDB host name or URI.
        database: name of the database to select on the client.
        auth: optional {'username': ..., 'password': ...} mapping.
        port: server port (default 27017).
        ssl: use TLS for the connection (default True).
    """
    global DB  # clearer than the original globals()['DB'] = db assignment
    mc = pymongo.MongoClient(host, port=port, ssl=ssl)
    db = getattr(mc, database)
    if auth:
        # NOTE(review): Database.authenticate was removed in PyMongo 4.x;
        # confirm the pinned pymongo version still provides it.
        db.authenticate(auth['username'], auth['password'])
    DB = db
|
from extruder_turtle import ExtruderTurtle
import math
import random
# Prints a "furry" prism: a polygon extruded layer by layer, with short
# hair-like spikes sticking out of every side.
HAIRLENGTH = 1            # length of each hair (mm)
HAIR_ANGLE = math.pi/3    # angle between a hair and the side it grows from
EXT_DENSITY = 0.05        # extrusion density passed to the turtle
FEEDRATE = 500            # print head speed
NUM_HAIRS = 15            # hairs per polygon side
LAYER_HEIGHT = 0.15       # vertical step between layers (mm)
SIDELENGTH = 25           # polygon side length (mm)
NUM_SIDES = 5             # pentagon
LAYERS = 100              # number of printed layers
# Segment between hairs; NUM_HAIRS+1 segments of dx add up to SIDELENGTH.
dx = SIDELENGTH/(NUM_HAIRS+1)
t = ExtruderTurtle()
## Set up the turtle
t.name("furry-prism.gcode")
t.setup(x=100, y=100)
t.rate(FEEDRATE)
t.set_density(EXT_DENSITY)
for l in range(LAYERS):
    ## Draw a pentagon
    for k in range(NUM_SIDES):
        t.move(dx)
        for n in range(NUM_HAIRS):
            # Turn out, extrude a hair, retrace it, turn back, continue the side.
            t.left(HAIR_ANGLE)
            t.move(HAIRLENGTH)
            t.move(-HAIRLENGTH)
            t.right(HAIR_ANGLE)
            t.move(dx)
        t.right(2*math.pi/NUM_SIDES)
    ## Move to the next layer
    t.lift(LAYER_HEIGHT)
## Save to a GCODE file
t.finish()
|
# -*- coding: utf-8 -*-
"""
"""
from openpyxl import load_workbook
def got_drink(item_dict, item):
    """Take one unit of *item* out of stock.

    item_dict maps product name -> remaining quantity (e.g.
    item_dict['cider'] == 10). The entry is decremented in place; a
    KeyError propagates for unknown products.

    Always returns 0, matching the original interface.
    """
    item_dict[item] = item_dict[item] - 1
    return 0
EXCEL_FILE_NAME = 'Database.xlsx'
db = load_workbook(filename=EXCEL_FILE_NAME)
# The vending-machine sheet (sheet name is Korean for "vending machine").
machine_db = db['자판기']
# Declare an empty dictionary named item_dict.
item_dict = {}
# Visit machine_db's rows one at a time.
# for [var] in machine_db.rows:
#   [var] receives each row of machine_db in turn, to the end.
#   (The loop variable is simply named "row".)
for row in machine_db.rows:
    # The first row holds headers (None, quantity, price, sold-out); data
    # rows look like (cider, 10, 1000, None), ...
    # If the row's 0th cell is not None, i.e. the first column is non-empty:
    if row[0].value is not None:
        # row[0].value is the product name, row[1].value its quantity.
        # dict_name[Key] = Value adds {Key: Value} to the dictionary.
        item_dict[row[0].value] = row[1].value
# Dispense one cider and one cola, then print the remaining cola count.
got_drink(item_dict, '사이다')
got_drink(item_dict, '콜라')
print(item_dict['콜라'])
# (Translated) Exercise: write code that saves item_dict into another
# Excel file (Database1.xlsx).
"""
item_dict를 다른 Excel(Database1.xlsx) 파일에 저장하는 Code 만들기
"""
EXCEL2_FILE_NAME = 'Database1.xlsx'
db2 = load_workbook(filename=EXCEL2_FILE_NAME)
machine_db2 = db2['자판기']
# Write the updated quantities back into the second workbook's rows.
for row in machine_db2.rows:
    if row[0].value is not None:
        print(row[0].value)
        print(item_dict[row[0].value])
        row[1].value = item_dict[row[0].value]
db2.save(EXCEL2_FILE_NAME)
print('ok')
from Backend_API.database import database_config
class FlaskConfig(object):
    """Base Flask settings shared by all environments."""
    DEBUG = False
    TESTING = False
    # Default UI language for Flask-Babel.
    BABEL_DEFAULT_LOCALE = "en"
    # Serve static files with max-age 0 so changes show up immediately.
    SEND_FILE_MAX_AGE_DEFAULT = 0
class FlaskProductionConfig(FlaskConfig):
    """Production settings: secure cookies and strong session protection.

    NOTE(review): SECRET_KEY and the Directions API key are committed in
    source control -- they should be rotated and loaded from environment
    variables or a secrets store instead of being hard-coded here.
    """
    SECRET_KEY = "FA848613990667D31E2875021B945130E4A37EDED8035E83EC2426C3366737B2"
    SESSION_COOKIE_SECURE = True
    REMEMBER_COOKIE_SECURE = True
    SESSION_PROTECTION = 'strong'
    SESSION_COOKIE_HTTPONLY = True
    LANGUAGES = {
        'en': 'English'
    }
    # Database connection settings come from the shared database_config module.
    DB_HOST = database_config.DB_HOST
    DB_PORT = database_config.DB_PORT
    DB_NAME = database_config.DB_NAME
    DB_USERNAME = database_config.DB_USERNAME
    DB_PASS = database_config.DB_PASS
    STATIC_FILE = "static"
    # Bind on all interfaces in production.
    HOST = '0.0.0.0'
    PORT = '5090'
    SSL_CERT = 'certificates/test.com.crt'
    SSL_KEY = 'certificates/test.com.key'
    DIRECTIONS_API_KEY = 'AIzaSyBhWJgRmMZum5qBnjGA7HoaY_vpmyzMxe0'
    SECURE = True
    HTTPONLY = True
class FlaskDevelopmentConfig(FlaskConfig):
    """Development settings: debug/testing on, insecure cookies, localhost bind.

    NOTE(review): shares the production SECRET_KEY and API key verbatim --
    see the note on FlaskProductionConfig about moving secrets out of code.
    """
    DEBUG = True
    TESTING = True
    SECRET_KEY = "FA848613990667D31E2875021B945130E4A37EDED8035E83EC2426C3366737B2"
    # Database connection settings come from the shared database_config module.
    DB_HOST = database_config.DB_HOST
    DB_PORT = database_config.DB_PORT
    DB_NAME = database_config.DB_NAME
    DB_USERNAME = database_config.DB_USERNAME
    DB_PASS = database_config.DB_PASS
    STATIC_FILE = "static"
    HOST = 'localhost'
    PORT = '5090'
    SSL_CERT = 'certificates/test.com.crt'
    SSL_KEY = 'certificates/test.com.key'
    DIRECTIONS_API_KEY = 'AIzaSyBhWJgRmMZum5qBnjGA7HoaY_vpmyzMxe0'
    SECURE = False
    HTTPONLY = False
|
import os
import matplotlib.pyplot as plt
import cv2
import torch
import pandas as pd
from ..util.paths import process
from ..util import torch2cv, map_range, make_grid
def _get_tensor(x):
x = x[0] if torch.typename(x) in ['tuple', 'list'] else x
return x
def save_image(img, title):
    """Normalize img by its max to [0, 255] and write it to '<title>.png'."""
    normalized = (img / img.max()) * 255
    cv2.imwrite('{0}.png'.format(title), normalized.astype(int))
def process_none(x):
    """Normalize x to a sequence: None -> [], scalar -> [x], list/tuple kept."""
    if x is None:
        return []
    if isinstance(x, (list, tuple)):
        return x
    return [x]
def _register(net, hook, modules=None, match_names=None, do_forward=True):
    """Attach *hook* to every module of *net* that matches.

    A module matches when it is an instance of one of *modules*, or when its
    type name contains one of the *match_names* substrings. If neither is
    given, nothing is attached.

    Args:
        net (nn.Module): network whose sub-modules are scanned.
        hook: factory taking the module name and returning the hook callable.
        modules (list, optional): class definitions matched via isinstance.
        match_names (list, optional): substrings matched against each
            module's type name.
        do_forward (bool): attach a forward hook if True, else backward.

    Returns:
        net, with hooks registered.
    """
    modules = process_none(modules)
    match_names = process_none(match_names)
    for mod_name, mod in net.named_modules():
        # Bug fix: the original tested torch.typename(modules) -- the *list*
        # of classes, whose typename is always 'list' -- so name matching
        # could never fire. Match against this module's own type name.
        name_match = any(torch.typename(mod).find(x) >= 0 for x in match_names)
        instance_match = any(isinstance(mod, x) for x in modules)
        if instance_match or name_match:
            if do_forward:
                mod.register_forward_hook(hook(mod_name))
            else:
                mod.register_backward_hook(hook(mod_name))
    return net
def _hook_generator(do_input=False, do_output=True, tag='', save_path='.', replace=True, histogram=True, bins=100, mode='forward', param_names=None):
    """Build a hook factory that dumps tensors as histograms or image grids.

    Returns get_hook(module_name) -> hook. Depending on *mode*, the hook
    visualizes the module's input/output ('forward'), grad_input/grad_output
    (any other mode), or its named parameters ('parameters', which reads the
    attributes listed in *param_names*). Files are written under *save_path*
    as '<tag>-<module>-<tensor>[-NNNNNN][-hist].png'.
    """
    save_path = process(save_path, True)
    # Labels for the hook's two tensor arguments, by hook type.
    tensor_names = ['input', 'output'] if mode in ['forward', 'parameters'] else ['grad_input', 'grad_output']
    def get_hook(module_name):
        # Per-module invocation counter, used in file names when replace=False.
        counter = 1
        def hook(module, inp=None, out=None):
            nonlocal counter, tensor_names
            if mode == 'parameters':
                # Pull the requested parameters straight off the module.
                tensors = {x: _get_tensor(getattr(module, x)) for x in param_names}
            else:
                # Keep only the tensors the caller asked for via the flags.
                tensors = [(tensor_names[0], inp, do_input), (tensor_names[1], out, do_output)]
                tensors = {x[0]: _get_tensor(x[1]) for x in tensors if x[2]}
            for tensor_name, data in tensors.items():
                if data is None:
                    continue
                title_end = '' if replace else '-{0:06d}'.format(counter)
                title_end = title_end + '-hist' if histogram else title_end
                title = '{0}-{1}-{2}{3}'.format(tag, module_name, tensor_name, title_end)
                if histogram:
                    img = torch2cv(data)
                    df = pd.DataFrame(img.reshape(img.size))
                    fig, ax = plt.subplots()
                    df.hist(bins=bins, ax=ax)
                    fig.savefig(os.path.join(save_path, '{0}.png'.format(title)))
                    plt.close(fig)  # free the figure; hooks may fire many times
                else:
                    # Image grids only make sense for data with >= 2 dims.
                    if data.dim() > 1:
                        img = torch2cv(make_grid(data, color=False))
                        to_save = (map_range(img)*255).astype(int)
                        cv2.imwrite(os.path.join(save_path, '{0}.png'.format(title)), to_save)
                counter = counter+1
        return hook
    return get_hook
def forward_hook(net, modules=None, match_names=None, do_input=False, do_output=True,
                 tag='', save_path='.', replace=True, histogram=True, bins=100):
    """Register forward hooks that visualize module inputs and outputs.

    On every net.forward() call, each selected module's input and/or output
    is saved under *save_path* as a histogram (if *histogram*) or an image
    grid. Modules are selected by class (*modules*) or by name substring
    (*match_names*); if neither is given, no hook is attached.

    Args:
        net (nn.Module): network whose modules are visualized.
        modules (list/tuple, optional): module classes to match.
        match_names (list/tuple, optional): name substrings to match.
        do_input (bool): visualize the module input (default False).
        do_output (bool): visualize the module output (default True).
        tag (str): prefix for saved file names.
        save_path (str): output directory (default '.').
        replace (bool): overwrite files on each call instead of numbering.
        histogram (bool): histogram when True, image grid otherwise.
        bins (int): histogram bin count (default 100).

    Returns:
        The same net, with hooks registered.
    """
    viz_hook = _hook_generator(do_input, do_output, tag, save_path,
                               replace, histogram, bins, 'forward')
    return _register(net, viz_hook, modules, match_names, True)
def backward_hook(net, modules=None, match_names=None, do_grad_input=False, do_grad_output=True,
                  tag='', save_path='.', replace=True, histogram=True, bins=100):
    """Register backward hooks that visualize module gradients.

    On every net.backward() call, each selected module's grad_input and/or
    grad_output is saved under *save_path* as a histogram (if *histogram*)
    or an image grid. Modules are selected by class (*modules*) or by name
    substring (*match_names*); if neither is given, no hook is attached.

    Args:
        net (nn.Module): network whose gradients are visualized.
        modules (list/tuple, optional): module classes to match.
        match_names (list/tuple, optional): name substrings to match.
        do_grad_input (bool): visualize grad_input (default False).
        do_grad_output (bool): visualize grad_output (default True).
        tag (str): prefix for saved file names.
        save_path (str): output directory (default '.').
        replace (bool): overwrite files on each call instead of numbering.
        histogram (bool): histogram when True, image grid otherwise.
        bins (int): histogram bin count (default 100).

    Returns:
        The same net, with hooks registered.
    """
    viz_hook = _hook_generator(do_grad_input, do_grad_output, tag, save_path,
                               replace, histogram, bins, 'backward')
    return _register(net, viz_hook, modules, match_names, False)
def parameters_hook(net, modules=None, match_names=None, param_names=None,
                    tag='', save_path='.', replace=True, histogram=True, bins=100):
    """Register forward hooks that visualize module parameters.

    On every net.forward() call, the attributes of each selected module
    listed in *param_names* are saved under *save_path* as histograms (if
    *histogram*) or image grids. Modules are selected by class (*modules*)
    or by name substring (*match_names*); with neither given no hook is
    attached, and without *param_names* nothing is visualized.

    Args:
        net (nn.Module): network whose parameters are visualized.
        modules (list/tuple, optional): module classes to match.
        match_names (list/tuple, optional): name substrings to match.
        param_names (list/tuple, optional): parameter attribute names to dump.
        tag (str): prefix for saved file names.
        save_path (str): output directory (default '.').
        replace (bool): overwrite files on each call instead of numbering.
        histogram (bool): histogram when True, image grid otherwise.
        bins (int): histogram bin count (default 100).

    Returns:
        The same net, with hooks registered.
    """
    viz_hook = _hook_generator(False, False, tag, save_path,
                               replace, histogram, bins, 'parameters', param_names)
    return _register(net, viz_hook, modules, match_names, True)
def parameters(net, modules=None, match_names=None, param_names=None, tag='', save_path='.', histogram=True, bins=100):
    """Visualize a network's parameters immediately (no hook registration).

    For each module selected by class (*modules*) or by type-name substring
    (*match_names*), the attributes listed in *param_names* are saved under
    *save_path* as histograms (if *histogram*) or image grids, named
    '<tag>-<module>-<param>.png'.

    Args:
        net (nn.Module): network whose parameters are visualized.
        modules (list/tuple, optional): module classes to match.
        match_names (list/tuple, optional): type-name substrings to match.
        param_names (list/tuple, optional): parameter attribute names to dump.
        tag (str): prefix for saved file names.
        save_path (str): output directory (default '.').
        histogram (bool): histogram when True, image grid otherwise.
        bins (int): histogram bin count (default 100).

    Note:
        * Without modules/match_names, or without param_names, nothing is
          visualized.
    """
    save_path = process(save_path, True)
    modules = process_none(modules)
    match_names = process_none(match_names)
    for module_name, mod in net.named_modules():
        # Bug fix (same as _register): the original tested
        # torch.typename(modules) -- always 'list' -- so name matching
        # could never fire. Match against this module's own type name.
        name_match = any(torch.typename(mod).find(x) >= 0 for x in match_names)
        instance_match = any(isinstance(mod, x) for x in modules)
        if not (instance_match or name_match):
            continue
        params = {x: _get_tensor(getattr(mod, x)) for x in param_names}
        for tensor_name, data in params.items():
            if data is None:
                continue
            title = '{0}-{1}-{2}'.format(tag, module_name, tensor_name)
            if histogram:
                img = torch2cv(data)
                df = pd.DataFrame(img.reshape(img.size))
                fig, ax = plt.subplots()
                df.hist(bins=bins, ax=ax)
                fig.savefig(os.path.join(save_path, '{0}.png'.format(title)))
                plt.close(fig)
            elif data.dim() > 1:
                img = torch2cv(make_grid(data, color=False))
                to_save = (map_range(img)*255).astype(int)
                cv2.imwrite(os.path.join(save_path, '{0}.png'.format(title)), to_save)
import controls
import pytest
import pandas as pd
import prediction
def test_update_a_share_list():
    """Smoke test: fetching the A-share option list should not blow up."""
    try:
        controls.update_a_share_list()[0]
    except ValueError as ex:
        print(ex)
    # NOTE(review): this asserts a truthy literal, so it always passes and
    # never checks the call's result. Probably meant to compare the first
    # returned option against this dict -- verify and tighten.
    assert {'label': 'sh.600000-浦发银行', 'value': 'sh.600000-浦发银行'}
def test_get_A_stock_list():
    """Smoke test for controls.get_A_stock_list."""
    try:
        len(controls.get_A_stock_list())
    except ValueError as ex:
        print(ex)
    # NOTE(review): vacuous assert (non-zero literal) -- always true.
    assert 300
def test_share_dict_to_option():
    """Smoke test: converting a {code: name} dict into an option string."""
    try:
        controls.share_dict_to_option({'sh.600000':'浦发银行'})
    except ValueError as ex:
        print(ex)
    # NOTE(review): vacuous assert (non-empty string literal).
    assert 'sh.600000-浦发银行'
def test_split_share_to_code():
    # split options to get code
    """Smoke test: extracting the exchange code from an option string."""
    try:
        controls.split_share_to_code('sh.600000-浦发银行')
    except ValueError as ex:
        print(ex)
    # NOTE(review): vacuous assert (non-empty string literal).
    assert 'sh.600000'
def test_list_to_option_list():
    # quick create dropdown options from list
    """Smoke test: building dropdown options from a plain list."""
    try:
        controls.list_to_option_list(['sh.600000-浦发银行'])
    except ValueError as ex:
        print(ex)
    # NOTE(review): vacuous assert (non-empty dict literal).
    assert {"label": 'sh.600000-浦发银行', "value": 'sh.600000-浦发银行'}
def test_get_trend_df():
    """Smoke test: daily trend download for a fixed code and date range."""
    try:
        controls.get_trend_df('sh.600000','2020-01-01','2020-07-07')
    except ValueError as ex:
        print(ex)
    # NOTE(review): `assert True` passes even when the call above failed;
    # the try/except makes this test unable to fail.
    assert True
def test_get_trend_week_df():
    """Smoke test: weekly trend download (same caveat as above)."""
    try:
        controls.get_trend_week_df('sh.600000','2020-01-01','2020-07-07')
    except ValueError as ex:
        print(ex)
    assert True
def test_get_trend_month_df():
    """Smoke test: monthly trend download (same caveat as above)."""
    try:
        controls.get_trend_month_df('sh.600000','2020-01-01','2020-07-07')
    except ValueError as ex:
        print(ex)
    assert True
def test_get_information():
    """Smoke test: stock metadata lookup for sh.600000."""
    try:
        a = controls.get_information('sh.600000')
        print(a)
    except ValueError as ex:
        print(ex)
    # NOTE(review): vacuous assert (non-empty list literal); probably meant
    # to compare `a` against this expected record.
    assert ['sh.600000','浦发银行','1999-11-10','','Stock','Listed']
def test_predict_tomorrow():
    """Smoke test: next-day prediction on the CSV fixture.

    `df` is the module-level DataFrame defined below; it is bound at import
    time, before pytest executes this function, so the forward reference is
    safe.
    """
    try:
        prediction.predict_tomorrow(df)
    except ValueError as ex:
        print(ex)
    # NOTE(review): `assert True` cannot fail -- see the trend smoke tests.
    assert True
# Daily OHLC fixture shared by the parametrized candlestick test.
df = pd.read_csv("data/daily_data.csv")
@pytest.mark.parametrize('df, ma',
[
(df, [5, 10])
])
def test_plot_candlestick(df, ma):
    """Render a candlestick chart with 5- and 10-day moving averages."""
    fig = controls.plot_candlestick(df=df, ma=ma)
    # NOTE(review): fig.show() may open a window/browser during test runs;
    # consider asserting on the figure object instead.
    fig.show()
    assert True
if __name__ == "__main__":
    pytest.main()
|
import sqlite3
import sys
def printProfile(skypeDB):
    """Print every non-group chat message stored in a Skype main.db.

    Args:
        skypeDB: path to the Skype SQLite database file.
    """
    conn = sqlite3.connect(skypeDB)
    try:
        c = conn.cursor()
        c.execute("SELECT datetime(timestamp,'unixepoch'),dialog_partner,author,body_xml FROM Messages;")
        print ("--Found Messages--")
        for row in c:
            try:
                # 'parlist' in the body marks a group-conference message; skip those.
                if 'parlist' not in str(row[3]):
                    # dialog_partner != author means the local user sent the message.
                    if str(row[1]) != str(row[2]):
                        msgDirection = "To " + str(row[1]) + ": "
                    else:
                        msgDirection = "From " + str(row[2]) + ": "
                    print ("Time: " + str(row[0]) + " " + msgDirection + str(row[3]))
            except Exception:
                # Fix: was a bare `except:` which also swallowed SystemExit and
                # KeyboardInterrupt; malformed rows are still skipped best-effort.
                pass
    finally:
        # Fix: the connection was never closed (resource leak).
        conn.close()
def main():
    """Entry point: dump messages from the default Windows Skype DB location."""
    db_path = "C:\\sqlite\\main.db"
    printProfile(db_path)
if __name__ == "__main__":
    # Run the extractor when executed as a script.
    main()
|
# 일 평균 30만주이상 거래되는 nasdaq, newyork, amex 중(step2_300k_day_coms.xlsx)
# 최근 3개월간 10%(연간 100%) 이상 상승한 종목 중(step3_3mon_10p_up.xlsx) <-- <월봉 양호한 종목 추출 목적>
# 볼린저밴드 상단 접근 종목(밴드 상단의 -20%선 이상) 중 시가가 기준선 위에 있고 종가가 상단선 80% 이상이며, 전일 시가, 종가 갭의 2배이상 상승한 종목
# 일일 1회 가동
from pandas_datareader import data as pdr
import yfinance as yf
import pandas as pd
import matplotlib.pyplot as plt
import mplfinance as mpf
import openpyxl
import datetime
import time
# Route pandas_datareader's Yahoo requests through yfinance.
yf.pdr_override()
wb = openpyxl.Workbook()
now = datetime.datetime.now()
filename = datetime.datetime.now().strftime("%Y-%m-%d")
# wb.save('watch_data.xlsx')
sheet = wb.active
# Column headers: date, symbol, company name, Bollinger band values and signal.
sheet.append(['time', 'market', 'symbol', 'code', 'company_name', 'bol_high', 'bol_low', 'bol_gap(%)', 'rise_margin(%)', 'monthly_rise(%)', 'MA20', 'open', 'close', 'volume', 'industry', 'trade'])
wb.save('case1_bollinger_follow_'+filename+'.xlsx')
# Read the pre-screened candidate company list.
# df_com = pd.read_excel("step2_300k_day_coms.xlsx")
df_com = pd.read_excel("step3_3mon_10p_up_2021-07-08.xlsx")
i = 1  # NOTE(review): immediately overwritten by the for-loop below.
for i in range(len(df_com)):
    # now = datetime.datetime.now()
    # Fetch the last 60 daily candles for this symbol from Yahoo Finance.
    df = pdr.get_data_yahoo(df_com.iloc[i]['symbol'], period = '60d')
    try :
        # 20-day moving average and Bollinger bands (MA20 +/- 2 std deviations).
        df['MA20'] = df['Close'].rolling(window=20).mean()
        # df['MA60'] = df['Close'].rolling(window=60).mean()
        df['stddev'] = df['Close'].rolling(window=20).std()
        df['upper'] = df['MA20'] + (df['stddev']*2)
        df['lower'] = df['MA20'] - (df['stddev']*2)
        # df['vol_avr'] = df['Volume'].rolling(window=5).mean()
        df['gap'] = df['upper'] - df['lower']
        # Remaining headroom to the upper band, as a percentage of the close.
        df['rise_margin'] = (df['upper'] - df['Close']) / df['Close'] * 100
        # df = df[19:]
        # Today's and yesterday's open/close and candle-body gaps.
        cur_close = df.iloc[-1]['Close']
        cur_open = df.iloc[-1]['Open']
        today_gap = cur_close - cur_open
        pre_open_price = df.iloc[-2]['Open']
        pre_close_price = df.iloc[-2]['Close']
        pre_gap = pre_close_price - pre_open_price
        # a1 = max([df.iloc[-2]['Open'], df.iloc[-2]['Close'], df.iloc[-3]['Open'], df.iloc[-3]['Close']])
        # a2 = max([df.iloc[-2]['Open'], df.iloc[-2]['Close']])
        # a3 = max([df.iloc[-3]['Open'], df.iloc[-3]['Close']])
        # df_u = df.iloc[-1]['upper']
        # df_l1 = df.iloc[-1]['lower']
        # df_l2 = df.iloc[-2]['lower']
        # df_l3 = df.iloc[-3]['lower']
        # df_g1 = df.iloc[-1]['gap']
        # df_g2 = df.iloc[-2]['gap']
        # df_g3 = df.iloc[-3]['gap']
        # df_v = df.iloc[-2]['vol_avr']
        # print(df_com.iloc[i]['simbol'])
        # print('Bollinger : ',df.iloc[-1]['MA20'], df.iloc[-1]['upper'], df.iloc[-1]['lower'])
        # print('Bollinger band width : ',df.iloc[-1]['bandwidth'], '%')
        # Buy signal: close at/above 80% of the upper band value, a bullish
        # candle opening at or above MA20, today's gain at least twice
        # yesterday's, and daily volume of at least 300k shares.
        if cur_close >= df.iloc[-1]['upper'] * 0.8 and cur_close > cur_open >= df.iloc[-1]['MA20'] and today_gap >= pre_gap * 2 and df.iloc[-1]['Volume'] >= 300000 :
            sheet.append([now, df_com.iloc[i]['market'], df_com.iloc[i]['symbol'], df_com.iloc[i]['code'], df_com.iloc[i]['company_name'], \
                df.iloc[-1]['upper'], df.iloc[-1]['lower'], df.iloc[-1]['gap'], df.iloc[-1]['rise_margin'], df_com.iloc[i]['month_rise(%)'], df.iloc[-1]['MA20'], df.iloc[-1]['Open'], \
                df.iloc[-1]['Close'], df.iloc[-1]['Volume'], df_com.iloc[i]['industry'],'buy'])
            # Save after every hit so a crash mid-run keeps earlier results.
            wb.save('case1_bollinger_follow_'+filename+'.xlsx')
            print('buy', df_com.iloc[i]['symbol'])
            # Plot price with Bollinger bands (top) and volume (bottom).
            plt.figure(figsize=(9, 7))
            plt.subplot(2, 1, 1)
            plt.plot(df['upper'], color='green', label='Bollinger upper')
            plt.plot(df['lower'], color='brown', label='Bollinger lower')
            plt.plot(df['MA20'], color='black', label='MA20')
            plt.plot(df['Close'], color='blue', label='Price')
            plt.title(df_com.iloc[i]['symbol']+'stock price')
            plt.xlabel('time')
            plt.xticks(rotation = 45)
            plt.ylabel('stock price')
            plt.legend()
            plt.subplot(2, 1, 2)
            plt.plot(df['Volume'], color='blue', label='Volume')
            plt.ylabel('Volume')
            plt.xlabel('time')
            plt.xticks(rotation = 45)
            plt.legend()
            plt.show()
            df = df[['Open', 'High', 'Close', 'Volume']]
    except Exception as e:
        # Best-effort scan: report and continue with the next symbol.
        print(e)
        print('error', df_com.iloc[i]['symbol'])
    print(df_com.iloc[i]['symbol'])
    i += 1  # NOTE(review): redundant — the for-loop reassigns i each iteration.
# Re-read the day's picks, sort by remaining rise margin, rewrite the workbook.
df_1 = pd.read_excel('case1_bollinger_follow_'+filename+'.xlsx')
# df_b_f = df_1.sort_values(by = 'rise_margin(%)', ascending= False) # sort descending by rise_margin(%)
df_b_f = df_1.sort_values(by = 'rise_margin(%)')
df_b_f.to_excel('case1_bollinger_follow_'+filename+'.xlsx')
# df_b_f.to_excel('imsi_trend_bollinger_follow_sorted_'+filename+'.xlsx')
# time.sleep(0.1)
# except Exception as e:
#     print(e)
#     time.sleep(0.1)
|
# this is 'The Coin Change Problem' using dynamic programming
# source hackerrank
def getways(SUM, index):
    """Count the ways to form `SUM` using denominations d[index:], memoised in dp."""
    global dp
    global d
    global size_of_d
    # Overshot the target or ran out of denominations: dead end.
    if SUM < 0 or index >= size_of_d:
        return 0
    # Exactly reached the target: one valid combination.
    if SUM == 0:
        return 1
    # Return the memoised answer when available (-1 marks "unknown").
    if dp[index][SUM] != -1:
        return dp[index][SUM]
    # Either reuse the current denomination or skip to the next one.
    total = getways(SUM - d[index], index) + getways(SUM, index + 1)
    dp[index][SUM] = total
    return total
# Read problem input: target sum, denomination count, then the denominations.
SUM = int(input())
size_of_d = int(input()) #size of denomination array
d = list(map(int, input().split(' ')))
# Memo table: dp[i][s] = ways to make s from d[i:]; -1 means not yet computed.
dp = [[-1 for x in range(SUM+1)] for y in range(size_of_d)]
print(getways(SUM, 0))
|
# -*- coding:gbk -*-
# auther : pdm
# email : ppppdm@gmail.com
import socket
#import time
import threading
import sys
import getopt
HOST = ''
SERVER_ADDR = 'localhost'
PORT = 4001
TOTAL_CLIENT = 65536 # default is 1000
CREATE_THREAD_SLEEP_TIME = 0.001 # default is 0.001
gConnectList = []
gClientList = []
t_ce = 0
def server():
    """Accept TCP connections forever, keeping every socket alive in gConnectList."""
    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.bind((HOST, PORT))
    listener.listen(5)
    while True:
        conn, _addr = listener.accept()
        # Hold a reference so the connection stays open for the whole test.
        gConnectList.append(conn)
        print('total conn', len(gConnectList))
def client():
    """Open a single client connection and stash it in gClientList.

    Connection failures are counted in the global t_ce error counter.
    """
    global t_ce
    try:
        conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        conn.connect((SERVER_ADDR, PORT))
        gClientList.append(conn)
    except Exception as exc:
        print('client', exc)
        t_ce += 1
def multiClient():
    """Sequentially open TOTAL_CLIENT connections, then close every socket."""
    for _ in range(TOTAL_CLIENT):
        # Run each client inline; the threaded variant is intentionally disabled.
        client()
    # Drain and close all sockets opened above.
    while gClientList:
        gClientList.pop().close()
    print('multi client exit')
    print('t_ce', t_ce)
if __name__=='__main__':
    # Fix: on a getopt.GetoptError `opts` was left undefined, so the loop
    # below raised NameError; initialise it before parsing.
    opts = []
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'sc')
        print(opts)
    except getopt.GetoptError as err:
        print(err)
    # -s runs the accepting server, -c the mass-client driver (first flag wins).
    for o, a in opts:
        if o=='-s':
            print('start server')
            t_s = threading.Thread(target=server)
            t_s.start()
            break
        if o=='-c':
            print('start client')
            t_mc = threading.Thread(target=multiClient)
            t_mc.start()
            break
|
# Copyright 2019 The LUCI Authors. All rights reserved.
# Use of this source code is governed under the Apache License, Version 2.0
# that can be found in the LICENSE file.
from recipe_engine import post_process
DEPS = [
'assertions',
'properties',
'step',
]
def RunSteps(api):
    """Trigger a failing assertion and optionally verify its rendered message.

    `msg` may contain `{first}`/`{second}` placeholders which the assertions
    module fills with the compared values.
    """
    msg = api.properties.get('msg')
    try:
        api.assertions.assertEqual(0, 1, msg=msg)
    except AssertionError as e:
        # Surface the failure as a named step so tests can assert it ran.
        api.step('AssertionError', [])
        expected_message = api.properties.get('expected_message')
        if expected_message:
            assert str(e) == expected_message, (
                'Expected AssertionError with message: %r\nactual message: %r' %
                (expected_message, str(e)))
def GenTests(api):
    """Generate test cases: default message and custom-format message."""
    yield api.test(
        'basic',
        api.post_process(post_process.MustRun, 'AssertionError'),
        api.post_process(post_process.StatusSuccess),
        api.post_process(post_process.DropExpectation),
    )
    import sys
    # Python 3's unittest prefixes the standard '0 != 1' to custom messages;
    # Python 2 used the custom message verbatim.
    expected_message = ('0 != 1 : 0 should be 1'
                        if sys.version_info.major == 3 else '0 should be 1')
    yield api.test(
        'custom-message',
        api.properties(
            msg='{first} should be {second}', expected_message=expected_message),
        api.post_process(post_process.MustRun, 'AssertionError'),
        api.post_process(post_process.StatusSuccess),
        api.post_process(post_process.DropExpectation),
    )
|
import os
import sys
import time
import serial
import serial.tools.list_ports
# Enumerate every serial port attached to this machine.
ports = []
ports = serial.tools.list_ports.comports()
for port in ports:
    print("Find port " + port.device)
    # Probe: open with default settings, then close if it actually opened.
    ser = serial.Serial(port.device)
    if ser.isOpen():
        ser.close()
# Re-open the LAST enumerated port at 9600 baud.
# NOTE(review): raises NameError when no ports were found — `port` is then
# undefined; confirm at least one port is always expected.
ser = serial.Serial(port.device, 9600)
"""
free buffer
"""
ser.flushInput()
ser.flushOutput()
print("Connect to " + ser.name)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
# Sample data: values around 10-19 plus three obvious outliers (102, 107, 108).
dataset= [11,10,12,14,12,15,14,13,15,102,12,14,17,19,107, 10,13,12,14,12,108,12,11,14,13,15,10,15,12,10,14,13,15,10]
# Detect outliers using z-score
outlier = []
def detect_outlier(data):
    """Return the values of `data` whose |z-score| exceeds 3.

    Fix: results were appended to the module-level `outlier` list, so
    repeated calls accumulated stale entries; a fresh list is returned now.
    """
    threshold = 3
    mean = np.mean(data)
    std = np.std(data)
    found = []
    for value in data:
        z_score = (value - mean) / std
        print(f"z_score is {z_score}")
        if np.abs(z_score) > threshold:
            print(f"Outlier data: {value}")
            found.append(value)
    return found
outlier_pts = detect_outlier(dataset)
# Detect outliers using the IQR (Tukey fence) rule.
# Steps:
# 1. sort data in ascending order
# 2. calculate q1 and q3
# 3. find the interquartile range (iqr = q3 - q1)
# 4. lower boundary = q1 - 1.5*iqr
# 5. upper boundary = q3 + 1.5*iqr
# Any data point outside [lower, upper] is considered an outlier.
outlier_iqr = []
def detect_outlier_IQR(data):
    """Return values of `data` outside the Tukey fences [q1-1.5*IQR, q3+1.5*IQR].

    Fix: results were appended to the module-level `outlier_iqr` list, so
    calling twice duplicated entries; a fresh list is returned instead.
    """
    sorted_data = sorted(data)
    q1, q3 = np.percentile(sorted_data, [25, 75])
    iqr = q3 - q1
    iqr_lower_bound = q1 - (1.5 * iqr)
    iqr_upper_bound = q3 + (1.5 * iqr)
    return [value for value in data
            if value < iqr_lower_bound or value > iqr_upper_bound]
# Run the IQR detector on the sample dataset.
outlierByIQR = detect_outlier_IQR(dataset)
|
from .util import Xmleable, default_document
from .Party import Party
from .Accounting import AdditionalAccountID, CustomerAssignedAccountID
class AccountingCustomerParty(Xmleable):
    """UBL cac:AccountingCustomerParty element (party plus account IDs)."""

    def __init__(self, party=None, customer_assigned_account=None, additional_account=None):
        self.party = party
        self.customer_assigned_accountID = customer_assigned_account
        self.additional_accountID = additional_account

    def validate(self, errs, obs):
        """Assert every present child has the expected type."""
        # Fix: isinstance() instead of `type(x) == T`, so subclasses validate too.
        assert self.party is None or isinstance(self.party, Party)
        assert self.customer_assigned_accountID is None or \
            isinstance(self.customer_assigned_accountID, CustomerAssignedAccountID)
        assert self.additional_accountID is None or \
            isinstance(self.additional_accountID, AdditionalAccountID)

    def generate_doc(self):
        """Build the XML node; child order: account IDs first, then party."""
        self.doc = default_document.createElement(
            "cac:AccountingCustomerParty")
        if self.customer_assigned_accountID:
            self.doc.appendChild(
                self.customer_assigned_accountID.get_document())
        if self.additional_accountID:
            self.doc.appendChild(self.additional_accountID.get_document())
        if self.party:
            self.doc.appendChild(self.party.get_document())
class DeliveryCustomerParty(Xmleable):
    """UBL cac:DeliveryCustomerParty element (party plus assigned account ID)."""

    def __init__(self, party=None, customer_assigned_account=None):
        self.party = party
        self.customer_assigned_accountID = customer_assigned_account

    def validate(self, errs, obs):
        """Assert every present child has the expected type."""
        # Fix: isinstance() instead of `type(x) == T`, so subclasses validate too.
        assert self.party is None or isinstance(self.party, Party)
        assert self.customer_assigned_accountID is None or \
            isinstance(self.customer_assigned_accountID, CustomerAssignedAccountID)

    def generate_doc(self):
        """Build the XML node; account ID first, then party."""
        self.doc = default_document.createElement("cac:DeliveryCustomerParty")
        if self.customer_assigned_accountID:
            self.doc.appendChild(
                self.customer_assigned_accountID.get_document())
        if self.party:
            self.doc.appendChild(self.party.get_document())
|
from car_information.model.Car import Car
import os.path
import json
import unittest
class TestCar(unittest.TestCase):
    """Tests for Car JSON (de)serialisation."""

    def test_load_car_data(self):
        """Loading mock JSON must make the Car differ from a fresh instance."""
        a = Car()
        b = Car()
        data = self.get_mock_car_data()
        a.load_data_from_json(json.dumps(data))
        self.assertNotEqual(a, b)

    def test_load_from_json(self):
        """Load car data from a JSON file, falling back to a second path."""
        path = "car_information/tests/car_data"
        # path = "car_data"
        data = None
        try:
            data = self.read_from_json(path, "XX12345")
        except FileNotFoundError:  # In case the test is run from another directory
            print("Didn't find file, trying backup location")
            path = "car_data"
            data = self.read_from_json(path, "XX12345")
        a = Car()
        a.load_data_from_json(json.dumps(data))
        b = Car()
        self.assertNotEqual(a, b)

    def read_from_json(self, directory, filepath):
        """Return parsed JSON from <directory>/<filepath>.json."""
        print(os.path.join(directory, filepath + ".json"))
        with open(os.path.join(directory, filepath + ".json")) as json_file:
            data = json.load(json_file)
        return data

    def get_mock_car_data(self):
        """Minimal in-memory car record (Norwegian registry field names)."""
        return {
            "registreringsnummer": "XX12345",
            "merke": "Volkswagen",
            "modell": "Caddy",
            "farge": "Ukjent"
        }
if __name__ == '__main__':
    # Run the test suite when executed directly.
    unittest.main()
|
'''Largest palindrome product
Problem 4
A palindromic number reads the same both ways.
The largest palindrome made from the product of two 2-digit numbers is 9009 = 91 × 99.
Find the largest palindrome made from the product of two 3-digit numbers.
'''
def is_num_palindrome(number):
    """True when the decimal representation of `number` is a palindrome."""
    digits = str(number)
    return digits == digits[::-1]
def main(num1, num2, debug=False):
    """Return the largest palindromic product a*b with 100 <= a <= num1, 100 <= b <= num2."""
    palindromes = {}
    for a in range(num1, 99, -1):
        for b in range(num2, 99, -1):
            product = a * b
            if debug:
                print(a, b, product)
            if is_num_palindrome(product):
                # Remember the factor pair for reporting.
                palindromes[product] = (a, b)
    largest_palindrome = max(palindromes.keys())
    factor1, factor2 = palindromes[largest_palindrome]
    if debug:
        print("Largest palindrome made from the product of the two 3-digit numbers:", largest_palindrome)
        print("Factor 1: {}, Factor 2: {}".format(factor1, factor2))
    return largest_palindrome
if __name__ == "__main__":
    #Check that given case is true
    assert is_num_palindrome(9009)
    # Project Euler #4: largest palindrome from two 3-digit factors.
    print(main(999, 999))
|
#!/usr/bin/env python
"""make some postprocessing plots/reports by the predictions and true labels
usage: python xgbreporter.py -d <OUTPUTDIR> [-t <TRAINDATA> -k <KEY>]
An `xgbreport.txt will be dumped under <OUTPUTDIR>, including info below:
* prediction distribution
* roc curve
* auc score
* accuracy score
* classification report
** prediction for full dataset (if TRAINDATA and KEY given)
"""
import argparse
import os
from os.path import join
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib import gridspec
from matplotlib.ticker import PercentFormatter
from sklearn.metrics import roc_curve, roc_auc_score
# Silence numpy warnings from degenerate divisions in the ratio plots.
np.seterr(divide='ignore', invalid='ignore', over='ignore')
parser = argparse.ArgumentParser(description="Postprocessing output of xgbtrainer")
parser.add_argument("--dir", '-d', type = str, required = True, help = "training output")
parser.add_argument("--train", '-t', type = str, required = False, default = None, help = "training data")
parser.add_argument('--key', '-k', type=str, required=False, default=None, help='key for dataframe')
args = parser.parse_args()
# The output directory must already contain predictions produced by xgbtrainer.
assert (os.path.isdir(args.dir) and os.path.isfile(join(args.dir, 'xgbpredictions.h5')))
def make_prediction_distribution(trainoutdir):
    """Plot normalised BDT-score distributions (train vs test, signal vs background).

    Reads `xgbpredictions.h5` from `trainoutdir` and writes
    `prediction_dist_default.pdf` and `prediction_dist_optimized.pdf` there.
    """
    print('Making xgb prediction distributions in {}'.format(trainoutdir))
    from coffea import hist
    datasource = join(trainoutdir, 'xgbpredictions.h5')
    traindf = pd.read_hdf(datasource, 'train')
    testdf = pd.read_hdf(datasource, 'test')
    dataset_axis = hist.Cat('dataset', 'train/test')
    label_axis = hist.Cat('label', 'S/B')
    bdt_axis = hist.Bin('score', 'BDT score', 50, -10, 10)
    # Fill histograms split by dataset (train/test) and truth label (y==1 signal).
    default = hist.Hist("norm. counts", dataset_axis, label_axis, bdt_axis)
    default.fill(dataset='test', label='signal (test)', score=testdf.query('y==1')['default'].values)
    default.fill(dataset='test', label='background (test)', score=testdf.query('y==0')['default'].values)
    default.fill(dataset='train', label='signal (train)', score=traindf.query('y==1')['default'].values)
    default.fill(dataset='train', label='background (train)', score=traindf.query('y==0')['default'].values)
    optimized = hist.Hist("norm. counts", dataset_axis, label_axis, bdt_axis)
    optimized.fill(dataset='test', label='signal (test)', score=testdf.query('y==1')['optimized'].values)
    optimized.fill(dataset='test', label='background (test)', score=testdf.query('y==0')['optimized'].values)
    optimized.fill(dataset='train', label='signal (train)', score=traindf.query('y==1')['optimized'].values)
    optimized.fill(dataset='train', label='background (train)', score=traindf.query('y==0')['optimized'].values)
    # Test points drawn as markers with error bars; train as filled histograms.
    data_err_opts = {
        'linestyle':'none',
        'marker': '.',
        'markersize': 10.,
        'elinewidth': 1,
        'emarker': '_',
        'markeredgecolor': 'k'
    }
    fill_opts = {
        'edgecolor': (0,0,0,0.3),
        'alpha': 0.8
    }
    fig, ax = plt.subplots(figsize=(8,6))
    hist.plot1d(default.project('dataset', 'test'), overlay='label', ax=ax, density=True, clear=False, error_opts=data_err_opts)
    hist.plot1d(default.project('dataset', 'train'), overlay='label', ax=ax, line_opts=None, clear=False, density=True, fill_opts=fill_opts)
    ax.legend()
    ax.autoscale(axis='y', tight=True)
    ax.set_ylim(0, None);
    ax.set_title('default BDT response', x=0.0, ha="left")
    ax.set_xlabel(ax.get_xlabel(), x=1.0, ha="right")
    ax.set_ylabel(ax.get_ylabel(), y=1.0, ha="right")
    plt.savefig(join(trainoutdir, "prediction_dist_default.pdf"), bbox_inches='tight')
    plt.close()
    fig, ax = plt.subplots(figsize=(8,6))
    hist.plot1d(optimized.project('dataset', 'test'), overlay='label', ax=ax, density=True, clear=False, error_opts=data_err_opts)
    hist.plot1d(optimized.project('dataset', 'train'), overlay='label', ax=ax, line_opts=None, clear=False, density=True, fill_opts=fill_opts)
    ax.legend()
    ax.autoscale(axis='y', tight=True)
    ax.set_ylim(0, None);
    ax.set_title('optimized BDT response', x=0.0, ha="left")
    ax.set_xlabel(ax.get_xlabel(), x=1.0, ha="right")
    ax.set_ylabel(ax.get_ylabel(), y=1.0, ha="right")
    plt.savefig(join(trainoutdir, "prediction_dist_optimized.pdf"), bbox_inches='tight')
    plt.close()
def make_full_prediction_distribution(trainoutdir, trainingdata, key):
    """Score the FULL training dataset with both saved models and plot the results.

    Args:
        trainoutdir: directory holding model_default/ and model_optimized/.
        trainingdata: HDF5 file with the full feature dataframe.
        key: dataframe key inside the HDF5 file.
    """
    print(f"Make predictions on full training dataset: {trainingdata} and model in {trainoutdir}")
    from coffea import hist
    import xgboost as xgb
    ## full dataset
    df = pd.read_hdf(trainingdata, key)
    featurecols = [x for x in df.columns if x != 'label']
    dfull = xgb.DMatrix(df[featurecols], label=df['label'])
    ## default and optimized models
    xgbm_default = xgb.Booster({"nthread": 16})
    xgbm_default.load_model(join(trainoutdir, "model_default/model.bin"))
    xgbm_optimized = xgb.Booster({"nthread": 16})
    xgbm_optimized.load_model(join(trainoutdir, "model_optimized/model.bin"))
    ## predictions
    preds_default = xgbm_default.predict(dfull)
    preds_optimized = xgbm_optimized.predict(dfull)
    ## making plots: split scores by the truth label (boolean mask on 'label')
    label_axis = hist.Cat('label', 'S/B')
    bdt_axis = hist.Bin('score', 'BDT score', 50, -10, 10)
    default = hist.Hist("norm. counts", label_axis, bdt_axis)
    default.fill(label='signal', score=preds_default[df['label'].values.astype(bool)])
    default.fill(label='background', score=preds_default[~df['label'].values.astype(bool)])
    optimized = hist.Hist("norm. counts", label_axis, bdt_axis)
    optimized.fill(label='signal', score=preds_optimized[df['label'].values.astype(bool)])
    optimized.fill(label='background', score=preds_optimized[~df['label'].values.astype(bool)])
    fig, ax = plt.subplots(figsize=(8, 6))
    hist.plot1d(default, overlay='label', ax=ax, density=True)
    ax.set_ylim(0, None);
    ax.set_title('default BDT response on full dataset', x=0.0, ha="left")
    ax.set_xlabel(ax.get_xlabel(), x=1.0, ha="right")
    ax.set_ylabel(ax.get_ylabel(), y=1.0, ha="right")
    plt.savefig(join(trainoutdir, "prediction_fulldist_default.pdf"), bbox_inches='tight')
    plt.close()
    fig, ax = plt.subplots(figsize=(8, 6))
    hist.plot1d(optimized, overlay='label', ax=ax, density=True)
    ax.set_ylim(0, None);
    ax.set_title('optimized BDT response on full dataset', x=0.0, ha="left")
    ax.set_xlabel(ax.get_xlabel(), x=1.0, ha="right")
    ax.set_ylabel(ax.get_ylabel(), y=1.0, ha="right")
    plt.savefig(join(trainoutdir, "prediction_fulldist_optimized.pdf"), bbox_inches='tight')
    plt.close()
class RocPlot():
    """ROC curve figure with a main panel plus a ratio subpanel.

    The first curve passed to `plot` becomes the reference for the ratio.
    """

    def __init__(self, logscale=False, xlabel=None, ylabel=None,
                 xlim=None, ylim=None, rlim=None, height_ratios=None,
                 percentage=False, grid=False, ncol=1):
        # Fix: mutable default argument ([2, 1]) replaced with a None sentinel.
        if height_ratios is None:
            height_ratios = [2, 1]
        self.gs = gridspec.GridSpec(2, 1, height_ratios=height_ratios)
        self.axis = plt.subplot(self.gs[0])
        self.axr = plt.subplot(self.gs[1])
        self.gs.update(wspace=0.025, hspace=0.075)
        plt.setp(self.axis.get_xticklabels(), visible=False)
        if xlim is None: xlim = (0, 1)
        self.xlim_ = xlim
        self.ylim_ = ylim
        if xlabel is None: xlabel = 'True positive rate'
        if ylabel is None: ylabel = 'False positive rate'
        if percentage:
            xlabel += " [%]"
            ylabel += " [%]"
        self.logscale_ = logscale
        self.percentage_ = percentage
        # Axis scale factor: 1 for fractions, 100 when plotting percentages.
        self.scale_ = 1 + 99 * percentage
        self.axis.set_ylabel(ylabel)
        self.axr.set_xlabel(xlabel)
        self.axr.set_ylabel("Ratio")
        self.axr.set_xlabel(self.axr.get_xlabel(), x=1.0, ha="right")
        self.axis.set_ylabel(self.axis.get_ylabel(), y=1.0, ha="right")
        self.axis.grid(grid, which='both', ls=':')
        self.axr.grid(grid, ls=':')
        self.axis.set_xlim([x * self.scale_ for x in xlim])
        self.axr.set_xlim([x * self.scale_ for x in xlim])
        if ylim is not None:
            self.axis.set_ylim([y * self.scale_ for y in ylim])
        if rlim is not None:
            self.axr.set_ylim(rlim)
        self.auc_ = []
        self.ncol_ = ncol # legend columns

    def plot(self, y_true, y_score, **kwargs):
        """Add one ROC curve; the first call defines the ratio reference."""
        fpr, tpr, _ = roc_curve(y_true, y_score)
        self.auc_.append(roc_auc_score(y_true, y_score))
        if not hasattr(self, 'fpr_ref'):
            self.fpr_ref = fpr
            self.tpr_ref = tpr
        # Only draw points above the lower x-limit.
        sel = tpr >= self.xlim_[0]
        if self.logscale_:
            self.axis.semilogy(tpr[sel] * self.scale_,
                               fpr[sel] * self.scale_,
                               **kwargs)
        else:
            self.axis.plot(tpr[sel] * self.scale_,
                           fpr[sel] * self.scale_,
                           **kwargs)
        # Ratio of this curve's fpr to the reference, interpolated in tpr.
        ratios = fpr / np.interp(tpr, self.tpr_ref, self.fpr_ref)
        self.axr.plot(tpr[sel]*self.scale_, ratios[sel], **kwargs)
        self.axis.legend(loc="upper left", ncol=self.ncol_)
        if self.percentage_:
            self.axis.get_yaxis().set_major_formatter(PercentFormatter(decimals=1, symbol=None))
def make_roc_curve(trainoutdir):
    """Draw ROC curves for both models and dump working-point thresholds to CSV."""
    print('Making roc curve in {}'.format(trainoutdir))
    datasource = join(trainoutdir, 'xgbpredictions.h5')
    testdf = pd.read_hdf(datasource, 'test')
    plt.figure(figsize=(8, 6))
    roc = RocPlot(xlim=(0.6, 1), ylim=(1e-5, 1), height_ratios=[4, 1],
                  logscale=True, grid=True, percentage=True,
                  ncol=2, rlim=(0.95, 1.05))
    for m in ['default', 'optimized']:
        roc.plot(testdf['y'].values, testdf[m].values, label=m)
    plt.savefig(join(trainoutdir, 'roccurve.pdf'), bbox_inches='tight')
    plt.close()
    print("Extracing working points ...")
    # Working points: first threshold whose false-positive rate exceeds each target.
    rocstats = {
        'workingpoints': ['tight', 'medium', 'loose'],
        'targetfpr': [1e-4, 1e-3, 1e-2],
        'fakepositiverate_default': [],
        'truepositiverate_default': [],
        'threshold_default': [],
        'fakepositiverate_optimized': [],
        'truepositiverate_optimized': [],
        'threshold_optimized': [],
    }
    for m in ['default', 'optimized']:
        fpr, tpr, thresholds = roc_curve(testdf['y'].values, testdf[m].values)
        rocstats['fakepositiverate_' + m] = [fpr[fpr > t][0] for t in rocstats['targetfpr']]
        rocstats['truepositiverate_' + m] = [tpr[fpr > t][0] for t in rocstats['targetfpr']]
        rocstats['threshold_' + m] = [thresholds[fpr > t][0] for t in rocstats['targetfpr']]
    pd.DataFrame(rocstats).to_csv(join(trainoutdir, 'rocworkingpoints.csv'))
def make_text_report(trainoutdir):
    """Write accuracy, ROC-AUC and a classification report for both models.

    Output goes to `xgbtestreport.txt` in `trainoutdir`; class predictions are
    derived by truthiness of the score (`astype(bool)`).
    """
    from sklearn.metrics import accuracy_score, roc_auc_score, classification_report
    print("Dumping `accuracy_score`, `roc_auc_score`, and `classification_report` as text files")
    datasource = join(trainoutdir, 'xgbpredictions.h5')
    testdf = pd.read_hdf(datasource, 'test')
    with open(join(trainoutdir, 'xgbtestreport.txt'), 'w') as outf:
        for m in ['default', 'optimized']:
            outf.write('**** {} ****\n'.format(m))
            outf.write('accuracy score: {}\n'.format(accuracy_score(testdf['y'].values, testdf[m].values.astype(bool))))
            outf.write('roc auc score: {}\n'.format(roc_auc_score(testdf['y'].values, testdf[m].values)))
            outf.write('classification report:\n')
            outf.write(classification_report(testdf['y'].values, testdf[m].values.astype(bool), digits=4))
            outf.write('\n\n')
if __name__ == "__main__":
    import time
    starttime = time.time()
    # Mandatory reports from the training output directory.
    make_prediction_distribution(args.dir)
    make_roc_curve(args.dir)
    make_text_report(args.dir)
    # Optional: score the full dataset when both --train and --key are given.
    if args.train and args.key:
        assert (os.path.exists(args.train))
        make_full_prediction_distribution(args.dir, args.train, args.key)
    print("---> Took {} s".format(time.time()-starttime))
# https://www.codewars.com/kata/delete-occurrences-of-an-element-if-it-occurs-more-than-n-times/train/python
def delete_nth(order, max_e):
    """Return `order` with each element kept at most `max_e` times (order stable).

    Improvement: the original called `deleted.count(element)` for every input
    element (O(n*max_e) scans); a Counter keeps the bookkeeping O(1) per item.
    """
    from collections import Counter
    seen = Counter()
    kept = []
    for element in order:
        if seen[element] < max_e:
            seen[element] += 1
            kept.append(element)
    return kept
|
class Solution:
    def findAndReplacePattern(self, words: List[str], pattern: str) -> List[str]:
        """Return the words matching `pattern` under a one-to-one letter mapping."""
        matches = []
        for word in words:
            mapping = {}
            used_chars = set()
            ok = True
            for w_ch, p_ch in zip(word, pattern):
                if p_ch in mapping:
                    # Pattern letter already mapped: must map to the same char.
                    if mapping[p_ch] != w_ch:
                        ok = False
                        break
                elif w_ch in used_chars:
                    # Char already claimed by a different pattern letter.
                    ok = False
                    break
                else:
                    mapping[p_ch] = w_ch
                    used_chars.add(w_ch)
            if ok:
                matches.append(word)
        return matches
|
import json
import requests
class GitApi(object):
    """Minimal GitHub REST v3 client producing markdown summaries.

    Security fix: credentials are sent only via HTTP Basic auth (`auth=`);
    the original also copied the username/password into the query string,
    which leaks secrets into server and proxy logs.
    """

    def __init__(self, user=None, passwd=None, repo=None) -> None:
        super().__init__()
        if user is None or passwd is None or repo is None:
            raise IOError("Invalid arguments")
        self.user = user
        self.passwd = passwd
        self.repo = repo

    def get_commit(self, sha=None) -> str:
        """Summarise one commit (`sha` given) or the latest commits otherwise."""
        if sha is not None:
            url = "https://api.github.com/repos/{user}/{repo}/commits/{sha}".format(
                user=self.user, repo=self.repo, sha=sha)
        else:
            url = "https://api.github.com/repos/{user}/{repo}/commits".format(
                user=self.user, repo=self.repo)
        response = requests.get(url, auth=(self.user, self.passwd))
        if response.status_code == 422:
            return "**Not a valid commit hash**"
        elif response.status_code == 404:
            return "**Repository not found**"
        elif response.status_code != 200:
            return "**Something went wrong _" + str(response.status_code) + "_**"
        data = json.loads(response.content)
        message = ""
        if sha is not None:
            # NOTE(review): data["author"] can be null for commits whose author
            # has no GitHub account — confirm callers never hit that case.
            message += "Author: **{}** ({})\n".format(data["commit"]["author"]["name"], data["author"]["login"])
            message += "Message: {}\n".format(data["commit"]["message"])
            message += "Date: {}\n".format(data["commit"]["author"]["date"])
            message += "URL: <{}>\n".format(data["html_url"])
            message += "Files changed: \n\n"
            for file in data["files"]:
                message += "Path: " + file["filename"] + "\n"
                message += "Status: " + file["status"] + "\n"
                message += "Changes: " + str(file["changes"]) + "\n\n"
            return message
        else:
            message += "Latest 5 commits to any branch: \n\n"
            # Fix: slicing instead of range(0, 5) avoids IndexError on
            # repositories with fewer than five commits.
            for commit in data[:5]:
                message += "Author: **{}** ({})\n".format(commit["commit"]["author"]["name"], commit["author"]["login"])
                message += "Message: {}\n".format(commit["commit"]["message"])
                message += "sha: _{}_\n".format(commit["sha"])
                message += "URL: _{}_\n\n".format(commit["html_url"])
            return message

    def get_branch(self, branch=None) -> str:
        """Summarise one branch (`branch` given) or list all active branches."""
        if branch is not None:
            url = "https://api.github.com/repos/{user}/{repo}/branches/{branch}".format(
                user=self.user, repo=self.repo, branch=branch)
        else:
            url = "https://api.github.com/repos/{user}/{repo}/branches".format(
                user=self.user, repo=self.repo)
        response = requests.get(url, auth=(self.user, self.passwd))
        if response.status_code == 422:
            return "**Not a valid branch**"
        elif response.status_code == 404:
            return "**Repository not found**"
        elif response.status_code != 200:
            return "**Something went wrong _" + str(response.status_code) + "_**"
        data = json.loads(response.content)
        message = ""
        if branch is None:
            message += "List of all active branches:\n\n"
            for b in data:
                message += "Branch: **" + b["name"] + "**\n"
                message += "SHA: _" + b["commit"]["sha"] + "_\n\n"
            return message
        else:
            message += "Branch: **" + data["name"] + "**\n"
            message += "Latest commit: **" + data["commit"]["commit"]["message"] + "**\n"
            message += "SHA: _" + data["commit"]["sha"] + "_\n"
            message += "URL: _" + data["commit"]["html_url"] + "_ \n\n"
            return message
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# File Name : sql.py
'''Purpose : Intro sth '''
# Creation Date : 1435545244
# Last Modified :
# Release By : Doom.zhou
###############################################################################
from flask import Flask, g
import sqlite3
app = Flask(__name__)
def connect_db():
    """Open and return a fresh connection to the sqlite database."""
    return sqlite3.connect(
        'doom.db', detect_types=sqlite3.PARSE_DECLTYPES)
def query_db(query, args=(), one=False):
    """Run `query` against g.db, returning rows as dicts.

    With one=True, return just the first row (or None when empty).
    """
    cursor = g.db.execute(query, args)
    columns = [description[0] for description in cursor.description]
    rows = [dict(zip(columns, record)) for record in cursor.fetchall()]
    if one:
        return rows[0] if rows else None
    return rows
@app.before_request
def before_request():
    """Attach a fresh DB connection to the request context."""
    g.db = connect_db()
@app.after_request
def after_request(response):
    """Close the per-request DB connection before returning the response."""
    g.db.close()
    return response
@app.route('/')
def index():
    """Show the 12 most recent VPN login/logout records.

    Fix: rows were fetched and printed to stdout but never added to
    `sumstr`, so the page body was always empty.
    """
    sumstr = ''
    results = g.db.execute('select username, login_time, logout_time \
            from vpns order by login_time desc limit 12').fetchall()
    for i, result in enumerate(results):
        print(result)
        # Accumulate each record into the response body.
        sumstr += '{}: {}<br>'.format(i, result)
    return sumstr
if __name__ == '__main__':
    # Development server, reachable from other hosts, on port 5001.
    app.run(debug=True, host='0.0.0.0', port=5001)
|
# Interactively collect students and append them to lista.txt as
# "name,surname,group" lines.
print("Podaj studentów do wczytania:")
students = []
add_student = True
while add_student:
    print("Imie:")
    name = input()
    print("Nazwisko:")
    surname = input()
    print("Grupa:")
    group = input()
    student_data = (name, surname, group)
    students.append(student_data)
    print("Czy chcesz dodać kolejnego studenta? Wpisz tak/nie")
    dec = input()
    if dec == "nie":
        add_student = False
# Fix: the file was opened before the (potentially long) input loop and never
# closed on error; a context manager opens it only when needed and always
# closes it.
with open("lista.txt", "a+") as file:
    for s in students:
        file.write(",".join(s))
        file.write("\n")
|
#! /usr/bin/env python
"""
Date: 2023/03/27
Author: Ziwei Pan
Extract per-read CpG information from Guppy modified-base BAM
Input: Guppy modified-base BAM is 0-based file.
Output: Per-read file:
"""
import argparse
import csv
import os
import pysam
from Bio.Seq import reverse_complement
from modbampy import ModBam
def process_ref(ref_fasta):
    """Open the reference FASTA and collect (contig name, length) pairs.

    Returns:
        (pysam.FastaFile, list of [chr_name, chr_size]).

    NOTE(review): reads the module-level `input_bam` (set in __main__) instead
    of taking the BAM path as a parameter — confirm it is only called after
    argument parsing.
    """
    chr_list = []
    # Read reference
    reference_fasta = pysam.FastaFile(ref_fasta)
    # modbam needs to get all contig lengths first
    tmp_bam = pysam.AlignmentFile(input_bam, "rb")
    for i in range(tmp_bam.nreferences):
        chr_name = tmp_bam.get_reference_name(i)
        chr_size = tmp_bam.get_reference_length(chr_name)
        chr_list.append([chr_name, chr_size])
    return reference_fasta, chr_list
def process_modbam2bed(input_bam, chr_list, reference_fasta, output_bed, canon_threshold, mod_threshold):
    """Write per-read CpG methylation calls from a modified-base BAM as TSV.

    Args:
        input_bam: Guppy modified-base BAM path.
        chr_list: list of [contig_name, contig_length] pairs.
        reference_fasta: open pysam.FastaFile of the reference genome.
        output_bed: output TSV path.
        canon_threshold: prob < threshold -> canonical (Prediction 0).
        mod_threshold: prob >= threshold -> modified (Prediction 1).
    """
    with open(output_bed, 'w') as output_fh:
        writer = csv.writer(output_fh, delimiter='\t', quoting=csv.QUOTE_NONE)
        # output header
        writer.writerow(['ID', 'Chr', 'Pos', 'Strand', 'Prediction', 'Prob_methylation'])
        # https://github.com/epi2me-labs/modbam2bed
        for chr_name, chr_size in chr_list:
            with ModBam(input_bam) as bam:
                # iterates the BAM by read
                for read in bam.reads(chr_name, 0, int(chr_size)):
                    read_data = []
                    if read.is_unmapped or read.is_secondary:
                        continue
                    for pos_mod in read.mod_sites:
                        # Check modbampy part in https://github.com/epi2me-labs/modbam2bed
                        read_id, ref_pos, read_pos, ref_strand, mod_strand, canon_base, mod_base, mod_score = pos_mod
                        # mod_score is a 0-255 integer; convert to probability.
                        prob_mod = round(mod_score / 255, 3)
                        ## Ignore sites with no alignment (ref_pos == -1)
                        # https://github.com/epi2me-labs/modbam2bed/issues/16
                        if ref_pos == -1:
                            continue
                        ### Keep only CpG sites on the reference
                        if ref_strand == "+":
                            try:
                                reference_base = reference_fasta.fetch(chr_name, ref_pos, ref_pos + 2).upper()
                            except Exception:
                                continue
                        elif ref_strand == "-":
                            # For "-" strand, take the reverse complement of the reference context.
                            try:
                                reference_base = reverse_complement(
                                    reference_fasta.fetch(chr_name, ref_pos - 1, ref_pos + 1)).upper()
                            except Exception:
                                continue
                        else:
                            reference_base = "NANA"
                            print("ref_strand is not correct!")
                        # Fix: the original tested `in ['CG' or 'GC']`, which
                        # evaluates to ['CG'] — 'GC' contexts were never matched.
                        if reference_base in ("CG", "GC"):
                            if prob_mod < canon_threshold:  # canonical read, label 0
                                meth_state = 0
                            elif prob_mod >= mod_threshold:  # modified read, label 1
                                meth_state = 1
                            else:
                                # Ambiguous probability: skip the site.
                                continue
                            read_data.append([read_id, chr_name, ref_pos + 1, ref_strand, meth_state, prob_mod])
                    for record in read_data:
                        writer.writerow(record)
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='Convert Guppy modified-base BAM to per-read results')
    parser.add_argument("-i", "--input_file", action="store", type=str,
                        help='input BAM file after Guppy modified base calling', required=True)
    parser.add_argument('-o', '--output_bed_file', action="store", type=str,
                        help='output BED file', required=True)
    parser.add_argument('-r', '--reference_fasta', action="store", type=str,
                        help='reference fasta', required=True)
    parser.add_argument('-a', '--canon_threshold', action="store", type=float, default=0.33,
                        help='Bases with mod. probability < THRESHOLD are counted as canonical (default 0.33)',
                        required=False)
    parser.add_argument('-b', '--mod_threshold', action="store", type=float, default=0.66,
                        help='Bases with mod. probability >= THRESHOLD are counted as modified (default 0.66)',
                        required=False)
    args = parser.parse_args()
    input_bam = os.path.abspath(args.input_file)
    output_bed = os.path.abspath(args.output_bed_file)
    ref_fasta = os.path.abspath(args.reference_fasta)
    canon_threshold = args.canon_threshold
    mod_threshold = args.mod_threshold
    print("Loading reference genome...")
    # Fix: process_ref() was called twice and the first result discarded,
    # re-reading the reference and BAM headers needlessly.
    reference_fasta, chr_list = process_ref(ref_fasta)
    print("Process modbam...")
    process_modbam2bed(input_bam, chr_list, reference_fasta, output_bed, canon_threshold, mod_threshold)
|
from abc import ABC, abstractmethod
from enum import Enum
from typing import List, Callable
import numpy as np
class ColorSpace(Enum):
    """Pixel color spaces a frame's data may be encoded in."""
    RGB = 1
    HSV = 2
    BGR = 3
    GRAY = 4
class VideoFormat(Enum):
    """Container/file formats a stored video may use."""
    MOV = 1
    WMV = 2
    OGG = 3
    AVI = 4
    FLV = 5
    MP4 = 6
    MPEG = 7
class VideoMetaInfo:
    """
    Read-only record describing a stored video.
    # TODO: This is database metadata. Need to discuss what goes in here
    Arguments:
        file (str): path where the video is stored
        fps (int): frames per second of the video
        c_format (VideoFormat): container format of the video file
    """
    def __init__(self, file, fps, c_format):
        self._file = file
        self._fps = fps
        self._c_format = c_format
    @property
    def file(self):
        """Path of the underlying video file."""
        return self._file
    @property
    def fps(self):
        """Frames per second of the video."""
        return self._fps
    @property
    def c_format(self):
        """Container format of the video."""
        return self._c_format
class FrameInfo:
    """
    Describes the geometry and color layout of a frame.
    Arguments:
        height (int)(default: -1): height of the image; -1 when not required
        width (int)(default: -1): width of the image; -1 when not required
        channels (int)(default: 3): number of input channels in the video
        color_space (ColorSpace)(default: ColorSpace.RGB): color space of the frame
    """
    def __init__(self, height=-1, width=-1, channels=3, color_space=ColorSpace.RGB):
        self._height = height
        self._width = width
        self._channels = channels
        self._color_space = color_space
    @property
    def width(self):
        return self._width
    @property
    def height(self):
        return self._height
    @property
    def color_space(self):
        return self._color_space
    @property
    def channels(self):
        return self._channels
    def __eq__(self, other):
        # Two infos are equal when all four descriptive fields agree.
        return (self.color_space, self.width, self.height, self.channels) == \
            (other.color_space, other.width, other.height, other.channels)
class Frame:
    """
    Holds one video frame and its position/metadata.
    Arguments:
        index (int): index of the frame within the video
        data (numpy.ndarray): raw frame contents
        info (FrameInfo): properties of the frame
    """
    def __init__(self, index, data, info):
        self._index = index
        self._data = data
        self._info = info
    @property
    def data(self):
        return self._data
    @property
    def index(self):
        return self._index
    @property
    def info(self):
        return self._info
    def __eq__(self, other):
        # Cheap index comparison first; pixel data compared element-wise.
        if self.index != other.index:
            return False
        return np.array_equal(self.data, other.data) and self.info == other.info
class FrameBatch:
    """
    Data model used for storing a batch of frames
    Arguments:
        frames (List[Frame]): List of video frames
        info (FrameInfo): Information about the frames in the batch
        outcomes (Dict[str, List[BasePrediction]]): outcomes of running a udf with name 'x' as key
    """
    def __init__(self, frames, info, outcomes=None):
        super().__init__()
        # Fresh dict per batch; a dict default argument would be shared.
        if outcomes is None:
            outcomes = dict()
        self._info = info
        self._frames = tuple(frames)
        self._batch_size = len(frames)
        self._outcomes = outcomes
    @property
    def frames(self):
        return self._frames
    @property
    def info(self):
        return self._info
    @property
    def batch_size(self):
        return self._batch_size
    def frames_as_numpy_array(self):
        """Stack the raw data of all frames into one numpy array."""
        return np.array([frame.data for frame in self.frames])
    def __eq__(self, other):
        return self.info == other.info and \
            self.frames == other.frames and \
            self._outcomes == other._outcomes
    def set_outcomes(self, name, predictions: 'BasePrediction'):
        """
        Used for storing outcomes of the UDF predictions
        Arguments:
            name (str): name of the UDF to which the predictions belong to
            predictions (BasePrediction): Predictions/Outcome after executing the UDF on prediction
        """
        self._outcomes[name] = predictions
    def get_outcomes_for(self, name: str) -> List['BasePrediction']:
        """
        Returns the stored outcomes for a UDF name ([] when absent).
        Arguments:
            name (str): name of the udf on which predicate is being executed
        Returns:
            List[BasePrediction]
        """
        return self._outcomes.get(name, [])
    def _get_frames_from_indices(self, required_frame_ids):
        # TODO: Implement this using __getitem__
        new_frames = [self.frames[i] for i in required_frame_ids]
        new_batch = FrameBatch(new_frames, self.info)
        # Keep the per-frame outcomes aligned with the selected frames.
        for key in self._outcomes:
            new_batch._outcomes[key] = [self._outcomes[key][i] for i in required_frame_ids]
        return new_batch
    def __getitem__(self, indices) -> 'FrameBatch':
        """
        Select a sub-batch by a list of indices or a slice.
        Arguments:
            indices (list or slice)
        Returns:
            FrameBatch
        Raises:
            TypeError: on unsupported index types (previously fell through
                and silently returned None)
        """
        if isinstance(indices, list):
            return self._get_frames_from_indices(indices)
        elif isinstance(indices, slice):
            start = indices.start if indices.start else 0
            end = indices.stop if indices.stop else len(self.frames)
            # Normalize BOTH negative bounds; only `end` was normalized
            # before, so a negative start produced a wrapped/duplicated
            # selection (e.g. batch[-2:] walked indices -2..len-1).
            if start < 0:
                start = len(self.frames) + start
            if end < 0:
                end = len(self.frames) + end
            step = indices.step if indices.step else 1
            return self._get_frames_from_indices(range(start, end, step))
        raise TypeError(f"unsupported index type: {type(indices).__name__}")
class Point:
    """
    A point in 2-D coordinate space.
    Arguments:
        x (int): x coordinate
        y (int): y coordinate
    """
    def __init__(self, x, y):
        self._x = x
        self._y = y
    @property
    def x(self):
        return self._x
    @property
    def y(self):
        return self._y
    def __eq__(self, other):
        return (self.x, self.y) == (other.x, other.y)
class BoundingBox:
    """
    An axis-aligned bounding box defined by two corner points.
    Arguments:
        top_left (Point): top-left corner of the box
        bottom_right (Point): bottom-right corner of the box
    """
    def __init__(self, top_left: Point, bottom_right: Point):
        self._top_left = top_left
        self._bottom_right = bottom_right
    @property
    def bottom_right(self):
        return self._bottom_right
    @property
    def top_left(self):
        return self._top_left
    def __eq__(self, other):
        # Boxes match when both corners match.
        return self.top_left == other.top_left and \
            self.bottom_right == other.bottom_right
class BasePrediction(ABC):
    """Base class for any type of prediction from model"""
    @abstractmethod
    def eq(self, element) -> bool:
        """
        Checks if prediction is equal to the element
        Arguments:
            element (object): Check if element is equivalent
        Returns:
            bool (True if equal else False)
        """
        pass
    @abstractmethod
    def contains(self, element) -> bool:
        """
        Checks if the prediction contains the element
        Arguments:
            element (object): Element to be checked
        Returns:
            bool (True if contains else False)
        """
        pass
    @abstractmethod
    def has_one(self, elements: List[object]) -> bool:
        """
        This method is used for defining the 'IN' operation:
        whether at least one of the given elements appears in the prediction.
        Arguments:
            elements (List[object]): candidate elements to look for
        Returns:
            bool
        """
        pass
class Prediction(BasePrediction):
    """
    Data model used to store the predicted values of the model
    Arguments:
        frame (Frame): Frame in which the predictions are made
        labels (List[str]): predicted label per detection
        scores (List[float]): confidence score per detection
        boxes (List[BoundingBox]): optional bounding box per detection
    """
    def __init__(self, frame: Frame, labels: List[str], scores: List[float], boxes: List[BoundingBox] = None):
        self._boxes = boxes
        self._labels = labels
        self._frame = frame
        self._scores = scores
    @property
    def boxes(self):
        return self._boxes
    @property
    def labels(self):
        return self._labels
    @property
    def frame(self):
        return self._frame
    @property
    def scores(self):
        return self._scores
    @staticmethod
    def predictions_from_batch_and_lists(batch: FrameBatch, predictions: List[List[str]],
                                         scores: List[List[float]], boxes: List[List[BoundingBox]] = None):
        """
        Factory method for returning a list of Prediction objects from identified values
        Arguments:
            batch (FrameBatch): frame batch for which the predictions belong to
            predictions (List[List[str]]): List of prediction labels per frame in batch
            scores (List[List[float]]): List of prediction scores per frame in batch
            boxes (List[List[BoundingBox]]): List of bounding boxes associated with predictions
        Returns:
            List[Prediction]
        """
        assert len(batch.frames) == len(predictions)
        assert len(batch.frames) == len(scores)
        if boxes is not None:
            assert len(batch.frames) == len(boxes)
        predictions_ = []
        for i in range(len(batch.frames)):
            prediction_boxes = boxes[i] if boxes is not None else None
            predictions_.append(Prediction(batch.frames[i], predictions[i], scores[i], boxes=prediction_boxes))
        return predictions_
    def __eq__(self, other):
        return self.boxes == other.boxes and \
            self.frame == other.frame and \
            self.scores == other.scores and \
            self.labels == other.labels
    def eq(self, element) -> bool:
        return self.contains(element)
    def has_one(self, element: List[object]) -> bool:
        # 'IN' operation per the BasePrediction contract: true when at least
        # one of the given labels was predicted.  (Was an unimplemented stub
        # that always returned None.)
        return any(self.contains(e) for e in element)
    def contains(self, element) -> bool:
        return element in self.labels
class Predicate:
    """
    A named predicate applied to predictions during filter operations.
    Arguments:
        name (str): name of the field the predicate is executed on
        predicate (Callable[[BasePrediction], bool]): the test to apply
    """
    def __init__(self, name, predicate: Callable[[BasePrediction], bool]):
        self._predicate = predicate
        self._name = name
    @property
    def name(self):
        return self._name
    def __call__(self, prediction: BasePrediction):
        # Delegate straight to the wrapped callable.
        return self._predicate(prediction)
|
# pacmanAgents.py
# ---------------
# Licensing Information: You are free to use or extend these projects for
# educational purposes provided that (1) you do not distribute or publish
# solutions, (2) you retain this notice, and (3) you provide clear
# attribution to UC Berkeley, including a link to
# http://inst.eecs.berkeley.edu/~cs188/pacman/pacman.html
#
# Attribution Information: The Pacman AI projects were developed at UC Berkeley.
# The core projects and autograders were primarily created by John DeNero
# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).
# Student side autograding was added by Brad Miller, Nick Hay, and
# Pieter Abbeel (pabbeel@cs.berkeley.edu).
#
# Some modifications were made to this file by Kristina Striegnitz
# (striegnk@union.edu).
from pacman import Directions
from game import Agent
import random
# Order in which directions are tried when picking an initial heading.
DIRECTION_LIST = [Directions.WEST, Directions.EAST, Directions.NORTH, Directions.SOUTH]
class GoWestAgent(Agent):
    """An agent that goes West until it can't."""
    def getAction(self, game_state):
        """Return WEST while it is legal, otherwise STOP (GameState is defined in pacman.py)."""
        legal = game_state.getLegalPacmanActions()
        if Directions.WEST not in legal:
            return Directions.STOP
        return Directions.WEST
class LeftTurnAgent(Agent):
    """An agent that turns left at every opportunity"""
    def getAction(self, game_state):
        """Prefer, in order: a left turn, straight, a right turn, reversing."""
        legal = game_state.getLegalPacmanActions()
        heading = game_state.getPacmanState().getDirection()
        if heading == Directions.STOP:
            heading = Directions.NORTH
        for candidate in (Directions.LEFT[heading], heading,
                          Directions.RIGHT[heading], Directions.REVERSE[heading]):
            if candidate in legal:
                return candidate
        return Directions.STOP
class RectangularRoomCleaner(Agent):
    """
    A simple-reflex agent that cleans an entire rectangular room by sweeping
    east/west and stepping south at each wall. Assumes that there are no obstacles.
    """
    def getAction(self, game_state):
        legal = game_state.getLegalPacmanActions()
        current = game_state.getPacmanState().getDirection()
        left = Directions.LEFT[current]
        right = Directions.RIGHT[current]
        # At the start there is no heading yet; pick the first legal direction
        # in DIRECTION_LIST preference order.
        if current == Directions.STOP:
            moves = list(filter(lambda move: move in legal, DIRECTION_LIST))
            current = moves[0] if moves else current
        if current == Directions.SOUTH:
            # Turn east after hitting west wall
            if left in legal and right not in legal:
                return left
            # Turn west after hitting east wall
            elif left not in legal and right in legal:
                return right
        if current not in legal:
            # Always turn south when hitting a wall
            if left in legal and right in legal:
                if current == Directions.WEST:
                    return left
                else:
                    return right
            # Turn or reverse when hitting a corner
            elif left in legal:
                return left
            elif right in legal:
                return right
            return Directions.REVERSE[current]
        else:
            # Go straight if possible
            return current
class RandomizedRoomCleaner(Agent):
    """
    A randomized simple-reflex agent. Continues straight with a 50% chance as long as going straight is legal. Else,
    it randomly picks between the remaining legal moves without stopping.
    """
    def getAction(self, game_state):
        moves = game_state.getLegalPacmanActions()
        moves.remove(Directions.STOP)
        # Nothing legal besides stopping.
        if not moves:
            return Directions.STOP
        heading = game_state.getPacmanState().getDirection()
        # NOTE: the coin flip is deliberately evaluated before the legality
        # check so the random-number stream is consumed in a fixed order.
        if heading != Directions.STOP and bool(random.getrandbits(1)) and heading in moves:
            return heading
        # At least one legal move exists; never stop voluntarily.
        return random.choice(moves)
class ModelBasedRoomCleaner(Agent):
    """
    A model-based reflex agent that traverses the room in a depth-first pattern.
    """
    # Displacement along x for each possible action.
    movements_x = {
        Directions.NORTH: 0,
        Directions.SOUTH: 0,
        Directions.EAST: 1,
        Directions.WEST: -1,
        Directions.STOP: 0
    }
    # Displacement along y for each possible action.
    movements_y = {
        Directions.NORTH: 1,
        Directions.SOUTH: -1,
        Directions.EAST: 0,
        Directions.WEST: 0,
        Directions.STOP: 0
    }
    def __init__(self, index=0):
        super().__init__(index)
        # Tracked position relative to the start cell, visited cells, and the
        # stack of forward moves used for backtracking.
        self.x = 0
        self.y = 0
        self.explored = set()
        self.moves = []
    def getAction(self, game_state):
        """Step into an unexplored neighbor if any; otherwise backtrack."""
        legal = game_state.getLegalPacmanActions()
        legal.remove(Directions.STOP)
        frontier = [move for move in legal if not self.is_explored(move)]
        if frontier:
            action = frontier.pop()
            self.update_model(action)
            return action
        # Dead end: retrace the most recent forward move.
        action = Directions.REVERSE[self.moves.pop()]
        self.update_model(action, backtrack=True)
        return action
    def update_model(self, action, backtrack=False):
        """Mark the current cell explored and advance the tracked position."""
        self.explored.add((self.x, self.y))
        self.x += ModelBasedRoomCleaner.movements_x[action]
        self.y += ModelBasedRoomCleaner.movements_y[action]
        if not backtrack:
            self.moves.append(action)
    def is_explored(self, action):
        """Return whether taking `action` would land on an already-visited cell."""
        nx = self.x + ModelBasedRoomCleaner.movements_x[action]
        ny = self.y + ModelBasedRoomCleaner.movements_y[action]
        return (nx, ny) in self.explored
|
from django.db import models
from django.contrib.auth.models import User
class Event(models.Model):
    """A bookable event with a fixed number of seats, owned by a user."""
    title = models.CharField(max_length=100)
    description = models.TextField()
    location = models.CharField(max_length=60)
    datetime = models.DateTimeField()
    # Total number of available seats.
    seats = models.PositiveIntegerField()
    # Deleting the owning user also deletes their events.
    owner = models.ForeignKey(User, on_delete=models.CASCADE)
|
import json
import unittest
import responses
import htq
from htq import service
from htq.db import get_redis_client
from requests.utils import parse_header_links as phl
# Target URL that the mocked upstream endpoints are registered under.
url = 'http://localhost/'
# Shared redis client and Flask test client used by all test cases below.
client = get_redis_client()
app = service.app.test_client()
def parse_header_links(value):
    """
    Parse an HTTP ``Link`` header into a dict mapping each link's ``rel``
    (falling back to its URL when ``rel`` is absent) to the URL itself.
    """
    return {
        (entry.get('rel') or entry.get('url')): entry['url']
        for entry in phl(value)
    }
class TestCase(unittest.TestCase):
    """Integration tests for the htq HTTP service against mocked upstream endpoints."""
    def setUp(self):
        # Start every test from an empty queue/store.
        htq.flush()
        # Register mocked upstream responses for GET/DELETE/POST on `url`.
        responses.add(responses.GET,
                      url=url,
                      body='{"ok": 1}',
                      status=200,
                      content_type='application/json')
        responses.add(responses.DELETE,
                      url=url,
                      status=204)
        responses.add(responses.POST,
                      url=url,
                      status=201)
    @responses.activate
    def test_root(self):
        # The service root advertises its endpoints via a Link header.
        resp = app.get('/')
        self.assertIn('Link', resp.headers)
    @responses.activate
    def test_send(self):
        # Queueing a request redirects (303) to the created resource.
        resp = app.post('/', data=json.dumps({
            'url': url,
        }), headers={'content-type': 'application/json'})
        # Redirect
        self.assertEqual(resp.status_code, 303)
        resp = app.get(resp.location)
        self.assertEqual(resp.status_code, 200)
        self.assertIn('status', json.loads(resp.data.decode('utf8')))
    @responses.activate
    def test_status(self):
        # A freshly queued request reports status 'queued'.
        resp = app.post('/', data=json.dumps({
            'url': url,
        }), headers={'content-type': 'application/json'})
        resp = app.get(resp.location + 'status/')
        self.assertEqual(resp.status_code, 200)
        self.assertEqual(json.loads(resp.data.decode('utf8')),
                         {'status': 'queued'})
    @responses.activate
    def test_response(self):
        resp = app.post('/', data=json.dumps({
            'url': url,
        }), headers={'content-type': 'application/json'})
        location = resp.location
        # Receive response..
        htq.receive(htq.pop())
        resp = app.get(location)
        self.assertIn('response', resp.headers['Link'])
        links = parse_header_links(resp.headers['Link'])
        response_url = links['response']
        # Response exists, can be deleted once, then yields 404.
        resp = app.get(response_url)
        self.assertEqual(resp.status_code, 200)
        resp = app.delete(response_url)
        self.assertEqual(resp.status_code, 204)
        resp = app.delete(response_url)
        self.assertEqual(resp.status_code, 404)
    @responses.activate
    def test_cancel(self):
        # Deleting a queued request marks it canceled.
        resp = app.post('/', data=json.dumps({
            'url': url,
        }), headers={'content-type': 'application/json'})
        location = resp.location
        resp = app.delete(location)
        self.assertEqual(resp.status_code, 204)
        resp = app.get(location)
        data = json.loads(resp.data.decode('utf8'))
        self.assertEqual(data['status'], htq.CANCELED)
|
# -*- coding: utf-8 -*-
import re
from xkeysnail.transform import *
# [Global modmap] Change modifier keys as in xmodmap
define_modmap({
    Key.MUHENKAN: Key.LEFT_META,
    Key.HENKAN: Key.RIGHT_CTRL,
})
# [Conditional modmap] Change modifier keys in certain applications
# define_conditional_modmap(re.compile(r'Emacs'), {
#     Key.RIGHT_CTRL: Key.ESC,
# })
# [Multipurpose modmap] Give a key two meanings. A normal key when pressed and
# released, and a modifier key when held down with another key. See Xcape,
# Carabiner and caps2esc for ideas and concept.
# define_multipurpose_modmap(
#     Enter is enter when pressed and released. Control when held down.
#     {Key.ENTER: [Key.ENTER, Key.RIGHT_CTRL]}
#     Capslock is escape when pressed and released. Control when held down.
#     {Key.CAPSLOCK: [Key.ESC, Key.LEFT_CTRL]
#     To use this example, you can't remap capslock with define_modmap.
# )
define_keymap(None, { # To restrict this keymap to specific apps, replace None with e.g. re.compile("Firefox|Google-chrome")
    K("C-left_brace"): K("esc"),
    K("C-h"): K("backspace"),
    K("Super-space"): K("right_meta"), # assumes right META is bound to the Activities shortcut via gnome-tweaks
    K("Super-e"): launch(["nautilus"]),
    K("Super-q"): K("M-f4"),
    K("Super-t"): launch(["gnome-terminal"]),
    # There seems to be no way to pass modifiers through, so each modifier combination is listed explicitly
    K("Super-h"): K("left"),
    K("Super-j"): K("down"),
    K("Super-k"): K("up"),
    K("Super-l"): K("right"),
    K("Super-Shift-h"): K("Shift-left"),
    K("Super-Shift-j"): K("Shift-down"),
    K("Super-Shift-k"): K("Shift-up"),
    K("Super-Shift-l"): K("Shift-right"),
    K("Super-M-h"): K("M-left"),
    K("Super-M-j"): K("M-down"),
    K("Super-M-k"): K("M-up"),
    K("Super-M-l"): K("M-right"),
    # Other usage patterns, taken from the upstream sample config
    # K("C-o"): [K("C-a"), K("C-c"), launch(["gedit"]), sleep(0.5), K("C-v")]
    # K("C-x"): {
    #     K("h"): [K("C-home"), K("C-a"), set_mark(True)],
    #     K("C-g"): pass_through_key,
    # }
}, "Global")
|
import random

# Pick a random word from the word bank.  random.choice avoids the off-by-one
# of random.randint(0, len(bank)), which is inclusive on both ends and could
# raise IndexError by producing len(bank); the context manager guarantees the
# file is closed.
with open("palavras.txt", "rt") as f:
    bank = f.readlines()
palavra = random.choice(bank).strip('\n')
print(palavra)
print(list(palavra))

# Reveal every occurrence of `letter` in the hidden word.  The placeholder
# list matches the word's actual length instead of a hard-coded six dashes.
palavra_aux = ['-'] * len(palavra)
letter = "a"
for i, j in enumerate(palavra):
    if j == letter:
        print(palavra[i])
        palavra_aux[i] = letter
print(palavra_aux)
|
'''
Created on Nov 30, 2016
@author: Yuval Pinter
'''
from __future__ import division

import unittest

from numpy import nan
# numpy.testing.utils was a deprecated alias and has been removed from modern
# NumPy; the public location of these helpers is numpy.testing.
from numpy.testing import assert_almost_equal
from numpy.testing import assert_equal

from evaluate_morphotags import Evaluator
from utils import split_tagstring
class Test(unittest.TestCase):
    """Checks micro/macro F1 and accuracy of Evaluator on a small fixture file."""
    def testEval(self):
        # One evaluator per scoring mode: attribute-only, attribute+value, exact match.
        eval1 = Evaluator(m = 'att')
        eval2 = Evaluator(m = 'att_val')
        eval3 = Evaluator(m = 'exact')
        # Fixture lines are "<gold>\t<observed>" tag strings; '#' lines are comments.
        with open('simple_morpho_eval_test.txt', 'r') as sample_file:
            for l in sample_file.readlines():
                if not l.startswith('#'):
                    g, o = map(split_tagstring, l.split('\t'))
                    eval1.add_instance(g, o)
                    eval2.add_instance(g, o)
                    eval3.add_instance(g, o)
        # Expected fractions were computed by hand from the fixture.
        assert_almost_equal(eval1.mic_f1(), 5/9)
        assert_almost_equal(eval1.mac_f1(), 13/30)
        assert_almost_equal(eval1.mic_f1(att = "Number"), 2/5)
        assert_equal(eval1.mac_f1(att = "Number"), nan)
        assert_almost_equal(eval2.mic_f1(), 5/9)
        assert_almost_equal(eval2.mac_f1(), 29/70)
        assert_almost_equal(eval2.mac_f1(att = "Number"), 1/4)
        assert_almost_equal(eval3.acc(), 1/4)
# Allow running this test module directly as a script.
if __name__ == "__main__":
    unittest.main()
|
def solution(n):
    """
    Convert a positive integer ``n`` to the "1-2-4 number system":
    base 3 using the digits 1, 2, 4 instead of 0, 1, 2, so the sequence
    for n = 1, 2, 3, 4, 5, 6, ... is '1', '2', '4', '11', '12', '14', ...

    The previous implementation mapped a remainder of 0 straight to the
    digit '4' (via num[-1]) without borrowing from the next digit, so
    multiples of 3 beyond the first were wrong (e.g. 6 -> '24' instead of
    '14', 9 -> '44' instead of '24').

    Arguments:
        n (int): positive integer to convert
    Returns:
        str: the 1-2-4 representation of n
    """
    num = ['1', '2', '4']
    answer = ''
    while n > 0:
        # Shift to 0-based so the digit set {1, 2, 4} lines up with base 3.
        n -= 1
        answer = num[n % 3] + answer
        n //= 3
    return answer


print(solution(4))
import torch
import torch.nn.functional as F
from torch.optim import Adam
from SAC.models import GaussianPolicy, QNetwork
# Run on GPU when one is available.
device = 'cuda' if torch.cuda.is_available() else 'cpu'
GAMMA = 0.99  # discount factor for future rewards
TAU = 0.005  # Polyak averaging coefficient for target-network updates
lr = 3e-4  # learning rate shared by actor, critic and temperature optimizers
HIDDEN_SIZE = 256  # hidden-layer width of policy and Q networks
def soft_update(target, source, tau):
    """Polyak-average source parameters into target: theta' <- (1 - tau) * theta' + tau * theta."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        blended = tgt.data * (1.0 - tau) + src.data * tau
        tgt.data.copy_(blended)
def hard_update(target, source):
    """Copy every source parameter into target verbatim (full synchronization)."""
    for tgt, src in zip(target.parameters(), source.parameters()):
        tgt.data.copy_(src.data)
class SAC(object):
    """
    Soft Actor-Critic agent: twin Q critics with a slowly-updated target
    network, a Gaussian policy, and optional automatic entropy tuning.

    Arguments:
        input_size: dimensionality of the state vector
        action_space: gym-style action space (its .shape is read here)
        alpha (float): entropy temperature coefficient
        auto_e_tune (bool): learn alpha automatically when True
    """
    def __init__(self, input_size, action_space, alpha=0.2, auto_e_tune=False):
        self.alpha = alpha
        self.target_update_interval = 1
        self.automatic_entropy_tuning = auto_e_tune
        # Twin Q networks plus a target copy initialized to the same weights.
        self.critic = QNetwork(input_size, action_space.shape[0], HIDDEN_SIZE).to(device)
        self.critic_optim = Adam(self.critic.parameters(), lr=lr)
        self.critic_target = QNetwork(input_size, action_space.shape[0], HIDDEN_SIZE).to(device)
        hard_update(self.critic_target, self.critic)
        # Target Entropy = −dim(A)
        if self.automatic_entropy_tuning is True:
            self.target_entropy = -torch.prod(torch.Tensor(action_space.shape).to(device)).item()
            # Optimize log(alpha) so the temperature stays positive.
            self.log_alpha = torch.zeros(1, requires_grad=True, device=device)
            self.alpha_optim = Adam([self.log_alpha], lr=lr)
        self.policy = GaussianPolicy(input_size, action_space.shape[0], HIDDEN_SIZE, action_space).to(device)
        self.policy_optim = Adam(self.policy.parameters(), lr=lr)
    def select_action(self, state, evaluate=False):
        """Sample a stochastic action, or the deterministic one when evaluate=True."""
        state = torch.FloatTensor(state).unsqueeze(0).to(device)
        if evaluate is False:
            action, _, _ = self.policy.sample(state)
        else:
            # Third value returned by sample() is used as the evaluation action.
            _, _, action = self.policy.sample(state)
        return action.detach().cpu().numpy()[0]
    def update_parameters(self, memory, batch_size, updates):
        """
        One SAC gradient step on critic, actor and (optionally) temperature.
        Returns the current entropy temperature alpha.
        NOTE(review): after the first auto-tuning step alpha becomes a tensor
        rather than a float — confirm callers handle both.
        """
        # Sample a batch from memory
        state_batch, action_batch, reward_batch, next_state_batch, mask_batch = memory.sample(batch_size=batch_size)
        state_batch = torch.FloatTensor(state_batch).to(device)
        next_state_batch = torch.FloatTensor(next_state_batch).to(device)
        action_batch = torch.FloatTensor(action_batch).to(device)
        reward_batch = torch.FloatTensor(reward_batch).to(device).unsqueeze(1)
        mask_batch = torch.FloatTensor(mask_batch).to(device).unsqueeze(1)
        # Bootstrapped soft target value; no gradients through the target net.
        with torch.no_grad():
            next_state_action, next_state_log_pi, _ = self.policy.sample(next_state_batch)
            qf1_next_target, qf2_next_target = self.critic_target(next_state_batch, next_state_action)
            min_qf_next_target = torch.min(qf1_next_target, qf2_next_target) - self.alpha * next_state_log_pi
            next_q_value = reward_batch + mask_batch * GAMMA * min_qf_next_target
        qf1, qf2 = self.critic(state_batch, action_batch)
        # Two Q-functions to mitigate positive bias in the policy improvement step
        qf1_loss = F.mse_loss(qf1, next_q_value)  # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
        qf2_loss = F.mse_loss(qf2, next_q_value)  # JQ = 𝔼(st,at)~D[0.5(Q1(st,at) - r(st,at) - γ(𝔼st+1~p[V(st+1)]))^2]
        qf_loss = qf1_loss + qf2_loss
        self.critic_optim.zero_grad()
        qf_loss.backward()
        self.critic_optim.step()
        pi, log_pi, _ = self.policy.sample(state_batch)  # f(εt;st)
        qf1_pi, qf2_pi = self.critic(state_batch, pi)
        min_qf_pi = torch.min(qf1_pi, qf2_pi)
        policy_loss = ((self.alpha * log_pi) - min_qf_pi).mean()
        # Jπ = 𝔼st∼D,εt∼N[α * logπ(f(εt;st)|st) − Q(st,f(εt;st))]
        self.policy_optim.zero_grad()
        policy_loss.backward()
        self.policy_optim.step()
        if self.automatic_entropy_tuning:
            # Move log_alpha so entropy tracks the target entropy.
            alpha_loss = -(self.log_alpha * (log_pi + self.target_entropy).detach()).mean()
            self.alpha_optim.zero_grad()
            alpha_loss.backward()
            self.alpha_optim.step()
            self.alpha = self.log_alpha.exp()
        if updates % self.target_update_interval == 0:
            soft_update(self.critic_target, self.critic, TAU)
        return self.alpha
    def save_model(self, env_name, model_path=None):
        """Save actor and critic weights (default path derived from env_name)."""
        if model_path is None:
            model_path = 'trained_models/' + env_name + '_model.pth.tar'
        torch.save({'actor': self.policy.state_dict(), 'critic': self.critic.state_dict()}, model_path)
    def load_model(self, model_path):
        """Load actor and critic weights saved by save_model()."""
        self.policy.load_state_dict(torch.load(model_path)['actor'])
        self.critic.load_state_dict(torch.load(model_path)['critic'])
|
# https://leetcode.com/problems/keyboard-row/
class Solution:
    # The three QWERTY keyboard rows (uppercase letters).
    r1 = ['Q','W','E','R','T','Y','U','I','O','P']
    r2 = ['A','S','D','F','G','H','J','K','L',]
    r3 = ['Z','X','C','V','B','N','M']

    def check(self, word, row):
        """Return True when every character of `word` appears in `row`."""
        return all(x in row for x in word)

    def findWords(self, words: List[str]) -> List[str]:
        """Return the words that can be typed using letters from a single keyboard row."""
        ans = []
        for w in words:
            u = w.upper()  # uppercase once, not once per row check
            if self.check(u, self.r1) or self.check(u, self.r2) or self.check(u, self.r3):
                ans.append(w)
        return ans
class TestOverload:
    """
    Demonstrates that Python does NOT support constructor overloading:
    each successive __init__ definition rebinds the name, so only the
    last one (taking name and age) survives on the class.
    """
    def __init__(self):
        print('inside init')
    # Rebinds __init__; the no-argument version above is discarded.
    def __init__(self, name):
        print(f'name:{name}')
    # Rebinds __init__ again; only this two-argument version remains.
    def __init__(self, name, age):
        print(f"n:{name}, a:{age}")
# Both no-arg and one-arg construction fail because only __init__(self, name, age) exists:
# i = TestOverload()
# print(i)
# Traceback (most recent call last):
#   File "C:/Users/DattatrayaTembare/PycharmProjects/python-examples/src/oop/class_overloading.py", line 12, in <module>
#     i = TestOverload()
# TypeError: __init__() missing 2 required positional arguments: 'name' and 'age'
# o = TestOverload("Datta")
# print(o)
# Traceback (most recent call last):
#   File "C:/Users/DattatrayaTembare/PycharmProjects/python-examples/src/oop/class_overloading.py", line 9, in <module>
#     o = TestOverload("Datta")
# TypeError: __init__() missing 1 required positional argument: 'age'
o = TestOverload("Datta", 40)
print(o)
# Output:
# n:Datta, a:40
# <__main__.TestOverload object at 0x0000015E9C4C84A8>
class NewClass:
    """Shows that method definitions also shadow earlier ones: the second m1 wins."""
    def m1(self, m):
        print(m)
    # Rebinds m1; the one-argument version above is lost.
    def m1(self, m, l):
        print(m, l)
n = NewClass()
# Calling the one-argument form fails because only m1(self, m, l) survives:
# print(n.m1(1))
# Traceback (most recent call last):
#   File "C:/Users/DattatrayaTembare/PycharmProjects/python-examples/src/oop/class_overloading.py", line 35, in <module>
#     print(n.m1(1))
# TypeError: m1() missing 1 required positional argument: 'l'
print(n.m1(1,2))
# Output ("None" because m1 returns nothing):
# <__main__.TestOverload object at 0x000002370517C5C0>
# 1 2
# None
# cook your dish here
# For each test case: given a count `w` of events in a week-long cycle that
# starts on day `s`, print how many events fall on each weekday (mon..sun).
t = int(input())
for _ in range(t):
    w, s = input().split()
    w = int(w)
    days = ["mon", "tues", "wed", "thurs", "fri", "sat", "sun"]
    # Every day receives the number of complete weeks; the w % 7 leftovers go
    # to consecutive days starting from `s`, wrapping around the week.
    ans = [w // 7] * 7
    start = days.index(s)
    for k in range(w % 7):
        ans[(start + k) % 7] += 1
    print(*ans)
import vcenter
# Connect to the vCenter server
vcenter.connect()
# Fetch the current date from the vCenter server
vcenter.currentDate()
# TODO: create the VM template (not implemented yet)
# Disconnect from vCenter
vcenter.disConnect()
|
""" logging_utils.py
Utility functions for logging experiments to CometML and TensorBoard
Collaboratively developed
by Avi Schwarzschild, Eitan Borgnia,
Arpit Bansal, and Zeyad Emam.
Developed for DeepThinking project
October 2021
"""
import os
# Ignore statements for pylint:
# Too many branches (R0912), Too many statements (R0915), No member (E1101),
# Not callable (E1102), Invalid name (C0103), No exception (W0702),
# Too many local variables (R0914), Missing docstring (C0116, C0115).
# pylint: disable=R0912, R0915, E1101, E1102, C0103, W0702, R0914, C0116, C0115
def get_dirs_for_saving(args):
    """
    Build (and create on disk if needed) the checkpoint and result
    directories for a run, encoding the hyper-parameters in the name.

    Arguments:
        args: parsed arguments providing model, optimizer, train_mode, width,
              max_iters, alpha, lr, train_batch_size, epochs, output, run_id
    Returns:
        tuple(str, str): (checkpoint_dir, result_dir), both existing on disk
    """
    # generate string for saving, to be used below when saving checkpoints and stats
    base_dir = f"{args.model}_{args.optimizer}" \
               f"_train_mode={args.train_mode}" \
               f"_width={args.width}" \
               f"_max_iters={args.max_iters}" \
               f"_alpha={args.alpha}" \
               f"_lr={args.lr}" \
               f"_batchsize={args.train_batch_size}" \
               f"_epoch={args.epochs - 1}"
    checkpoint = os.path.join("checkpoints", args.output, base_dir)
    result = os.path.join("results", args.output, base_dir, args.run_id)
    for path in [checkpoint, result]:
        # exist_ok avoids the check-then-create race of isdir() + makedirs().
        os.makedirs(path, exist_ok=True)
    return checkpoint, result
def write_to_tb(stats, stat_names, epoch, writer):
    """Log each named validation statistic to TensorBoard under 'val/<name>'."""
    for label, value in zip(stat_names, stats):
        writer.add_scalar(os.path.join("val", label), value, epoch)
|
# Generated by Django 2.2 on 2019-07-14 08:33
import datetime
from django.db import migrations, models
from django.utils.timezone import utc
class Migration(migrations.Migration):
    """
    Adds a required `password` field to Host and gives `port` a default of 22.

    NOTE(review): the datetime default is the one-off value makemigrations
    recorded to back-fill pre-existing rows (preserve_default=False, so it is
    not kept on the model) — confirm passwords are hashed/handled elsewhere.
    """
    dependencies = [
        ('hosts_manager', '0001_initial'),
    ]
    operations = [
        migrations.AddField(
            model_name='host',
            name='password',
            field=models.CharField(default=datetime.datetime(2019, 7, 14, 8, 33, 25, 119302, tzinfo=utc), max_length=200),
            preserve_default=False,
        ),
        migrations.AlterField(
            model_name='host',
            name='port',
            field=models.IntegerField(default=22),
        ),
    ]
|
class BankAccount:
    """
    A simple bank account supporting chained operations.

    Arguments:
        name (str): account holder's name
        acct_num: account number
        int_rate (float): interest rate applied by yield_interest()
        balance (float): opening balance
    """
    def __init__(self, name, acct_num, int_rate, balance):
        self.name = name
        self.acct_num = acct_num
        # Use the caller-supplied values; previously these were hard-coded to
        # .05 and 0, silently ignoring the int_rate and balance parameters.
        self.int_rate = int_rate
        self.account_balance = balance
    def deposit(self, amount):
        """Add `amount` to the balance; returns self for chaining."""
        self.account_balance += amount
        return self
    def withdraw(self, amount):
        """
        Withdraw `amount`, or charge a $5 fee when the balance cannot cover
        it (previously the fee applied only once the balance was already
        non-positive, allowing silent overdrafts); returns self for chaining.
        """
        if amount > self.account_balance:
            fee = 5
            self.account_balance -= fee
            print("Insufficient Funds: Charging a $5 fee")
        else:
            self.account_balance -= amount
        return self
    def display_account_info(self):
        """Print the account number and balance; returns self for chaining."""
        print(f"Account: {self.acct_num}, Balance: ${self.account_balance}")
        return self
    def yield_interest(self):
        """Apply one period of interest to a positive balance; returns self."""
        if self.account_balance > 0:
            self.account_balance += self.account_balance * self.int_rate
        return self
# Demo: exercise chained deposits, withdrawals and interest on two accounts.
mark = BankAccount('Mark Enders', 1234, .05, 0)
lacey = BankAccount('Lacey', 4567, .05, 0)
mark.deposit(5000).deposit(2000).deposit(4000).withdraw(3000).yield_interest().display_account_info()
lacey.deposit(10000).deposit(12000).withdraw(22000).withdraw(2000).withdraw(1000).withdraw(5000).yield_interest().display_account_info()
from funcao.funcao import formata_data_americana
from banco.bancodado import BancoDado
from cliente.cliente import Cliente
from funcao.funcao import escreve_tela, limpa_tela
from menu.opcao_menu_cliente import entra_opcao
# Interactive client-order menu: authenticate a client by code, then loop
# over insert/update/query operations until the user quits.
banco_dado = BancoDado()
#entra_opcao(banco_dado=banco_dado, opcao="3", codigo_cliente="1")
#exit()
limpa_tela()
# Prompt for the client code and require that it parses as an integer.
codigo_cliente = input("Digite seu codigo de cliente: ")
i_codigo_cliente = None
try:
    i_codigo_cliente = int(codigo_cliente)
except ValueError:
    escreve_tela("Erro ao entrar com o codigo do cliente.")
    exit(0)
# Look the client up; bail out when the code is unknown or malformed.
cliente = Cliente(banco_dado=banco_dado)
lista_cliente = cliente.dado_cliente(codigo=i_codigo_cliente)
if lista_cliente is None or (isinstance(lista_cliente, list) and not len(lista_cliente)):
    escreve_tela("Codigo do cliente inexistente.")
    exit(0)
dado_cliente = lista_cliente[0] if (isinstance(lista_cliente, list) and len(lista_cliente)) else {}
if dado_cliente is None or not isinstance(dado_cliente, dict):
    escreve_tela("Codigo do cliente invalido.")
    exit(0)
limpa_tela()
# Main menu loop; 'S' exits, anything else is dispatched to entra_opcao().
opcao_menu = str()
while True:
    escreve_tela("Cliente: {codigo} - {nome}".format(codigo=dado_cliente.get("id"), nome=dado_cliente.get("nome")))
    escreve_tela("")
    escreve_tela("Selecione a opcao:")
    escreve_tela("1 - Inserir pedido")
    escreve_tela("2 - Alterar pedido")
    escreve_tela("3 - Consultar pedido")
    escreve_tela("S - Sair")
    escreve_tela("")
    opcao_menu = input("Entre com a opcao do menu: ")
    if isinstance(opcao_menu, str) and opcao_menu == str("S"):
        break
    # NOTE(review): the raw string codigo_cliente is passed here rather than
    # the parsed i_codigo_cliente — confirm entra_opcao expects a string.
    entra_opcao(banco_dado=banco_dado, opcao=opcao_menu, codigo_cliente=codigo_cliente)
    limpa_tela()
limpa_tela()
|
#!/usr/bin/python
import re
from HTMLParser import HTMLParser
# Profile links look like /fakebook/<digits>/.
# NOTE(review): '/*' here means "zero or more '/' characters", not a path
# wildcard — r"^/fakebook/[0-9]" is probably what was intended; both forms
# happen to match the links in the sample page, so the pattern is unchanged.
regex_for_finding_links = "^/fakebook/*[0-9]"
# HTML Data to analyse
HTMLdata = """<html><head><title>Fakebook</title><style TYPE="text/css"><!--\n#pagelist li { display: inline; padding-right: 10px; }\n--></style></head><body><h1>Fakebook</h1><p><a href="/fakebook/">Home</a></p><hr/>'
<h1>Welcome to Fakebook</h1><p>Get started by browsing some random people\'s profiles!</p>
<h2 class="secret_flag">sdkkfdskfkjsdhkjfhuehifwuehfiuhweuif</h2>
<ul><li><a href="/fakebook/246869555/">Boveli Xipott</a></li>
<li><a href="/fakebook/248039637/">Xac Proxik</a></li>
<li><a href="/fakebook/248670702/">Fatuciru Lolob</a></li>
<li><a href="/fakebook/248699766/">Colson Crijik</a></li>
<li><a href="/fakebook/249014933/">Nenababonu Justiss</a></li>
<li><a href="/fakebook/249659119/">Batisihuba Stovic</a></li>
<li><a href="/fakebook/250129083/">Jalujosi Jacobi</a></li>
<li><a href="/fakebook/250139692/">Darell Kammerzell</a></li>
<li><a href="/fakebook/250573573/">Lovonufe Troxip</a></li>
<h2 class="secret_flag">sdkkfdskfkjsdhkjfhuehifwuehfiuhweuif</h2>
<li><a href="/fakebook/250649500/">Waylon Mulvahill</a></li></ul>
<h6>Fakebook is run by <a href="http://www.ccs.neu.edu/home/choffnes/">
David Choffnes</a> at \n
<a href="http://www.northeastern.edu">NEU</a>.
For questions, contact <a href="mailto:choffnes@ccs.neu.edu">David Choffnes</a></h6></body></html>\n"""
# create a subclass and override the handler methods
class LinkParser(HTMLParser):
    """Print every anchor href that looks like a profile link, i.e. one that
    matches the module-level ``regex_for_finding_links`` pattern
    (URLs of the form /fakebook/<digits>...)."""

    def handle_starttag(self, tag, attrs):
        """Inspect each start tag and act only on ``<a>`` elements."""
        if tag == 'a':
            attrs = dict(attrs)
            # Bug fix: an <a> without an href attribute used to raise
            # KeyError (and re.match(pattern, None) would raise TypeError);
            # .get() skips such anchors instead.
            href = attrs.get('href')
            if href and re.match(regex_for_finding_links, href):
                # Single-value print() behaves identically on Python 2 and 3
                # (the original used the py2-only print statement).
                print(href)
class SecretFlagParser(HTMLParser):
    """Collect the text of every ``<h2 class="secret_flag" style="color:red">``
    element into ``self.flags``."""

    def __init__(self):
        # Old-style base-class init kept for Python 2 compatibility.
        HTMLParser.__init__(self)
        self.secret_data_found = 0  # 1 while inside a matching <h2>
        self.flags = []             # every flag string found so far

    def handle_starttag(self, tag, attrs):
        """Arm the collector only when the tag is exactly the flag <h2>."""
        attr_map = dict(attrs)
        is_flag_heading = (tag == 'h2'
                           and attr_map.get('class') == "secret_flag"
                           and attr_map.get('style') == "color:red")
        self.secret_data_found = 1 if is_flag_heading else 0

    def handle_data(self, data):
        """Record text data while the collector is armed."""
        if self.secret_data_found == 1:
            self.flags.append(data)
# Instantiate the link parser and feed it the sample page; it prints every
# /fakebook/<digits> profile href as a side effect.
links_parser = LinkParser()
links_parser.feed(HTMLdata)
# Instantiate the secret-flag parser and feed it a second sample page; only
# the first <h2> below carries BOTH class="secret_flag" and style="color:red",
# so only its text should be collected.
secret_flag_parser = SecretFlagParser()
secret_flag_parser.feed('<html><head><title>Fakebook</title><style TYPE="text/css"><!--\n#pagelist li { display: inline; padding-right: 10px; }\n--></style></head><body><h1>Fakebook</h1><p><a href="/fakebook/">Home</a></p><hr/>'
                        '<h1>Welcome to Fakebook</h1><p>Get started by browsing some random people\'s profiles!</p>'
                        '<h2 class="secret_flag" style="color:red">kooroshmodibhakt</h2>'
                        '<h2 style="color:red">kooroshmodibhakt</h2>'
                        '<h2 class="kachda">kooroshnamobhakt</h2>'
                        '<ul><li><a href="/fakebook/246869555/">Boveli Xipott</a></li>'
                        '<li><a href="/fakebook/248039637/">Xac Proxik</a></li>'
                        '<li><a href="/fakebook/248670702/">Fatuciru Lolob</a></li>'
                        '<li><a href="/fakebook/248699766/">Colson Crijik</a></li>'
                        '<li><a href="/fakebook/249014933/">Nenababonu Justiss</a></li>'
                        '<li><a href="/fakebook/249659119/">Batisihuba Stovic</a></li>'
                        '<li><a href="/fakebook/250129083/">Jalujosi Jacobi</a></li>'
                        '<li><a href="/fakebook/250139692/">Darell Kammerzell</a></li>'
                        '<li><a href="/fakebook/250573573/">Lovonufe Troxip</a></li>'
                        '<h2 class="secret_flag">Chindichitranna</h2>'
                        '<li><a href="/fakebook/250649500/">Waylon Mulvahill</a></li></ul>'
                        '<h6>Fakebook is run by <a href="http://www.ccs.neu.edu/home/choffnes/">'
                        'David Choffnes</a> at \n'
                        '<a href="http://www.northeastern.edu">NEU</a>.'
                        'For questions, contact <a href="mailto:choffnes@ccs.neu.edu">David Choffnes</a></h6></body></html>')
# Python 2 print statement — this script targets Python 2 (see the
# `from HTMLParser import HTMLParser` import above).
print secret_flag_parser.flags
#
# @lc app=leetcode id=9 lang=python3
#
# [9] Palindrome Number
#
# @lc code=start
class Solution:
    """LeetCode 9 — Palindrome Number."""

    def isPalindrome(self, x: int) -> bool:
        """Return True iff the decimal digits of x read the same reversed.

        Negative numbers are never palindromes (the sign has no mirror image),
        so they are rejected up front.
        """
        if x < 0:
            return False
        reversed_digits = 0
        remaining = x
        while remaining > 0:
            remaining, digit = divmod(remaining, 10)
            reversed_digits = reversed_digits * 10 + digit
        return reversed_digits == x
# @lc code=end
|
""" The test module for the babysitter class """
from babysitter import Sitter
def test_event_occurred():
    """A freshly created sitter with no hours booked owes nothing."""
    sitter = Sitter()
    assert sitter.babysit() == "Total amount owed: $0.00"
def test_event_occurred_with_hours():
    """One hour (17:00-18:00) is billed at the standard rate.

    Amount updated after custom rates were implemented.
    """
    sitter = Sitter("1700", "1800")
    assert sitter.babysit() == "Total amount owed: $15.00"
def test_event_started_before_time_allowed():
    """Starting before 5 PM yields an error."""
    sitter = Sitter("1600", "1700")
    assert sitter.babysit() == "ERROR: Start time out of range"
def test_event_started_after_midnight():
    """Jobs may start after midnight as long as they end before 4 AM.

    Amount updated after variable rates were entered.
    """
    sitter = Sitter("0000", "0100")
    assert sitter.babysit() == "Total amount owed: $20.00"
def test_end_time_earlier_than_start_time():
    """An event cannot be completed before it has started."""
    sitter = Sitter("1900", "1700")
    assert sitter.babysit() == "ERROR: Cannot end before start"
def test_input_time_must_be_valid_hour():
    """Time inputs whose hour exceeds 23 are rejected."""
    sitter = Sitter("2400", "2600")
    assert sitter.babysit() == "ERROR: Please enter a valid time"
def test_start_time_before_midnight_end_time_after_midnight():
    """A shift spanning midnight is still paid in full.

    Amount updated after variable rates were entered.
    """
    sitter = Sitter("2200", "0300")
    assert sitter.babysit() == "Total amount owed: $95.00"
def test_end_time_is_within_valid_range():
    """The end time must fall in the same valid range as the start time."""
    sitter = Sitter("0300", "0500")
    assert sitter.babysit() == "ERROR: End time out of range"
def test_time_rounding_up_when_past_half_the_hour():
    """Fulfills 'gets paid for full hours (no fractional hours)'.

    Amount updated after variable rates were entered.
    """
    sitter = Sitter("2200", "0235")
    assert sitter.babysit() == "Total amount owed: $95.00"
def test_time_rounding_up_to_four_still_works():
    """An end time that rounds up to the 4 AM boundary must not fail.

    Amount updated after variable rates were entered.
    """
    sitter = Sitter("0223", "0345")
    assert sitter.babysit() == "Total amount owed: $40.00"
import unittest
from arepl import *
import typing as t
T = t.TypeVar('T')
def await_pure(awaitable: t.Awaitable[T]) -> T:
    """Drive *awaitable* synchronously, requiring it to finish immediately.

    A "pure" awaitable never actually yields; stepping its iterator once must
    therefore raise StopIteration carrying the result. If the first step
    yields instead, the awaitable performed real suspension and is rejected.
    """
    stepper = awaitable.__await__()
    try:
        next(stepper)
    except StopIteration as stop:
        return stop.value
    raise Exception("this awaitable actually is impure! it yields!")
class TestPure(unittest.TestCase):
    """Exercise PureREPL through the synchronous await_pure driver."""

    def test_add(self) -> None:
        async def scenario() -> None:
            repl = PureREPL({})

            async def eval_expr(line: str) -> t.Any:
                # Feed one source line; only expression results are expected.
                outcome = await repl.add_line(line + '\n')
                if isinstance(outcome, ExpressionResult):
                    return outcome.value
                raise Exception("unexpected", outcome)

            self.assertEqual(await eval_expr('1'), 1)
            self.assertEqual(await eval_expr('1+1'), 2)
            # Statements produce no expression result; just run them.
            await repl.add_line('foo = 1\n')
            self.assertEqual(await eval_expr('foo*4'), 4)

        await_pure(scenario())
|
from requests_html import HTMLSession
import re
class HtmlScraper:
    """Small wrapper around requests_html for fetching pages, harvesting and
    filtering their links, and saving content to disk."""

    def __init__(self):
        self.session = HTMLSession()
        self.success_code = 200  # the only HTTP status accepted as success

    def _get_page(self, url):
        """Fetch *url* and return the response; raise on any non-200 status.

        Shared by request_html and extract_links, which previously duplicated
        this request/status-check logic.
        """
        page = self.session.get(url)
        if page.status_code != self.success_code:
            raise Exception("HTML request failed: status code {}".format(page.status_code))
        return page

    def request_html(self, url):
        """Return the raw HTML source of *url* as a string."""
        return self._get_page(url).html.html

    def extract_links(self, url):
        """Return all links found on *url* as a list."""
        return list(self._get_page(url).html.links)

    def filter_links(self, links, pattern=None):
        """Keep only links whose start matches *pattern* (a regex).

        A pattern of None returns the input unchanged. The pattern is compiled
        once, outside the loop, instead of once per link.
        """
        if pattern is None:
            return links
        compiled = re.compile(pattern)
        return [link for link in links if compiled.match(link) is not None]

    def save(self, content, path):
        """Write *content* to *path*.

        Uses a context manager so the file handle is closed even if the
        write fails (the original leaked the handle on error).
        """
        with open(path, "w") as fh:
            fh.write(content)
|
import numpy as np
import matplotlib.pyplot as plt
# Demo: plot y = x**2 (styled dashed line with circle markers) and its
# mirror x = y**2 (plain red) on the same axes.
numpyDizisi = np.linspace(0,10,20)  # 20 evenly spaced x values in [0, 10]
numpyDizisi2=numpyDizisi**2
(benimFigur,benimEksen)=plt.subplots()
benimEksen.plot(numpyDizisi,numpyDizisi2,color="#3A95A8",alpha=0.9,linewidth=1.0,linestyle="--",marker="o",markersize=4,markerfacecolor="r")
benimEksen.plot(numpyDizisi2,numpyDizisi,"r")
plt.show()  # blocks until the window is closed
import pickle
from pprint import pprint
from typing import Dict
from src.utils.helper import Helper
def get_commands_pickle() -> Dict:
    """Load and return the pickled commands dictionary from the project root."""
    pickle_path = f'{Helper.get_project_root()}/commands_dict.pickle'
    with open(pickle_path, 'rb') as fh:
        return pickle.load(fh)
def main():
    """Pretty-print the commands pickle for manual inspection."""
    pprint(get_commands_pickle())

if __name__ == '__main__':
    main()
|
# ###############################################################################################
#################################### ANALIZADOR SINTACTICO #################################
######################################################################################################
import scanner as lex
import TAS as tas
import Arbol as ar
import Pila as p
import TabladeSimbolos as ts
Terminal = lex.Terminal
Variables = lex.Variables
class AnalizadorSintactico():
    """Table-driven (predictive) parser.

    Consumes tokens from the lexical analyzer and builds a parse tree using
    the TAS (syntax table). Two stacks are kept in lock-step: ``pila`` holds
    grammar symbols still to be matched/expanded, ``pila2`` holds the tree
    node each of those symbols hangs from.
    """
    def __init__(self, archivoAEjecutar):
        # archivoAEjecutar: path of the source file to parse.
        self.tablaSimbolos = ts.crearTS() # create the symbol table
        self.arbol = ar.Nodo(lex.S) # create the tree with root S
        self.pila = p.Pila() # stack for the symbols of a TAS table entry
        self.pila2= p.Pila() # parallel stack: tree node holding each symbol of the first stack
        self.pila.push(lex.peso) # push the end-of-input marker
        self.pila.push(lex.S) # push the start variable
        self.pila2.push(self.arbol) # push the root
        self.nodoActual = self.arbol # the initial node is the root
        self.archivo = open(archivoAEjecutar)
        self.Lexico = lex.Lexico(self.archivo) # create the lexical analyzer
    def analizarSintactico(self):
        """Run the parse loop and return the parse tree.

        Prints "Error sintactico" and stops when the TAS has no entry for the
        current (variable, lookahead) pair.
        """
        a = self.Lexico.siguienteComponenteLexico(self.tablaSimbolos) # a is a pair [lexical component, lexeme]
        resultado = 0 # 0 means the parser must keep analyzing
        while resultado == 0:
            X = self.pila.popp() # pop a grammar symbol
            if X != lex.peso :
                self.nodoActual = self.pila2.popp() # pop the matching tree node and make it current
            if X == a[0] == lex.peso:
                resultado = 1 # finished successfully
            elif X in Terminal: # X is a terminal
                if X == a[0]: # ...and it matches the token just read
                    self.nodoActual.agregarHijo(ar.Nodo(a[1])) # attach the lexeme as a child of the current node
                    a = self.Lexico.siguienteComponenteLexico(self.tablaSimbolos) # advance to the next token
                else:
                    resultado = -1 # error
            elif X in Variables: # X is a nonterminal
                v = tas.tas(X[0], a[0]) # TAS entry for (variable, lookahead)
                if tas.tVecTASVacio(v): # empty entry: no production applies
                    print("Error sintactico")
                    resultado = -1 # error
                else:
                    i = 0
                    auxPila = p.Pila() # two auxiliary stacks so the production's symbols
                    auxPila2 = p.Pila() # can be re-pushed in reverse (leftmost ends on top)
                    while i < len(v) and v[i] != []: # while in range and the entry is not exhausted
                        self.nodoActual.agregarHijo(ar.Nodo(v[i])) # add the symbol as a child in the tree
                        nodo = self.nodoActual.getHijos()[-1] # grab the node just created
                        auxPila.push(v[i]) # stage the symbol on the first auxiliary stack
                        auxPila2.push(nodo) # stage its node on the second auxiliary stack
                        i += 1 # bump the counter
                    while i >= 0: # unwind, like a for-loop from i down to 0
                        aux = auxPila.popp() # pop the staged symbol/node pairs
                        aux2 = auxPila2.popp()
                        if aux != None and aux != lex.epsilon : # epsilon is never pushed
                            self.pila.push(aux) # push back so they end up in inverted (correct) order
                        if aux2 != None and aux != lex.epsilon:
                            self.pila2.push(aux2)
                        i -= 1 # decrement the counter
        return self.arbol # hand back the parse tree
import os
import sys
import subprocess
from multiprocessing import Process
import time
import os
def launchTor(n):
    """Launch tor instance *n* using its generated config /etc/tor/torrc.<n>.

    Runs as the target of a multiprocessing.Process; blocks until the tor
    process exits.
    """
    # Bug fix: the body previously read the module-level loop variable ``i``
    # instead of the ``n`` parameter. That only worked by accident on
    # fork-based platforms (the global happened to hold the right value at
    # fork time) and raises NameError under the spawn start method.
    print('TOR %d' % n)
    command = 'tor -f /etc/tor/torrc.' + str(n)
    subprocess.check_call(command.split())
if __name__ == '__main__':
    process_count = int(input("How Many Process?"))
    # Persist the requested instance count next to this script so sibling
    # tools can read it.
    dirname = os.path.dirname(__file__)
    filename = os.path.join(dirname, 'shared_preferences')
    shared = open(filename, 'w')
    shared.write(str(process_count))
    shared.close()
    # Root is required to write under /etc/tor; if we are not root, re-exec
    # ourselves via sudo and exit this (unprivileged) copy.
    if os.geteuid() == 0:
        print("We're root!")
    else:
        print("We're not root.")
        subprocess.call(['sudo', 'python3', *sys.argv])
        sys.exit()
    # Write one torrc per instance with distinct SOCKS/control ports
    # (9052/9053, 9054/9055, ...) and a distinct data directory.
    # NOTE(review): the "90" + str(52 + i*2) construction produces invalid
    # 5-digit ports once i >= 24 — confirm the intended instance limit.
    for i in range(process_count):
        f = open("/etc/tor/torrc.%s" % str(i), "w")
        f.write("SOCKSPort 90%s\n" % str(52 + i * 2))
        f.write("ControlPort 90%s\n" % str(53 + i * 2))
        f.write("DataDirectory /var/lib/tor%s\n" % str(i))
        f.write("HashedControlPassword 16:709DFABE7ED51F94606AE3C626FAEC7FD9E4ABCD2B999239FB5C29D200\n")
        f.write("CookieAuthentication 1\n")
        f.close()
    print('Config files created.')
    time.sleep(1)
    print('Executing tors')
    # One child process per tor instance.
    torProcess = []
    for i in range(process_count):
        torProcess.append(Process(target=launchTor, args=(i, )))
        torProcess[i].start()
    try:
        # Idle until Ctrl-C, then wait for every tor child to finish.
        while True:
            pass
    except KeyboardInterrupt:
        for i in range(0, process_count):
            torProcess[i].join()
from flask import Flask, request, render_template
from requests import post, exceptions
from configparser import ConfigParser
import os
app = Flask(__name__)
# If a config file location is set in the WALDUR_CONFIG environment variable,
# read the backend address (url + port) from that file; otherwise fall back
# to the local development default.
if 'WALDUR_CONFIG' in os.environ:
    config_path = os.environ['WALDUR_CONFIG']
    config = ConfigParser()
    config.read(config_path)
    backend_url = config['backend']['url'] + ':' + config['backend']['port']
else:
    backend_url = 'http://localhost:4567'
@app.route("/auth/<user_id>", methods=['GET', 'POST'])
def auth(user_id):
    """Render the token form (GET) or forward the submitted token to the
    Waldur Chatbot backend (POST) and report the outcome."""
    if request.method == 'GET':
        return render_template('index.html', user_id=user_id)
    if request.method == 'POST':
        try:
            # Send the token to Waldur Chatbot
            response = post(f"{backend_url}/auth/{user_id}",
                            data={'token': request.form['token']})
            message = "Authenticated!" if response.ok else "Couldn't authenticate."
        except exceptions.ConnectionError:
            message = "Couldn't connect to Waldur Chatbot."
        return render_template('sent.html', message=message)
if __name__ == '__main__':
    # Development server only; binds the Flask default host on port 1234.
    app.run(port=1234)
|
#code
#from heapq import merge
class VirArr(object):
    """Present two arrays as one contiguous virtual array.

    Indices below len(arr1) address the first array; the remainder spill
    into the second. Used by the in-place gap merge below.
    """

    def __init__(self, arr1, arr2):
        self.arr1 = arr1
        self.arr2 = arr2

    def __getitem__(self, t):
        # Bug fix: the original indexed with the *module-level* global
        # ``arr1`` (len(arr1)) instead of self.arr1, which only worked while
        # a global of that name happened to alias the same list.
        if t < len(self.arr1):
            return self.arr1[t]
        return self.arr2[t - len(self.arr1)]

    def __setitem__(self, t, val):
        # Same global-vs-attribute fix as __getitem__.
        if t < len(self.arr1):
            self.arr1[t] = val
        else:
            self.arr2[t - len(self.arr1)] = val

    def print(self):
        """Print both halves as one space-separated line."""
        print(" ".join([str(i) for i in self.arr1 + self.arr2]))
# Driver: for each test case, read two arrays and merge them in place across
# both halves using gap (shell-style) comparison passes over the virtual array.
t = int(input())  # number of test cases
for _ in range(t):
    # m, n: declared lengths of the two arrays.
    m, n = [int(j) for j in str(input()).split(" ")]
    arr1 = [int(j) for j in filter(lambda x: x.isdigit(), input().split(" "))]
    arr2 = [int(j) for j in filter(lambda x: x.isdigit(), input().split(" "))]
    #print(*list(merge(arr1,arr2)))
    # Initial gap is ceil((m+n)/2); each pass halves it, rounding up.
    k = (m+n) // 2 + ((m+n) %2)
    virarr = VirArr(arr1,arr2)
    while k >= 1:
        # One comparison/swap pass with the current gap k.
        for i in range(m+n-k):
            if virarr[i] > virarr[i+k]:
                virarr[i], virarr[i+k] = virarr[i+k], virarr[i]
        if k == 1:
            k = 0  # terminate after the final gap-1 pass
        else:
            k = k // 2 + k%2
    virarr.print()
|
import asyncio
import threading
from pytg.exceptions import NoResponse, IllegalResponseException, ConnectionError
class PGThread(threading.Thread):
    """Thread whose run() silently swallows the errors expected during a
    pytg/asyncio shutdown (timeouts, interrupts, pytg response errors) so a
    dying worker does not spew tracebacks."""

    def run(self):
        try:
            if self._target:
                self._target(*self._args, **self._kwargs)
        except (asyncio.TimeoutError, GeneratorExit, KeyboardInterrupt,
                TypeError, RuntimeError, NoResponse, IllegalResponseException,
                ConnectionError):
            # Deliberate best-effort: these are treated as normal teardown.
            pass
        finally:
            # Mirrors threading.Thread.run(): drop the references so the
            # target and its arguments can be garbage-collected promptly.
            del self._target, self._args, self._kwargs
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
this file contains pre-training and testing the mention proposal model
"""
import os
import math
import logging
import numpy as np
import tensorflow as tf
from utils import util
from utils.radam import RAdam
from data_utils.input_builder import file_based_input_fn_builder
# Dummy flag so the script also runs under notebooks/ipykernel, which pass an
# extra -f <kernel-file> argument that would otherwise be rejected.
tf.app.flags.DEFINE_string('f', '', 'kernel')
flags = tf.app.flags
# Run modes and output locations.
flags.DEFINE_string("output_dir", "data", "The output directory of the model training.")
flags.DEFINE_string("eval_dir", "/home/lixiaoya/mention_proposal_output_dir", "The output directory of the saved mention proposal models.")
flags.DEFINE_bool("do_train", True, "Whether to train a model.")
flags.DEFINE_bool("do_eval", False, "Whether to test a model.")
flags.DEFINE_bool("do_predict", False, "Whether to test a model.")
flags.DEFINE_bool("use_tpu", False, "Whether to use TPU or GPU/CPU.")
flags.DEFINE_bool("concat_only", False, "Whether to use start/end embedding for calculating mention scores.")
flags.DEFINE_integer("iterations_per_loop", 1000, "How many steps to make in each estimator call.")
flags.DEFINE_integer("keep_checkpoint_max", 30, "How many checkpoint models keep at most.")
flags.DEFINE_string("config_filename", "experiments.conf", "the input config file name.")
flags.DEFINE_string("config_params", "train_spanbert_base", "specify the hyper-parameters in the config file.")
flags.DEFINE_string("logfile_path", "/home/lixiaoya/spanbert_large_mention_proposal.log", "the path to the exported log file.")
# TPU / cluster settings (only consulted when use_tpu is True).
flags.DEFINE_string("tpu_name", None, "The Cloud TPU to use for training. This should be either the name "
                    "used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 url.")
flags.DEFINE_string("tpu_zone", None, "[Optional] GCE zone where the Cloud TPU is located in. If not "
                    "specified, we will attempt to automatically detect the GCE project from metadata.")
flags.DEFINE_string("gcp_project", None, "[Optional] Project name for the Cloud TPU-enabled project. If not "
                    "specified, we will attempt to automatically detect the GCE project from metadata.")
flags.DEFINE_string("master", None, "[Optional] TensorFlow master URL.")
flags.DEFINE_integer("num_tpu_cores", 1, "Only used if `use_tpu` is True. Total number of TPU cores to use.")
FLAGS = tf.flags.FLAGS
# Root logging goes to the configured file.
# NOTE: ``format`` shadows the builtin of the same name; kept as-is.
format = '%(asctime)s - %(levelname)s - %(name)s - %(message)s'
logging.basicConfig(format=format, filename=FLAGS.logfile_path, level=logging.INFO)
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def model_fn_builder(config):
    """Build the ``model_fn`` closure for TPUEstimator.

    The returned function covers all three estimator modes
    (TRAIN / EVAL / PREDICT) for the mention-proposal model.
    """
    def model_fn(features, labels, mode, params): # pylint: disable=unused-argument
        """The `model_fn` for TPUEstimator."""
        # Re-read the config inside the closure so the function is
        # self-contained when shipped to TPU workers.
        config = util.initialize_from_env(use_tpu=FLAGS.use_tpu, config_params=FLAGS.config_params, config_file=FLAGS.config_filename)
        # Unpack the per-document features produced by the input builder.
        input_ids = features["flattened_input_ids"]
        input_mask = features["flattened_input_mask"]
        text_len = features["text_len"]
        speaker_ids = features["speaker_ids"]
        genre = features["genre"]
        gold_starts = features["span_starts"]
        gold_ends = features["span_ends"]
        cluster_ids = features["cluster_ids"]
        sentence_map = features["sentence_map"]
        span_mention = features["span_mention"]
        is_training = (mode == tf.estimator.ModeKeys.TRAIN)
        model = util.get_model(config, model_sign="mention_proposal")
        if FLAGS.use_tpu:
            def tpu_scaffold():
                return tf.train.Scaffold()
            scaffold_fn = tpu_scaffold
        else:
            scaffold_fn = None
        if mode == tf.estimator.ModeKeys.TRAIN:
            tf.logging.info("****************************** tf.estimator.ModeKeys.TRAIN ******************************")
            tf.logging.info("********* Features *********")
            for name in sorted(features.keys()):
                tf.logging.info("  name = %s, shape = %s" % (name, features[name].shape))
            total_loss, start_scores, end_scores, span_scores = model.get_mention_proposal_and_loss(input_ids, input_mask, \
                text_len, speaker_ids, genre, is_training, gold_starts,
                gold_ends, cluster_ids, sentence_map, span_mention=span_mention)
            if config["tpu"]:
                # TPU training: Adam wrapped so gradients are aggregated
                # across shards.
                optimizer = tf.train.AdamOptimizer(learning_rate=config['learning_rate'], beta1=0.9, beta2=0.999, epsilon=1e-08)
                optimizer = tf.contrib.tpu.CrossShardOptimizer(optimizer)
                train_op = optimizer.minimize(total_loss, tf.train.get_global_step())
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold_fn=scaffold_fn)
            else:
                # GPU/CPU training: RAdam, plus a per-step loss logging hook.
                optimizer = RAdam(learning_rate=config['learning_rate'], epsilon=1e-8, beta1=0.9, beta2=0.999)
                train_op = optimizer.minimize(total_loss, tf.train.get_global_step())
                train_logging_hook = tf.train.LoggingTensorHook({"loss": total_loss}, every_n_iter=1)
                output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                    mode=mode,
                    loss=total_loss,
                    train_op=train_op,
                    scaffold_fn=scaffold_fn,
                    training_hooks=[train_logging_hook])
        elif mode == tf.estimator.ModeKeys.EVAL:
            tf.logging.info("****************************** tf.estimator.ModeKeys.EVAL ******************************")
            total_loss, start_scores, end_scores, span_scores = model.get_mention_proposal_and_loss(input_ids, input_mask, \
                text_len, speaker_ids, genre, is_training, gold_starts,
                gold_ends, cluster_ids, sentence_map, span_mention)
            def metric_fn(start_scores, end_scores, span_scores, gold_span_label):
                # Threshold the scores into boolean span predictions, then
                # report precision/recall against the gold span labels.
                if config["mention_proposal_only_concate"]:
                    # Concatenation-only variant: span scores alone decide.
                    pred_span_label = tf.cast(tf.reshape(tf.math.greater_equal(span_scores, config["threshold"]), [-1]), tf.bool)
                else:
                    # Combine start/end/span scores: broadcast the per-token
                    # start and end scores to a (len, len) grid and average.
                    start_scores = tf.reshape(start_scores, [-1, config["max_segment_len"]])
                    end_scores = tf.reshape(end_scores, [-1, config["max_segment_len"]])
                    start_scores = tf.tile(tf.expand_dims(start_scores, 2), [1, 1, config["max_segment_len"]])
                    end_scores = tf.tile(tf.expand_dims(end_scores, 2), [1, 1, config["max_segment_len"]])
                    sce_span_scores = (start_scores + end_scores + span_scores)/ 3
                    pred_span_label = tf.cast(tf.reshape(tf.math.greater_equal(sce_span_scores, config["threshold"]), [-1]), tf.bool)
                gold_span_label = tf.cast(tf.reshape(gold_span_label, [-1]), tf.bool)
                return {"precision": tf.compat.v1.metrics.precision(gold_span_label, pred_span_label),
                        "recall": tf.compat.v1.metrics.recall(gold_span_label, pred_span_label)}
            eval_metrics = (metric_fn, [start_scores, end_scores, span_scores, span_mention])
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=tf.estimator.ModeKeys.EVAL,
                loss=total_loss,
                eval_metrics=eval_metrics,
                scaffold_fn=scaffold_fn)
        elif mode == tf.estimator.ModeKeys.PREDICT:
            tf.logging.info("****************************** tf.estimator.ModeKeys.PREDICT ******************************")
            total_loss, start_scores, end_scores, span_scores = model.get_mention_proposal_and_loss(input_ids, input_mask, \
                text_len, speaker_ids, genre, is_training, gold_starts,
                gold_ends, cluster_ids, sentence_map, span_mention)
            # Raw scores plus gold labels are emitted so prediction-time
            # thresholding happens outside the graph
            # (see mention_proposal_prediction).
            predictions = {
                "total_loss": total_loss,
                "start_scores": start_scores,
                "start_gold": gold_starts,
                "end_gold": gold_ends,
                "end_scores": end_scores,
                "span_scores": span_scores,
                "span_gold": span_mention
            }
            output_spec = tf.contrib.tpu.TPUEstimatorSpec(
                mode=tf.estimator.ModeKeys.PREDICT,
                predictions=predictions,
                scaffold_fn=scaffold_fn)
        else:
            raise ValueError("Please check the the mode ! ")
        return output_spec
    return model_fn
def mention_proposal_prediction(config, current_doc_result, concat_only=True):
    """Convert one document's raw mention-proposal scores to boolean labels.

    Args:
        config: experiment config dict; only "max_segment_len" is read here.
        current_doc_result: dict emitted by the estimator's PREDICT mode with
            keys "start_scores", "end_scores", "span_scores", "span_gold"
            (see model_fn_builder's PREDICT branch).
        concat_only: if True, threshold the span (concatenation) scores alone;
            otherwise average the broadcast start/end scores with the span
            scores, mirroring metric_fn in model_fn_builder.

    Returns:
        (pred_span_label, gold_span_label): flat numpy arrays — boolean
        predictions and the gold labels in the same order.
    """
    span_scores = current_doc_result["span_scores"]
    span_gold = current_doc_result["span_gold"]
    if concat_only:
        scores = span_scores
    else:
        # Bug fix: the original line ended with a stray trailing comma, which
        # wrapped start_scores in a 1-tuple and corrupted every downstream
        # shape (expand_dims/tile/reshape operated on an extra axis).
        start_scores = current_doc_result["start_scores"]
        end_scores = current_doc_result["end_scores"]
        # Broadcast per-token scores to a (len, len) grid, as in metric_fn.
        start_scores = np.tile(np.expand_dims(start_scores, axis=2), (1, 1, config["max_segment_len"]))
        end_scores = np.tile(np.expand_dims(end_scores, axis=2), (1, 1, config["max_segment_len"]))
        start_scores = np.reshape(start_scores, [-1, config["max_segment_len"], config["max_segment_len"]])
        end_scores = np.reshape(end_scores, [-1, config["max_segment_len"], config["max_segment_len"]])
        scores = (start_scores + end_scores + span_scores) / 3
    # NOTE(review): the 0.5 cutoff here differs from config["threshold"] used
    # by metric_fn during EVAL — confirm whether they should match.
    pred_span_label = np.reshape(scores >= 0.5, [-1])
    gold_span_label = np.reshape(span_gold, [-1])
    return pred_span_label, gold_span_label
def main(_):
    """Entry point: train, checkpoint-sweep evaluate, and/or predict,
    depending on the do_train / do_eval / do_predict flags."""
    config = util.initialize_from_env(use_tpu=FLAGS.use_tpu, config_params=FLAGS.config_params, config_file=FLAGS.config_filename, print_info=True)
    tf.logging.set_verbosity(tf.logging.INFO)
    num_train_steps = config["num_docs"] * config["num_epochs"]
    # Keep enough checkpoints for the full eval sweep below. (``chceckpoint``
    # is a typo, but it is used consistently, so it is kept as-is.)
    keep_chceckpoint_max = max(math.ceil(num_train_steps / config["save_checkpoints_steps"]), FLAGS.keep_checkpoint_max)
    if not FLAGS.do_train and not FLAGS.do_eval and not FLAGS.do_predict:
        raise ValueError("At least one of `do_train`, `do_eval` or `do_predict' must be True.")
    tf.gfile.MakeDirs(FLAGS.output_dir)
    # Optional TPU cluster bring-up; stays None for GPU/CPU runs.
    tpu_cluster_resolver = None
    if FLAGS.use_tpu and FLAGS.tpu_name:
        tpu_cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
            FLAGS.tpu_name, zone=FLAGS.tpu_zone, project=FLAGS.gcp_project)
        tf.config.experimental_connect_to_cluster(tpu_cluster_resolver)
        tf.tpu.experimental.initialize_tpu_system(tpu_cluster_resolver)
    is_per_host = tf.contrib.tpu.InputPipelineConfig.PER_HOST_V2
    run_config = tf.contrib.tpu.RunConfig(
        cluster=tpu_cluster_resolver,
        master=FLAGS.master,
        evaluation_master=FLAGS.master,
        model_dir=FLAGS.output_dir,
        keep_checkpoint_max = keep_chceckpoint_max,
        save_checkpoints_steps=config["save_checkpoints_steps"],
        session_config=tf.ConfigProto(allow_soft_placement=True, log_device_placement=True),
        tpu_config=tf.contrib.tpu.TPUConfig(
            iterations_per_loop=FLAGS.iterations_per_loop,
            num_shards=FLAGS.num_tpu_cores,
            per_host_input_for_training=is_per_host))
    model_fn = model_fn_builder(config)
    # Warm-start only the BERT encoder variables from the init checkpoint.
    estimator = tf.contrib.tpu.TPUEstimator(
        use_tpu=FLAGS.use_tpu,
        eval_on_tpu=FLAGS.use_tpu,
        warm_start_from=tf.estimator.WarmStartSettings(config["init_checkpoint"],
                                                       vars_to_warm_start="bert*"),
        model_fn=model_fn,
        config=run_config,
        train_batch_size=1,
        eval_batch_size=1,
        predict_batch_size=1)
    seq_length = config["max_segment_len"] * config["max_training_sentences"]
    if FLAGS.do_train:
        estimator.train(input_fn=file_based_input_fn_builder(config["train_path"], seq_length, config,
                                                             is_training=True, drop_remainder=True), max_steps=num_train_steps)
    if FLAGS.do_eval:
        # Sweep every saved checkpoint: track the best dev F1 and the test
        # score at that same checkpoint.
        best_dev_f1, best_dev_prec, best_dev_rec, test_f1_when_dev_best, test_prec_when_dev_best, test_rec_when_dev_best = 0, 0, 0, 0, 0, 0
        best_ckpt_path = ""
        checkpoints_iterator = [os.path.join(FLAGS.eval_dir, "model.ckpt-{}".format(str(int(ckpt_idx)))) for ckpt_idx in range(0, num_train_steps, config["save_checkpoints_steps"])]
        # [1:] skips the untrained step-0 checkpoint.
        for checkpoint_path in checkpoints_iterator[1:]:
            eval_dev_result = estimator.evaluate(input_fn=file_based_input_fn_builder(config["dev_path"], seq_length, config,is_training=False, drop_remainder=False),
                                                 steps=698, checkpoint_path=checkpoint_path)
            # F1 with a small epsilon to avoid division by zero.
            dev_f1 = 2*eval_dev_result["precision"] * eval_dev_result["recall"] / (eval_dev_result["precision"] + eval_dev_result["recall"]+1e-10)
            tf.logging.info("***** Current ckpt path is ***** : {}".format(checkpoint_path))
            tf.logging.info("***** EVAL ON DEV SET *****")
            tf.logging.info("***** [DEV EVAL] ***** : precision: {:.4f}, recall: {:.4f}, f1: {:.4f}".format(eval_dev_result["precision"], eval_dev_result["recall"], dev_f1))
            if dev_f1 > best_dev_f1:
                # New dev best: record it and evaluate the same checkpoint
                # on the test set.
                best_dev_f1, best_dev_prec, best_dev_rec = dev_f1, eval_dev_result["precision"], eval_dev_result["recall"]
                best_ckpt_path = checkpoint_path
                eval_test_result = estimator.evaluate(input_fn=file_based_input_fn_builder(config["test_path"], seq_length, config,is_training=False, drop_remainder=False),steps=698, checkpoint_path=checkpoint_path)
                test_f1 = 2*eval_test_result["precision"] * eval_test_result["recall"] / (eval_test_result["precision"] + eval_test_result["recall"]+1e-10)
                test_f1_when_dev_best, test_prec_when_dev_best, test_rec_when_dev_best = test_f1, eval_test_result["precision"], eval_test_result["recall"]
                tf.logging.info("***** EVAL ON TEST SET *****")
                tf.logging.info("***** [TEST EVAL] ***** : precision: {:.4f}, recall: {:.4f}, f1: {:.4f}".format(eval_test_result["precision"], eval_test_result["recall"], test_f1))
        tf.logging.info("*"*20)
        tf.logging.info("- @@@@@ the path to the BEST DEV result is : {}".format(best_ckpt_path))
        tf.logging.info("- @@@@@ BEST DEV F1 : {:.4f}, Precision : {:.4f}, Recall : {:.4f},".format(best_dev_f1, best_dev_prec, best_dev_rec))
        tf.logging.info("- @@@@@ TEST when DEV best F1 : {:.4f}, Precision : {:.4f}, Recall : {:.4f},".format(test_f1_when_dev_best, test_prec_when_dev_best, test_rec_when_dev_best))
        tf.logging.info("- @@@@@ mention_proposal_only_concate {}".format(config["mention_proposal_only_concate"]))
    if FLAGS.do_predict:
        # Accumulate micro-averaged precision/recall/F1 over all documents.
        tp, fp, fn = 0, 0, 0
        epsilon = 1e-10
        for doc_output in estimator.predict(file_based_input_fn_builder(config["test_path"], seq_length, config,
                                                                        is_training=False, drop_remainder=False), checkpoint_path=config["eval_checkpoint"],
                                            yield_single_examples=False):
            # iterate over each doc for evaluation
            pred_span_label, gold_span_label = mention_proposal_prediction(config, doc_output,concat_only=FLAGS.concat_only)
            tem_tp = np.logical_and(pred_span_label, gold_span_label).sum()
            tem_fp = np.logical_and(pred_span_label, np.logical_not(gold_span_label)).sum()
            tem_fn = np.logical_and(np.logical_not(pred_span_label), gold_span_label).sum()
            tp += tem_tp
            fp += tem_fp
            fn += tem_fn
        p = tp / (tp+fp+epsilon)
        r = tp / (tp+fn+epsilon)
        f = 2*p*r/(p+r+epsilon)
        tf.logging.info("Average precision: {:.4f}, Average recall: {:.4f}, Average F1 {:.4f}".format(p, r, f))
if __name__ == '__main__':
    # TF1 app runner: parses FLAGS, then dispatches to main(argv).
    tf.app.run()
|
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Ansible module documentation stubs — intentionally empty placeholders for
# the DOCUMENTATION / EXAMPLES / RETURN blocks Ansible tooling expects.
DOCUMENTATION = '''
'''
EXAMPLES = '''
'''
RETURN = '''
'''
|
import numpy as np
import dfa
import matplotlib.pyplot as plt
import scipy.signal as ss
np.random.seed(42)
print("1. Test")
# NOTE(review): the seed is set again right below; the duplicate is harmless
# (nothing consumes randomness in between) but one call would suffice.
np.random.seed(42)
X = np.random.randn(1000)
# Amplitude envelope of the white noise via the analytic (Hilbert) signal.
X = np.abs(ss.hilbert(X))
#X = np.ones(1000)
#np.array([[1.,1.], [2., 1.], [3., 1.2], [4., 1.], [5., 0.8], [6., 1.]])
# Run the detrended fluctuation analysis; the (5, 9, 0.25) arguments
# presumably select the window-scale sweep — confirm against dfa.play.
scales, fluct_to_array, coeff_to_array= dfa.play(X,5,9,0.25)
print("Fluctuations")
print(fluct_to_array)
print("Coefficients, swapped")
print(coeff_to_array)
#we need to swap the order here, due to the way coeff are computed!
coeff_tmp = coeff_to_array[1]
coeff_to_array[1] = coeff_to_array[0]
coeff_to_array[0] = coeff_tmp
# Evaluate the fitted power law back on the scales for the overlay line.
fluctfit = 2**np.polyval(coeff_to_array,np.log2(scales))
plt.loglog(scales, fluct_to_array, 'bo')
plt.loglog(scales, fluctfit, 'r', label=r'$\alpha$ = %0.2f'%coeff_to_array[0])
plt.title('DFA')
plt.xlabel(r'$\log_{10}$(time window)')
plt.ylabel(r'$\log_{10}$<F(t)>')#
plt.legend()
plt.show()
|
import Command as Cmd
from terminaltables import AsciiTable
import JFIO
import JEnum
import JTarget
class Command(Cmd.Command):
    """CLI command that lists, creates and removes attack targets stored as
    .bin files in the assets directory."""

    def __init__(self):
        super().__init__()
        # Instruction of this command
        self.command = "targets"
        # Title of this command
        self.title = "Show attack targets"
        # Description of this command
        self.description = "Show attack targets in assets directory."
        # Usage of this command
        self.usage = "targets # list of targets.\n\ttargets create <domain> # Create Target on target list \n\ttargets remove <domain> # Remove Target"

    def call(self, func):
        """Dispatch *func* through the registered function table.

        NOTE(review): ``self.functions`` is not defined here; it is presumably
        provided by the Cmd.Command base class — confirm.
        """
        return self.functions[func]()

    def run(self, command=None):
        """Run the command: no args lists targets; ``create <domain>`` /
        ``remove <domain>`` manage them; anything else prints usage.
        """
        # Avoid the original mutable-default-argument ([]) pitfall while
        # keeping the "no arguments" behavior identical.
        command = [] if command is None else command
        if len(command) == 0:
            files = JFIO.getFileList(JEnum.targets, lambda x: x.endswith(".bin"))
            table_keys = []
            targets = []
            for file in files:
                target_record = JTarget.JTarget(file, initType=1)
                table_keys.extend(target_record.target.keys())
                targets.append(target_record.target)
            # Bug fix: sorted() gives a deterministic column order; the
            # original list(set(...)) shuffled columns between runs.
            table_keys = sorted(set(table_keys))
            # One row per target, "-" for keys a target does not define.
            table = [[target.get(key, "-") for key in table_keys] for target in targets]
            ascii_table = AsciiTable([table_keys] + table)
            ascii_table.title = "Target List"
            print(ascii_table.table)
            return
        elif len(command) == 2:
            if command[0] == "create":
                new_target = JTarget.JTarget(command[1])
                new_target.element("domain", command[1])
                return
            elif command[0] == "remove":
                filename = JEnum.targets + command[1] + ".bin"
                JTarget.JTarget(filename, initType=1).remove()
                return
        self.printUsage()
        return
|
from dfa import DFA
# Name of the file describing the DFA (finite automaton) to read.
filename = 'exemplo2.txt'
# Name of the file containing the list of words to check.
listname = "lista_exemplo.txt"
# Build the DFA from the description file.
dfa = DFA(filename)
print(dfa)
# Minimize the DFA and show it again for comparison.
dfa.minimize()
print(dfa)
# Ask for a word and check it against the minimized DFA
# (prompt kept in Portuguese — it is user-facing output).
word = input("Insira uma palavra para verificar se ela faz parte de ACEITA(M min): ")
# Report whether the word is accepted or rejected.
dfa.verify_word(word)
# Check an entire list of words.
dfa.verify_list(listname)
import socket
# Minimal UDP listener: bind a datagram socket and echo every received
# payload to stdout forever. (Python 2 — note the print statement below.)
UDP_IP = "192.168.0.24"  # local interface address to bind
UDP_PORT = 5005
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.bind((UDP_IP, UDP_PORT))
while True:
    # Blocking receive; 1024 bytes is the maximum datagram size read.
    data, addr = sock.recvfrom(1024)
    print "received message:", data
|
from application import create_app
app = create_app("app_name")
if __name__ == '__main__':
    # Host/port come from app config, falling back to 0.0.0.0:5000.
    app.run(app.config.get("SERVER_ADDRESS", "0.0.0.0"), port=app.config.get("SERVER_PORT", 5000))
|
# -*- coding: utf-8 -*-
'''
@author: Dioooooooor (曹琛)
@time: 2019.1.21
@version: 1.0
@desc: 资源视图
'''
from flask import jsonify, request, current_app, url_for, g
from flask.views import MethodView
from CakeOrderSys.apis.v1 import api_v1
from CakeOrderSys.models import Admin, Commodity
from CakeOrderSys.apis.v1.schema import commodity_schema, commodities_schema
class IndexAPI(MethodView):
    """Root of the v1 API: advertises the discoverable resource URLs."""

    def get(self):
        print('get')
        index_payload = {
            "api_version": "1.0",
            "api_base_url": "http://api.caochen.com/v1",
            "current_user_url": "",
            "commodities_url": "http://api.caochen.com/v1/commodities/{?page, per_page}",
            "commodity_url": "http://api.caochen.com/v1/commodities/{commodity_id}",
        }
        return jsonify(index_payload)
class CommoditiesAPI(MethodView):
    def get(self):
        """Fetch all commodities, paginated."""
        page = request.args.get('page', 1, type=int)
        per_page = 10  # fixed page size
        # NOTE(review): positional paginate(page, per_page) matches the older
        # Flask-SQLAlchemy signature -- confirm against the installed version
        # (3.x requires keyword arguments).
        pagination = Commodity.query.paginate(page, per_page)
        commodities = pagination.items
        return jsonify(commodities_schema(commodities, pagination))
# Register the class-based views on the v1 blueprint.
api_v1.add_url_rule('/', view_func=IndexAPI.as_view('index'), methods=['GET'])
# NOTE(review): POST is routed here but CommoditiesAPI defines no post()
# handler, so Flask will answer POST with 405 -- confirm whether a post()
# method is still planned.
api_v1.add_url_rule('/commodities', view_func=CommoditiesAPI.as_view('commodities'), methods=['GET','POST'])
#!/usr/bin/env python3
import sys
import os
# Make the project's tools directory importable.
# NOTE(review): raises TypeError if the XRAY_DIR environment variable is not
# set (os.getenv returns None) -- confirm the env is always prepared.
sys.path.append(os.getenv("XRAY_DIR") + "/tools")
import simpleroute
print()
print('ready')
def load_design(f):
    '''Parse a ROI design metadata file, mapping net name to wire.

    Expected format (first line is a header, then whitespace-separated
    columns):
    name node pin wire
    clk CLK_HROW_TOP_R_X60Y130/CLK_HROW_CK_BUFHCLK_L0 W5 HCLK_VBRK_X34Y130/HCLK_VBRK_CK_BUFHCLK0
    din[0] INT_R_X9Y100/NE2BEG3 V17 VBRK_X29Y106/VBRK_NE2A3

    :param f: open text-file object positioned at the start of the file
    :return: dict mapping net name -> wire
    '''
    ret = {}
    # Skip the header line.
    f.readline()
    for l in f:
        l = l.strip()
        # Robustness fix: tolerate blank lines and runs of whitespace.
        # The original split(' ') raised ValueError on double spaces or a
        # trailing empty line; split() handles any whitespace.
        if not l:
            continue
        name, node, pin, wire = l.split()
        ret[name] = wire
    return ret
def route2fasm(route, out_f):
    """Route the request with simpleroute and emit one FASM line per pip.

    Each pip such as "INT_L_X10Y122.NL1BEG2.NE2END3" is rewritten with its
    last '.' replaced by a space: "INT_L_X10Y122.NL1BEG2 NE2END3".
    """
    pips = simpleroute.route(route)
    for pip in pips:
        last_dot = pip.rfind('.')
        line = pip[:last_dot] + ' ' + pip[last_dot + 1:]
        out_f.write(line + '\n')
def run(design_f, swn, ledn, out_f):
    """Generate FASM routing switch *swn* to LED *ledn*.

    Looks up both nets' wires in the design metadata, then routes the pair
    and writes the resulting FASM to out_f.
    """
    wires_by_name = load_design(design_f)
    sw_name = 'din[%d]' % swn
    led_name = 'dout[%d]' % ledn
    sw_wire = wires_by_name[sw_name]
    led_wire = wires_by_name[led_name]
    print(
        'Routing %s (%s) => %s (%s)' % (sw_wire, sw_name, led_wire, led_name))
    route2fasm((sw_wire, led_wire), out_f)
    # XXX: terminate LEDs so they are off?
if __name__ == '__main__':
    import argparse
    parser = argparse.ArgumentParser(
        description='Switch to LED interconnect demo: FASM generator')
    parser.add_argument('design_txt', help='ROI metadata file')
    parser.add_argument('sw', type=int, help='Switch to use')
    parser.add_argument('led', type=int, help='LED to use')
    # For now can't use stdout since simpleroute is spewing out prints
    parser.add_argument('out_fasm', help='Output .fasm file')
    args = parser.parse_args()
    # NOTE(review): both file handles are opened without a `with` block and
    # never closed explicitly; CPython closes them at interpreter exit, but
    # a context manager would be safer.
    run(
        open(args.design_txt, 'r'), args.sw, args.led, open(
            args.out_fasm, 'w'))
|
# Copyright (c) 2010-2018, Emmanuel Blot <emmanuel.blot@free.fr>
# Copyright (c) 2016, Emmanuel Bouaziz <ebouaziz@free.fr>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Neotion nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL NEOTION BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from array import array
from pyftdi.ftdi import Ftdi
from struct import calcsize as scalc, pack as spack, unpack as sunpack
from threading import Lock
__all__ = ['SpiPort', 'SpiGpioPort', 'SpiController']
class SpiIOError(IOError):
    """Error raised on any SPI I/O or configuration failure."""
class SpiPort:
    """SPI port

       An SPI port is never instantiated directly: use
       SpiController.get_port() method to obtain an SPI port.

       Example:

       >>> ctrl = SpiController(silent_clock=False)
       >>> ctrl.configure('ftdi://ftdi:232h/1')
       >>> spi = ctrl.get_port(1)
       >>> spi.set_frequency(1000000)
       >>> # send 2 bytes
       >>> spi.exchange([0x12, 0x34])
       >>> # send 2 bytes, then receive 2 bytes
       >>> out = spi.exchange([0x12, 0x34], 2)
       >>> # send 2 bytes, then receive 4 bytes, manage the transaction
       >>> out = spi.exchange([0x12, 0x34], 2, True, False)
       >>> out.extend(spi.exchange([], 2, False, True))
    """

    def __init__(self, controller, cs, cs_hold=3, spi_mode=0):
        self._controller = controller
        # SPI mode bit 0 is CPOL (clock idle level), bit 1 is CPHA
        # (sampling edge).
        self._cpol = spi_mode & 0x1
        self._cpha = spi_mode & 0x2
        # GPIO levels with /CS deasserted: clock at idle level, MOSI low.
        cs_clock = 0xFF & ~((int(not self._cpol) and SpiController.SCK_BIT) |
                            SpiController.DO_BIT)
        # Same, but with this port's /CS line pulled low (asserted).
        cs_select = 0xFF & ~((SpiController.CS_BIT << cs) |
                             (int(not self._cpol) and SpiController.SCK_BIT) |
                             SpiController.DO_BIT)
        # Prolog asserts /CS; epilog deasserts it and holds the bus idle
        # for cs_hold clock periods.
        self._cs_prolog = bytes([cs_clock, cs_select])
        self._cs_epilog = bytes([cs_select] + [cs_clock] * int(cs_hold))
        self._frequency = self._controller.frequency

    def exchange(self, out=b'', readlen=0, start=True, stop=True,
                 duplex=False):
        """Perform an exchange or a transaction with the SPI slave

           .. note:: Exchange is a dual half-duplex transmission: output bytes
                     are sent to the slave, then bytes are received from the
                     slave. It is not possible to perform a full duplex
                     exchange for now, although this feature could be easily
                     implemented.

           :param out: data to send to the SPI slave, may be empty to read out
                       data from the slave with no write.
           :type out: array or bytes or list(int)
           :param int readlen: count of bytes to read out from the slave,
                       may be zero to only write to the slave
           :param bool start: whether to start an SPI transaction, i.e.
                        activate the /CS line for the slave. Use False to
                        resume a previously started transaction
           :param bool stop: whether to deactivate the /CS line for the slave.
                       Use False if the transaction should complete with a
                       further call to exchange()
           :param duplex: perform a full-duplex exchange (vs. half-duplex),
                          i.e. bits are clocked in and out at once.
           :return: an array of bytes containing the data read out from the
                    slave
           :rtype: array
        """
        return self._controller.exchange(self._frequency, out, readlen,
                                         start and self._cs_prolog,
                                         stop and self._cs_epilog,
                                         self._cpol, self._cpha,
                                         duplex=duplex)

    def read(self, readlen=0, start=True, stop=True):
        """Read out bytes from the slave

           :param int readlen: count of bytes to read out from the slave,
                       may be zero to only write to the slave
           :param bool start: whether to start an SPI transaction, i.e.
                        activate the /CS line for the slave. Use False to
                        resume a previously started transaction
           :param bool stop: whether to deactivate the /CS line for the slave.
                       Use False if the transaction should complete with a
                       further call to exchange()
           :return: an array of bytes containing the data read out from the
                    slave
           :rtype: array
        """
        return self._controller.exchange(self._frequency, [], readlen,
                                         start and self._cs_prolog,
                                         stop and self._cs_epilog,
                                         self._cpol, self._cpha)

    def write(self, out, start=True, stop=True):
        """Write bytes to the slave

           :param out: data to send to the SPI slave, may be empty to read out
                       data from the slave with no write.
           :type out: array or bytes or list(int)
           :param bool start: whether to start an SPI transaction, i.e.
                        activate the /CS line for the slave. Use False to
                        resume a previously started transaction
           :param bool stop: whether to deactivate the /CS line for the slave.
                       Use False if the transaction should complete with a
                       further call to exchange()
        """
        return self._controller.exchange(self._frequency, out, 0,
                                         start and self._cs_prolog,
                                         stop and self._cs_epilog,
                                         self._cpol, self._cpha)

    def flush(self):
        """Force the flush of the HW FIFOs"""
        self._controller._flush()

    def set_frequency(self, frequency):
        """Change SPI bus frequency

           :param float frequency: the new frequency in Hz
        """
        # Clamp to the maximum the FTDI device supports.
        self._frequency = min(frequency, self._controller.frequency_max)

    @property
    def frequency(self):
        """Return the current SPI bus clock"""
        return self._frequency
class SpiGpioPort:
    """GPIO port

       A SpiGpioPort instance enables to drive GPIOs which are not reserved
       for the SPI feature as regular GPIOs.

       GPIO are managed as a bitfield. The LSBs are reserved for the SPI
       feature, which means that the lowest pin that can be used as a GPIO is
       b4:

       * b0: SPI SCLK
       * b1: SPI MOSI
       * b2: SPI MISO
       * b3: SPI CS0
       * b4: SPI CS1 or first GPIO

       If more than one SPI device is used, less GPIO pins are available, see
       the cs_count argument of the SpiController constructor.

       There is no offset bias in GPIO bit position, *i.e.* the first
       available GPIO can be reached from as ``0x10``.

       Bitfield size depends on the FTDI device: 4432H series use 8-bit GPIO
       ports, while 232H and 2232H series use wide 16-bit ports.

       An SpiGpio port is never instantiated directly: use
       SpiController.get_gpio() method to obtain the GPIO port.
    """

    def __init__(self, controller):
        # All GPIO state lives in the controller; this is a thin facade.
        self._controller = controller

    @property
    def pins(self):
        """Report the addressable GPIOs as a bitfield."""
        return self._controller.gpio_pins

    @property
    def direction(self):
        """Provide the FTDI GPIO direction"""
        return self._controller.direction

    def read(self):
        """Read GPIO port.

           :return: the GPIO port pins as a bitfield
           :rtype: int
        """
        return self._controller.read_gpio()

    def write(self, value):
        """Write GPIO port.

           :param int value: the GPIO port pins as a bitfield
        """
        return self._controller.write_gpio(value)

    def set_direction(self, pins, direction):
        """Change the direction of the GPIO pins.

           :param int pins: which GPIO pins should be reconfigured
           :param int direction: direction bitfield (high level for output)
        """
        self._controller.set_gpio_direction(pins, direction)
class SpiController:
    """SPI master.

        :param silent_clock: deprecated.
        :param int cs_count: is the number of /CS lines (one per device to
            drive on the SPI bus)
        :param boolean turbo: whether to merge the epilog (/CS release)
            commands into a single USB write with the payload
    """

    # MPSSE low-byte pin assignment for the SPI signals.
    SCK_BIT = 0x01
    DO_BIT = 0x02
    DI_BIT = 0x04
    CS_BIT = 0x08
    SPI_BITS = DI_BIT | DO_BIT | SCK_BIT
    PAYLOAD_MAX_LENGTH = 0x10000  # 16 bits max

    def __init__(self, silent_clock=False, cs_count=4, turbo=True):
        self._ftdi = Ftdi()
        # Serializes all FTDI accesses (ports may be used from threads).
        self._lock = Lock()
        self._gpio_port = None
        self._gpio_dir = 0
        # Cached level of the non-SPI low-byte GPIO outputs.
        self._gpio_low = 0
        self._wide_port = False
        self._cs_count = cs_count
        self._turbo = turbo
        self._immediate = bytes((Ftdi.SEND_IMMEDIATE,))
        # 0.0 means "not configured yet"; see configure().
        self._frequency = 0.0
        # Tracks whether 3-phase clocking (CPHA workaround) is enabled.
        self._clock_phase = False

    @property
    def direction(self):
        """Provide the FTDI GPIO direction"""
        return self._spi_dir | self._gpio_dir

    def configure(self, url, **kwargs):
        """Configure the FTDI interface as a SPI master

           :param str url: FTDI URL string, such as 'ftdi://ftdi:232h/1'
           :param kwargs: options to configure the SPI bus

           Accepted options:

           * ``frequency`` the SPI bus frequency in Hz. Note that each slave
             may reconfigure the SPI bus with a specialized
             frequency.
           * ``cs_count`` count of chip select signals dedicated to select
             SPI slave devices
           * ``turbo`` whether to enable or disable turbo mode
           * ``debug`` for extra debug output
        """
        # it is better to specify CS and turbo in configure, but the older
        # API where these parameters are specified at instanciation has been
        # preserved
        self._cs_count = int(kwargs.get('cs_count', self._cs_count))
        if not (1 <= self._cs_count <= 5):
            raise ValueError('Unsupported CS line count: %d' % self._cs_count)
        self._turbo = bool(kwargs.get('turbo', self._turbo))
        # Strip the options consumed here so they are not forwarded to the
        # underlying FTDI open call.
        for k in ('direction', 'initial', 'cs_count', 'turbo'):
            if k in kwargs:
                del kwargs[k]
        with self._lock:
            if self._frequency > 0.0:
                raise SpiIOError('Already configured')
            # Bitmask of all /CS lines (CS0 upward).
            self._cs_bits = (((SpiController.CS_BIT << self._cs_count) - 1) &
                             ~(SpiController.CS_BIT - 1))
            self._spi_ports = [None] * self._cs_count
            # Outputs: all /CS lines, MOSI and SCLK.
            self._spi_dir = (self._cs_bits |
                             SpiController.DO_BIT |
                             SpiController.SCK_BIT)
            self._spi_mask = self._cs_bits | self.SPI_BITS
            self._frequency = self._ftdi.open_mpsse_from_url(
                # /CS all high
                url, direction=self._spi_dir, initial=self._cs_bits, **kwargs)
            self._ftdi.enable_adaptive_clock(False)
            self._wide_port = self._ftdi.has_wide_port

    def terminate(self):
        """Close the FTDI interface"""
        if self._ftdi:
            self._ftdi.close()
            # Mark as unconfigured so configure() may be called again.
            self._frequency = 0.0

    def get_port(self, cs, freq=None, mode=0):
        """Obtain a SPI port to drive a SPI device selected by Chip Select.

           :note: SPI mode 2 is not supported.

           :param int cs: chip select slot, starting from 0
           :param float freq: SPI bus frequency for this slave in Hz
           :param int mode: SPI mode [0,1,3]
           :rtype: SpiPort
        """
        with self._lock:
            if not self._ftdi:
                raise SpiIOError("FTDI controller not initialized")
            if cs >= len(self._spi_ports):
                raise SpiIOError("No such SPI port")
            if not (0 <= mode <= 3):
                raise SpiIOError("Invalid SPI mode")
            # CPHA (mode bit 1) needs the 3-phase clock workaround, which
            # only H-series FTDI devices implement.
            if (mode & 0x2) and not self._ftdi.is_H_series:
                raise SpiIOError("SPI with CPHA high is not supported by "
                                 "this FTDI device")
            if mode == 2:
                raise SpiIOError("SPI mode 2 has no known workaround with "
                                 "FTDI devices")
            if not self._spi_ports[cs]:
                freq = min(freq or self.frequency_max, self.frequency_max)
                # Hold /CS released for roughly one microsecond worth of
                # clock periods between transactions.
                hold = freq and (1+int(1E6/freq))
                self._spi_ports[cs] = SpiPort(self, cs, cs_hold=hold,
                                              spi_mode=mode)
                self._spi_ports[cs].set_frequency(freq)
                self._flush()
            return self._spi_ports[cs]

    def get_gpio(self):
        """Return the (lazily created) GPIO port for the non-SPI pins.

           :rtype: SpiGpioPort
        """
        with self._lock:
            if not self._ftdi:
                raise SpiIOError("FTDI controller not initialized")
            if not self._gpio_port:
                self._gpio_port = SpiGpioPort(self)
            return self._gpio_port

    @property
    def frequency_max(self):
        """Returns the maximum SPI clock"""
        return self._ftdi.frequency_max

    @property
    def frequency(self):
        """Returns the current SPI clock"""
        return self._frequency

    @property
    def gpio_pins(self):
        """Report the addressable GPIOs as a bitfield"""
        with self._lock:
            return self._get_gpio_mask()

    def exchange(self, frequency, out, readlen,
                 cs_prolog=None, cs_epilog=None,
                 cpol=False, cpha=False, duplex=False):
        """Perform a half- or full-duplex exchange (internal entry point
           used by SpiPort)."""
        if duplex:
            if readlen > len(out):
                # Pad the outgoing payload with zeroes so one full-duplex
                # transfer clocks out enough bytes to read readlen back.
                tmp = array('B', out)
                tmp.extend([0] * (readlen - len(out)))
                out = tmp
            elif not readlen:
                readlen = len(out)
        with self._lock:
            if duplex:
                data = self._exchange_full_duplex(frequency, out,
                                                  cs_prolog, cs_epilog,
                                                  cpol, cpha)
                return data[:readlen]
            else:
                return self._exchange_half_duplex(frequency, out, readlen,
                                                  cs_prolog, cs_epilog,
                                                  cpol, cpha)

    def read_gpio(self):
        """Read GPIO port

           :return: the GPIO port pins as a bitfield
           :rtype: int
        """
        with self._lock:
            data = self._read_raw(self._wide_port)
        mask = self._get_gpio_mask()
        return data & mask

    def write_gpio(self, value):
        """Write GPIO port

           :param int value: the GPIO port pins as a bitfield
        """
        with self._lock:
            mask = self._get_gpio_mask()
            if (value & mask) != value:
                raise SpiIOError('No such GPIO pins: %04x/%04x' %
                                 (mask, value))
            # perform read-modify-write
            use_high = self._wide_port and (self.direction & 0xff00)
            data = self._read_raw(use_high)
            data &= ~mask
            data |= value
            self._write_raw(data, use_high)
            # Remember the non-SPI low-byte levels so SPI transfers keep
            # the GPIO outputs stable.
            self._gpio_low = data & 0xFF & ~self._spi_mask

    def set_gpio_direction(self, pins, direction):
        """Change the direction of the GPIO pins

           :param int pins: which GPIO pins should be reconfigured
           :param int direction: direction bitfield (on for output)
        """
        with self._lock:
            if pins & self._spi_mask:
                raise SpiIOError('Cannot access SPI pins as GPIO')
            mask = self._get_gpio_mask()
            if (pins & mask) != pins:
                raise SpiIOError('No such GPIO pin(s)')
            self._gpio_dir &= ~pins
            self._gpio_dir |= (pins & direction)

    def _get_gpio_mask(self):
        # All pins of the port width that are not reserved for SPI.
        gpio_width = self._wide_port and 16 or 8
        gpio_mask = (1 << gpio_width) - 1
        gpio_mask &= ~self._spi_mask
        return gpio_mask

    def _read_raw(self, read_high):
        # Sample the pin levels; read both bytes of a wide port when asked.
        if read_high:
            cmd = array('B', [Ftdi.GET_BITS_LOW,
                              Ftdi.GET_BITS_HIGH,
                              Ftdi.SEND_IMMEDIATE])
            fmt = '<H'
        else:
            cmd = array('B', [Ftdi.GET_BITS_LOW,
                              Ftdi.SEND_IMMEDIATE])
            fmt = 'B'
        self._ftdi.write_data(cmd)
        size = scalc(fmt)
        data = self._ftdi.read_data_bytes(size, 4)
        if len(data) != size:
            raise SpiIOError('Cannot read GPIO')
        value, = sunpack(fmt, data)
        return value

    def _write_raw(self, data, write_high):
        # Drive the pin levels, splitting into low/high byte commands.
        direction = self.direction
        low_data = data & 0xFF
        low_dir = direction & 0xFF
        if write_high:
            high_data = (data >> 8) & 0xFF
            high_dir = (direction >> 8) & 0xFF
            cmd = array('B', [Ftdi.SET_BITS_LOW, low_data, low_dir,
                              Ftdi.SET_BITS_HIGH, high_data, high_dir])
        else:
            cmd = array('B', [Ftdi.SET_BITS_LOW, low_data, low_dir])
        self._ftdi.write_data(cmd)

    def _exchange_half_duplex(self, frequency, out, readlen,
                              cs_prolog, cs_epilog, cpol, cpha):
        # Write `out`, then read `readlen` bytes, bracketed by the /CS
        # prolog/epilog pin sequences.
        if not self._ftdi:
            raise SpiIOError("FTDI controller not initialized")
        if len(out) > SpiController.PAYLOAD_MAX_LENGTH:
            raise SpiIOError("Output payload is too large")
        if readlen > SpiController.PAYLOAD_MAX_LENGTH:
            raise SpiIOError("Input payload is too large")
        if cpha:
            # to enable CPHA, we need to use a workaround with FTDI device,
            # that is enable 3-phase clocking (which is usually dedicated to
            # I2C support). This mode use 3 clock period instead of 2,
            # which implies the FTDI frequency should be fixed to match the
            # requested one.
            frequency = (3*frequency)//2
        if self._frequency != frequency:
            self._ftdi.set_frequency(frequency)
            # store the requested value, not the actual one (best effort),
            # to avoid setting unavailable values on each call.
            self._frequency = frequency
        direction = self.direction
        cmd = array('B')
        for ctrl in cs_prolog or []:
            ctrl &= self._spi_mask
            # Preserve the current non-SPI GPIO output levels.
            ctrl |= self._gpio_low
            cmd.extend((Ftdi.SET_BITS_LOW, ctrl, direction))
        epilog = array('B')
        if cs_epilog:
            for ctrl in cs_epilog:
                ctrl &= self._spi_mask
                ctrl |= self._gpio_low
                epilog.extend((Ftdi.SET_BITS_LOW, ctrl, direction))
            # Restore idle state
            cs_high = [Ftdi.SET_BITS_LOW, self._cs_bits | self._gpio_low,
                       direction]
            if not self._turbo:
                cs_high.append(Ftdi.SEND_IMMEDIATE)
            epilog.extend(cs_high)
        writelen = len(out)
        if self._clock_phase != cpha:
            self._ftdi.enable_3phase_clock(cpha)
            self._clock_phase = cpha
        if writelen:
            # Clock edge for writes depends on CPOL xor CPHA.
            wcmd = (cpol ^ cpha) and \
                Ftdi.WRITE_BYTES_PVE_MSB or Ftdi.WRITE_BYTES_NVE_MSB
            write_cmd = spack('<BH', wcmd, writelen-1)
            cmd.frombytes(write_cmd)
            cmd.extend(out)
        if readlen:
            rcmd = (cpol ^ cpha) and \
                Ftdi.READ_BYTES_PVE_MSB or Ftdi.READ_BYTES_NVE_MSB
            read_cmd = spack('<BH', rcmd, readlen-1)
            cmd.frombytes(read_cmd)
            cmd.extend(self._immediate)
            if self._turbo:
                if epilog:
                    cmd.extend(epilog)
                self._ftdi.write_data(cmd)
            else:
                self._ftdi.write_data(cmd)
                if epilog:
                    self._ftdi.write_data(epilog)
            # USB read cycle may occur before the FTDI device has actually
            # sent the data, so try to read more than once if no data is
            # actually received
            data = self._ftdi.read_data_bytes(readlen, 4)
        else:
            if writelen:
                if self._turbo:
                    if epilog:
                        cmd.extend(epilog)
                    self._ftdi.write_data(cmd)
                else:
                    self._ftdi.write_data(cmd)
                    if epilog:
                        self._ftdi.write_data(epilog)
            data = array('B')
        return data

    def _exchange_full_duplex(self, frequency, out,
                              cs_prolog, cs_epilog, cpol, cpha):
        # Clock len(out) bytes out and in simultaneously, bracketed by the
        # /CS prolog/epilog pin sequences.
        if not self._ftdi:
            raise SpiIOError("FTDI controller not initialized")
        if len(out) > SpiController.PAYLOAD_MAX_LENGTH:
            raise SpiIOError("Output payload is too large")
        if cpha:
            # to enable CPHA, we need to use a workaround with FTDI device,
            # that is enable 3-phase clocking (which is usually dedicated to
            # I2C support). This mode use 3 clock period instead of 2,
            # which implies the FTDI frequency should be fixed to match the
            # requested one.
            frequency = (3*frequency)//2
        if self._frequency != frequency:
            self._ftdi.set_frequency(frequency)
            # store the requested value, not the actual one (best effort),
            # to avoid setting unavailable values on each call.
            self._frequency = frequency
        direction = self.direction
        cmd = array('B')
        for ctrl in cs_prolog or []:
            ctrl &= self._spi_mask
            ctrl |= self._gpio_low
            cmd.extend((Ftdi.SET_BITS_LOW, ctrl, direction))
        epilog = array('B')
        if cs_epilog:
            for ctrl in cs_epilog:
                ctrl &= self._spi_mask
                ctrl |= self._gpio_low
                epilog.extend((Ftdi.SET_BITS_LOW, ctrl, direction))
            # Restore idle state
            cs_high = [Ftdi.SET_BITS_LOW, self._cs_bits | self._gpio_low,
                       direction]
            if not self._turbo:
                cs_high.append(Ftdi.SEND_IMMEDIATE)
            epilog.extend(cs_high)
        writelen = len(out)
        if self._clock_phase != cpha:
            self._ftdi.enable_3phase_clock(cpha)
            self._clock_phase = cpha
        wcmd = (cpol ^ cpha) and \
            Ftdi.RW_BYTES_NVE_PVE_MSB or Ftdi.RW_BYTES_PVE_NVE_MSB
        write_cmd = spack('<BH', wcmd, writelen-1)
        cmd.frombytes(write_cmd)
        cmd.extend(out)
        cmd.extend(self._immediate)
        if self._turbo:
            if epilog:
                cmd.extend(epilog)
            self._ftdi.write_data(cmd)
        else:
            self._ftdi.write_data(cmd)
            if epilog:
                self._ftdi.write_data(epilog)
        # USB read cycle may occur before the FTDI device has actually
        # sent the data, so try to read more than once if no data is
        # actually received
        data = self._ftdi.read_data_bytes(len(out), 4)
        return data

    def _flush(self):
        """Flush the HW FIFOs"""
        self._ftdi.write_data(self._immediate)
        self._ftdi.purge_buffers()
|
import numpy as np
import tensorflow as tf
import keras
from keras.models import Sequential
# Modernized import path: keras.layers.core was removed in Keras 2.x+;
# keras.layers has provided these classes since early Keras 2.
from keras.layers import Dense, Activation, Flatten, Dropout
from keras.datasets import mnist
import os

# Load MNIST
(X_train, y_train), (X_test, y_test) = mnist.load_data()

# One-hot encode the labels.
# Fix: keras.utils.np_utils was removed; to_categorical lives in keras.utils.
y_train = keras.utils.to_categorical(y_train)
y_test = keras.utils.to_categorical(y_test)

# Reshape the 28x28 input images to flat 784-element vectors
X_train = X_train.reshape(len(X_train), 28**2)
X_test = X_test.reshape(len(X_test), 28**2)

# Rescale the pixel values to be between 0 and 1
X_train, X_test = X_train / 255, X_test / 255

##### Question E: Modeling Part 2
# Build the model: two hidden ReLU layers with 50% dropout, softmax output.
model = Sequential()
model.add(Dense(750, input_dim=784))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(250))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
model.summary()

model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# BUG FIX: the keyword is `epochs`; the Keras 1.x `nb_epoch` argument was
# removed and raises TypeError on any modern Keras.
fit = model.fit(X_train, y_train, batch_size=50, epochs=20,
                verbose=1)

# Evaluate the model on the test set
score = model.evaluate(X_test, y_test, verbose=0)
print('\nTest score:', score[0])
print('Test accuracy:', score[1])

# Make a sound so I check the results
os.system('say "done"')
|
r'''
use with
eliduprees_3d_designs_path = r"C:\Users\Eli\Documents\eliduprees-3d-designs\"
exec(open(eliduprees_3d_designs_path+"freecad_autorun.py").read())
autorun(eliduprees_3d_designs_path+"freecad_experiments.py")
or
eliduprees_3d_designs_path = open("/n/elidupree-autobuild/share_prefix").read().strip() + "/eliduprees-3d-designs/"
exec(open(eliduprees_3d_designs_path+"freecad_autorun.py").read())
autorun(eliduprees_3d_designs_path+"freecad_experiments.py")
eliduprees_3d_designs_path = open("/n/elidupree-autobuild/share_prefix").read().strip() + "/eliduprees-3d-designs/"; exec(open(eliduprees_3d_designs_path+"freecad_autorun.py").read()); run(eliduprees_3d_designs_path+"freecad_experiments.py")
eliduprees_3d_designs_path = "/n/autobuild/eliduprees-3d-designs/build/"; exec(open(eliduprees_3d_designs_path+"freecad_autorun.py").read()); run(eliduprees_3d_designs_path+"freecad_experiments.py")
'''
import PartDesignGui
import sys
'''import os.path
if "eliduprees_3d_designs_path" not in globals():
  if os.path.isdir("/n/elidupree-autobuild"):
    eliduprees_3d_designs_path = open("/n/elidupree-autobuild/share_prefix").read().strip() + "/eliduprees-3d-designs/"'''
# NOTE(review): eliduprees_3d_designs_path must already be defined by the
# caller before exec'ing this file (see the module docstring); NameError
# otherwise. Make the designs directory importable exactly once.
if eliduprees_3d_designs_path not in sys.path:
  sys.path.append(eliduprees_3d_designs_path)
def run(source_path):
  """Exec *source_path* in a fresh namespace seeded only with the
  FreeCAD-related names listed below, isolating the script from the rest
  of this module's state."""
  with open (source_path) as file:
    shared_globals = ['App', 'Log', 'Msg', 'Err', 'Wrn', 'traceback', 'FreeCADGui', 'Gui', 'Workbench', 'Part', 'PathCommandGroup', 'Sketcher', 'WebGui', 'sys', 'Start', 'StartPage', 'WebPage', 'WebView', 'webView', "eliduprees_3d_designs_path"]
    # NOTE(review): raises KeyError if any listed name is absent from this
    # module's globals -- confirm the FreeCAD console provides them all.
    exec (file.read(), {
      g: globals()[g] for g in shared_globals
    })
def autorun(source_path):
  """Poll *source_path* every 250 ms and re-run it (via run()) whenever its
  modification time increases. Timer and last-seen mtime are stored in this
  module's globals, so calling autorun() again replaces the previous watch."""
  from PyQt5 import QtCore
  import os.path
  def check_source():
    # Timer callback: re-run the file if its mtime is newer than recorded.
    try:
      modified_time = None
      try:
        modified_time = os.path.getmtime (source_path)
      except OSError:
        # File may be missing or mid-save; just try again on the next tick.
        pass
      if modified_time is not None:
        if "last_modified_time" not in globals() or modified_time > globals()["last_modified_time"]:
          globals()["last_modified_time"] = modified_time
          on_change(source_path)
    except Exception as e:
      # Report errors to the FreeCAD console instead of killing the timer.
      import traceback
      App.Console.PrintError(traceback.format_exc())
  def on_change(source_path):
    run(source_path)
  # Stop and replace any timer installed by a previous autorun() call.
  if "autorun_timer" in globals():
    globals()["autorun_timer"].stop()
  globals()["autorun_timer"] = QtCore.QTimer()
  globals()["autorun_timer"].timeout.connect (check_source)
  globals()["autorun_timer"].start(250)
|
import numpy as np

# --- System size ------------------------------------------------------
fock_N = 10
N = 7  # number of spins

# Array of spin energy splittings and coupling strengths. Here we use
# uniform parameters, but in general we don't have to.
# init='mixed'
init = 'xbasis'

# --- Rates (all relative to the coupling g) ---------------------------
# dephasing rate
# gamma = 0.01 * np.ones(N)
g = 0.05
kappa = 1e-1 * g
beta = kappa
# beta=0
gamma = 1e-3 * g
detune = 1
# shift=2*gamma
shift = 0

# Detuning matrix: detune*(n+1) on the diagonal, `shift` everywhere else.
deltas = np.array([[detune * (m + 1) if m == n else shift
                    for m in range(N)]
                   for n in range(N)])

# --- Time grids -------------------------------------------------------
steps = 2000
steps2 = 2000
endtime = 100
endtime2 = 100
tlist, deltat = np.linspace(0, endtime, steps, retstep=True)
tlist2, deltat2 = np.linspace(0, endtime2, steps2, retstep=True)

# --- Simulation switches ----------------------------------------------
rank = 1
rank2 = 1
ntraj = 1001
run_lowrank = True
run_exact = True
run_mc = True
# run_exact=False
# run_mc=False
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.