id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
4803994 | <reponame>plqualls/web-scraping-challenge
from bs4 import BeautifulSoup
from splinter import Browser
from pprint import pprint
from webdriver_manager.chrome import ChromeDriverManager
from splinter import Browser
import pymongo
import pandas as pd
import requests
def init_browser():
    """Create a visible (non-headless) Chrome browser for scraping."""
    chrome_config = {"executable_path": "/usr/local/bin/chromedriver"}
    return Browser("chrome", headless=False, **chrome_config)
def scrape_info():
    """
    Scrape NASA Mars news, the Mars facts table and hemisphere images.

    Returns:
        dict: keys 'Topic', 'Summary', 'Mars Facts & Values', 'Pictures'.
    """
    browser = init_browser()

    # --- Latest NASA Mars news -------------------------------------------
    url = "https://mars.nasa.gov/news/"
    browser.visit(url)
    html = browser.html
    # BUG FIX: the original called the undefined name `bs`; use the
    # imported BeautifulSoup class.
    soup = BeautifulSoup(html, 'lxml')
    news_title = soup.find("div", class_="content_title").text
    news_summary = soup.find("div", class_="rollover_description_inner").text

    # --- Mars facts table ------------------------------------------------
    url = 'http://space-facts.com/mars/'
    browser.visit(url)
    tables = pd.read_html(url)
    mars_facts_df = tables[0]
    # BUG FIX: the column labels must be string literals; the original used
    # the undefined names `Characteristics` and `Value`.
    mars_facts_df.columns = ['Characteristics', 'Value']
    # One conversion is enough; the original produced two HTML strings and
    # referenced neither consistently.
    html_table = mars_facts_df.to_html(table_id="html_tbl_css", justify='left', index=False)

    # --- Hemisphere images -----------------------------------------------
    # BUG FIX: `main_url` was never defined but is needed to resolve the
    # relative item links below.
    main_url = 'https://astrogeology.usgs.gov'
    hemispheres_url = main_url + '/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
    browser.visit(hemispheres_url)
    html_hemispheres = browser.html
    soup = BeautifulSoup(html_hemispheres, 'lxml')
    mars_hemispheres = soup.find('div', class_='collapsible results')
    hemispheres = mars_hemispheres.find_all('div', class_='item')

    mars_hemisphere_pictures = []
    for hemi in hemispheres:
        description = hemi.find('div', class_="description")
        # BUG FIX: str.strip('Enhanced') strips any of the characters
        # 'Enhacd' from both ends; remove the literal suffix instead.
        title = description.h3.text.replace(' Enhanced', '')
        # Follow the item link to the full-resolution download page.
        end_link = description.a["href"]
        browser.visit(main_url + end_link)
        img_soup = BeautifulSoup(browser.html, 'html.parser')
        img_link = img_soup.find('div', class_='downloads')
        img_url = img_link.find('li').a['href']
        mars_hemisphere_pictures.append({'title': title, 'Picture URL': img_url})

    # Close the browser window before returning.
    browser.quit()

    # Summary dictionary of all scraped data.
    # BUG FIX: the original referenced the undefined `mars_facts_html`;
    # use the `html_table` built above.
    mars_dic = {
        'Topic': news_title,
        'Summary': news_summary,
        'Mars Facts & Values': html_table,
        'Pictures': mars_hemisphere_pictures,
    }
    return mars_dic
import pymongo
from pymongo import MongoClient

# Connect to a local MongoDB instance.
conn = "mongodb://localhost:27017"
client = pymongo.MongoClient(conn)

# Define database and collection.
db = client.mars_mission_db
collection = db.mars_dic

# BUG FIX: `mars_dic` was never defined at module level in the original
# (NameError); it is the dict returned by scrape_info().
mars_dic = scrape_info()
collection.insert_one(mars_dic)

# Echo the stored documents for a quick sanity check.
mission_to_mars_data = db.mars_dic.find()
for data in mission_to_mars_data:
    print(data)
| StarcoderdataPython |
1692399 | import unittest
from src.database.entities_pg import Job_Class, Job_Class_To_Job_Class_Similar, Job_Class_Similar, X28_HTML, \
Job_Class_Variant, Classification_Results
class TestEntitiesX28(unittest.TestCase):
    """Smoke tests for the ORM entity mappings of the X28 database."""

    def test_entities_are_correctly_mapped(self):
        # Issuing a bare SELECT against each entity checks that the model
        # definition matches the underlying table — a wrong table or column
        # name would raise here.
        X28_HTML.select()
        Job_Class.select()
        Job_Class_To_Job_Class_Similar.select()
        Job_Class_Similar.select()
        Job_Class_Variant.select()
        Classification_Results.select()
3233810 | import ast
def split_table(table_name):
    """
    Convert a textual table mapping from excel into src/target table names.

    Args:
        table_name (str): String holding a dict literal, e.g.
            "{'table': {'src': 'tgt'}}".

    Returns:
        dict: keys ``src_table`` and ``target_table``.
    """
    parsed = ast.literal_eval(table_name)
    mapping = {}
    # NOTE(review): when several tables are listed, each iteration
    # overwrites the previous pair, so only the last mapping survives —
    # confirm this is intended.
    for src in parsed["table"]:
        mapping['src_table'] = src
        mapping['target_table'] = parsed["table"][src]
    return mapping
def split_db(test_db_detail):
    """
    Parse a semicolon-separated DB-details string fetched from Excel.

    Args:
        test_db_detail (str): eight ``key:value`` segments separated by
            ``;`` in the fixed order srcType;srcServer;srcDb;srcUser;
            tgtType;tgtDb;tgtServer;tgtUser.

    Returns:
        dict: keys sourcedb, targetdb, sourcedbType, targetdbType,
        sourceServer, targetServer, sourceuser, Targetuser.
    """
    # Split on ';' then on the first ':' of each segment; values are
    # selected purely by position, so segment order is significant.
    pairs = [segment.split(':', 1) for segment in test_db_detail.split(";")]
    return {
        "sourcedb": pairs[2][1],
        "targetdb": pairs[5][1],
        "sourcedbType": pairs[0][1],
        "targetdbType": pairs[4][1],
        "sourceServer": pairs[1][1],
        "targetServer": pairs[6][1],
        "sourceuser": pairs[3][1],
        "Targetuser": pairs[7][1],
    }
| StarcoderdataPython |
128517 | import pymysql,requests
def my_db(msg):
    """
    Look up a city row by (partial) name in the ``cityid`` table.

    Args:
        msg (str): City-name fragment matched with SQL LIKE.

    Returns:
        dict or None: first matching row (DictCursor), or None if no match.
    """
    conn = pymysql.Connect(
        host='192.168.3.11',  # mysql server address
        port=3306,            # mysql server port
        user='yhj666',        # user name
        passwd='<PASSWORD>',  # password
        db='yhj666',          # database name
        charset='utf8',       # connection encoding
    )
    try:
        # SECURITY FIX: use a parameterized query so the driver escapes the
        # value; the original interpolated `msg` with %-formatting, which
        # allowed SQL injection.
        sq1 = 'SELECT * FROM cityid WHERE city_name LIKE %s'
        cur = conn.cursor(cursor=pymysql.cursors.DictCursor)
        cur.execute(sq1, ('%' + msg + '%',))
        uer = cur.fetchone()
        conn.commit()
        cur.close()
    finally:
        # BUG FIX: always release the connection, even if the query raises.
        conn.close()
    return uer
#data1 = cursor.fetchall()
# print(data1[0][1])
# uer=data1[0][1]
# sq1='select * from my_user where username="a12121"'
# print(my_db(sq1))
# City whose weather should be queried.
msg='岳阳'
# print(my_db(msg))
# Resolve and print the numeric weather-API code for the city.
print(my_db(msg)['city_code'])
def tqyb():
    """Fetch today's weather for the city named in the module-level ``msg`` and print a summary tuple."""
    # sojson free weather API, addressed by the numeric city code resolved
    # through my_db().
    url1="http://t.weather.sojson.com/api/weather/city/"+my_db(msg)['city_code']
    # Example request with a fixed city code (unused).
    url2 = "http://t.weather.sojson.com/api/weather/city/101280601"
    re=requests.get(url1)
    # City codes seen in testing: 0102 Panyu, 0101 Guangzhou, 0601 Shenzhen,
    # 0110 Baiyun, 0109 Tianhe.
    # print(re.json()["cityInfo"])
    # print(re.json()["data"]["forecast"][0])
    tq0=re.json()["cityInfo"]              # city metadata
    tq=re.json()["data"]["forecast"][0]    # today's forecast entry
    tq1=re.json()["data"]                  # overall data (humidity, air quality, ...)
    # print("当前城市:"+tq0["parent"],tq0["city"])
    # Assemble a human-readable summary tuple (labels are in Chinese).
    tq00="当前城市:"+tq0["parent"],tq0["city"],"今天"+tq["week"],"最"+tq["high"],"最"+tq["low"]\
    ,"天气类型:"+tq["type"],tq["fx"],tq["fl"],"温馨提示:"+tq["notice"]\
    ,"湿度:"+tq1["shidu"],"空气质量:"+tq1["quality"],"感冒指数:"+tq1["ganmao"],"天气更新时间:"+tq0["updateTime"]
    print(tq00)
# tqyb()
def my_db1(insert_sql):
    """
    Execute an arbitrary SQL statement and return any fetched rows.

    Args:
        insert_sql (str): SQL statement to execute.
            NOTE(review): the statement is executed verbatim — never pass
            user-controlled text here.

    Returns:
        tuple: rows returned by the statement (empty for pure INSERTs).
    """
    con = pymysql.Connect(
        host='192.168.3.11',  # mysql server address
        port=3306,            # mysql server port
        user='yhj666',        # user name
        passwd='<PASSWORD>',  # password
        db='yhj666',          # database name
        charset='utf8',       # connection encoding
    )
    try:
        cursor = con.cursor()  # get a cursor
        cursor.execute(insert_sql)
        data1 = cursor.fetchall()
        # BUG FIX: commit so INSERT/UPDATE statements are persisted; the
        # original never committed.
        con.commit()
        cursor.close()
    finally:
        # BUG FIX: the original leaked the connection on every call.
        con.close()
    return data1
3313981 | <gh_stars>0
#!/usr/bin/env python
import argparse
import cPickle
import traceback
import logging
import time
import sys
import numpy
import experiments.nmt
from experiments.nmt import \
RNNEncoderDecoder, \
prototype_phrase_state, \
parse_input
from experiments.nmt.numpy_compat import argpartition
logger = logging.getLogger(__name__)
class Timer(object):
    """Accumulates elapsed wall-clock time over repeated start()/finish() spans."""

    def __init__(self):
        # Running total of measured seconds.
        self.total = 0

    def start(self):
        """Begin timing a span."""
        self.start_time = time.time()

    def finish(self):
        """End the current span and add its duration to the total."""
        elapsed = time.time() - self.start_time
        self.total += elapsed
class BeamSearch(object):
    """Beam-search decoder over an RNN encoder-decoder translation model (Python 2 / Theano)."""

    def __init__(self, enc_dec):
        self.enc_dec = enc_dec
        state = self.enc_dec.state
        # Special target-vocabulary symbols: end-of-sequence and unknown.
        self.eos_id = state['null_sym_target']
        self.unk_id = state['unk_sym_target']

    def compile(self):
        """Build the compiled computation functions used during search."""
        self.comp_repr = self.enc_dec.create_representation_computer()
        self.comp_init_states = self.enc_dec.create_initializers()
        self.comp_next_probs = self.enc_dec.create_next_probs_computer()
        self.comp_next_states = self.enc_dec.create_next_states_computer()

    def search(self, seq, n_samples, prefix=None, ignore_unk=False, minlen=1, verbose=False):
        """
        Beam-search translations of ``seq``.

        :param seq: source sentence as a sequence of word indices
        :param n_samples: beam size
        :param prefix: optional forced target prefix (word indices)
        :param ignore_unk: forbid the UNK token in hypotheses
        :param minlen: minimum hypothesis length before EOS is allowed
        :return: (translations sorted by cost, sorted costs)
        """
        # Encode the source sentence once; `c` is its representation.
        c = self.comp_repr(seq)[0]
        states = map(lambda x: x[None, :], self.comp_init_states(c))
        dim = states[0].shape[1]
        num_levels = len(states)
        fin_trans = []   # finished hypotheses (ending in EOS)
        fin_costs = []
        trans = [[]]     # live partial hypotheses
        costs = [0.0]    # negative log-probabilities of `trans`
        # Hypothesis length is capped at 3x the source length.
        for k in range(3 * len(seq)):
            if n_samples == 0:
                break
            # Compute probabilities of the next words for
            # all the elements of the beam.
            beam_size = len(trans)
            last_words = (numpy.array(map(lambda t: t[-1], trans))
                          if k > 0
                          else numpy.zeros(beam_size, dtype="int64"))
            log_probs = numpy.log(self.comp_next_probs(c, k, last_words, *states)[0])
            # Adjust log probs according to search restrictions
            if ignore_unk:
                log_probs[:, self.unk_id] = -numpy.inf
            if k < minlen:
                log_probs[:, self.eos_id] = -numpy.inf
            if prefix is not None and k < len(prefix):
                # Force the k-th word of every hypothesis to prefix[k].
                log_probs[:, :] = -numpy.inf
                log_probs[:, prefix[k]] = 0.
            # Find the best options by calling argpartition of flatten array
            next_costs = numpy.array(costs)[:, None] - log_probs
            flat_next_costs = next_costs.flatten()
            best_costs_indices = argpartition(
                flat_next_costs.flatten(),
                n_samples)[:n_samples]
            # Decypher flatten indices (row = source hypothesis, col = word).
            voc_size = log_probs.shape[1]
            trans_indices = best_costs_indices / voc_size
            word_indices = best_costs_indices % voc_size
            costs = flat_next_costs[best_costs_indices]
            # Form a beam for the next iteration
            new_trans = [[]] * n_samples
            new_costs = numpy.zeros(n_samples)
            new_states = [numpy.zeros((n_samples, dim), dtype="float32") for level
                          in range(num_levels)]
            inputs = numpy.zeros(n_samples, dtype="int64")
            for i, (orig_idx, next_word, next_cost) in enumerate(
                    zip(trans_indices, word_indices, costs)):
                new_trans[i] = trans[orig_idx] + [next_word]
                new_costs[i] = next_cost
                for level in range(num_levels):
                    new_states[level][i] = states[level][orig_idx]
                inputs[i] = next_word
            new_states = self.comp_next_states(c, k, inputs, *new_states)
            # Filter the sequences that end with end-of-sequence character
            trans = []
            costs = []
            indices = []
            for i in range(n_samples):
                if new_trans[i][-1] != self.enc_dec.state['null_sym_target']:
                    trans.append(new_trans[i])
                    costs.append(new_costs[i])
                    indices.append(i)
                else:
                    # Hypothesis finished: move it out of the beam and
                    # shrink the beam accordingly.
                    n_samples -= 1
                    fin_trans.append(new_trans[i])
                    fin_costs.append(new_costs[i])
            states = map(lambda x: x[indices], new_states)
        # Dirty tricks to obtain any translation: retry without the UNK
        # restriction first, then with a doubled beam, before giving up.
        if not len(fin_trans):
            if ignore_unk:
                logger.warning("Did not manage without UNK")
                return self.search(seq, n_samples, False, minlen)
            elif n_samples < 500:
                logger.warning("Still no translations: try beam size {}".format(n_samples * 2))
                return self.search(seq, n_samples * 2, False, minlen)
            else:
                logger.error("Translation failed")
        fin_trans = numpy.array(fin_trans)[numpy.argsort(fin_costs)]
        fin_costs = numpy.array(sorted(fin_costs))
        return fin_trans, fin_costs
def indices_to_words(i2w, seq):
    """Map word indices to tokens via ``i2w``, truncating at the '<eol>' sentinel."""
    sen = []
    for k in xrange(len(seq)):
        if i2w[seq[k]] == '<eol>':
            break
        sen.append(i2w[seq[k]])
    return sen
def sample(lm_model, seq, n_samples, prefix=None,
           sampler=None, beam_search=None,
           ignore_unk=False, normalize=False,
           alpha=1, verbose=False):
    """
    Produce translations of ``seq`` either by beam search or by sampling.

    Exactly one of ``sampler`` / ``beam_search`` must be provided.

    :return: (sentences, costs, raw index sequences or None)
    :raises Exception: when neither decoder is given
    """
    if beam_search:
        sentences = []
        # minlen uses Python 2 integer division: at least half the source length.
        trans, costs = beam_search.search(seq, n_samples, prefix=prefix,
                                          ignore_unk=ignore_unk, minlen=len(seq) / 2, verbose=verbose)
        if normalize:
            # Length-normalize costs so long hypotheses are not penalized.
            counts = [len(s) for s in trans]
            costs = [co / cn for co, cn in zip(costs, counts)]
        for i in range(len(trans)):
            sen = indices_to_words(lm_model.target_language.indx_word, trans[i])
            sentences.append(" ".join(sen))
        for i in range(len(costs)):
            if verbose:
                logger.log(2,"{}: {}".format(costs[i], sentences[i]))
        return sentences, costs, trans
    elif sampler:
        sentences = []
        all_probs = []
        costs = []
        # Draw n_samples sequences of at most 3*(len(seq)-1) words.
        values, cond_probs = sampler(n_samples, 3 * (len(seq) - 1), alpha, seq)
        for sidx in xrange(n_samples):
            sen = []
            for k in xrange(values.shape[0]):
                if lm_model.target_language.indx_word[values[k, sidx]] == '<eol>':
                    break
                sen.append(lm_model.target_language.indx_word[values[k, sidx]])
            sentences.append(" ".join(sen))
            probs = cond_probs[:, sidx]
            # Keep only the probabilities up to (and including) the <eol>.
            probs = numpy.array(cond_probs[:len(sen) + 1, sidx])
            all_probs.append(numpy.exp(-probs))
            costs.append(-numpy.sum(probs))
        if normalize:
            counts = [len(s.strip().split(" ")) for s in sentences]
            costs = [co / cn for co, cn in zip(costs, counts)]
        sprobs = numpy.argsort(costs)
        if verbose:
            for pidx in sprobs:
                logger.log(2, "Hypotheses {}: {} {} {}\n".format(pidx, -costs[pidx], all_probs[pidx], sentences[pidx]))
        return sentences, costs, None
    else:
        raise Exception("I don't know what to do")
def parse_args():
    """Build and parse the command-line arguments for the translation script."""
    parser = argparse.ArgumentParser(
        # BUG FIX: user-facing description had typos
        # ("Sample (of find with beam-serch)").
        "Sample (or find with beam-search) translations from a translation model")
    parser.add_argument("--state",
                        required=True, help="State to use")
    parser.add_argument("--beam-search",
                        # BUG FIX: this flag is store_true; the old help text
                        # wrongly described it as the beam size.
                        action="store_true", help="Turn on beam-search")
    parser.add_argument("--beam-size",
                        type=int, help="Beam size")
    parser.add_argument("--ignore-unk",
                        default=False, action="store_true",
                        help="Ignore unknown words")
    parser.add_argument("--source",
                        help="File of source sentences")
    parser.add_argument("--trans",
                        help="File to save translations in")
    parser.add_argument("--normalize",
                        action="store_true", default=False,
                        help="Normalize log-prob with the word count")
    parser.add_argument("--verbose",
                        action="store_true", default=False,
                        help="Be verbose")
    parser.add_argument("model_path",
                        help="Path to the model")
    parser.add_argument("--interactive",
                        default=False, action="store_true",
                        help="Interactive post-editing?")
    parser.add_argument("--references",
                        help="Reference sentence (for computing WSR)")
    parser.add_argument("--save-original",
                        default=False, action="store_true",
                        help="Interactive post-editing?")
    parser.add_argument("--save-original-to",
                        help="Save original hypotheses to")
    parser.add_argument("changes",
                        nargs="?", default="",
                        help="Changes to state")
    return parser.parse_args()
def main():
    """
    Entry point: load the model state, then either batch-translate a source
    file (with automatic or interactive post-editing) or run an interactive
    prompt loop.  Python 2 code (print statements, raw_input, cPickle).
    """
    args = parse_args()

    # --- Load and patch the model state ---------------------------------
    state = prototype_phrase_state()
    with open(args.state) as src:
        state.update(cPickle.load(src))
    # NOTE(review): eval on a CLI argument — only safe for trusted users.
    state.update(eval("dict({})".format(args.changes)))
    logging.basicConfig(level=getattr(logging, state['level']),
                        format=" %(asctime)s: %(name)s: %(levelname)s: %(message)s")
    if args.verbose:
        logger.setLevel(level=logging.DEBUG)
        logger.debug("I'm being verbose!")
    else:
        logger.setLevel(level=logging.INFO)

    # --- Build the encoder-decoder and load its weights -----------------
    rng = numpy.random.RandomState(state['seed'])
    enc_dec = RNNEncoderDecoder(state, rng, skip_init=True)
    enc_dec.build()
    lm_model = enc_dec.create_lm_model()
    lm_model.load(args.model_path)
    indx_word = cPickle.load(open(state['word_indx'], 'rb'))

    # Choose decoder: beam search or many-sample sampler.
    sampler = None
    beam_search = None
    if args.beam_search:
        beam_search = BeamSearch(enc_dec)
        beam_search.compile()
    else:
        sampler = enc_dec.create_sampler(many_samples=True)

    # Vocabulary dictionaries: index->word (source) and word->index (target).
    idict_src = cPickle.load(open(state['indx_word'], 'r'))
    idict_trg = cPickle.load(open(state['word_indx_trgt'], 'r'))
    unk_id = state['unk_sym_target']

    if args.source and args.trans:
        # Actually only beam search is currently supported here
        assert beam_search
        assert args.beam_size
        try:
            fsrc = open(args.source, 'r')
            ftrans = open(args.trans, 'w')
            logger.info("Storing corrected hypotheses into: %s" % str(args.trans))
            if args.save_original:
                logger.info("Storing original hypotheses into: %s" % str(args.save_original_to))
                ftrans_ori = open(args.save_original_to, 'w')
            if not args.interactive:
                assert args.references is not None, "Automatic mode requires a reference file!"
                ftrg = open(args.references, 'r')
                target_lines = ftrg.read().split('\n')
                if target_lines[-1] == '':
                    target_lines = target_lines[:-1]
            start_time = time.time()
            n_samples = args.beam_size
            total_cost = 0.0
            logging.info("Beam size: {}".format(n_samples))
            total_errors = 0
            total_words = 0
            if args.interactive:
                # --- Interactive post-editing: a human picks the word to
                # correct after each hypothesis. -------------------------
                for n_line, line in enumerate(fsrc):
                    errors_sentence = 0
                    index_prefix = None
                    seqin = line.strip()
                    seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
                    hypothesis_number = 0
                    correct_word = -1
                    while correct_word != 0:
                        trans, costs, _ = sample(lm_model, seq, n_samples, prefix=index_prefix, sampler=sampler,
                                                 beam_search=beam_search, ignore_unk=args.ignore_unk,
                                                 normalize=args.normalize, verbose=args.verbose)
                        best = numpy.argmin(costs)
                        hypothesis = trans[best].split()
                        print "Sentence %d. Hypothesis %d: %s" % (n_line, hypothesis_number, " ".join(hypothesis))
                        correct_word = int(raw_input('Select word to correct (1 - %d).'
                                                     ' Word 0 means that the sentence is correct: ' % len(hypothesis)))
                        if correct_word == 0:
                            # NOTE(review): writes the list repr, unlike the
                            # " ".join used elsewhere — confirm intended.
                            print >> ftrans, hypothesis
                        else:
                            errors_sentence += 1
                            hypothesis_number += 1
                            new_word = raw_input('Substitute %s by: ' % hypothesis[correct_word - 1])
                            prefix = hypothesis[:correct_word - 1] + [new_word]
                            print "New prefix: %s" % (" ".join(prefix))
                            index_prefix = map(lambda x: idict_trg[x], prefix)
            else:
                # --- Automatic mode: simulate post-editing against a
                # reference translation and count word-stroke errors. ----
                for n_line, line in enumerate(fsrc):
                    errors_sentence = 0
                    index_prefix = None
                    seqin = line.strip()
                    seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
                    if args.verbose:
                        logger.debug("\n \n Processing sentence %d" % (n_line + 1))
                        logger.debug("Source: %s" % line[:-1])
                        logger.debug("Desired translation: %s\n" % target_lines[n_line])
                    reference = target_lines[n_line].split()
                    checked_index = 0
                    unk_words = []
                    unk_indices = []
                    first_hypo = True
                    prefix = None
                    while checked_index < len(reference):
                        trans, costs, _ = sample(lm_model, seq, n_samples, prefix=index_prefix, sampler=sampler,
                                                 beam_search=beam_search, ignore_unk=args.ignore_unk,
                                                 normalize=args.normalize, verbose=args.verbose)
                        best = numpy.argmin(costs)
                        hypothesis = trans[best].split()
                        if args.verbose:
                            if first_hypo:
                                logger.debug("Hypothesis %d: %s" % (errors_sentence, " ".join(hypothesis)))
                            else:
                                logger.debug("\t prefix : %s" % (" ".join(prefix)))
                                logger.debug("\t new hyp: %s" % (" ".join(hypothesis)))
                        if args.save_original and first_hypo:
                            print >> ftrans_ori, " ".join(hypothesis)
                        first_hypo = False
                        if len(unk_indices) > 0:  # If we added some UNK word
                            if len(hypothesis) < len(unk_indices):  # The full hypothesis will be made up UNK words:
                                for i, index in enumerate(range(0, len(hypothesis))):
                                    hypothesis[index] = unk_words[unk_indices[i]]
                                for ii in range(i+1, len(unk_words)):
                                    hypothesis.append(unk_words[ii])
                            else:  # We put each unknown word in the corresponding gap
                                for i, index in enumerate(unk_indices):
                                    if index < len(hypothesis):
                                        hypothesis[index] = unk_words[i]
                                    else:
                                        hypothesis.append(unk_words[i])
                        while checked_index < len(reference):  # We check all words in the reference
                            if checked_index >= len(hypothesis):
                                # Hypothesis too short: append the next
                                # reference word and re-decode.
                                errors_sentence += 1
                                new_word = reference[checked_index]
                                prefix = hypothesis + [new_word]
                                index_prefix = map(lambda x: idict_trg[x] if idict_trg.get(x) is not None
                                                   else unk_id, prefix)
                                if idict_trg.get(new_word) is None:
                                    unk_words.append(new_word)
                                    unk_indices.append(checked_index)
                                logger.debug('Error case 0! ->Add new word " % s" to the end of the hypothesis. '
                                             'Errors: %d' % (new_word, errors_sentence))
                                break
                            elif hypothesis[checked_index] != reference[checked_index]:
                                # Mismatch: substitute the reference word and
                                # re-decode with the corrected prefix.
                                correct_prefix = checked_index
                                errors_sentence += 1
                                new_word = reference[checked_index]
                                prefix = hypothesis[:correct_prefix] + [new_word]
                                logger.debug('Error case 1! -> Substitute word " % s" in hypothesis by word " % s".'
                                             ' Errors: %d' % (hypothesis[checked_index], new_word, errors_sentence))
                                index_prefix = map(lambda x: idict_trg[x] if idict_trg.get(x) is not None
                                                   else unk_id, prefix)
                                if idict_trg.get(new_word) is None:
                                    if checked_index not in unk_indices:
                                        unk_words.append(new_word)
                                        unk_indices.append(checked_index)
                                break
                            else:
                                # No errors
                                checked_index += 1
                    if len(reference) < len(hypothesis):
                        # Over-long hypothesis counts as one more correction.
                        hypothesis = hypothesis[:len(reference)]
                        errors_sentence += 1
                        logger.debug("Error case 3! -> Cut hypothesis. Errors: %d" % errors_sentence)
                    total_cost += costs[best]
                    total_errors += errors_sentence
                    total_words += len(hypothesis)
                    logger.debug("Final hypotesis: %s" % " ".join(hypothesis))
                    print >> ftrans, " ".join(hypothesis)
                    if (n_line + 1) % 50 == 0:
                        ftrans.flush()
                        if args.save_original:
                            ftrans_ori.flush()
                        logger.info("Current speed is {} per sentence".format((time.time() - start_time) / (n_line + 1)))
                        logger.info("Current WSR is: %f" % (float(total_errors) / total_words))
            print "Total number of errors:", total_errors
            print "WSR: %f" % (float(total_errors) / total_words)
            print "Total cost of the translations: {}".format(total_cost)
            fsrc.close()
            ftrans.close()
            if args.save_original:
                ftrans_ori.close()
        except KeyboardInterrupt:
            print 'Interrupted!'
            print "Total number of corrections (up to now):", total_errors
            print "WSR: %f" % ((float(total_errors) / total_words))
            sys.exit(0)
        except ValueError:
            # NOTE(review): silently swallows ValueError from the whole
            # batch loop — confirm this is intentional.
            pass
    else:
        # --- Interactive prompt loop: translate sentences typed by the user.
        while True:
            try:
                seqin = raw_input('Input Sequence: ')
                n_samples = int(raw_input('How many samples? '))
                alpha = None
                if not args.beam_search:
                    alpha = float(raw_input('Inverse Temperature? '))
                seq, parsed_in = parse_input(state, indx_word, seqin, idx2word=idict_src)
                print "Parsed Input:", parsed_in
            except Exception:
                print "Exception while parsing your input:"
                traceback.print_exc()
                continue
            sample(lm_model, seq, n_samples, sampler=sampler,
                   beam_search=beam_search,
                   ignore_unk=args.ignore_unk, normalize=args.normalize,
                   alpha=alpha, verbose=True)
| StarcoderdataPython |
3389595 | <reponame>TSedlar/dusk-dotfiles
import math
# ZIP code used for the weather lookup.
zip_code = 85224

# Panel colors: the outer panel is transparent; widgets draw on a solid
# "real" background color.
panel_bg = 'transparent'
panel_border = 'none'
real_panel_bg = '#282936' # '#2c3e50'
icon_color = '#e6e6e6'

# Shared CSS for all text labels (FontAwesome supplies the icon glyphs).
text_css = {
    'css': {
        'color': icon_color,
        'background-color': real_panel_bg,
        'font-size': '14px',
        'font-family': 'Hack, FontAwesome'
    }
}

# Keys under which the update_* functions store values in `data.props`.
key_date_text = 'date_text'
key_weather_temp = 'weather_temp'
key_cpu_percent = 'cpu_percent'
key_cpu_temp = 'cpu_temp'
key_mem_used = 'mem_used'
key_network_ssid = 'network_ssid'
key_volume = 'volume'
key_battery_cap = 'battery_cap'
key_battery_state = 'battery_state'
def bounds():
    """Panel geometry: x/y position and width/height in pixels."""
    return dict(x=65, y=15, w=1791, h=34)
def render_loop_delay():
    """Milliseconds between panel re-renders."""
    delay_ms = 1000
    return delay_ms
def init_prop_updaters():
    """Pairs of (updater function, poll interval in seconds) run by the host."""
    return [
        (update_cpu, 5),
        (update_mem, 5),
        (update_battery, 30),
        (update_weather, 60 * 10),
        (update_network, 30),
        (update_volume, 1),
    ]
def config(data):
    """Build the panel layout: logo | cpu/mem | time/weather | net/vol/batt | power."""
    data.ui.set_bg(data.panel, panel_bg)
    data.ui.set_border(data.panel, panel_border)
    render_logo(data)
    data.layout.addStretch(1)  # flexible spacer between widget groups
    render_cpu(data)
    render_mem(data)
    data.layout.addStretch(1)
    render_time(data)
    render_weather(data)
    data.layout.addStretch(1)
    render_network(data)
    render_volume(data)
    render_battery(data)
    data.layout.addStretch(1)
    render_power(data)
def render_logo(data):
    """Draw the distro logo, recolored to the shared icon color."""
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    lbl = data.ui.add_image(data.layout, __file__, '../../res/ubuntu-logo.svg', 10)
    data.ui.recolor_pixmap(lbl.pixmap(), icon_color)
    data.ui.set_bg(lbl, real_panel_bg)
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)

def render_cpu(data):
    """Show CPU usage percent and temperature (values filled in by update_cpu)."""
    # '*' placeholders are shown until the first update_cpu run completes.
    cpu_percent = data.props.get(key_cpu_percent, '*')
    cpu_temp = data.props.get(key_cpu_temp, '*')
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, '', text_css) # cpu icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, ' %s%% ' % (cpu_percent), text_css)
    data.ui.add_slant(data.layout, 'rd', real_panel_bg)
    data.ui.add_slant(data.layout, 'lui', real_panel_bg)
    data.ui.add_center_label(data.layout, '', text_css) # temp icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'lu', real_panel_bg)
    data.ui.add_center_label(data.layout, ' %s° C ' % (cpu_temp), text_css)
    data.ui.add_slant(data.layout, 'rd', real_panel_bg)

def render_mem(data):
    """Show used memory in MB (value filled in by update_mem)."""
    mem = data.props.get(key_mem_used, '*')
    data.ui.add_slant(data.layout, 'lui', real_panel_bg)
    data.ui.add_center_label(data.layout, '', text_css) # mem icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, ' %sMB ' % (mem), text_css)
    data.ui.add_slant(data.layout, 'rd', real_panel_bg)
def render_time(data):
    """Show the date (hover reveals the weekday) and the current time."""
    _date = data.tools.time_fmt('%m/%d/%Y')
    _time = data.tools.time_fmt('%I:%M %p')
    date_txt = ' %s ' % (_date)

    def date_hov_txt():
        # Hover text: the full weekday name.
        return ' %s ' % (data.tools.time_fmt('%A'))

    data.ui.add_slant(data.layout, 'lui', real_panel_bg)
    data.ui.add_center_label(data.layout, '', text_css) # calendar icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    date_label = data.ui.add_center_label(data.layout, None, text_css)
    data.ui.add_label_hover(date_label, date_txt, date_hov_txt,
                            data.props, key_date_text)
    data.ui.add_slant(data.layout, 'rd', real_panel_bg)
    data.ui.add_slant(data.layout, 'lui', real_panel_bg)
    data.ui.add_center_label(data.layout, '', text_css) # clock icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, ' %s ' % (_time), text_css)
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)

def render_weather(data):
    """Show the outdoor temperature (value filled in by update_weather)."""
    temp = data.props.get(key_weather_temp, '*')
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, '', text_css) # cloud icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, ' %s° F ' % (temp), text_css)
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
def render_network(data):
    """Show the WiFi SSID (value filled in by update_network)."""
    ssid = data.props.get(key_network_ssid, '?')
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, '', text_css) # wifi icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, ' %s ' % (ssid), text_css)
    data.ui.add_slant(data.layout, 'rd', real_panel_bg)

def render_volume(data):
    """Show the master volume level (value filled in by update_volume)."""
    volume = data.props.get(key_volume, '0%')
    data.ui.add_slant(data.layout, 'lui', real_panel_bg)
    data.ui.add_center_label(data.layout, '', text_css) # volume icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, ' %s ' % (volume), text_css)
    data.ui.add_slant(data.layout, 'rd', real_panel_bg)

def render_battery(data):
    """Show a battery icon, capacity percentage and a charging bolt."""
    cap = data.props.get(key_battery_cap, -1)
    state = data.props.get(key_battery_state, 'Invalid')
    # 0% 1-25% 26-50% 51-75% 76-100%
    battery_icons = ['', '', '', '', '']
    # ceil(cap / 25) maps 0 -> icon 0, 1-25 -> 1, ... 76-100 -> 4; the -1
    # sentinel (battery info unavailable) also maps to icon 0.
    battery_icon = battery_icons[int(math.ceil(float(cap) / 25))]
    data.ui.add_slant(data.layout, 'lui', real_panel_bg)
    data.ui.add_center_label(data.layout, battery_icon, text_css) # battery icon glyph
    data.ui.add_slant(data.layout, 'ru', real_panel_bg)
    data.ui.add_slant(data.layout, 'ld', real_panel_bg)
    data.ui.add_center_label(data.layout, '%s%%' % (cap), text_css)
    data.ui.add_slant(data.layout, 'rd', real_panel_bg)
    if state == 'Charging':
        data.ui.add_slant(data.layout, 'lui', real_panel_bg)
        data.ui.add_center_label(data.layout, '', text_css) # bolt icon glyph
        data.ui.add_slant(data.layout, 'rd', real_panel_bg)

def render_power(data):
    """Show a power button; clicking it logs the current user out via pkill."""
    data.ui.add_slant(data.layout, 'lui', real_panel_bg)
    lbl = data.ui.add_center_label(data.layout, '  ', text_css) # power icon glyph
    data.ui.add_click_event(lbl, lambda _: data.tools.term('pkill -u $USER'))
    data.ui.add_slant(data.layout, 'rd', real_panel_bg)
# Update functions below
def update_cpu(tools, props):
    """Sample CPU usage (via vmstat) and temperature (thermal_zone0) into props."""
    cpu_percent = tools.term("vmstat 1 2 | tail -1 | awk '{print 100-$15}'")
    temp_str = tools.term('cat /sys/class/thermal/thermal_zone0/temp')
    # Sysfs reports millidegrees; -1000 sentinel yields -1 when the file is empty.
    cpu_temp = int((int(temp_str) if len(temp_str) else -1000) / 1000)
    props[key_cpu_percent] = cpu_percent
    props[key_cpu_temp] = cpu_temp

def update_mem(tools, props):
    """Read used memory in MB from `free` into props."""
    mem = tools.term("free -m | grep 'Mem:' | awk '{print $6}'")
    props[key_mem_used] = mem

def update_weather(tools, props):
    """Fetch the wind-chill temperature for `zip_code` via Yahoo YQL into props."""
    # NOTE(review): the Yahoo YQL weather API has been retired — this
    # endpoint likely no longer responds; consider a replacement service.
    yql_api = 'https://query.yahooapis.com/v1/public/yql?'
    query = 'q=select wind.chill from weather.forecast where woeid in ' \
            '(select woeid from geo.places(1) where text="%s")&format=json'
    query_url = yql_api + (query % (zip_code)).replace(' ', '%20')
    json = tools.load_json(tools.term('curl "%s"' % (query_url)))
    props[key_weather_temp] = json['query']['results']['channel']['wind']['chill']

def update_network(tools, props):
    """Read the current WiFi SSID via iwgetid into props."""
    ssid = tools.term("iwgetid -r")
    props[key_network_ssid] = ssid

def update_volume(tools, props):
    """Read the ALSA master volume (e.g. '42%') into props."""
    vol = tools.term("amixer get Master | grep % | awk '{print $4}'" +
                     " | sed -e 's/\[//' -e 's/\]//'")
    props[key_volume] = vol

def update_battery(tools, props):
    """Read battery capacity and charge state from sysfs into props."""
    cap_str = tools.term('cat /sys/class/power_supply/BAT0/capacity')
    # -1 sentinel when the capacity file is empty/unavailable.
    cap = int(cap_str) if len(cap_str) else -1
    state = tools.term('cat /sys/class/power_supply/BAT0/status')
    props[key_battery_cap] = cap
    props[key_battery_state] = state
| StarcoderdataPython |
3225373 | # -*- coding: utf-8 -*-
# The dos-azul-lambda request handling stack is generally structured like so:
#
# /\ * Endpoint handlers, named for the DOS operation converted to
# /__\ snake case (e.g. list_data_bundles).
# / \ * ElasticSearch helper functions that implement common query types
# /______\ such as matching on a certain field, with names matching `azul_*`
# / \ * :func:`make_es_request`, and :func:`es_query`, which make the
# /__________\ actual ElasticSearch requests using :mod:`requests`
#
# Error catching should be handled as follows:
# * Functions that return :class:`~chalice.Response` objects should raise
# Chalice exceptions where appropriate. Chalice exceptions will halt
# control flow and return a response with an appropriate error code and
# a nice message.
# * Functions that don't return :class:`~chalice.Response` objects should
# raise builtin exceptions where appropriate. Those exceptions should be
# caught by the aforementioned and either ignored or replaced with Chalice
# exceptions.
# * Endpoint handlers should raise exceptions consistent with the DOS schema.
# * Between all of this, exception logging should occur at the lowest level,
# next to where an exception is raised. This generally means
# :func:`make_es_request` and :func:`es_query`.
import datetime
import json
import logging
import os
from chalice import Chalice, Response, BadRequestError, UnauthorizedError, \
NotFoundError, ChaliceViewError
from boto.connection import AWSAuthConnection
import ga4gh.dos.client
import ga4gh.dos.schema
import pytz
# Module-level logger for the Lambda handler.
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('dos-azul-lambda')

# We only need the client for the models, so we can provide any URL
dos_client = ga4gh.dos.client.Client(url='https://example.com/abc', local=True)
def model(model_name, **kwargs):
    """Instantiate the DOS schema model class named ``model_name`` with the given fields."""
    model_cls = dos_client.models.get_model(model_name)
    return model_cls(**kwargs)
def parse_azul_date(azul_date):
    """
    Parse one of Azul's (inconsistently formatted) date strings.

    Azul stores timestamps both with and without colons and with an
    optional trailing 'Z', e.g. '2018-09-20T07:38:26.353Z' or
    '2018-09-20T073826.353'.

    :param str azul_date: timestamp string from an Azul document
    :rtype: datetime.datetime
    :return: a timezone-aware datetime in UTC
    """
    # Normalize: strip any trailing 'Z' and all colons, then re-append 'Z'
    # so every stored variant matches a single strptime format.
    date = azul_date.rstrip('Z').replace(':', '') + 'Z'
    date = datetime.datetime.strptime(date, '%Y-%m-%dT%H%M%S.%fZ')
    # Use the stdlib UTC tzinfo: equal-instant comparisons with
    # pytz.utc-aware datetimes still hold, and this helper no longer
    # depends on pytz.
    return date.replace(tzinfo=datetime.timezone.utc)
def azul_to_obj(result):
    """
    Convert an Azul ElasticSearch hit into a DOS data object.

    :param result: the ElasticSearch result dictionary
    :rtype: DataObject
    """
    source = result['_source']
    # Azul keeps a single modification timestamp; DOS wants both fields.
    modified = parse_azul_date(source['lastModified'])
    return model(
        'DataObject',
        id=source['file_id'],
        name=source['title'],
        size=str(source.get('fileSize', '')),
        created=modified,
        updated=modified,
        version=source['file_version'],
        checksums=[model('Checksum', checksum=source['fileMd5sum'], type='md5')],
        urls=[model('URL', url=u) for u in source['urls']],
        aliases=source['aliases'],
    )
def obj_to_azul(data_object):
    """
    Convert a DOS data object (as a dict) into an Azul document.

    :param dict data_object: DOS DataObject dictionary
    :rtype: dict
    """
    # `updated` is optional but `created` is not; normalize the chosen
    # timestamp into Azul's colon-free format.
    date = data_object.get('updated', data_object['created']).replace(':', '')
    date = datetime.datetime.strptime(date, '%Y-%m-%dT%H%M%S.%f+0000')
    # The parsed value is already naive UTC and strftime below only emits
    # the naive fields, so the original pytz tzinfo round-trip was dead
    # code and has been removed.
    date = date.strftime('%Y-%m-%dT%H%M%S.%fZ')
    checksum = data_object['checksums'][0]
    azul = {
        'file_id': data_object['id'],
        'title': data_object.get('name', ''),  # name is optional
        'fileSize': data_object.get('size', ''),
        'lastModified': date,
        'file_version': data_object.get('version'),
        # Azul only stores an md5 field; other checksum types map to ''.
        'fileMd5sum': checksum['checksum'] if checksum['type'] == 'md5' else '',
        'urls': [url['url'] for url in data_object['urls']],
        'aliases': data_object.get('aliases'),  # aliases are optional
    }
    return azul
def azul_to_bdl(result):
    """
    Convert an Azul ElasticSearch hit into a DOS data bundle.

    :param result: the ElasticSearch result dictionary
    :return: DataBundle
    """
    azul = result['_source']
    data_bundle = model(
        model_name='DataBundle',
        id=azul['id'],
        data_object_ids=azul['data_object_ids'],
        created=parse_azul_date(azul['created']),
        updated=parse_azul_date(azul['updated']),
        version=azul['version'],
        description=azul.get('description', ''),  # optional field
        aliases=azul.get('aliases', ''),  # optional field
    )
    data_bundle.checksums = []
    # Azul stores each checksum as a single 'value:type' string; split on
    # the first ':' to separate the value from the (possibly colon-bearing)
    # type suffix.
    for checksum in azul['checksums']:
        checksum, checksum_type = checksum.split(':', 1)
        data_bundle.checksums.append(model('Checksum', checksum=checksum, type=checksum_type))
    return data_bundle
def check_auth():
    """
    Execute during a request to check the ``access_token`` key in the
    request headers.

    :return: True if ``access_token`` is valid, False otherwise
    :rtype: bool
    """
    provided = app.current_request.headers.get('access_token', None)
    return provided == access_token
class ESConnection(AWSAuthConnection):
    """AWS connection that signs requests for the Amazon ES ("es") service."""
    def __init__(self, region, **kwargs):
        """
        :param region: AWS region of the ElasticSearch domain
        :param kwargs: forwarded unchanged to :class:`AWSAuthConnection`
        """
        super(ESConnection, self).__init__(**kwargs)
        self._set_auth_region_name(region)
        self._set_auth_service_name('es')
    def _required_auth_capability(self):
        # Request AWS Signature Version 4 signing.
        return ['hmac-v4']
# Deployment configuration; every value below can be overridden through
# environment variables.
DEFAULT_REGION = 'us-west-2'
# SECURITY NOTE(review): a hardcoded default token means deployments that
# forget to set ACCESS_KEY are "protected" by a publicly visible secret.
# Consider requiring the environment variable instead.
DEFAULT_ACCESS_TOKEN = 'f4ce9d3d23f4ac9dfdc3c825608dc660'
# ElasticSearch index names for data objects and data bundles.
INDEXES = {
    'data_obj': os.environ.get('DATA_OBJ_INDEX', 'fb_index'),
    'data_bdl': os.environ.get('DATA_BDL_INDEX', 'db_index'),
}
# ElasticSearch document types within each index.
DOCTYPES = {
    'data_obj': os.environ.get('DATA_OBJ_DOCTYPE', 'meta'),
    'data_bdl': os.environ.get('DATA_BDL_DOCTYPE', 'databundle'),
}
try:
    es_host = os.environ['ES_HOST']
except KeyError:
    raise RuntimeError("You must specify the domain name of your ElasticSearch"
                       " instance with the ES_HOST environment variable.")
es_region = os.environ.get('ES_REGION', DEFAULT_REGION)
# NOTE(review): the env var is ACCESS_KEY while the header checked in
# check_auth() is `access_token` — confirm this mismatch is intentional.
access_token = os.environ.get('ACCESS_KEY', DEFAULT_ACCESS_TOKEN)
# is_secure=False sends requests to the ES endpoint over plain HTTP.
client = ESConnection(region=es_region, host=es_host, is_secure=False)
app = Chalice(app_name='dos-azul-lambda')
# Debug mode only when the DEBUG env var is exactly the string 'True'.
app.debug = os.environ.get('DEBUG', False) == 'True'
base_path = '/ga4gh/dos/v1'
@app.route('/', cors=True)
def index():
    """Return the raw body of the ElasticSearch root endpoint."""
    return make_es_request(method='GET', path='/').read()
@app.route('/test_token', methods=["GET", "POST"], cors=True)
def test_token():
    """
    A convenience endpoint for testing whether an access token
    is active or not. Will return a JSON with a key `authorized`
    and a boolean regarding the key's value.
    """
    authorized = check_auth()
    status = 200 if authorized else 401
    return Response({'authorized': authorized}, status_code=status)
def make_es_request(**kwargs):
    """
    Wrapper around :meth:`ESConnection.make_request` that checks if the
    request was completed successfully.

    :param kwargs: same as arguments to :meth:`ESConnection.make_request`
    :raises RuntimeError: if the request does not return HTTP 200
    """
    request = "%s %s" % (kwargs['method'], kwargs['path'])
    logger.debug(request + " " + kwargs.get('data', ""))
    r = client.make_request(**kwargs)
    if r.status != 200:
        data = json.loads(r.read())
        data = data.get('Message', '') or data.get('reason', '') or repr(data)
        # Fixed typo in the error message ("expcted" -> "expected").
        msg = "%s returned code %d, expected 200: %s" % (request, r.status, data)
        # logger.error, not logger.exception: no exception is being handled
        # here, so exception() would log a misleading "NoneType: None"
        # traceback.
        logger.error(msg)
        raise RuntimeError(msg)
    # If `app.debug=False` (which it is for deployments), an uncaught
    # exception will cause the server to automatically return a 500 response
    # with a nice error message and internally log a traceback.
    return r
def es_query(index, **query):
    """
    Queries the configured ElasticSearch instance and returns the
    results as a list of dictionaries.

    :param query: key-value pairs to insert into the ElasticSearch query
    :param str index: the name of the index to query
    :raises RuntimeError: if the response from the ElasticSearch instance
                          loads successfully but can't be understood by
                          dos-azul-lambda
    :rtype: list
    """
    logger.debug("Querying index %s with query %r" % (index, query))
    result = make_es_request(method='GET', data=json.dumps(query),
                             path='/{index}/_search'.format(index=index))
    response = json.loads(result.read())
    try:
        hits = response['hits']['hits']
    except KeyError:
        # Bug fix: the original built `msg` as a tuple ("...", ...) instead
        # of a formatted string, and called `query.read()` a second time on
        # an already-consumed response stream (which yields ''). Report the
        # parsed response instead.
        msg = "ElasticSearch returned an unexpected response: %r" % (response,)
        logger.exception(msg)
        raise RuntimeError(msg)
    return hits
def azul_match_field(index, key, val, size=1):
    """
    Wrapper function around :func:`es_query`. Should be used for queries
    where you expect only one result (e.g. GetDataBundle).

    :param str index: the name of the index to query
    :param str key: the key of the field to match against
    :param str val: the value of the field to match against
    :param int size: the amount of results to return
    :raises LookupError: if no results are returned
    :rtype: :class:`AzulDocument`
    """
    term_query = {'bool': {'must': {'term': {key: val}}}}
    hits = es_query(index=index, size=size, query=term_query)
    if not hits:
        # Not logged as an error: requesting a file that does not exist is
        # an expected user-facing condition.
        raise LookupError("Query returned no results")
    return hits[0]
def azul_match_alias(index, alias, from_=None, size=10):
    """
    Wrapper function around :func:`es_query`. By default, this function
    will return more than one result (intended for usage in
    ListDataObjects, etc.)

    :param str index: the name of the index to query
    :param str alias: the alias value to match against ``aliases.keyword``
    :param str from_: page_token (offset into the result set)
    :param int size: the amount of results to return
    :rtype: list
    """
    # Bug fix: `from` is a top-level pagination key of the ElasticSearch
    # search body. The original code inserted it inside the `term` query
    # clause ({'term': ..., 'from': ...}), which is not a valid query and
    # broke alias paging. Build the full body with `from` at the top level,
    # matching how list_data_objects constructs its query.
    body = {'query': {'term': {'aliases.keyword': alias}}, 'size': size}
    if from_:
        body['from'] = from_
    # es_query will raise a RuntimeError if it doesn't understand the ES
    # response. There isn't really any other exception we can check for here.
    return es_query(index, **body)
def azul_get_document(key, val, name, es_index, map_fn, model):
    """
    Queries ElasticSearch for a single document and returns a
    :class:`~chalice.Response` object with the retrieved data. Wrapper
    around :func:`azul_match_field`. Implements lookup functionality used
    in :func:`get_data_object` and :func:`get_data_bundle`.

    :param str key: the key to search for in the given ElasticSearch index
    :param str val: the value to search for in the given ElasticSearch index
    :param str name: the key the document should be returned under
    :param str es_index: the name of the index to query in ElasticSearch
    :param callable map_fn: function mapping the returned Azul document to a
                            DOS format
    :param model: DOS response model
    :raises RuntimeError: if the ElasticSearch response is not understood
    :rtype: :class:`chalice.Response`
    :returns: the retrieved data or the error state
    """
    try:
        data = azul_match_field(index=es_index, key=key, val=val)
        data = map_fn(data)
        # Double check to verify identity
        if data['id'] != val:
            raise LookupError("ID mismatch in results")
    except LookupError:
        # azul_match_field will also raise a LookupError if no results are
        # returned. This isn't really an error, as a user requesting an
        # object that could not be found is generally not unexpected.
        raise NotFoundError("No results found for type %s and ID %s." % (name, val))
    except RuntimeError:
        # es_query will raise a RuntimeError if it doesn't understand the ES
        # response. It is logged in :func:`es_query`
        raise ChaliceViewError("Received an unexpected response from Azul.")
    except Exception:
        # If anything else happens...
        # Bug fix: `map_fn.func_name` is Python 2 only and would raise
        # AttributeError inside this handler on Python 3; use `__name__`.
        logger.exception("Unexpected error attempting to retrieve {name} "
                         "{key}={val} from index {es_index} using transformer"
                         " {fn}".format(name=name, key=key, val=val,
                                        es_index=es_index, fn=map_fn.__name__))
        raise ChaliceViewError("There was a problem communicating with Azul.")
    return Response(model(**{name: data}).marshal(), status_code=200)
@app.route(base_path + '/dataobjects/{data_object_id}', methods=['GET'], cors=True)
def get_data_object(data_object_id):
    """
    Gets a data object by file identifier by making a query against the
    configured data object index and returns the first matching file.

    :param data_object_id: the id of the data object
    :rtype: DataObject
    """
    # Azul stores the DOS id under `file_id`; azul_to_obj maps the hit
    # back into a DOS DataObject before marshalling.
    return azul_get_document(key='file_id', val=data_object_id, name='data_object',
                             map_fn=azul_to_obj, es_index=INDEXES['data_obj'],
                             model=dos_client.models.get_model('GetDataObjectResponse'))
@app.route(base_path + '/databundles/{data_bundle_id}', methods=['GET'], cors=True)
def get_data_bundle(data_bundle_id):
    """
    Gets a data bundle by its identifier by making a query against the
    configured data bundle index. Returns the first matching file.

    :param data_bundle_id: the id of the data bundle
    :rtype: DataBundle
    """
    # Bundles are keyed by `id` (unlike data objects, which use `file_id`).
    return azul_get_document(key='id', val=data_bundle_id, name='data_bundle',
                             map_fn=azul_to_bdl, es_index=INDEXES['data_bdl'],
                             model=dos_client.models.get_model('GetDataBundleResponse'))
@app.route(base_path + '/dataobjects', methods=['GET'], cors=True)
def list_data_objects(**kwargs):
    """
    Page through the data objects index and return data objects,
    respecting an alias or checksum request if it is made.

    :rtype: ListDataObjectsResponse
    """
    params = app.current_request.query_params or {}
    per_page = int(params.get('page_size', 10))
    # Request one extra hit so we can tell whether another page exists.
    query = {'query': {}, 'size': per_page + 1}
    if 'page_token' in params:  # for paging
        query['from'] = params['page_token'] or 0
    # Filterable query parameters and the Azul fields they match against.
    # When several are given, results must match all of them (logical AND).
    filters = (('alias', 'aliases.keyword'),
               ('checksum', 'fileMd5sum.keyword'),
               ('url', 'urls.keyword'))
    if any(name in params for name, _ in filters):
        query['query']['bool'] = {'filter': []}
        # Azul only stores MD5s so there are no results if checksum_type != md5
        if 'checksum_type' in params and params['checksum_type'].lower() != 'md5':
            return {'data_objects': []}
        for name, field in filters:
            if name in params:
                query['query']['bool']['filter'].append(
                    {'term': {field: {'value': params[name]}}})
    else:  # if no query parameters are provided
        query['query']['match_all'] = {}
    results = es_query(index=INDEXES['data_obj'], **query)
    response = model('ListDataObjectsResponse')
    response.data_objects = [azul_to_obj(hit) for hit in results[:per_page]]
    if len(results) > per_page:
        response.next_page_token = str(int(params.get('page_token', 0)) + 1)
    return response.marshal()
@app.route(base_path + '/databundles', methods=['GET'], cors=True)
def list_data_bundles(**kwargs):
    """
    Page through the data bundles index and return data bundles,
    respecting an alias or checksum request if it is made.

    :rtype: ListDataBundlesResponse
    """
    params = app.current_request.query_params or {}
    page_token = params.get('page_token', 0)
    per_page = int(params.get('page_size', 10))
    # Ask for one hit beyond the page size to detect a following page.
    alias = params.get('alias', None)
    if alias:
        results = azul_match_alias(index=INDEXES['data_bdl'], alias=alias,
                                   size=per_page + 1,
                                   from_=page_token if page_token != 0 else None)
    else:
        results = es_query(query={}, index=INDEXES['data_bdl'], size=per_page + 1)
    response = model('ListDataBundlesResponse')
    response.data_bundles = [azul_to_bdl(hit) for hit in results[:per_page]]
    if len(results) > per_page:
        response.next_page_token = str(int(page_token) + 1)
    return response.marshal()
@app.route(base_path + '/dataobjects/{data_object_id}', methods=['PUT'], cors=True)
def update_data_object(data_object_id):
    """
    Updates a data object. The data object must exist.

    :param data_object_id: the id of the data object to update
    """
    # Authentication is verified before doing any work.
    if not check_auth():
        raise UnauthorizedError("You're not authorized to use this service. "
                                "Did you set access_token in the request headers?")
    # The target data object must already exist in the index.
    try:
        existing = azul_match_field(index=INDEXES['data_obj'],
                                    key='file_id', val=data_object_id)
    except LookupError:
        raise NotFoundError("Data object not found.")
    # The request body must carry the replacement data object.
    payload = app.current_request.json_body
    if not payload or not payload.get('data_object', None):
        raise BadRequestError("Please add a data_object to the body of your request.")
    # Everything checks out; issue the ElasticSearch partial update.
    update_path = '/{}/{}/{}/_update'.format(INDEXES['data_obj'],
                                             DOCTYPES['data_obj'],
                                             existing['_id'])
    update_doc = json.dumps({'doc': obj_to_azul(payload['data_object'])})
    make_es_request(method='POST', path=update_path, data=update_doc)
    return model('UpdateDataObjectResponse', data_object_id=data_object_id).marshal()
@app.route('/swagger.json', cors=True)
def swagger():
    """
    An endpoint for returning the Swagger API description.
    """
    spec = ga4gh.dos.schema.from_chalice_routes(app.routes)
    spec['basePath'] = '/api/ga4gh/dos/v1'
    return spec
| StarcoderdataPython |
43157 | <reponame>jasarsoft/examples
import os
import time
# Files and folders to back up, specified in a list.
# Paths containing spaces are wrapped in extra double quotes so the shell
# `zip` command below treats them as a single argument.
# Bug fix: '\x' in '\xinjure' was an invalid \x escape (SyntaxError on
# Python 3); the backslash must be doubled.
izvor = ['C:\\py', '"C:\\Documents and Settings\\xinjure\\Desktop\\"']
# The backup is stored inside this main backup directory.
ciljni_dir = "D:\\Resto"  # remember to change this to a location that suits your machine
# 3. The files are backed up into a zip archive.
# Today's date names the subdirectory inside the main directory.
# Bug fix: the original referenced the undefined name `ciljani_dir`.
danas = ciljni_dir + os.sep + time.strftime("%Y%m%d")
# The current time names the zip archive.
sada = time.strftime("%H%M%S")
# Ask the user for a comment to embed in the archive name.
komentar = input("Prilozie komantera backupa --> ")
# Bug fix: the original checked the undefined name `komantar`.
if len(komentar) == 0:  # check whether a comment was entered
    cilj = danas + os.sep + sada + ".zip"
else:
    cilj = danas + os.sep + sada + "_" + komentar.replace(" ", "_") + ".zip"
# Create the target subdirectory if it does not exist yet.
if not os.path.exists(danas):
    os.makedirs(danas)  # makedirs also creates missing parent directories
    print("Upjesno smo kreirali direktorijum", danas)
# Use the zip command to pack the files into a zip archive.
zip_komanda = "zip -qr {0} {1}".format(cilj, " ".join(izvor))
# Run the backup; os.system returns 0 on success.
if os.system(zip_komanda) == 0:
    print("Uspjeno smo izvrsili backupove u ", cilj)
else:
    print("Backup nije uspjeo!")
| StarcoderdataPython |
1759230 | <reponame>xproj2501x/ecs-python
from enum import Enum
class LOG_LEVEL(Enum):
    """Bit-flag style severity levels used by LogService."""
    NONE = 1
    LOG = 2
    DEBUG = 4
    WARNING = 8
    ERROR = 16
    ALL = 32
class LogService:
    """Buffers log messages whose severity passes a configured threshold."""
    def __init__(self, context, log_level):
        """
        :param context: label identifying the source of the log messages
        :type context: string
        :param log_level: maximum severity to record; either a LOG_LEVEL
                          member or its integer value
        :type log_level: int
        """
        self._context = context
        self._log_level = log_level
        self._data = []  # recorded messages, in arrival order
    def log(self, message):
        """Record *message* at LOG severity.

        :type message: string
        """
        self._write(LOG_LEVEL.LOG, message)
    def debug(self, message):
        """Record *message* at DEBUG severity.

        :type message: string
        """
        self._write(LOG_LEVEL.DEBUG, message)
    def warning(self, message):
        """Record *message* at WARNING severity.

        :type message: string
        """
        self._write(LOG_LEVEL.WARNING, message)
    def error(self, message):
        """Record *message* at ERROR severity.

        :type message: string
        """
        self._write(LOG_LEVEL.ERROR, message)
    def _write(self, log_level, message):
        """Append *message* if *log_level* passes the configured threshold.

        :param log_level: severity of this message
        :type log_level: LOG_LEVEL
        :param message: text to record
        :type message: string
        """
        # Bug fix: Enum members do not support ordering comparisons, so the
        # original `log_level <= self._log_level` raised TypeError on every
        # call. Compare the underlying integer values instead; getattr lets
        # the threshold be either a LOG_LEVEL member or a plain int.
        threshold = getattr(self._log_level, 'value', self._log_level)
        if log_level.value <= threshold:
            self._data.append(message)
    @staticmethod
    def create(context, level):
        """Factory helper mirroring the constructor."""
        return LogService(context, level)
| StarcoderdataPython |
1764293 | #!/usr/bin/python
import json
import sys
import random
def generate_uuid():
    """Return a pseudo-random UUID-shaped string.

    Only the first 8 hex characters are randomized; the remainder is a
    fixed suffix shared by every generated id.
    """
    prefix = ''.join(random.choice('abcdef1234567890') for _ in range(8))
    return prefix + "-49e5-4c33-afab-9ec90d65faf3"
# Parse the 'source' field of a CWL document, which encodes step and
# argument names. Returns a dict with 'step' and 'arg' keys; for a global
# argument, 'step' is the empty string.
def parse_source(source_str):
    source_str = source_str.strip('#')
    # The step-scoped format is "step.ID"; without a '.', the source is a
    # global argument.
    if '.' not in source_str:
        return {'step': '', 'arg': source_str}
    step, arg = source_str.split('.')[:2]
    return {'step': step, 'arg': arg}
# Given the `type` field of a cwl_dict['inputs'] entry, classify the
# argument as either 'Input file' or 'parameter'.
def Is_Input_file_or_parameter(cwl_param_type):
    def _is_file_array(candidate):
        # e.g. {'type': 'array', 'items': 'File'}
        return (isinstance(candidate, dict)
                and candidate['type'] == 'array'
                and candidate['items'] == 'File')
    if isinstance(cwl_param_type, list):
        # e.g. ['File', 'null'] (SBG) or [{'type': 'array', 'items': 'File'}]
        if 'File' in cwl_param_type or _is_file_array(cwl_param_type[0]):
            return 'Input file'
    elif cwl_param_type == 'File' or _is_file_array(cwl_param_type):
        return 'Input file'
    return 'parameter'  # default unless one of the File forms matched
# Add a workflow output argument and map it to the step output argument
# that produces it. Assumes a global output argument has a single source
# and that this source is the output of some step.
def map_workflow_output_argument_to_step_argument(workflow, source, workflow_argument, step, step_argument):
    mapping = {'workflow_step': step,
               'step_argument_name': step_argument,
               'step_argument_type': 'Output file'}
    workflow['arguments'].append({'workflow_argument_name': workflow_argument,
                                  'argument_type': 'Output file',
                                  'argument_mapping': [mapping]})
# Map a step argument to an existing workflow input argument.
# Assumes the global source already exists in workflow['arguments'].
def map_step_argument_to_workflow_input_argument(workflow, source, step_id, step_argument):
    if 'arguments' not in workflow:
        sys.exit("Error: workflow argument doesn't exist.")
    match_index = -1
    for position, entry in enumerate(workflow['arguments']):
        if entry['workflow_argument_name'] == source['arg']:
            match_index = position
            global_argument_type = entry['argument_type']
    if match_index == -1:
        sys.exit("Error: corresponding workflow argument doesn't exist: {}".format(source['arg']))
    # The step argument inherits the global argument's type (which, being a
    # source, is either 'Input file' or 'parameter').
    workflow['arguments'][match_index]['argument_mapping'] = \
        [{'workflow_step': step_id,
          'step_argument_name': step_argument,
          'step_argument_type': global_argument_type}]
# Add a step argument and map it to another step's argument.
# If the source step argument doesn't exist in the workflow dictionary yet,
# a new (unnamed) entry is created. Assumes the source is not global.
def map_step_argument_to_another_step_argument(workflow, source, step_id, step_argument):
    """
    Bug fixes relative to the original:
    * `source` is a dict (from parse_source), so `source.step`/`source.arg`
      raised AttributeError — use `source['step']`/`source['arg']`.
    * entries store their mapping under 'argument_mapping', not
      'workflow_argument_mapping'.
    * the match must be tested on the mapping element (e2), not the entry.
    * the mapping assignment and `break` sat outside the match test, so the
      first iteration always clobbered `workflow['arguments'][-1]`.
    """
    if 'arguments' not in workflow:
        sys.exit("Error: workflow argument doesn't exist.")
    mapping = [
        {'workflow_step': step_id,  # id of the step
         'step_argument_name': step_argument,
         # Either Input file or parameter; not decidable from workflow CWL.
         'step_argument_type': 'Input file or parameter'},
        {'workflow_step': source['step'],  # id of the source step
         'step_argument_name': source['arg'],
         'step_argument_type': 'Output file or parameter'},
    ]
    # If some entry already maps the source step argument, update it in
    # place and stop.
    for entry in workflow['arguments']:
        for element in entry.get('argument_mapping', []):
            if (element['workflow_step'] == source['step']
                    and element['step_argument_name'] == source['arg']):
                entry['argument_mapping'] = mapping
                return
    # Otherwise the source may appear later in the CWL: record an unnamed,
    # untyped entry holding just the mapping.
    workflow['arguments'].append({'workflow_argument_name': '',
                                  'argument_type': '',
                                  'argument_mapping': mapping})
# function that takes a cwl file and write a workflow insert json file
def parse_cwl(cwlfile, workflow_metadata_json, workflow_name, workflow_description, workflow_type, cwl_url, uuid):
    """
    Parse a JSON-formatted CWL workflow file and write a workflow-insert
    JSON document describing its arguments, argument mappings, and steps.

    :param str cwlfile: path to the input CWL (JSON) file
    :param str workflow_metadata_json: path of the output JSON file
    :param str workflow_name: title recorded in the output
    :param str workflow_description: description recorded in the output
    :param str workflow_type: workflow type recorded in the output
    :param str cwl_url: pointer to the CWL recorded in the output
    :param str uuid: uuid assigned to this workflow record
    """
    # get cwl as a dict
    with open(cwlfile,'r') as f:
        cwl_dict=json.load(f)
    # handle SBG cwl.
    if 'raw' in cwl_dict:
        cwl_dict=cwl_dict['raw'] # 'some' SBG's cwl is one level down under the 'raw' field.
    # initialize dictionary to write to output json file
    workflow={ 'arguments':[],  # this is what we will create.
               "workflow_steps": [],  # this, too.
               "title": workflow_name,
               "description": workflow_description,
               "workflow_type": workflow_type,
               "cwl_pointer": cwl_url,
               "workflow_diagram": '',
               "uuid": uuid }
    # parsing global input files and parameters and storing to the workflow dictionary (can't map to step arguments yet)
    # argument type is either 'Input file' or 'parameter'.
    for x in cwl_dict['inputs']:
        argument_type = Is_Input_file_or_parameter (x['type'])
        workflow['arguments'].append({'workflow_argument_name':x['id'].strip('#'), 'argument_type':argument_type})
    ## parsing global output files and storing to the workflow dictionary and mapping to step arguments
    ## (the mapping (source) information is in the same field in cwl since the global output is usually sourced from a step output )
    for x in cwl_dict['outputs']:
        source = parse_source(x['source'][0])
        map_workflow_output_argument_to_step_argument ( workflow, source, x['id'].strip('#'), source['step'], source['arg'] )
    ## parsing steps (map 1. global output files to step arguments and 2. between arguments between steps that are not globally defined)
    ## fill in 'arguments'
    for x in cwl_dict['steps']:
        for y in x['inputs']:
            if 'source' in y:
                source = parse_source(y['source'][0])
                ## case 1: global argument is the source
                if source['step']=='':
                    map_step_argument_to_workflow_input_argument( workflow, source, x['id'].strip('#'), parse_source(y['id'])['arg'] )
                ## case 2: no global argument (just passing between steps)
                else:
                    map_step_argument_to_another_step_argument( workflow, source, x['id'].strip('#'), parse_source(y['id'])['arg'] )
                ## case 3 (no global argument, no passing between steps) - we assume this doesn't exist.
    ## parsing steps again
    ## fill in workflow_steps.
    for x in cwl_dict['steps']:
        workflow['workflow_steps'].append( { 'step_name': x['id'].strip('#'), 'step': generate_uuid() } ) ## assuming that uuid for step is generated at this point? Or should we retrieve a corresponding step that already exists?
    # Serialize the assembled workflow record, pretty-printed.
    with open(workflow_metadata_json,'w') as fo:
        fo.write ( json.dumps(workflow,indent=4) + "\n")
    #fo.write ( cwl_dict.keys() + "\n")
    #fo.write ( json.dumps(cwl_dict['outputs'],indent=4) + "\n")
if __name__ == "__main__":
    import argparse
    parser = argparse.ArgumentParser(description="temporary cwl parser that creates a workflow insert")
    parser.add_argument('-c','--cwlfile', help='input cwlfile')
    parser.add_argument('-w','--workflow_metadata_json', help='output workflow metadata json file')
    # NOTE(review): the help strings below look copy-pasted from -w and do
    # not describe their options.
    parser.add_argument('-n','--workflow_name', help='output workflow metadata json file')
    parser.add_argument('-d','--workflow_description', help='output workflow metadata json file')
    parser.add_argument('-t','--workflow_type', help='output workflow metadata json file')
    parser.add_argument('-u','--cwl_url', help='output workflow metadata json file')
    args = parser.parse_args()
    # One uuid is generated for the workflow record and passed through.
    uuid= generate_uuid()
    parse_cwl(args.cwlfile, args.workflow_metadata_json, args.workflow_name, args.workflow_description, args.workflow_type, args.cwl_url, uuid )
| StarcoderdataPython |
4831832 | # -*- coding: utf-8 -*-
import os
import argparse
from flask import Flask, render_template, request
from config import get_random_image_from_db
from config import get_latest_image_from_db
from config import get_all_image_from_db
from config import get_one_image_from_db
from config import set_wallpaper
import change_wallpaper
app = Flask(__name__)
# NOTE(review): debug mode must not stay enabled in production deployments.
app.debug = True
@app.route('/')
def hello():
    """Health-check root endpoint."""
    return "Hello!"
@app.route('/robots.txt')
def robots():
    """Serve robots.txt from the template directory."""
    return render_template('robots.txt')
@app.route('/all')
def all():  # NOTE: shadows the builtin `all` at module level
    """List every image by regenerating the wallpaper table partial."""
    rows = get_all_image_from_db()
    # Rewrite the table-body partial that wallpaper.html includes.
    with open('templates/wallpaper.1.html', 'w') as partial:
        for row in rows:
            cells = ' '.join('<td>' + str(field) + '</td>' for field in row)
            partial.write('<tr>' + cells + '</tr>\n')
    return render_template('wallpaper.html')
@app.route('/all/<int:id>')
def one(id):
    """Render the detail table for a single image id."""
    rows = get_one_image_from_db(id)
    # Rewrite the table-body partial that image.html includes.
    with open('templates/image.1.html', 'w') as partial:
        for row in rows:
            cells = ' '.join('<td>' + str(field) + '</td>' for field in row)
            partial.write('<tr>' + cells + '</tr>\n')
    return render_template('image.html', id=id)
@app.route('/info/<int:id>')
def info(id):
    """Show details for one image, serving its file as a static asset."""
    rows = get_one_image_from_db(id)
    image_id, image_name = rows[0][0], rows[0][1]
    # Point the static folder at the image's directory so the template can
    # reference the file by basename.
    app.static_folder = os.path.dirname(image_name)
    return render_template('info.html', image_id=image_id, image_name=image_name,
                           image_url=os.path.basename(image_name),
                           request_path=request.path.strip('/'))
@app.route('/random')
def random():
    """Show details for a randomly chosen image."""
    image_name, image_id = get_random_image_from_db()
    # Serve the chosen image's directory as the static folder.
    app.static_folder = os.path.dirname(image_name)
    return render_template('info.html', image_id=image_id, image_name=image_name,
                           image_url=os.path.basename(image_name),
                           request_path=request.path.strip('/'))
@app.route('/latest')
def latest():
    """Show details for the most recently added image."""
    image_name, image_id = get_latest_image_from_db()
    # Serve the image's directory as the static folder.
    app.static_folder = os.path.dirname(image_name)
    return render_template('info.html', image_id=image_id, image_name=image_name,
                           image_url=os.path.basename(image_name),
                           request_path=request.path.strip('/'))
@app.route('/change')
def change():
    """Pick and apply a new wallpaper, then show its details."""
    image_name, image_id = change_wallpaper.main()
    app.static_folder = os.path.dirname(image_name)
    return render_template('info.html', image_id=image_id, image_name=image_name,
                           image_url=os.path.basename(image_name))
@app.route('/change/<int:id>')
def change_this(id):
    """Set the wallpaper to the image with the given id and show it."""
    rows = get_one_image_from_db(id)
    image_id, image_name = rows[0][0], rows[0][1]
    set_wallpaper(image_name)
    app.static_folder = os.path.dirname(image_name)
    return render_template('info.html', image_id=image_id, image_name=image_name,
                           image_url=os.path.basename(image_name))
if __name__ == '__main__':
    # Bind address and port are configurable from the command line.
    parser = argparse.ArgumentParser()
    parser.add_argument("--port", type=int, default=1027)
    parser.add_argument("--host", type=str, default="127.0.0.1")
    args = parser.parse_args()
    app.run(port=args.port, host=args.host)
| StarcoderdataPython |
39074 | <reponame>trinhcaokhoa/Mebook_hub<filename>api/urls.py
from django.urls import path
from .views import BookView
urlpatterns = [
    # Route /book_api to the class-based BookView API endpoint.
    path('book_api', BookView.as_view()),
] | StarcoderdataPython |
94560 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Dec 21 11:05:24 2017
The oil and sugar separation (pretreatment) section for the baseline lipid cane biorefinery is defined here as System objects. The systems include all streams and units starting from enzyme treatment to purification of the sugar solution and the oil stream.
@author: Yoel
"""
import numpy as np
from biosteam import System, Stream
from biosteam.units import Mixer, EnzymeTreatment, CrushingMill, \
HXutility, RVF, SplitFlash, VibratingScreen, \
MagneticSeparator, Clarifier, MixTank, \
Shredder, ConveyingBelt, Splitter, \
SplitCentrifuge_LLE, Pump, StorageTank
from biosteam.biorefineries.lipidcane.species import pretreatment_species
from biosteam.biorefineries.lipidcane.process_settings import price
__all__ = ('pretreatment_sys', 'lipid_cane', 'lipidcane', 'area_100', 'area_200')
# %% Species
# Register the working species set for all Stream objects in this module;
# the psp* tuples name the species orderings used for split arrays below.
Stream.species = pretreatment_species
psp = ('Ash', 'CaO', 'Cellulose', 'Ethanol', 'Flocculant',
       'Glucose', 'Hemicellulose', 'Lignin', 'Lipid',
       'Solids', 'H3PO4', 'Sucrose', 'Water')
psp1 = ('Ash', 'Cellulose', 'Glucose', 'Hemicellulose',
        'Lignin', 'Lipid', 'Solids', 'Sucrose', 'Water')
psp2 = ('Ash', 'CaO', 'Cellulose', 'Flocculant', 'Glucose',
        'Hemicellulose', 'Lignin', 'Lipid',
        'H3PO4', 'Sucrose', 'Water')
# %% Streams
# Feed and utility streams (flows in kg/hr). f1 lists lipid-cane component
# flows in the psp1 ordering.
f1 = (2000.042, 26986.69 , 2007.067, 15922.734, 14459.241,
      10035.334, 5017.667, 22746.761, 234157.798)
lipidcane = lipid_cane = Stream('lipid_cane', f1, psp1, units='kg/hr',
                                price=price['Lipid cane'])
enzyme = Stream('enzyme', Cellulose=100, Water=900, units='kg/hr',
                price=price['Protease'])
imbibition_water = Stream('imbibition_water',
                          Water=87023.35,
                          T = 338.15, units='kg/hr')
H3PO4 = Stream('H3PO4', H3PO4=74.23, Water=13.10, units='kg/hr',
               price=price['H3PO4']) # to T203
lime = Stream('lime', CaO=333.00, Water=2200.00, units='kg/hr',
              price=price['Lime']) # to P5
polymer = Stream('polymer', Flocculant=0.83, units='kg/hr',
                 price=price['Polymer']) # to T205
rvf_wash_water = Stream('rvf_wash_water',
                        Water=16770, units='kg/hr',
                        T=363.15) # to C202
oil_wash_water = Stream('oil_wash_water',
                        Water=1350, units='kg/hr',
                        T=358.15) # to T207
Stream.default_ID = 'd'
Stream.default_ID_number = 0
# Stream.default_ID_number = 100
# Feed the shredder
U101 = ConveyingBelt('U101', ins=lipid_cane)
U101.cost_items['Conveying belt'].ub = 2500
# Separate metals
U102 = MagneticSeparator('U102', ins=U101.outs)
# Shredded cane
U103 = Shredder('U103', ins=U102.outs)
# Stream.default_ID_number = 200
# Hydrolyze starch
T201 = EnzymeTreatment('T201', T=323.15) # T=50
# Finely crush lipid cane
U201 = CrushingMill('U201',
split=(0.92, 0.92, 0.04, 0.92, 0.92, 0.04, 0.1, 1),
order=('Ash', 'Cellulose', 'Glucose', 'Hemicellulose',
'Lignin', 'Sucrose', 'Lipid', 'Solids'),
moisture_content=0.5)
# Convey out bagasse
U202 = ConveyingBelt('U202', ins=U201.outs[0], outs='Bagasse')
# Mix in water
M201 = Mixer('M201')
# Screen out fibers
S201 = VibratingScreen('S201',
split=(0.35, 0.35, 0.88, 0.35,
0.35, 0.88, 0, 0.88, 0.88),
order=psp1)
# Store juice before treatment
T202 = StorageTank('T202')
T202.tau = 12
# Heat up before adding acid
H201 = HXutility('H201', T=343.15)
# Mix in acid
T203 = MixTank('T203')
# Pump acid solution
P201 = Pump('P201')
# Mix lime solution
T204 = MixTank('T204')
T204.tau = 1
P202 = Pump('P202')
# Blend acid lipid solution with lime
T205 = MixTank('T205')
# Mix recycle
M202 = Mixer('M202')
# Heat before adding flocculant
H202 = HXutility('H202', T=372.15)
# Mix in flocculant
T206 = MixTank('T206')
T206.tau = 1/4
# Separate residual solids
C201 = Clarifier('C201',
split=(0, 0, 0, 0.522, 0.522, 0, 0,
0.98, 0.522, 0.522, 0.522),
order=psp2)
# Remove solids as filter cake
C202 = RVF('C202',
outs=('filte_cake', ''),
moisture_content=0.80,
split=(0.85, 0.85, 0.85, 0.01, 0.85, 0.85, 0.01),
order=('Ash', 'CaO', 'Cellulose', 'Glucose',
'Hemicellulose', 'Lignin', 'Sucrose'))
P203 = Pump('P203')
# Separate oil and sugar
T207 = MixTank('T207', outs=('', ''))
split = np.zeros(len(pretreatment_species), float)
index = pretreatment_species.indices(('Lipid', 'Water'))
split[index] = (1, 0.0001)
T207._split = split
T207._run = lambda : Splitter._run(T207)
del split, index
# Cool the oil
H203 = HXutility('H203', T=343.15)
# Screen out small fibers from sugar stream
S202 = VibratingScreen('S202', outs=('', 'fiber_fines'),
split=1-np.array((0, 0, 0, 1, 0.002, 0, 0,0, 0, 0.002, 0.002)),
order=psp2)
sugar = S202-0
S202.mesh_opening = 2
# Add distilled water to wash lipid
T208 = MixTank('T208')
T208.tau = 2
# Centrifuge out water
C203 = SplitCentrifuge_LLE('C203',
split=(0.99, 0.01),
order=('Lipid', 'Water'))
# Vacume out water
F201 = SplitFlash('F201', T=347.15, P=2026.5,
split=(0.0001, 0.999), order=('Lipid', 'Water'))
lipid = F201.outs[1]
# %% Process specifications
# Specifications dependent on lipid cane flow rate
_enzyme_mass = enzyme.mass[[9, 12]]
_CaO_Water_mass = lime.mass[[7, 12]]
_H3PO4_Water_mass = H3PO4.mass[[1, 12]]
last_lipidcane_massnet = int(lipid_cane.massnet)
def correct_flows():
    """Rescale enzyme, lime, H3PO4 and imbibition water to the feed flow.

    Runs as a system specification: whenever the net lipid-cane mass flow
    changes (compared at integer resolution), the reagent flows are reset
    to fixed fractions of it.
    """
    global last_lipidcane_massnet
    massnet = lipid_cane.massnet
    if int(massnet) != last_lipidcane_massnet:
        # correct enzyme, lime, phosphoric acid, and imbibition water
        _enzyme_mass[:] = 0.003 * massnet * np.array([0.1, 0.9])
        _CaO_Water_mass[:] = 0.001 * massnet * np.array([0.046, 0.954])
        # NOTE(review): unlike the two lines above, this writes the same
        # scalar into both the acid and the water slot -- confirm intended.
        _H3PO4_Water_mass[:] = 0.00025 * massnet
        imbibition_water_mass.value = 0.25* massnet
        last_lipidcane_massnet = int(massnet)

# Specifications within a system
def correct_lipid_wash_water():
    """Set oil wash water (index 12, presumably Water) from H202's outlet.

    NOTE(review): mol[-2] is the second-to-last species -- presumably the
    lipid entry; confirm against the species ordering.
    """
    oil_wash_water.mol[12] = H202.outs[0].mol[-2]*100/11

# Indices of the insoluble-solids species, used by correct_wash_water().
solids_index = Stream.indices(['Ash', 'CaO', 'Cellulose', 'Hemicellulose', 'Lignin'])

def correct_wash_water():
    """Set RVF wash water proportional to the solids entering the filter."""
    solids = solidsmol[solids_index].sum()
    rvf_wash_water.mol[12] = 0.0574*solids
# Handle on the imbibition-water mass entry at index 12 (Water).
# NOTE(review): .item() on a plain ndarray returns a detached Python scalar,
# yet correct_flows() assigns to `.value` -- this presumably relies on a
# biosteam array type whose item() returns a writable proxy; confirm.
imbibition_water_mass = imbibition_water.mass.item(12)
# %% Pretreatment system set-up
# The `-` operator below is flowsheet piping syntax: (sources)-unit wires
# inlet streams, and unit-index selects an outlet stream.
(U103-0, enzyme)-T201
(T201-0, M201-0)-U201-1-S201-0-T202
(S201-1, imbibition_water)-M201
crushing_mill_recycle_sys = System('crushing_mill_recycle_sys',
                                   network=(U201, S201, M201),
                                   recycle=M201-0)
T202-0-H201
(H201-0, H3PO4)-T203-P201
(P201-0, lime-T204-0)-T205-P202
(P202-0, P203-0)-M202-H202
(H202-0, polymer)-T206-C201
(C201-1, rvf_wash_water)-C202-1-P203
clarification_recycle_sys = System('clarification_recycle_sys',
                                   network=(M202, H202, T206, C201, C202, P203),
                                   recycle=C202-1)
C201-0-T207-0-H203
(H203-0, oil_wash_water)-T208-C203-0-F201
T207-1-S202
# Full pretreatment system: units plus the specification callbacks, listed
# in simulation order.
pretreatment_sys = System('pretreatment_sys',
                          network=(U101, U102, U103,
                                   correct_flows, T201,
                                   crushing_mill_recycle_sys,
                                   U202, T202, H201, T203,
                                   P201, T204, T205, P202,
                                   correct_wash_water,
                                   clarification_recycle_sys,
                                   T207, H203, S202,
                                   correct_lipid_wash_water,
                                   T208, C203, F201,))
# Molar flows of P202's outlet; read by correct_wash_water() above.
solidsmol = P202.outs[0].mol
# Area groupings for reporting: area 100 = feedstock handling units,
# area 200 = every remaining pretreatment unit, sorted by ID.
area_100 = System('area_100', network=(U101, U102, U103))
units = pretreatment_sys.units.copy()
for i in area_100.network: units.discard(i)
area_200_network = sorted(units, key=lambda x: x.ID)
area_200 = System('area_200', network=area_200_network)
| StarcoderdataPython |
3340331 | <reponame>TooTouch/tootorch<filename>setup.py<gh_stars>1-10
from setuptools import setup, find_packages

# Use the README as the PyPI long description.
with open('README.md', encoding='utf-8') as f:
    long_description = f.read()

setup(
    name = 'tootorch',
    version = '0.2',
    long_description = long_description,
    long_description_content_type = 'text/markdown',
    # FIX: corrected typo "Implemetation" -> "Implementation".
    description = 'Implementation XAI in Computer Vision (Pytorch)',
    author = '<NAME>',
    author_email = '<EMAIL>',
    url = 'https://github.com/TooTouch/tootorch',
    # FIX: download_url pointed at the v0.1 tarball while version is 0.2;
    # keep the release tag in sync with `version`.
    download_url = 'https://github.com/TooTouch/tootorch/archive/v0.2.tar.gz',
    install_requires = ["torch", "torchvision", "h5py", "tqdm", "pillow",
                        "opencv-python"],
    packages = find_packages(exclude = []),
    keywords = ['tootorch', 'XAI'],
    python_requires = '>=3.6',
    package_data = {},
    zip_safe = False,
    classifiers = [
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        "Topic :: Scientific/Engineering :: Artificial Intelligence",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
3262588 | from django.db import models
#CompanyDomain
from django.urls import reverse
from simple_history.models import HistoricalRecords
class CompanyDomain(models.Model):
    """Business domain (sector) a company can belong to."""
    name = models.CharField(max_length=250)
    description = models.CharField(max_length=255, blank=True, null=True)
    created_time = models.DateTimeField(auto_now_add=True)  # set once on insert
    modified_time = models.DateTimeField(auto_now=True)     # refreshed on every save

    def get_companies_count(self):
        """Number of Company rows referencing this domain."""
        return Company.objects.filter(domain=self).count()

    def __str__(self):
        return self.name
# CompanyCategory
class CompanyCategory(models.Model):
    """Category label used to classify companies."""
    category_value = models.CharField(max_length=250)
    description = models.CharField(max_length=255, blank=True)
    created_time = models.DateTimeField(auto_now_add=True)
    modified_time = models.DateTimeField(auto_now=True)

    def get_companies_count(self):
        """Number of Company rows assigned to this category."""
        return Company.objects.filter(category=self).count()

    def __str__(self):
        return self.category_value
# Company
class Company(models.Model):
    """A company, optionally linked to a domain and a category.

    NOTE(review): category defaults to pk=1 -- assumes that CompanyCategory
    row always exists; confirm it is seeded by a fixture or migration.
    """
    name = models.CharField(max_length=100)
    domain = models.ForeignKey(CompanyDomain, on_delete=models.CASCADE, null=True, blank=True)
    category = models.ForeignKey(CompanyCategory, on_delete=models.CASCADE, default=1)
    created_time = models.DateTimeField(auto_now_add=True)
    modified_time = models.DateTimeField(auto_now=True)
    logo = models.ImageField(upload_to='logos', max_length=255, null=True, blank=True)
    owner = models.CharField(max_length=255, blank=True)
    description = models.CharField(max_length=255, blank=True)
    has_domain = models.BooleanField(default=False)
    # Free-text parent company name (not a FK).
    parent = models.CharField(max_length=255, blank=True)

    def get_branch_count(self):
        """Number of Branch rows belonging to this company."""
        return Branch.objects.filter(company=self).count()

    def __str__(self):
        return self.name

    class Meta():
        db_table = 'company'
class Branch(models.Model):
    """A physical branch/office of a Company."""
    name = models.CharField(max_length=100)
    company = models.ForeignKey(Company, on_delete=models.CASCADE)
    location = models.CharField(max_length=100)
    created_time = models.DateTimeField(auto_now_add=True)
    modified_time = models.DateTimeField(auto_now=True)

    def __str__(self):
        return self.name

    class Meta:
        db_table = 'branch'
# branch_phone_ contact
# branch_phone_ contact
class BranchPhoneContact(models.Model):
    """Phone number(s) attached to a branch (one row per primary number)."""
    phone_number = models.CharField(max_length=45)
    secondary_number = models.CharField(max_length=45, blank=True)
    branch = models.ForeignKey(Branch, on_delete=models.CASCADE)
    created_time = models.DateTimeField(auto_now_add=True)
    modified_time = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'branch_phone_contact'
class BranchEmailAddresses(models.Model):
    """Email address pair attached to a branch; both addresses are globally unique."""
    email_address = models.EmailField(max_length=254, unique=True)
    secondary_email = models.EmailField(max_length=254, blank=True, null=True, unique=True)
    branch = models.ForeignKey(Branch, on_delete=models.CASCADE)
    created_time = models.DateTimeField(auto_now_add=True)
    modified_time = models.DateTimeField(auto_now=True)

    class Meta:
        db_table = 'branch_email_addresses'
# Department
class Department(models.Model):
    """A department that can span multiple branches and companies."""
    name = models.CharField(max_length=100)
    branch = models.ManyToManyField(Branch)
    company = models.ManyToManyField(Company)
    created_time = models.DateTimeField(auto_now_add=True)
    modified_time = models.DateTimeField(auto_now=True)

    def get_absolute_url(self):
        """Canonical detail URL; assumes the 'company_management' URL namespace."""
        return reverse('company_management:details_department', kwargs={'pk': self.pk})

    def __str__(self):
        return self.name

    class Meta:
        db_table = 'department'
# SLA(Service Level Agreement)
class ServiceLevelAgreement(models.Model):
    """An SLA between a customer company and a providing company."""
    name = models.CharField(max_length=255)
    # Two FKs to the same model need distinct related_names to avoid a
    # reverse-accessor clash.
    customer = models.ForeignKey(Company, on_delete=models.CASCADE, related_name='customer')
    company = models.ForeignKey(Company, on_delete=models.CASCADE, related_name='company')
    description = models.CharField(max_length=255, null=True, blank=True)
    created_time = models.DateTimeField(auto_now_add=True)
    modified_time = models.DateTimeField(auto_now=True)
| StarcoderdataPython |
#!/usr/bin/python
#
# Make a 49-block file of 64-byte blocks, plus a matching metadata file.
#
with open('distfile.bs64', 'w') as f:
    # FIX: xrange -> range so the script also runs under Python 3
    # (range behaves identically when only iterated, so Python 2 still works).
    for n in range(49):
        block = "The crying of lot %s"%n
        block += "."*(64-len(block))  # pad each record to exactly 64 bytes
        f.write(block)
with open('distfile.meta', 'w') as f:
    f.write("\x00"*4) #version
    f.write("\x00\x00\x00\x40") # bucket size (0x40 == 64, matching the block size)
    f.write("demonstration nymserver ")
    f.write("\x11"*32) # nymserver ID. Won't validate.
    f.write("demonstration dist ")
    f.write("\x22"*32) # distribution ID. Won't validate.
1748238 | <gh_stars>1-10
import uuid
from django.db import migrations, models
from django.utils.encoding import force_text
import mayan.apps.storage.classes
def UUID_FUNCTION(*args, **kwargs):
    """upload_to callable: return a fresh random UUID4 as text (all args ignored)."""
    return force_text(s=uuid.uuid4())
class Migration(migrations.Migration):
    """Repoint DocumentVersion.file at the fake storage subclass and give
    uploaded files random UUID names instead of user-supplied ones."""

    dependencies = [
        ('documents', '0047_auto_20180917_0737'),
    ]

    operations = [
        migrations.AlterField(
            model_name='documentversion',
            name='file',
            field=models.FileField(
                storage=mayan.apps.storage.classes.FakeStorageSubclass(),
                upload_to=UUID_FUNCTION,  # one random UUID per uploaded file
                verbose_name='File'
            ),
        ),
    ]
| StarcoderdataPython |
91171 | """this is pulled from Pololu's library for driving motors here: https://github.com/pololu/drv8835-motor-driver-rpi/blob/master/pololu_drv8835_rpi.py"""
from RPIO import PWM
import RPIO
import lcm
from butterbotlcm import motor_t
lc = lcm.LCM()
A1IN = 17
A2IN = 27
B2IN = 23
B1IN = 22
TIMING = 2000
MAX_SPEED = TIMING - 1
def io_init():
    """Initialise RPIO's DMA-based PWM subsystem and silence non-error logs.

    Raises RuntimeError if PWM has already been set up (handled by Motors).
    """
    PWM.setup()
    PWM.set_loglevel(PWM.LOG_LEVEL_ERRORS)
class Motor(object):
    """One H-bridge channel of the DRV8835, driven through two GPIO PWM pins."""

    def __init__(self, xin1, xin2, channels):
        # GPIO pins for the two bridge inputs.
        self.xin1 = xin1
        self.xin2 = xin2
        # DMA PWM channels used for the pulses on each pin.
        self.channel1 = channels[0]
        self.channel2 = channels[1]
        PWM.init_channel(self.channel1)
        PWM.init_channel(self.channel2)

    def setSpeed(self, speed):
        """Set motor speed; `speed` is a percentage in [-100, 100], sign = direction."""
        # Scale percent to PWM ticks (0..MAX_SPEED), rounding via +0.5/int.
        speed = int(speed/100.0 * MAX_SPEED + 0.5)
        if speed < 0:
            speed = -speed
            dir_value = 1
        else:
            dir_value = 0
        if speed > MAX_SPEED:
            speed = MAX_SPEED
        # Pulse one input with the duty cycle and give the other zero width;
        # which pin carries the pulse selects the rotation direction.
        PWM.add_channel_pulse(self.channel1, self.xin1, 0, dir_value*speed)
        PWM.add_channel_pulse(self.channel2, self.xin2, 0, (1 - dir_value)*speed)
class Motors(object):
    """Pair of DRV8835 motor channels (left/right drive motors)."""

    def __init__(self):
        try:
            io_init()
        except RuntimeError:
            # PWM.setup() raises if it was already initialised; safe to ignore.
            print("Already configured IO")
        self.motor1 = Motor(A1IN, A2IN, (0, 1))
        self.motor2 = Motor(B1IN, B2IN, (2, 3))

    def __del__(self):
        # Release GPIO state when the object is garbage-collected.
        RPIO.cleanup()

    def setSpeeds(self, m1_speed, m2_speed):
        """Set both motor speeds (percentages in [-100, 100])."""
        self.motor1.setSpeed(m1_speed)
        self.motor2.setSpeed(m2_speed)
# Module-level motor pair shared by the LCM callback below.
motors = Motors()

def my_handler(channel, data):
    """LCM callback: decode a motor_t message and apply the two wheel speeds."""
    msg = motor_t.decode(data)
    motors.setSpeeds(msg.leftmotor, msg.rightmotor)

if __name__ == "__main__":
    # Subscribe and pump LCM messages forever; Ctrl-C exits cleanly.
    subscription = lc.subscribe("BUTTERBOT_MOTOR", my_handler)
    try:
        while True:
            lc.handle()
    except KeyboardInterrupt:
        pass
156223 | """
This module contains classes for all the API response related items.
It contains one struct for news items, and two objects for Covid and Weather updates,
which self populate with the API response.
"""
import logging
import os
import requests
logger = logging.getLogger(os.getenv("COVCLOCK_LOG_NAMESPACE"))
# Data structures, the API has different available data depending on whether it is national,
# or local data
# See: https://coronavirus.data.gov.uk/details/developers-guide#structure-metrics
# pylint: disable=R0903,W0703
class NewsItem:
    """One news article parsed from a NewsAPI-style JSON item."""

    # Class-level defaults; __init__ overwrites these per instance.
    title: str = ""
    description: str = ""
    url: str = ""
    source_name: str = ""
    img_source: str = ""

    def __init__(self, i_json):
        """Populate the fields from a single article dict of the API payload."""
        get = i_json.get
        self.title = get("title", "")
        self.description = get("description", "")
        self.url = get("url", "")
        # Nested lookup; missing source name yields None rather than "".
        self.source_name = get("source", {}).get("name")
        self.img_source = get("urlToImage", "")

    def serialize(self):
        """
        Make the NewsItem JSON readable
        :return: A serialized version of the instance (source_name omitted)
        :rtype: dict
        """
        return {
            "title": self.title,
            "description": self.description,
            "url": self.url,
            "image_src": self.img_source,
        }
class WeatherUpdate:
    """One forecast slot fetched from the OpenWeatherMap forecast API."""

    description: str = ""
    temp: float = 0.0
    precip_chance: float = 0.0

    @staticmethod
    def populate():
        """Fetch the next forecast entry; return a WeatherUpdate, or None on failure."""
        # pylint: disable=C0301
        url = "https://api.openweathermap.org/data/2.5/forecast?q={city_name}&appid={api_key}&units={units}".format(
            city_name=os.getenv("COVCLOCK_WEATHER_CITY"),
            api_key=os.getenv("COVCLOCK_WEATHER_API_KEY"),
            units=os.getenv("COVCLOCK_WEATHER_UNIT"))
        resp = requests.get(url)
        if resp.status_code != 200:
            return None
        json_resp = resp.json()
        try:
            entry = json_resp.get("list")[0]
            update = WeatherUpdate()
            update.description = entry.get("weather", [])[0].get("description", "")
            update.temp = entry.get("main", {}).get("temp", 0)
            update.precip_chance = entry.get("pop", 0.0)
            return update
        except IndexError:
            # "list" (or "weather") was empty: no forecast available.
            return None
        except Exception as exception:  # broad guard kept from the original contract
            logger.error(exception)
            return None

    def serialize(self):
        """Return a JSON-serializable dict view of this update."""
        return {
            "description": self.description,
            "temp": self.temp,
            "precip_chance": self.precip_chance,
        }
class CovidUpdate:
    """Case and death counts pulled from the UK coronavirus API."""

    local: bool = False
    new_deaths: int = -1
    cum_deaths: int = -1
    new_cases: int = -1
    cum_cases: int = -1

    # Query payloads, built once at class-definition time from the environment.
    LOCAL_PAYLOAD = {
        "areaType": os.getenv("COVCLOCK_AREA_TYPE", "utla"),
        "areaCode": os.getenv("COVCLOCK_AREA_CODE", "E10000008"),
        "metric": ["newCasesByPublishDate", "cumDeaths28DaysByPublishDate",
                   "newDeaths28DaysByPublishDate"],
        "format": "json",
    }
    NATIONAL_PAYLOAD = {
        "areaType": "overview",
        "metric": ["newCasesByPublishDate", "cumDeaths28DaysByPublishDate",
                   "newDeaths28DaysByPublishDate"],
        "format": "json",
    }

    @staticmethod
    def populate(local=False):
        """
        Create a CovidUpdate instance with data straight from the API.

        :return: A populated :class:`.CovidUpdate` instance, or None if the
            request failed.
        :rtype: CovidUpdate
        """
        payload = CovidUpdate.LOCAL_PAYLOAD if local else CovidUpdate.NATIONAL_PAYLOAD
        resp = requests.get(
            os.getenv("COVCLOCK_COVID_API_URL_ROOT", "https://api.coronavirus.data.gov.uk/v2/data"),
            params=payload)
        if resp.status_code != 200:
            return None

        body = resp.json().get("body", [None])
        data = body[0]

        def count(key):
            # Treat missing or falsy (None/0) metrics uniformly as 0.
            value = data.get(key, 0)
            return value if value else 0

        update = CovidUpdate()
        update.new_cases = count("newCasesByPublishDate")
        update.new_deaths = count("newDeaths28DaysByPublishDate")
        update.cum_deaths = count("cumDeaths28DaysByPublishDate")
        update.local = local
        return update
| StarcoderdataPython |
1601955 | <reponame>dougalsutherland/py-sdm<gh_stars>10-100
from __future__ import division, print_function
from collections import Counter, defaultdict
from contextlib import closing
from functools import partial
from glob import glob
import operator as op
import os
import cPickle as pickle
import shutil
import sys
import numpy as np
from .utils import (imap, izip, iterkeys, iteritems, lazy_range, strict_zip,
reduce, str_types, is_integer_type)
# Category assigned to bags when the caller supplies none.
_default_category = 'none'
# Sentinel passed to __init__ by from_data() to skip normal construction.
_do_nothing_sentinel = object()
# Default fraction of variance preserved by Features.pca().
DEFAULT_VARFRAC = 0.7
def _group(boundaries, arr):
    """Slice `arr` into consecutive chunks delimited by the `boundaries` offsets."""
    pieces = []
    for i in lazy_range(1, len(boundaries)):
        pieces.append(arr[boundaries[i - 1]:boundaries[i]])
    return pieces
class Features(object):
'''
A wrapper class for storing bags of features. (A *bag* is a set of feature
vectors corresponding to a single object.)
Stores them stacked into a single array (to make e.g. PCA and nearest-
neighbor searches easier), but allows seamless access to individual sets.
Also stores some metadata corresponding to each bag (e.g. labels, names).
To create a Features object, pass:
- The features. You can do this in one of two ways:
- pass bags as a list of numpy arrays, one per object, whose
dimensionality should be n_pts x dim. n_pts can vary for each
bag, but dim must be the same for all bags. n_pts cannot be 0.
- pass bags as a single numpy array of shape sum(n_pts) x dim, and
also pass n_pts as an array-like object containing the number
of points in each bag. This should be a list of positive
integers whose sum is equal to the number of rows in bags.
bags will be C-ordered.
- categories (optional): a list of the "category" for each object. If
passed, should be of equal length to the number of bags. This
might be a class name, a data source, etc. Used in storing the data;
if not passed, uses `default_category` for all of them.
Should not contain the '/' character; sticking to [-\w\d. ]+
is safest.
- default_category (optional, default "none"): the default category
to use for each object if categories is not passed.
- names (optional): a name for each object. Should be unique per
category but may have repeats across categories. Same restrictions
on characters as categories. If not present, defaults to sequential
integers.
- any other keyword argument: interpreted as metadata for each
object. Lists of scalars are converted to numpy arrays; anything
else is treated as a numpy object array.
The `data` attribute is a numpy structured array. Each element corresponds
to a bag. The datatype elements are 'features' (a reference to a bag of
features), 'category' (a string), 'name' (a string), as well as any extras.
'''
    def __init__(self, bags, n_pts=None, categories=None, names=None,
                 default_category=_default_category, **extras):
        if bags is _do_nothing_sentinel:
            return  # special path for from_data

        # load the features
        if n_pts is not None:
            # Pre-stacked input: bags is one sum(n_pts) x dim array.
            n_pts = np.squeeze(n_pts)
            if n_pts.ndim != 1:
                raise TypeError("n_pts must be 1-dimensional")
            if n_pts.size == 0:
                raise TypeError("must have at least one bag")
            if np.any(n_pts <= 0):
                raise TypeError("n_pts must all be positive")
            if not is_integer_type(n_pts):
                # Accept float counts only when they are exact integers.
                rounded = np.rint(n_pts)
                if all(rounded == n_pts):
                    n_pts = rounded.astype(int)
                else:
                    raise TypeError("n_pts must be integers")
            bags = np.asarray(bags, order='C')
            if bags.ndim != 2 or bags.shape[0] != np.sum(n_pts):
                raise TypeError("bags must have shape sum(n_pts) x dim")
            if bags.shape[1] == 0:
                raise TypeError("bags must have dimension > 0")
            self._features = bags
            still_stack = False
        else:
            # List-of-arrays input: validate each bag now, stack later.
            if len(bags) == 0:
                raise ValueError("must have at least one bag")
            dim = None
            new_bags = []
            n_pts = []
            for bag in bags:
                a = np.asarray(bag, order='C')
                if a.ndim == 1:
                    a = a[None, :]  # promote a single point to 1 x dim
                if a.ndim != 2:
                    raise TypeError("each bag must be n_pts x dim")
                if dim is None:
                    dim = a.shape[1]
                elif a.shape[1] != dim:
                    raise TypeError("bags' second dimension must be consistent")
                if a.shape[0] == 0:
                    raise TypeError("each bag must have at least one point")
                if a.dtype.kind not in 'fiu':
                    raise TypeError("can't handle type {}".format(a.dtype.name))
                new_bags.append(a)
                n_pts.append(a.shape[0])
            n_pts = np.asarray(n_pts)
            still_stack = True
            # delay doing the actual vstack until later, because that can take
            # a while and is wasted if there's an error in one of the other
            # arguments

        # _boundaries[i-1]:_boundaries[i] is bag i's slice of the stacked array.
        self._n_pts = n_pts
        self._boundaries = np.hstack([[0], np.cumsum(n_pts)])
        n_bags = n_pts.size

        # handle categories
        if categories is None:
            categories = np.repeat(default_category, n_bags)
        else:
            categories = np.asarray(categories, dtype=str)
            if len(categories) != n_bags:
                raise ValueError("have {} bags but {} categories".format(
                    n_bags, len(categories)))

        # handle names
        if names is None:
            names = np.array([str(i) for i in lazy_range(n_bags)])
        else:
            names = np.asarray(names, dtype=str)
            if len(names) != n_bags:
                raise ValueError("have {} bags but {} names".format(
                    n_bags, len(names)))

        # check that they're unique per category
        cat_names = np.zeros(n_bags,
            dtype=[('cat', categories.dtype), ('name', names.dtype)])
        cat_names['cat'] = categories
        cat_names['name'] = names
        if np.unique(cat_names).size != n_bags:
            raise ValueError("category/name pairs must be unique")

        # handle extras: each must supply one value per bag
        the_extras = {}
        for name, vals in iteritems(extras):
            if len(vals) != n_bags:
                raise ValueError("have {} bags but {} values for {}".format(
                    n_bags, len(vals), name))
            the_extras[name] = np.asarray(vals)
        self._extra_names = frozenset(the_extras)

        # do the vstacking, if necessary
        if still_stack:
            self._features = bags = np.vstack(new_bags)

        # make the structured array containing everything
        dtype = self._get_dtype(categories, names, the_extras)
        self.data = data = np.empty(n_bags, dtype=dtype)
        self._refresh_features()
        data['category'] = categories
        data['name'] = names
        for name, vals in iteritems(the_extras):
            data[name] = list(vals) if vals.ndim > 1 else vals
    def _get_dtype(self, categories, names, extras):
        """Build the structured dtype for self.data.

        'features' is an object column (each cell references one bag's array);
        extras with ndim > 1 are stored as objects, 1-d extras keep their dtype.
        """
        dt = [
            ('features', object),
            ('category', categories.dtype),
            ('name', names.dtype)
        ]
        # in python 2 only, have to encode the names...sigh.
        if sys.version_info.major == 2:
            dt += [(n.encode(), 'O' if vals.ndim > 1 else vals.dtype)
                   for n, vals in iteritems(extras)]
        else:
            dt += [(n, 'O' if vals.ndim > 1 else vals.dtype)
                   for n, vals in iteritems(extras)]
        return dt
############################################################################
### Copying, pickling, etc
    @classmethod
    def from_data(cls, data, copy=False, deep=False, _memo=None):
        '''
        Constructs a Features instance from its .data attribute.

        Copies the data if copy=True is passed. Note that this will copy the
        features, but not any extras which are object references. Use deep=True
        in that case.
        '''
        # The sentinel makes __init__ a no-op; the instance is then filled
        # in directly from the structured array.
        new = cls(_do_nothing_sentinel)
        new._update_from_data(data, copy=copy, deep=deep, _memo=_memo)
        return new
    def _update_from_data(self, data, copy=False, deep=False, _memo=None):
        """Populate internal state (stacked features, boundaries, extra-name set)
        from a structured array shaped like self.data, optionally copying the
        metadata (deep=True also deep-copies object-valued columns)."""
        feats = data['features']
        self._n_pts = np.array([f.shape[0] for f in feats])
        self._boundaries = np.hstack([[0], np.cumsum(self._n_pts)])

        reg_names = frozenset(['category', 'features', 'name'])
        self._extra_names = frozenset(data.dtype.names) - reg_names

        # TODO: avoid copying data (as much as is possible) by examining
        # feats[i].base. If we're copying or subsetting, we should be
        # able to be smarter than this.
        self._features = np.vstack(feats)

        if copy:
            if deep:
                # deepcopy is only imported -- and only evaluated below --
                # when deep=True.
                from copy import deepcopy
            self.data = d = np.empty_like(data)
            for n in d.dtype.names:
                if n != 'features':
                    d[n] = deepcopy(data[n], _memo) if deep else data[n]
        else:
            self.data = data
        # The 'features' column always gets re-pointed at the fresh stack.
        self._refresh_features()
    def _refresh_features(self):
        """Repoint each data['features'] cell at its slice of the stacked array."""
        self.data['features'] = _group(self._boundaries, self._features)
    def __copy__(self):
        """Copy metadata and features; object-valued extras are still shared."""
        return Features.from_data(self.data, copy=True, deep=False)
    def copy(self):
        """Explicit-method alias for copy.copy(self)."""
        return self.__copy__()
    def __deepcopy__(self, _memo=None):
        """Deep copy: also clones object-valued extras."""
        return Features.from_data(self.data, copy=True, deep=True, _memo=_memo)
    def __getstate__(self):
        """Pickle support: the structured .data array carries everything."""
        return (self.data,)
def __setstate__(self, state):
data, = state
self._update_from_data(state, copy=False)
############################################################################
## General magic methods for basic behavior
def __repr__(self):
s = '<Features: {:,} bags with {} {}-dimensional points ({:,} total)>'
min_p = self._n_pts.min()
max_p = self._n_pts.max()
if min_p == max_p:
pts = "{:,}".format(min_p)
else:
pts = '{:,} to {:,}'.format(min_p, max_p)
return s.format(len(self), pts, self.dim, self.total_points)
    def __len__(self):
        """Number of bags."""
        return self.data.size
    def __iter__(self):
        """Iterate over per-bag records (rows of self.data)."""
        return iter(self.data)
    def __getitem__(self, key):
        """Index bags (rows) only: a scalar yields one record, anything else a
        Features subset. Field-name (string) indexing is rejected; use .data."""
        if (isinstance(key, str_types) or
                (isinstance(key, tuple) and
                 any(isinstance(x, str_types) for x in key))):
            raise TypeError("Features indexing only subsets rows")
        if np.isscalar(key):
            return self.data[key]
        else:
            # copy=False: the subset's metadata may share memory with self.
            return type(self).from_data(self.data[key], copy=False)
def __add__(self, oth):
if isinstance(oth, Features):
common_extras = dict(
(k, np.hstack((getattr(self, k), getattr(oth, k))))
for k in self._extra_names & oth._extra_names)
return Features(
np.vstack((self._features, oth._features)),
n_pts=np.hstack((self._n_pts, oth._n_pts)),
categories=np.hstack((self.categories, oth.categories)),
names=np.hstack((self.names, oth.names)),
**common_extras)
if isinstance(oth, list): # TODO: support np object arrays too?
feats = np.vstack([self._features] + oth)
n_pts = np.hstack([self._n_pts] + [len(x) for x in oth])
oth_cats = np.repeat(_default_category, len(oth))
cats = np.hstack([self.categories, oth_cats])
names = [str(i) for i in range(len(feats), len(feats) + len(oth))]
names.insert(0, self.names)
names = np.hstack(names)
return type(self)(feats, n_pts=n_pts, categories=cats, names=names)
return NotImplemented
def __radd__(self, oth):
if isinstance(oth, list):
feats = np.vstack(oth + [self._features])
n_pts = np.hstack([len(x) for x in oth] + [self._n_pts])
oth_cats = np.repeat(_default_category, len(oth))
cats = np.hstack([oth_cats, self.categories])
names = [str(i) for i in range(len(feats), len(feats) + len(oth))]
names.append(self.names)
names = np.hstack(names)
return Features(feats, n_pts=n_pts, categories=cats, names=names)
return NotImplemented
############################################################################
### Properties to get at the basic data
    @property
    def total_points(self):
        "The total number of points across all bags (rows of the stacked array)."
        return self._features.shape[0]
    @property
    def dim(self):
        "The dimensionality of the feature vectors (columns of the stacked array)."
        return self._features.shape[1]
    @property
    def dtype(self):
        "The numpy data type of the feature vectors."
        return self._features.dtype
features = property(lambda self: self.data['features'])
categories = category = property(lambda self: self.data['category'])
names = name = property(lambda self: self.data['name'])
# handle extras too, even though we don't know their names in advance...
# TODO: actually make these in the constructor, so tab-complete/etc works
def __getattr__(self, name):
if name in self._extra_names:
return self.data[name]
else:
return getattr(super(Features, self), name)
############################################################################
### Adding new extras to an existing object
def add_extra(self, name, values, dtype=None, inplace=False):
'''
Adds a single "extra" value to this Features object.
See add_extras for details.
'''
dtypes = None if dtype is None else [dtype]
return self.add_extras(names=[name], values=[values], dtypes=dtypes,
inplace=inplace)
    def add_extras(self, names, values, dtypes=None, inplace=False):
        '''
        Adds new "extra" values to this Features object.

        Note that for implementation reasons, this requires making a copy of
        the .data array containing all the metadata (though not the actual
        features array itself).

        Arguments:
            - names: a list of names for the new extra values
            - values: a list of the actual values for the new extras. Should
                be broadcastable to be of shape (len(self),).
            - dtypes (optional): a list of the data types for the new extras.
                If not passed, uses the dtype of np.asarray(val) for each
                value. If you don't pass dtypes and values contains
                objects other than numpy arrays, an extra copy will be
                made during this process.
            - inplace (optional, default False): if True, adds the extra to
                this object (though metadata is copied as noted above).
                If False, returns a new object with the extra added. Note
                that the new object will be like a shallow copy of this
                one: the features array and any object-type extras will
                be shared.
        '''
        # Can't use numpy.lib.recfunctions.append_fields:
        # https://github.com/numpy/numpy/issues/2346
        # All three argument lists (when given) must have the same length.
        len_set = set([len(names), len(values)])
        if dtypes is not None:
            len_set.add(len(dtypes))
        if len(len_set) != 1:
            raise ValueError("names, values, and dtypes (if passed) should be "
                             "of same length")

        name_set = set(names)
        if len(name_set) != len(names):
            raise ValueError("can't repeat names...")
        elif not name_set.isdisjoint(self.data.dtype.names):
            raise ValueError("can't use names already in use")

        if dtypes is None:
            values = [np.asarray(val) for val in values]
            dtypes = [val.dtype for val in values]

        # Rebuild the structured array with the new columns appended, copying
        # the old columns over field by field.
        old_descr = self.data.dtype.descr
        new_descr = strict_zip(names, dtypes)
        new = np.empty(len(self), dtype=old_descr + new_descr)
        for name, dtype in old_descr:
            new[name] = self.data[name]
        for name, value in izip(names, values):
            new[name] = value

        if inplace:
            self.data = new
            self._extra_names = self._extra_names.union(names)
        else:
            return Features.from_data(new)
############################################################################
### Transforming the features
    def _replace_bags(self, bags, n_pts=None, inplace=False):
        """Swap in new feature bags while keeping all metadata columns.

        `bags` is either a list of per-bag arrays (n_pts inferred, then
        stacked) or an already-stacked 2-d array with explicit n_pts.
        With inplace=True mutates self and returns None; otherwise returns
        a fresh instance carrying over categories, names, and extras.
        """
        if n_pts is None:
            n_pts = [b.shape[0] for b in bags]
            bags = np.vstack(bags)
        else:
            bags = np.asarray(bags)
            assert bags.ndim == 2
        if inplace:
            self._n_pts = np.asarray(n_pts)
            self._boundaries = np.hstack([[0], np.cumsum(self._n_pts)])
            self._features = bags
            self._refresh_features()
        else:
            return self.__class__(
                bags, n_pts=n_pts, categories=self.categories, names=self.names,
                **dict((k, self.data[k]) for k in self._extra_names))
    def _apply_transform(self, transformer, fit_first, inplace=False,
                         dtype=None):
        '''
        Transforms the features using an sklearn-style transformer object that
        should be fit to the full, stacked feature matrix. Assumes that the
        transformer supports the "copy" attribute, and that it does not change
        the number or order of points (though it may change their
        dimensionality).

        transformer: the transformer object
        fit_first: whether to fit the transformer to the objects first
        dtype: fit to the features.astype(dtype) if not None

        By default, returns a new Features instance.
        If inplace is passed, modifies this instance; doesn't return anything.
        '''
        # When in-place, let sklearn overwrite the stacked array directly.
        transformer.copy = not inplace
        feats = self._features
        if dtype is not None:
            feats = feats.astype(dtype)
        if fit_first:
            transformed = transformer.fit_transform(feats)
        else:
            transformed = transformer.transform(feats)
        return self._replace_bags(
            transformed, n_pts=self._n_pts, inplace=inplace)
    def pca(self, pca=None, unfit_pca=None,
            k=None, varfrac=DEFAULT_VARFRAC, randomize=False, whiten=False,
            dtype=None,
            ret_pca=False, inplace=False):
        '''
        Runs the features through principal components analysis to reduce their
        dimensionality.

        By default, returns a new Features instance.
        If inplace is passed, modifies this instance; doesn't return anything.
        If ret_pca is passed: returns the PCA object as well as whatever else
        it would have returned.

        If `pca` is passed, uses that pre-fit PCA object to transform. This is
        useful for transforming test objects consistently with training objects.

        Otherwise, if `unfit_pca` is passed, that object's fit_transform()
        method is called to fit the samples and transform them.

        Otherwise, the following options specify which type of PCA to perform:
            k: a dimensionality to reduce to. Default: use varfrac instead.
            varfrac: the fraction of variance to preserve. Overridden by k.
                     Default: 0.7. Can't be used for randomized or sparse PCA.
            randomize: use a randomized PCA implementation. Default: no.
            whiten: whether to whiten the inputs, removing linear correlations
                    across features
            dtype: the dtype of the feature matrix to use.
        '''
        # figure out what PCA instance we should use
        if pca is not None:
            fit_first = False  # pre-fit: transform only
        elif unfit_pca is not None:
            pca = unfit_pca
            fit_first = True
        else:
            from sklearn.decomposition import PCA, RandomizedPCA
            fit_first = True
            if k is None:
                if randomize:
                    raise ValueError("can't randomize without a specific k")
                # sklearn interprets a float n_components as a variance fraction
                pca = PCA(varfrac, whiten=whiten)
            else:
                pca = (RandomizedPCA if randomize else PCA)(k, whiten=whiten)

        r = self._apply_transform(pca, fit_first=fit_first, inplace=inplace)
        if ret_pca:
            return pca if inplace else (r, pca)
        else:
            return r
    def standardize(self, scaler=None, ret_scaler=False, inplace=False,
                    cast_dtype=np.float32):
        '''
        Standardizes the features so that each dimension has zero mean and unit
        variance.

        By default, returns a new Features instance.
        If inplace is passed, modifies this instance; doesn't return anything.
        If ret_scaler is passed: returns the scaler object as well as whatever
        else it would have returned.
        If cast_dtype is not None, casts non-float data arrays to this dtype
        first.

        If `scaler` is passed, uses that pre-fit scaler to transform. This is
        useful for transforming test objects consistently with training objects.
        '''
        fit_first = False
        if scaler is None:
            from sklearn.preprocessing import StandardScaler
            scaler = StandardScaler()
            fit_first = True

        kw = {'fit_first': fit_first, 'inplace': inplace}
        # Integer features must become float before centering/scaling.
        if self._features.dtype.kind != 'f':
            kw['dtype'] = cast_dtype
        r = self._apply_transform(scaler, **kw)
        if ret_scaler:
            return scaler if inplace else (r, scaler)
        else:
            return r
    def normalize(self, norm='l2', inplace=False, cast_dtype=np.float32):
        '''
        Normalizes the features so that each vector has unit norm (l1 or l2).

        By default, returns a new Features instance.
        If inplace is passed, modifies this instance; doesn't return anything.
        If cast_dtype is not None, casts non-float data arrays to this dtype
        first.

        norm: 'l2' (default) or 'l1'.

        This transformation is stateless, so unlike pca() or standardize()
        there's no point in returning the normalizer object.
        '''
        from sklearn.preprocessing import Normalizer
        normalizer = Normalizer(norm)
        dtype = None if self.dtype.kind == 'f' else cast_dtype
        # fit_first=False: Normalizer needs no fitting.
        return self._apply_transform(
            normalizer, fit_first=False, inplace=inplace, dtype=dtype)
def condense_kmeans(self, n_clusters, max_iter=20, inplace=False,
progressbar=False, cast_dtype=np.float32,
library='vlfeat', algorithm='lloyd'):
'''
Condenses the number of points in a sample set through k-means.
'''
feats_iter = iter(self.features)
if self.dtype.kind != 'f':
feats_iter = (np.asarray(b, dtype=cast_dtype) for b in feats_iter)
if progressbar:
from .mp_utils import progress
feats_iter = progress(maxval=len(self))(feats_iter)
if library == 'vlfeat':
fn = self._condense_kmeans_vlfeat
elif library == 'sklearn':
fn = self._condense_kmeans_sklearn
do = fn(n_clusters=n_clusters, max_iter=max_iter, algorithm=algorithm)
new_bags = [bag if bag.shape[0] <= n_clusters else do(bag)
for bag in feats_iter]
return self._replace_bags(new_bags, inplace=inplace)
def _condense_kmeans_vlfeat(self, n_clusters, max_iter=20,
algorithm='lloyd'):
from vlfeat import vl_kmeans
return partial(vl_kmeans, num_centers=n_clusters, algorithm=algorithm,
max_iter=max_iter, num_rep=1, initialization='random')
def _condense_kmeans_sklearn(self, n_clusters, max_iter=20,
algorithm='minibatch'):
if algorithm == 'minibatch':
from sklearn.cluster import MiniBatchKMeans
cls = partial(MiniBatchKMeans, compute_labels=False)
elif algorithm in ('batch', 'lloyd'):
from sklearn.cluster import KMeans
cls = partial(KMeans, n_init=1, n_jobs=1)
# most of the work is parallelized by MKL. still, not super fast.
kmeans = cls(n_clusters=n_clusters, init='random', max_iter=max_iter)
def do(bag):
kmeans.fit(bag)
return kmeans.cluster_centers_
return do
    def bag_of_words(self, n_codewords, max_iter=100, num_rep=1,
                     cast_dtype=np.float32,
                     library='vlfeat', algorithm='lloyd'):
        '''
        Transforms each bag into a single vector with the bag of words
        representation:
            1. Run k-means (with n_codewords means) on the full set of all
               points from all bags.
            2. Represent each bag by the count of points assigned to each
               codeword.

        library: 'vlfeat' (default) or anything else for scikit-learn;
            algorithm='minibatch' selects MiniBatchKMeans in the sklearn case.
        cast_dtype: non-float feature arrays are cast to this dtype first.

        Returns:
            - an n_bags x n_codewords integer array of histograms
            - an n_codewords x dim array of cluster centers
        '''
        if n_codewords > self.total_points:
            msg = "asked for {} codewords with only {} points"
            raise ValueError(msg.format(n_codewords, self.total_points))
        # k-means needs floats; cast the stacked feature matrix if necessary.
        if self.dtype.kind != 'f':
            feats = np.asarray(self._features, dtype=cast_dtype)
        else:
            feats = self._features
        if library == 'vlfeat':
            from vlfeat import vl_kmeans
            # quantize=True makes vl_kmeans also return per-point assignments.
            centers, assignments = vl_kmeans(feats,
                    num_centers=n_codewords, algorithm=algorithm,
                    max_iter=max_iter, num_rep=num_rep, quantize=True)
        else:
            if algorithm == 'minibatch':
                from sklearn.cluster import MiniBatchKMeans
                # compute_labels=True so fitting also yields assignments.
                cls = partial(MiniBatchKMeans, compute_labels=True)
            else:
                from sklearn.cluster import KMeans
                cls = partial(KMeans, n_jobs=1)
            kmeans = cls(n_clusters=n_codewords,
                         max_iter=max_iter, n_init=num_rep)
            assignments = kmeans.fit_predict(feats)
            centers = kmeans.cluster_centers_
        # Histogram the assignments within each bag's boundary slice.
        grouped = [np.bincount(g, minlength=n_codewords)
                   for g in _group(self._boundaries, assignments)]
        return np.asarray(grouped), centers
############################################################################
### generic I/O helpers
@staticmethod
def _missing_extras(dtype):
"""
For arrays with dtype with missing vals: returns new dtype, default val.
"""
if dtype.kind in 'fc': # float/complex types
return dtype, np.nan
elif dtype.kind in 'O': # object types
return dtype, None
elif dtype.kind in 'aSU': # string types
return dtype, ''
elif dtype.kind in 'biu': # integer types: no missing type, so switch
# to float and use nan
return np.float, np.nan
else: # other types: no default, so switch to object type and use None
return object, None
def save(self, path, format='hdf5', **attrs):
'''
Saves into an output file. Default format is 'hdf5'; other options are
'perbag' or 'typedbytes'.
'''
if format == 'hdf5':
return self.save_as_hdf5(path, **attrs)
elif format == 'perbag':
return self.save_as_perbag(path, **attrs)
elif format == 'typedbytes':
return self.save_as_typedbytes(path, **attrs)
else:
raise TypeError("unknown save format '{}'".format(format))
@classmethod
def load(cls, path, **kwargs):
'''
Loads from an hdf5 file or a perbag/typedbytes directory.
If path is a directory:
if it contains any data_*.tb files, calls load_from_typedbytes
otherwise calls load_from_perbag
otherwise, calls load_from_hdf5.
See any of those functions for documentation of the arguments.
'''
if os.path.isdir(path):
if glob(os.path.join(path, 'data_*.tb')):
return cls.load_from_typedbytes(path, **kwargs)
else:
return cls.load_from_perbag(path, **kwargs)
else:
return cls.load_from_hdf5(path, **kwargs)
############################################################################
### Stuff relating to hdf5 feature files
    def save_as_hdf5(self, filename, file_root=None, append=False, **attrs):
        '''
        Saves into an HDF5 file filename,
        rooted at file_root (default: root of the file).

        If the file already exists, it overwrites it completely unless
        append=True. In that case, it probably only makes sense if you're
        using a different file_root; otherwise the meta attributes will clash
        and there may be a crash due to name conflicts or something.

        Also saves any keyword args as a dataset under '/_meta'.

        Each bag is saved as "features" and "frames" in /category/filename; any
        "extras" get added there as a (probably scalar) dataset named by the
        extra's name.
        '''
        import h5py
        # 'a' keeps existing contents when appending; 'w' truncates the file.
        with h5py.File(filename, 'a' if append else 'w') as f:
            if file_root is not None:
                f = f.require_group(file_root)
            # 'category' and 'name' become the group path, not datasets.
            skip_set = frozenset(['category', 'name'])
            for row in self:
                g = f.require_group(row['category']).create_group(row['name'])
                for name, val in izip(row.dtype.names, row):
                    if name not in skip_set:
                        g[name] = val
            # The keyword attrs become datasets under the '_meta' group.
            meta = f.require_group('_meta')
            for k, v in iteritems(attrs):
                meta[k] = v
    @classmethod
    def load_from_hdf5(cls, filename, file_root=None,
                       load_attrs=False, features_dtype=None,
                       cats=None, pairs=None, subsample_fn=None,
                       names_only=False):
        '''
        Reads a Features instance from an h5py file created by save_features().

        filename is the path to the hdf5 file.

        If file_root is passed, it gives the path within the hdf5 file of the
        features object. (By default, assumes it's at the root.)

        If load_attrs, also returns a dictionary of meta values loaded from
        root attributes, '/_meta' attributes, '/_meta' datasets.

        features_dtype specifies the datatype to load features as.

        If cats is passed, only load those with a category in cats (as checked
        by the `in` operator, aka the __contains__ special method).

        If pairs is passed, tuples of (category, name) are checked with the `in`
        operator. If cats is also passed, that check applies first.

        subsample_fn is applied to a list of (category, name) pairs, and returns
        another list of that format. functools.partial(random.sample, k=100) can
        be used to subsample 100 bags uniformly at random, for example.

        If names_only is passed, the list of (category, name) pairs is returned
        without having loaded any data. load_attrs is also ignored.
        '''
        import h5py
        with h5py.File(filename, 'r') as f:
            if file_root is not None:
                f = f[file_root]

            # Collect the (category, name) pairs that pass the filters.
            bag_names = []
            for cat, cat_g in iteritems(f):
                if cat != '_meta' and (cats is None or cat in cats):
                    for fname in iterkeys(cat_g):
                        if pairs is None or (cat, fname) in pairs:
                            bag_names.append((cat, fname))
            if subsample_fn is not None:
                bag_names = subsample_fn(bag_names)
            if names_only:
                return bag_names

            # first pass: get numbers/type of features, names/types of metadata
            dim = None
            n_pts = []
            dtypes = set()
            extra_types = {}
            with_extras = Counter()  # how many bags carry each extra
            for cat, fname in bag_names:
                g = f[cat][fname]
                feats = g['features']
                shape = feats.shape
                if len(shape) != 2:
                    msg = "malformed file: {}/{}/features has shape {}"
                    raise ValueError(msg.format(cat, fname, shape))
                elif shape[0] == 0:
                    msg = "malformed file: {}/{} has no features"
                    raise ValueError(msg.format(cat, fname))
                # All bags must agree on the feature dimensionality.
                if dim is None:
                    dim = shape[1]
                elif shape[1] != dim:
                    msg = "malformed file: {}/{} has feature dim {}, expected {}"
                    raise ValueError(msg.format(cat, fname, shape[1], dim))
                n_pts.append(feats.shape[0])
                dtypes.add(feats.dtype)

                for name, val in iteritems(g):
                    if name == 'features':
                        continue
                    # Scalar extras keep their dtype; non-scalars degrade to
                    # a generic object dtype.
                    dt = val.dtype if all(s == 1 for s in val.shape) else object
                    if name not in extra_types:
                        extra_types[name] = dt
                    elif extra_types[name] != dt:
                        msg = "different {}s have different dtypes"
                        raise TypeError(msg.format(name))
                        # TODO: find a dtype that'll cover all of them
                    with_extras[name] += 1

            n_bags = len(bag_names)
            n_pts = np.asarray(n_pts)
            # boundaries[i]:boundaries[i+1] indexes bag i's rows in `features`.
            boundaries = np.hstack([[0], np.cumsum(n_pts)])

            # allocate space for features and extras
            # TODO: go straight into a data array, save a copy...
            if features_dtype is None:
                dtype = dtypes.pop()
                if dtypes:
                    raise TypeError("different features have different dtypes")
                    # TODO: find a dtype that'll cover all of them
            else:
                dtype = features_dtype
            features = np.empty((n_pts.sum(), dim), dtype=dtype)

            extras = {}
            extra_defaults = {}
            for name, dt in iteritems(extra_types):
                if with_extras[name] != n_bags:
                    # Some bags lack this extra: widen the dtype if needed and
                    # remember the fill value for missing entries.
                    dt, d = cls._missing_extras(dt)
                    extra_defaults[name] = d
                    print("WARNING: {} missing values for {}. using {} instead"
                          .format(n_bags - with_extras[name], name, d),
                          file=sys.stderr)
                extras[name] = np.empty(n_bags, dtype=dt)

            # actually load all the features and extras
            for i, (cat, fname) in enumerate(bag_names):
                g = f[cat][fname]
                features[boundaries[i]:boundaries[i+1]] = g['features']
                for ex_name in extra_types:
                    if ex_name in g:
                        extras[ex_name][i] = g[ex_name][()]
                    else:
                        extras[ex_name][i] = extra_defaults[ex_name]

            categories, names = zip(*bag_names)
            obj = cls(features, n_pts=n_pts, categories=categories, names=names,
                      **extras)

            if load_attrs:
                # Precedence: /_meta datasets, then /_meta attributes, then
                # root attributes (first writer of a key wins).
                attrs = {}
                if '_meta' in f:
                    for k, v in iteritems(f['_meta']):
                        attrs[k] = v[()]
                    for k, v in iteritems(f['_meta'].attrs):
                        if k not in attrs:
                            attrs[k] = v
                for k, v in iteritems(f.attrs):
                    if k not in attrs:
                        attrs[k] = v
                return obj, attrs
            return obj
############################################################################
### Stuff relating to per-bag npz feature files
def save_as_perbag(self, path, **attrs):
'''
Save into one npz file for each bag, named like
path/category/name.npz
Also saves any extra attributes passed as keyword arguments in
path/attrs.pkl
'''
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, 'attrs.pkl'), 'wb') as f:
pickle.dump(attrs, f)
skip_set = frozenset(['category', 'name'])
for row in self:
dirpath = os.path.join(path, row['category'])
if not os.path.isdir(dirpath):
os.mkdir(dirpath)
data = dict((k, v) for k, v in izip(row.dtype.names, row)
if k not in skip_set)
np.savez(os.path.join(dirpath, row['name'] + '.npz'), **data)
    @classmethod
    def load_from_perbag(cls, path, load_attrs=False, features_dtype=None,
                         cats=None, pairs=None, subsample_fn=None,
                         names_only=False):
        '''
        Reads a Features instance from a directory of npz files created
        by save_as_perbag().

        If load_attrs, also returns a dictionary of meta values loaded from the
        `attrs.pkl` file, if it exists.

        features_dtype specifies the datatype to load features as.

        If cats is passed, only load those with a category in cats (as checked
        by the `in` operator, aka the __contains__ special method).

        If pairs is passed, tuples of (category, name) are checked with the `in`
        operator. If cats is also passed, that check applies first.

        subsample_fn is applied to a list of (category, name) pairs, and returns
        another list of that format. functools.partial(random.sample, k=100) can
        be used to subsample 100 bags uniformly at random, for example.

        If names_only is passed, the list of (category, name) pairs is returned
        without having loaded any data. load_attrs is also ignored.
        '''
        from glob import glob
        # Each subdirectory of `path` is a category; each *.npz inside it is
        # one bag. Apply the cats/pairs filters while walking.
        bag_names = []
        for cat in os.listdir(path):
            dirpath = os.path.join(path, cat)
            if os.path.isdir(dirpath) and (cats is None or cat in cats):
                for npz_fname in glob(os.path.join(dirpath, '*.npz')):
                    # strip the "<dirpath>/" prefix and the ".npz" suffix
                    fname = npz_fname[len(dirpath) + 1:-len('.npz')]
                    if pairs is None or (cat, fname) in pairs:
                        bag_names.append((cat, fname))
        if subsample_fn is not None:
            bag_names = subsample_fn(bag_names)
        if names_only:
            return bag_names

        # Unlike in read_from_hdf5, we can't just get the size of arrays without
        # loading them in. So we do the vstacking thing.
        bags = []
        extras = []  # a list of dictionaries. will replace with a dict of
                     # arrays after we load them all in.
        for cat, fname in bag_names:
            npz_path = os.path.join(path, cat, fname + '.npz')
            # np.load keeps the file handle around; closing() releases it
            # once everything has been pulled out.
            with closing(np.load(npz_path)) as data:
                feats = None
                extra = {}
                for k, v in iteritems(data):
                    if k == 'features':
                        if features_dtype is not None:
                            feats = np.asarray(v, dtype=features_dtype)
                        else:
                            feats = v[()]
                    else:
                        extra[k] = v[()]
                bags.append(feats)
                extras.append(extra)

        categories, names = zip(*bag_names)
        obj = cls._postprocess(categories, names, bags, extras)
        return cls._maybe_load_attrs(obj, path, load_attrs=load_attrs)
    @classmethod
    def _postprocess(cls, categories, names, bags, extras):
        '''
        Assemble loaded bags plus per-bag extras dicts into a Features
        instance. Reconciles the extras' dtypes, fills missing entries
        (warning on stderr), and converts each extra into one numpy array.
        '''
        # post-process the extras
        n_bags = len(bags)
        extra_types = defaultdict(Counter)  # extra name -> Counter of dtypes
        the_extras = {}
        extra_defaults = {}
        for extra in extras:
            for k, v in iteritems(extra):
                # scalar values keep their dtype; non-scalars degrade to object
                dt = v.dtype if all(s == 1 for s in v.shape) else object
                extra_types[k][dt] += 1
        for name, dt_counts in iteritems(extra_types):
            if len(dt_counts) == 1:
                dt = next(iter(iterkeys(dt_counts)))
            else:
                # TODO: reconcile similar types?
                dt = object
            num_seen = sum(extra_types[name].values())
            if num_seen != n_bags:
                # some bags are missing this extra: widen the dtype if needed
                # and pick a filler value for the missing entries
                dt, d = cls._missing_extras(dt)
                extra_defaults[name] = d
                msg = "WARNING: {} missing values for {}. using {} instead"
                print(msg.format(n_bags - num_seen, name, d), file=sys.stderr)
            else:
                extra_defaults[name] = None
            the_extras[name] = np.empty(n_bags, dtype=dt)
        for i, extra_d in enumerate(extras):
            for name, default in iteritems(extra_defaults):
                the_extras[name][i] = extra_d.get(name, default)
        return cls(bags, categories=categories, names=names, **the_extras)
@classmethod
def _maybe_load_attrs(cls, obj, path, load_attrs):
if load_attrs:
try:
with open(os.path.join(path, 'attrs.pkl'), 'rb') as f:
attrs = pickle.load(f)
except IOError:
attrs = {}
return obj, attrs
else:
return obj
############################################################################
### Stuff relating to typedbytes feature file
    def save_as_typedbytes(self, path, **attrs):
        '''
        Save into a directory of Hadoop typedbytes file.

        They're split to have about 500MB of data each, named like "data_0.tb",
        "data_350.tb", etc, where the number is the first index contained.
        TODO: allow customizing this, giving explicit splits, ...

        The keys in the files are "category/name".
        Each value is a mapping, with elements:
            "features": a numpy array (the content of np.save/np.load; see
                        sdm.typedbytes_utils)
            any extras: the value (a scalar, numpy array, ...)
                        Note that any scalars which have an exact representation
                        in the typedbytes format will be written as such; others
                        will be pickled.

        Also saves any extra attributes passed as keyword arguments in
            path/attrs.pkl

        Requires the "ctypedbytes" or "typedbytes" library (in pip).
        '''
        from . import typedbytes_utils as tbu

        # Start fresh: the directory's contents define the dataset.
        if os.path.exists(path):
            shutil.rmtree(path)
        os.makedirs(path)

        with open(os.path.join(path, 'attrs.pkl'), 'wb') as f:
            pickle.dump(attrs, f)

        def output_file(idx):
            # Open data_<idx>.tb and wire up the numpy-aware writer.
            f = open(os.path.join(path, 'data_{}.tb'.format(idx)), 'wb')
            out = tbu.tb.PairedOutput(f)
            tbu.register_write(out)
            return out

        amt_per_file = 500 * 2**20  # ~500 MB per output file
        skip_set = frozenset(['category', 'name'])  # encoded in the key
        out = output_file(0)
        try:
            for idx, bag in enumerate(self):
                # Roll over to a new file once the current one is big enough;
                # the new file is named by the first bag index it contains.
                if out.file.tell() >= amt_per_file:
                    out.close()
                    out = output_file(idx)
                out.write((
                    "{}/{}".format(bag['category'], bag['name']),
                    dict((k, v) for k, v in izip(bag.dtype.names, bag)
                         if k not in skip_set)
                ))
        finally:
            out.close()
@classmethod
def _proc_from_typedbytes(cls, val, features_dtype=None):
bag = np.asarray(val.pop('features'), dtype=features_dtype)
return bag, dict((k, np.asarray(v)) for k, v in iteritems(val))
    @classmethod
    def _load_typedbytes(cls, f, features_dtype=None, cats=None, pairs=None):
        '''
        Read one open typedbytes file object `f` into a Features instance,
        applying the cats/pairs filters. Filtered-out entries are skipped
        over in the stream without being deserialized.
        '''
        from . import typedbytes_utils as tbu
        inp = tbu.tb.Input(f)
        tbu.register_read_ndarray(inp)
        tbu.check_seekable(inp)

        categories = []
        names = []
        bags = []
        extras = []
        while True:
            key = inp.read()
            if key is None:  # end of stream
                break
            cat, name = key.split('/', 1)

            if ((cats is not None and cat not in cats) or
                    (pairs is not None and (cat, name) not in pairs)):
                # skipping this one: consume the value's type byte and its
                # length, then jump over the payload without decoding it.
                type_byte = f.read(1)
                # NOTE(review): this unpacks read_int() as a 1-tuple, and
                # relies on f._file_seekable set by check_seekable -- confirm
                # both against typedbytes_utils.
                val_length, = inp.read_int()
                if f._file_seekable:
                    f.seek(val_length, os.SEEK_CUR)
                else:
                    f.read(val_length)
            else:
                # loading this one
                val = inp.read()
                bag, extra = cls._proc_from_typedbytes(
                    val, features_dtype=features_dtype)
                categories.append(cat)
                names.append(name)
                bags.append(bag)
                extras.append(extra)
        return cls._postprocess(categories, names, bags, extras)
    @classmethod
    def load_from_typedbytes(cls, path, load_attrs=False, features_dtype=None,
                             cats=None, pairs=None, subsample_fn=None,
                             names_only=False):
        '''
        Reads a Features instance from a directory of typedbytes files created
        by save_as_typedbytes().

        If load_attrs, also returns a dictionary of meta values loaded from the
        `attrs.pkl` file, if it exists.

        features_dtype specifies the datatype to load features as.

        If cats is passed, only load those with a category in cats (as checked
        by the `in` operator, aka the __contains__ special method).

        If pairs is passed, tuples of (category, name) are checked with the `in`
        operator. If cats is also passed, that check applies first.

        subsample_fn is applied to a list of (category, name) pairs, and returns
        another list of that format. functools.partial(random.sample, k=100) can
        be used to subsample 100 bags uniformly at random, for example.

        If names_only is passed, the list of (category, name) pairs is returned
        instead of any data. load_attrs is also ignored.

        Note that for this type (as opposed to HDF5/perbag), we have to actually
        walk over all the data to get the categories and names for names_only or
        a subsample_fn. To not double the I/O, we just load it all. If you're
        trying to save on memory, you could modify this method to do so....
        '''
        # load everything
        def load_files():
            # one Features instance per data_*.tb file
            for fname in glob(os.path.join(path, 'data_*.tb')):
                with open(fname, 'rb') as f:
                    yield cls._load_typedbytes(f, features_dtype=features_dtype,
                                               cats=cats, pairs=pairs)
        # concatenate the per-file instances via the Features + operator
        feats = reduce(op.add, load_files())

        if subsample_fn is not None or names_only:
            # NOTE(review): bag_names is consumed twice below; this relies on
            # Python 2 zip() returning a list (under Python 3 it would be an
            # exhausted iterator).
            bag_names = zip(feats.categories, feats.names)
            if subsample_fn is not None:
                # map each (category, name) pair back to its index so the
                # subsampled bags can be selected out of `feats`
                idx_map = dict(imap(reversed, enumerate(bag_names)))
                bag_names = subsample_fn(bag_names)
                which = [idx_map[bag_name] for bag_name in bag_names]
                feats = feats[which]
            if names_only:
                return bag_names
        return cls._maybe_load_attrs(feats, path, load_attrs=load_attrs)
| StarcoderdataPython |
179428 | <gh_stars>0
from typing import List
from infobip_channels.core.models import CamelCaseModel, ResponseBase
from infobip_channels.email.models.response.core import ResultBase
class Error(CamelCaseModel):
    """Delivery-error descriptor attached to a delivery-report result."""

    group_id: int      # numeric id of the error group
    group_name: str    # name of the error group
    id: int            # numeric id of the specific error
    name: str          # short error name
    description: str   # human-readable error description
    permanent: bool    # whether the error is permanent (per the API contract)
class Result(ResultBase):
    """One delivery-report entry: base result fields plus error and channel."""

    error: Error  # detailed error information for this message
    channel: str  # channel the message was sent over
class DeliveryReportsResponse(ResponseBase):
    """Top-level response wrapping the list of delivery-report results."""

    results: List[Result]  # one entry per reported message
| StarcoderdataPython |
3394168 | from api.model.food import Food
from api.model.foodComment import FoodComment
from api.model.ingredient import Ingredient
from api.model.inclusion import Inclusion
from api.model.ateIngredient import AteIngredient
from api.model.ateFood import AteFood
from api.model.restaurant import Restaurant
from api.model.diet import Diet
| StarcoderdataPython |
1748652 | #primer
'''
__author__ = "<NAME>"
__Copyright__ "Copyright September 2019, <NAME>"
__License__ = "GPL"
__email__ "<EMAIL>"
'''
import os, glob
import numpy as np
import pandas as pd
import Bio
from Bio.Seq import MutableSeq, Seq
from Bio import SeqIO
from Bio.SeqUtils import GC
from typing import Tuple
def degenerate_primer(primer:'Bio.Seq.MutableSeq') -> tuple:
    """
    Expand a primer containing IUPAC degenerate bases into concrete variants.

    Returns a tuple of primer strings (the original annotation said str, but
    the function builds and returns a tuple). Positions holding A/C/G/T are
    left alone; any other (degenerate) code is expanded for each variant
    accumulated so far via degenerate_primer_list. Note that the expansion
    lists in degenerate_primer_list also retain the degenerate letter itself,
    so the output includes partially-expanded variants as well.
    """
    forward = (str(primer),)
    for i in range(0, len(primer)):
        # len(forward) is captured here, so variants appended during this
        # pass are only re-scanned on the next position i.
        for j in range(0, len(forward)):
            primer = MutableSeq(forward[j])
            if (primer[i] == 'A') or (primer[i] == 'C') or (primer[i] == 'G') or (primer[i] == 'T'):
                pass
            else:
                forward = degenerate_primer_list(forward, primer, i, primer[i])
    return forward
def degenerate_primer_list(forward:'tuple', primer:'Bio.Seq.MutableSeq', i:'int', letter:'str') -> tuple:
    """
    Expand one degenerate base into its concrete alternatives.

    For the IUPAC code `letter` at position `i` of `primer`, substitute each
    alternative base (the degenerate letter itself is kept as the final
    alternative, matching the original table) and append the resulting
    sequence string to the `forward` tuple, which is returned.
    """
    expansions = {
        'R': ['A', 'G', 'R'],
        'M': ['A', 'C', 'M'],
        'S': ['C', 'G', 'S'],
        'B': ['C', 'G', 'T', 'B'],
        'H': ['A', 'C', 'T', 'H'],
        'N': ['A', 'C', 'G', 'T', 'N'],
        'Y': ['C', 'T', 'Y'],
        'K': ['G', 'T', 'K'],
        'W': ['A', 'T', 'W'],
        'D': ['A', 'G', 'T', 'D'],
        'V': ['A', 'C', 'G', 'V'],
    }
    for basepair in expansions[letter]:
        primer[i] = basepair
        forward = forward + (str(primer),)
    return forward
def forward_primer_search(species:'str', forward_primer:'tuple') -> Tuple[str, str, str]:
    """
    Locate a forward primer in a sequence and trim to it.

    Tries every candidate in `forward_primer` against `species` (first
    occurrence via str.find). If none match, returns ('', 'N/a', 'N/a').
    Otherwise returns (sequence from the match onward, primer used, match
    position); if several candidates match, the last one in the tuple wins,
    matching the original behaviour. Note the position is an int despite
    the declared annotation.
    """
    match_positions = [species.find(p) for p in forward_primer]
    if all(pos == -1 for pos in match_positions):
        return str(''), str('N/a'), str('N/a')
    segment = ''
    used = ''
    for candidate, pos in zip(forward_primer, match_positions):
        if pos != -1:
            segment = species[pos:]
            used = candidate
    start = len(species) - len(segment)
    return segment, used, start
def reverse_primer_search(species:'str', reverse_primer_set:'tuple') -> Tuple[str, str, str]:
    """
    Locate a reverse primer (via its reverse complement) and trim to it.

    Each primer in `reverse_primer_set` is reverse-complemented and searched
    for in `species`. If none match, returns ('', 'N/a', 'N/a'); otherwise
    returns (sequence up to and including the match, complement used, match
    position). As in the original, the last matching primer in the set wins.
    Note the position is an int despite the declared annotation.
    """
    complements = [str(Seq(p).reverse_complement()) for p in reverse_primer_set]
    match_positions = [species.find(c) for c in complements]
    if all(pos == -1 for pos in match_positions):
        return str(''), str('N/a'), str('N/a')
    for comp, pos in zip(complements, match_positions):
        if pos != -1:
            # BUG FIX: slice with the matched primer's own length. The old
            # code reused the loop-leaked complement of the *last* primer in
            # the set, producing wrong amplicons when primers differ in length.
            amplicon_segment = species[0:pos + len(comp)]
            rev_primer_used = comp
    reverse_primer_position = len(amplicon_segment) - len(rev_primer_used)
    return amplicon_segment, rev_primer_used, reverse_primer_position
def create_PCR_amplicon(core_data:'pd.DataFrame', rev_tup:'tuple', fwd_tup:'tuple') -> pd.DataFrame:
    '''
    Run the in-silico PCR over every record and tabulate the amplicons.

    For each record id in core_data, trims the 16S sequence at the reverse
    primer, then at the forward primer, and records the primers used, their
    positions, and the GC content and length of the resulting amplicon.
    '''
    rows = []
    for record_id in core_data['Record id']:
        record = core_data.loc[core_data['Record id'] == record_id]
        sequence = record['16S Sequence'].item()
        trimmed, rev_used, rev_pos = reverse_primer_search(sequence, rev_tup)
        amplicon, fwd_used, fwd_pos = forward_primer_search(trimmed, fwd_tup)
        rows.append([record['Species'].item(),
                     record_id,
                     fwd_used, fwd_pos,
                     rev_used, rev_pos,
                     round(GC(amplicon), 1),
                     len(amplicon),
                     amplicon])
    columns = ['Species', 'Record id', 'Forward Primer',
               'forward_primer_position', 'Reverse Primer',
               'reverse_primer_position', 'GC Content',
               'Length of Amplicon', 'Amplicon']
    return pd.DataFrame(rows, columns=columns)
| StarcoderdataPython |
112948 | <gh_stars>1-10
t = int(raw_input())
for u in range(0,t):
try:
p = int(raw_input())
punkty = ()
wspolrzedne = ()
for v in range(0,p):
try:
z = raw_input().split()
punkty = punkty.append(z[0])
wspolrzedne = wspolrzedne.append(z[1]).append(z[2])
print punkty
print wspolrzedne
except EOFError:
break
except EOFError:
break
| StarcoderdataPython |
1686755 | <reponame>Mythologos/data-science-project<gh_stars>0
"""
This file contains the prototypical network.
Currently, it takes three optional arguments:
* learning-rate: a floating point value indicating the learning rate of the neural network.
* max-epochs: an integer indicating the maximum number of epochs for which the network will be trained.
* epoch-size: an integer indicating the size of each epoch.
"""
import multiprocessing as mp
import os
from argparse import ArgumentParser
import cv2
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from scipy import ndimage
from torch.autograd import Variable
from torchvision.datasets.omniglot import Omniglot # Currently not used below, but could be used.
from data.dataset import OmniglotReactionTimeDataset # Currently not used below, but could be used.
from data.full_omniglot import FullOmniglot # Currently not used below, but could be used.
# Main Model Class
class ProtoNet(nn.Module):
    """
    Prototypical network: classifies query points by (squared) Euclidean
    distance to class prototypes, where each prototype is the mean embedding
    of that class's support examples.
    """

    def __init__(self, encoder):
        """
        Args:
            encoder : CNN encoding the images in sample

        Note: the encoder is moved to the GPU here, so a CUDA device is
        required to use this class.
        """
        super(ProtoNet, self).__init__()
        self.encoder = encoder.cuda()

    def set_forward_loss(self, sample):
        """
        Computes loss, accuracy and output for classification task.

        Args:
            sample (dict): 'images' tensor of shape
                (n_way, n_support+n_query, C, H, W), plus the ints
                'n_way', 'n_support', 'n_query'
        Returns:
            (loss tensor, dict with scalar 'loss', 'acc' and predicted 'y_hat')
        """
        sample_images = sample['images'].cuda()
        n_way = sample['n_way']
        n_support = sample['n_support']
        n_query = sample['n_query']
        # first n_support images per class are the support set; rest are queries
        x_support = sample_images[:, :n_support]
        x_query = sample_images[:, n_support:]
        # target indices are 0 ... n_way-1
        target_inds = torch.arange(0, n_way).view(n_way, 1, 1).expand(n_way, n_query, 1).long()
        target_inds = Variable(target_inds, requires_grad=False)
        target_inds = target_inds.cuda()
        # encode images of the support and the query set (one batched pass)
        x = torch.cat([x_support.contiguous().view(n_way * n_support, *x_support.size()[2:]),
                       x_query.contiguous().view(n_way * n_query, *x_query.size()[2:])], 0)
        z = self.encoder.forward(x)
        z_dim = z.size(-1)  # usually 64
        # prototypes: mean support embedding per class
        z_proto = z[:n_way*n_support].view(n_way, n_support, z_dim).mean(1)
        z_query = z[n_way*n_support:]
        # compute distances (squared Euclidean, query x prototype)
        dists = euclidean_dist(z_query, z_proto)
        # compute probabilities: softmax over negative distance
        log_p_y = F.log_softmax(-dists, dim=1).view(n_way, n_query, -1)
        loss_val = -log_p_y.gather(2, target_inds).squeeze().view(-1).mean()
        _, y_hat = log_p_y.max(2)
        acc_val = torch.eq(y_hat, target_inds.squeeze()).float().mean()
        return loss_val, {'loss': loss_val.item(), 'acc': acc_val.item(), 'y_hat': y_hat}
# helper classes for `ProtoNet`
class Flatten(nn.Module):
    """Collapse every dimension after the batch dimension into one."""

    def forward(self, x):
        # (batch, d1, d2, ...) -> (batch, d1*d2*...)
        return x.view(x.size(0), -1)
def load_protonet_conv(**kwargs):
    """
    Builds the prototypical network model.

    Keyword Args:
        x_dim (tuple): shape of the input image, (channels, H, W)
        hid_dim (int): number of channels in the hidden conv blocks
        z_dim (int): number of channels of the final embedding block

    Returns:
        ProtoNet wrapping a 4-block CNN encoder followed by Flatten.
    """
    x_dim = kwargs['x_dim']
    hid_dim = kwargs['hid_dim']
    z_dim = kwargs['z_dim']

    def conv_block(in_channels, out_channels):
        # conv -> batchnorm -> relu -> 2x2 maxpool (halves spatial size)
        return nn.Sequential(
            nn.Conv2d(in_channels, out_channels, 3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(),
            nn.MaxPool2d(2),
        )

    channel_plan = [(x_dim[0], hid_dim), (hid_dim, hid_dim),
                    (hid_dim, hid_dim), (hid_dim, z_dim)]
    layers = [conv_block(cin, cout) for cin, cout in channel_plan]
    layers.append(Flatten())
    return ProtoNet(nn.Sequential(*layers))
def euclidean_dist(x, y):
    """
    Pairwise *squared* Euclidean distances between rows of x and y.
    (Despite the name, the square root is never taken; for argmin/softmax
    purposes the squared distance behaves the same.)

    Args:
        x (torch.Tensor): shape (n, d). n usually n_way*n_query
        y (torch.Tensor): shape (m, d). m usually n_way
    Returns:
        torch.Tensor: shape (n, m). For each query, the squared distances
        to each centroid.
    """
    n, d = x.size(0), x.size(1)
    m = y.size(0)
    assert d == y.size(1)
    diff = x.unsqueeze(1).expand(n, m, d) - y.unsqueeze(0).expand(n, m, d)
    return diff.pow(2).sum(2)
def extract_sample(n_way, n_support, n_query, datax, datay):
    """
    Picks a random episode of size n_support + n_query for n_way classes.

    Args:
        n_way (int): number of classes in the episode
        n_support (int): labeled examples per class in the support set
        n_query (int): labeled examples per class in the query set
        datax (np.array): dataset of images, shape (N, H, W, C)
        datay (np.array): dataset of labels, shape (N,)

    Returns (dict) of:
        'images' (torch.Tensor): shape (n_way, n_support+n_query, C, H, W)
        'n_way', 'n_support', 'n_query' (int): echoed episode parameters
    """
    chosen_classes = np.random.choice(np.unique(datay), n_way, replace=False)
    per_class = []
    for class_label in chosen_classes:
        class_images = datax[datay == class_label]
        shuffled = np.random.permutation(class_images)
        per_class.append(shuffled[:(n_support + n_query)])
    images = torch.from_numpy(np.array(per_class)).float()
    images = images.permute(0, 1, 4, 2, 3)  # NHWC -> NCHW per episode slot
    return {'images': images, 'n_way': n_way,
            'n_support': n_support, 'n_query': n_query}
def read_alphabets(alphabet_directory_path, alphabet_directory_name):
    """
    Reads all the characters from a given alphabet directory.

    Each image is resized to 28x28 and augmented with 90/180/270-degree
    rotations; labels encode "<alphabet>_<character>_<rotation>".
    """
    datax = []
    datay = []
    for character in os.listdir(alphabet_directory_path):
        char_dir = alphabet_directory_path + character + '/'
        for img_name in os.listdir(char_dir):
            image = cv2.resize(cv2.imread(char_dir + img_name), (28, 28))
            variants = [
                ('0', image),
                ('90', ndimage.rotate(image, 90)),
                ('180', ndimage.rotate(image, 180)),
                ('270', ndimage.rotate(image, 270)),
            ]
            for angle, rotated in variants:
                datax.append(rotated)
                datay.append(alphabet_directory_name + '_' + character + '_' + angle)
    return np.array(datax), np.array(datay)
def read_images(base_directory):
    """
    Reads all the alphabets from the base_directory.

    Each alphabet subdirectory is parsed by read_alphabets in a worker
    process; the per-alphabet arrays are stacked into one (datax, datay)
    pair. Returns (None, None) if the directory contains no alphabets,
    matching the original behavior.
    """
    alphabet_dirs = os.listdir(base_directory)
    arg_list = [(base_directory + '/' + d + '/', d) for d in alphabet_dirs]
    pool = mp.Pool(mp.cpu_count())
    try:
        # BUG FIX: the old code used pool.apply(), which blocks until each
        # call returns, so the "parallel" read actually ran serially.
        # starmap distributes the (path, name) calls across the pool.
        results = pool.starmap(read_alphabets, arg_list)
    finally:
        pool.close()
        pool.join()
    if not results:
        return None, None
    datax = np.vstack([r[0] for r in results])
    datay = np.concatenate([r[1] for r in results])
    return datax, datay
# train function
def train(model, optimizer, train_x, train_y, n_way, n_support, n_query, max_epoch, epoch_size):
    """
    Trains the protonet episodically.

    Args:
        model: ProtoNet to train
        optimizer: optimizer over model.parameters()
        train_x (np.array): images of training set
        train_y (np.array): labels of training set
        n_way (int): number of classes in a classification task
        n_support (int): number of labeled examples per class in the support set
        n_query (int): number of labeled examples per class in the query set
        max_epoch (int): max epochs to train on
        epoch_size (int): episodes per epoch
    """
    # divide the learning rate by 2 at each epoch, as suggested in paper
    scheduler = optim.lr_scheduler.StepLR(optimizer, 1, gamma=0.5, last_epoch=-1)
    epoch = 0  # epochs done so far
    stop = False  # status to know when to stop
    # NOTE(review): `stop` is never set to True anywhere, so training always
    # runs for the full max_epoch epochs.
    while epoch < max_epoch and not stop:
        running_loss = 0.0
        running_acc = 0.0
        for i in range(epoch_size):
            # each episode: sample a fresh n_way / n_support / n_query task
            sample = extract_sample(n_way, n_support, n_query, train_x, train_y)
            optimizer.zero_grad()
            loss, output = model.set_forward_loss(sample)
            running_loss += output['loss']
            running_acc += output['acc']
            loss.backward()
            optimizer.step()
        # report the average loss/accuracy across the epoch's episodes
        epoch_loss = running_loss / epoch_size
        epoch_acc = running_acc / epoch_size
        print('Epoch {:d} -- Loss: {:.4f} Acc: {:.4f}'.format(epoch+1,epoch_loss, epoch_acc))
        epoch += 1
        scheduler.step()
if __name__ == '__main__':
    # Command-line knobs for the training run.
    parser = ArgumentParser()
    parser.add_argument("--learning-rate", type=float, nargs="?", default=0.001)
    parser.add_argument("--max-epochs", type=int, nargs="?", default=5)
    parser.add_argument("--epoch-size", type=int, nargs="?", default=2000)
    args = parser.parse_args()
    # NOTE(review): `device` is computed but never used below -- the model
    # hard-codes .cuda() inside ProtoNet.__init__.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # background alphabets for training, evaluation alphabets for testing
    trainx, trainy = read_images('omniglot-py/images_background')
    testx, testy = read_images('omniglot-py/images_evaluation')
    model = load_protonet_conv(
        x_dim=(3, 28, 28),
        hid_dim=64,
        z_dim=64,
    )
    optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    # episode configuration: 60-way, 5-shot, 5 queries per class
    n_way = 60
    n_support = 5
    n_query = 5
    train_x = trainx
    train_y = trainy
    max_epoch = args.max_epochs
    epoch_size = args.epoch_size
    train(model, optimizer, train_x, train_y, n_way, n_support, n_query, max_epoch, epoch_size)
| StarcoderdataPython |
3202607 | <reponame>kzenstratus/Finance<filename>stocks.py
from yahoo_finance import Share
from pprint import pprint
from datetime import datetime, timedelta
import numpy as np
import pandas as pd
import time
DATAFILE = "datafile.csv"
# BUG FIX: Apple's ticker symbol is 'AAPL'; 'APPL' is not Apple's symbol and
# would likely yield no price from the quote lookup.
SYMBOLS = ['YHOO', 'GM', 'AAPL', 'C', 'FB']
# Seed the data file with a single 'Symbols' column; each polling pass
# appends one timestamped price column.
pd.DataFrame({'Symbols': SYMBOLS}).to_csv("datafile.csv", index=False)
FREQ = 10.0  # Polling interval, in seconds
def getPrice(symbol):
    """Return the current price of `symbol` as a string, or '' if missing."""
    price = Share(symbol).get_price()
    return str(price) if price else ""
def addOneColData():
    """Append one price-snapshot column to DATAFILE.

    Reads the CSV at DATAFILE (which has a 'Symbols' column), adds a new
    column keyed by the current timestamp whose values are the current
    prices for each symbol, and writes the frame back without the index.

    (The previous docstring described a ``dt`` parameter that this
    function never took; the frame is loaded from disk each call.)
    """
    dt = pd.read_csv(DATAFILE)
    dt[str(datetime.now())] = dt['Symbols'].apply(getPrice) # apply getPrice to all symbols
    dt.to_csv(DATAFILE, index=False)
starttime=time.time()
# Poll forever: append one price column every FREQ seconds. The modulo on the
# elapsed time compensates for the work done inside addOneColData so the
# wake-ups stay aligned to the FREQ-second grid.
while True:
    addOneColData()
    time.sleep(FREQ - ((time.time() - starttime) % FREQ))
1696048 | """Module defining decoders."""
from opennmt.decoders.decoder import Decoder
from opennmt.decoders.decoder import get_sampling_probability
from opennmt.decoders.rnn_decoder import AttentionalRNNDecoder
from opennmt.decoders.rnn_decoder import RNMTPlusDecoder
from opennmt.decoders.rnn_decoder import RNNDecoder
from opennmt.decoders.self_attention_decoder import SelfAttentionDecoder
| StarcoderdataPython |
1706735 | <filename>qutebrowser/fkd/gxeneralaj.py<gh_stars>10-100
#---------------------------------------------------------------------------------------------------
# General settings (header translated from Esperanto: "Ĝeneralaj")
#---------------------------------------------------------------------------------------------------
c.url.default_page = "~/.qutebrowser/index.html"
c.content.pdfjs = True
c.downloads.location.prompt = True
c.auto_save.session = True
c.scrolling.smooth = True
c.completion.height = "20%"
c.completion.cmd_history_max_items = -1
c.content.autoplay = False
# Tab behaviour: open in background, insert after current, show index in title.
c.tabs.background = True
c.tabs.new_position.unrelated = "next"
c.tabs.title.format = "{index}: {current_title}"
c.tabs.position = "top"
c.tabs.select_on_remove = "next"
# Privacy: deny capture/location/lock/storage permission prompts by default.
c.content.desktop_capture = False
c.content.geolocation = False
c.content.mouse_lock = False
c.content.persistent_storage = False
c.content.register_protocol_handler = False
c.content.tls.certificate_errors = "ask"
c.completion.open_categories = ["history", "quickmarks", "bookmarks", "searchengines"]
c.zoom.default = "100%"
c.zoom.levels = ["25%", "33%", "50%", "60%", "70%", "80%", "90%", "100%", "110%", "125%", "150%", "175%", "200%", "250%", "300%", "400%", "500%"]
# Dvorak home-row characters for hinting.
c.hints.chars = "aoeuidhtns"
# Per-site override: no images on this text-heavy book site.
with config.pattern('http://gigamonkeys.com/book/') as p:
    p.content.images = False
| StarcoderdataPython |
class Solution(object):
    def XXX(self, root, sum):
        """Return True if some root-to-leaf path sums to *sum*.

        :type root: TreeNode
        :type sum: int
        :rtype: bool

        Rewritten from the list-accumulating DFS: the old version walked the
        whole tree even after finding a match and appended an unconditional
        False sentinel; this version short-circuits on the first matching path.
        """
        # Empty (sub)tree: there is no root-to-leaf path at all.
        if not root:
            return False
        remaining = sum - root.val
        # Leaf: the path ends here; it matches iff this leaf consumes the rest.
        if not root.left and not root.right:
            return remaining == 0
        # `or` short-circuits as soon as one subtree reports a match.
        return self.XXX(root.left, remaining) or self.XXX(root.right, remaining)
| StarcoderdataPython |
def insertion_sort(lst):
    """Sort *lst* in place with insertion sort.

    :param lst: list of mutually comparable elements
    :return: the comparison count under the original scheme — the number of
        shifts per insertion, but counted as 1 when an element is already in
        place (no shift happened).
    """
    comparisons = 0
    for idx in range(1, len(lst)):
        current = lst[idx]
        pos = idx - 1
        shifts = 0
        # Shift larger elements right until `current`'s slot is found.
        while pos >= 0 and current < lst[pos]:
            lst[pos + 1] = lst[pos]
            pos -= 1
            shifts += 1
        # Same accounting rule as the original: zero shifts still costs 1.
        comparisons += shifts if shifts else 1
        lst[pos + 1] = current
    return comparisons
| StarcoderdataPython |
181351 | <filename>aispace/models/base_model.py
# !/usr/bin/env python
# coding=utf-8
# @Time : 2019-07-05 10:27
# @Author : <EMAIL>
# @File : base_model.py
from abc import ABCMeta, abstractmethod
import tensorflow as tf
from aispace.utils.hparams import Hparams
from aispace.utils.registry import Registry
__all__ = [
"BaseModel"
]
class BaseModel(tf.keras.Model, Registry):
    """Abstract base for AiSpace models: a Keras model that also participates
    in the project Registry and carries its hyper-parameter bundle."""
    # NOTE(review): `__metaclass__` is a Python 2 idiom and has no effect on
    # Python 3 — abstractness here is not actually enforced; confirm intent.
    __metaclass__ = ABCMeta
    def __init__(self, hparams: Hparams, **kwargs):
        super(BaseModel, self).__init__(**kwargs)
        self._hparams = hparams  # hyper-parameters, available to subclasses
    @abstractmethod
    def call(self, inputs, training=None, mask=None):
        # Forward pass; concrete models must override.
        raise NotImplementedError
    @abstractmethod
    def deploy(self):
        # Export/serving hook; concrete models must override.
        raise NotImplementedError
3379398 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from threading import Thread
from functools import wraps
def thread(func):
    """Decorator that runs the wrapped function in a background thread.

    The decorated call starts a ``threading.Thread`` and returns it, so
    callers may ``join()`` the thread when they need to wait for completion.
    (The previous version returned ``Thread.start()``'s None, leaving the
    spawned thread unobservable; existing callers that ignored the return
    value are unaffected.)
    """
    @wraps(func)
    def wrap(*args, **kwargs):
        # Hand args/kwargs to Thread directly instead of closing over them
        # in a lambda; call semantics are identical.
        worker = Thread(target=func, args=args, kwargs=kwargs)
        worker.start()
        return worker
    return wrap
3301983 | <reponame>avwx-rest/account-backend<gh_stars>0
"""
Token management router
"""
from datetime import datetime, timedelta, timezone
from bson.objectid import ObjectId
from fastapi import APIRouter, Depends, HTTPException, Response
from account.models.token import (
AllTokenUsageOut,
Token,
TokenUpdate,
TokenUsage,
TokenUsageOut,
)
from account.models.user import User, UserToken
from account.util.current_user import current_user
router = APIRouter(prefix="/token", tags=["Token"])
@router.get("", response_model=list[Token])
async def get_user_tokens(user: User = Depends(current_user)):
"""Returns the current user's tokens"""
return user.tokens
@router.post("", response_model=Token)
async def new_token(user: User = Depends(current_user)):
"""Creates a new user token"""
token = await UserToken.new()
user.tokens.append(token)
await user.save()
return token
@router.get("/history", response_model=list[AllTokenUsageOut])
async def get_all_history(days: int = 30, user: User = Depends(current_user)):
"""Returns all recent token history"""
days_since = datetime.now(tz=timezone.utc) - timedelta(days=days)
data = (
await TokenUsage.find(
TokenUsage.user_id == ObjectId(user.id),
TokenUsage.date >= days_since,
)
.aggregate(
[
{"$project": {"_id": 0, "date": 1, "count": 1, "token_id": 1}},
{
"$group": {
"_id": "$token_id",
"days": {"$push": {"date": "$date", "count": "$count"}},
}
},
]
)
.to_list()
)
for i, item in enumerate(data):
data[i]["token_id"] = item["_id"]
del data[i]["_id"]
return data
@router.get("/{value}", response_model=Token)
async def get_token(value: str, user: User = Depends(current_user)):
"""Returns token details by string value"""
_, token = user.get_token(value)
if token is None:
raise HTTPException(404, f"Token with value {value} does not exist")
return token
@router.patch("/{value}", response_model=Token)
async def update_token(
value: str, update: TokenUpdate, user: User = Depends(current_user)
):
"""Updates token details by string value"""
i, token = user.get_token(value)
if token is None:
raise HTTPException(404, f"Token with value {value} does not exist")
token = token.copy(update=update.dict(exclude_unset=True))
user.tokens[i] = token
await user.save()
return token
@router.delete("/{value}")
async def delete_token(value: str, user: User = Depends(current_user)):
"""Deletes a token by string value"""
i, token = user.get_token(value)
if token is None:
raise HTTPException(404, f"Token with value {value} does not exist")
user.tokens.pop(i)
await user.save()
return Response(status_code=204)
@router.post("/{value}/refresh", response_model=Token)
async def refresh_token(value: str, user: User = Depends(current_user)):
"""Refreshes token value by string value"""
i, token = user.get_token(value)
if token is None:
raise HTTPException(404, f"Token with value {value} does not exist")
await user.tokens[i].refresh()
await user.save()
return user.tokens[i]
@router.get("/{value}/history", response_model=list[TokenUsageOut])
async def get_token_history(
value: str, days: int = 30, user: User = Depends(current_user)
):
"""Return a token's usage history"""
_, token = user.get_token(value)
if token is None:
raise HTTPException(404, f"Token with value {value} does not exist")
days_since = datetime.now(tz=timezone.utc) - timedelta(days=days)
return await TokenUsage.find(
TokenUsage.token_id == ObjectId(token.id), TokenUsage.date >= days_since
).to_list()
| StarcoderdataPython |
53513 | """
bot implementation.
"""
import os
import boto3
from botocore.exceptions import ClientError
import sendgrid
import ciscospark
# Sets config values from the config file
ACCESS_TOKEN_SPARK = "Bearer " + os.environ['access_token_spark']
MYSELF = os.environ['my_person_id']
SENDGRID_API_TOKEN = os.environ['sendgrid_api_token']
AWS_REGION = "us-west-2"
CHARSET = "UTF-8"
SENDER = '<EMAIL>'
SENDER_NAME = 'boomerang'
def mask_email(email):
    """Hide the local part of *email* (all but its first character) behind '*'s."""
    at_pos = email.find('@')
    hidden = email[1:at_pos]
    # str.replace substitutes every occurrence of the hidden substring — this
    # deliberately matches the original implementation's behaviour.
    return email.replace(hidden, '*' * len(hidden))
def send_email(subject, plaintext_email, recipient):
    """Send a plain-text email to *recipient* through the SendGrid helper,
    using the module-level sender identity and API token."""
    sendgrid.send_email(SENDGRID_API_TOKEN, SENDER_NAME,
                        SENDER, recipient, subject, plaintext_email)
def send_email_ses(subject, plaintext_email, recipient):
    """Send a plain-text email to *recipient* via AWS SES.

    Errors from SES are caught and printed rather than raised (best-effort
    delivery). NOTE: this file uses Python 2 print statements below.
    """
    # Create a new SES resource and specify a region.
    client = boto3.client('ses', region_name=AWS_REGION)
    # Try to send the email.
    try:
        # Provide the contents of the email.
        response = client.send_email(
            Destination={
                'ToAddresses': [
                    recipient,
                ],
            },
            Message={
                'Body': {
                    'Text': {
                        'Charset': CHARSET,
                        'Data': plaintext_email,
                    },
                },
                'Subject': {
                    'Charset': CHARSET,
                    'Data': subject,
                },
            },
            Source=SENDER,
        )
    # Display an error if something goes wrong.
    except ClientError as e:
        print e.response['Error']['Message']
    else:
        print "Email sent! Message ID:",
        print response['ResponseMetadata']['RequestId']
def handler(event, context):
    """Lambda entry point for the "boomerang" Spark bot.

    Reads a Spark webhook *event*, fetches the triggering message, and either
    replies with help text or emails the message body back to its author.
    Returns True when the message was handled, False when it was ignored.
    """
    # print "Event is {0}".format(event)
    person_email = None
    try:
        room_id = event['data']['roomId']
        message_id = event['data']['id']
        person_id = event['data']['personId']
        person_email = event['data']['personEmail']
        print "Consumer: {}".format(person_email)
    except KeyError as error:
        print "Duh - key error %r" % error
        return False
    # Ignore the bot's own messages to avoid reply loops.
    if person_id == MYSELF:
        return False
    if person_email is None:
        return False
    message = ciscospark.get_message(ACCESS_TOKEN_SPARK, message_id)
    # NOTE(review): the default here is the *string* "None", so the
    # `user_message is None` check below can never trigger — confirm whether
    # get_message can return a dict without 'text'.
    user_message = message.get('text', "None")
    # print "Message: {}".format(user_message)
    if user_message is None:
        return False
    # Strip the leading "boomerang" trigger word (9 characters).
    if user_message.lower().startswith('boomerang'):
        user_message = user_message[9:]
    # print "Query (final): {}".format(user_message)
    if "help" in user_message[:6].lower():
        ciscospark.post_message_rich(
            ACCESS_TOKEN_SPARK, room_id, "Supported commands: help, or just add your note")
        return True
    subject = 'boomerang: {}...'.format(user_message[:30])
    # print 'subject: {}'.format(subject)
    # print 'body: {}'.format(user_message)
    send_email(subject, user_message, person_email)
    masked_email = mask_email(person_email)
    ciscospark.post_message_rich(
        ACCESS_TOKEN_SPARK, room_id, 'boom...the message is on it\'s way to ``{}``'.format(masked_email))
    return True
| StarcoderdataPython |
3273479 | <reponame>codefair2019/VarNotWar
class Artist:
    """Plain record describing a musical artist."""

    def __init__(self, id, name, genre, desc, albums, inactive):
        self.id = id                # numeric identifier
        self.name = name            # display name
        self.genre = genre
        self.desc = desc            # free-text description
        self.albums = albums        # list of album titles/objects
        self.inactive = inactive    # True when no longer active

    def __str__(self):
        # Same text as str(id) + ", " + name; join raises TypeError for a
        # non-string name exactly like the original concatenation did.
        return ", ".join([str(self.id), self.name])
| StarcoderdataPython |
1791561 | #!/usr/bin/env python
import sys
from . import consolekit as ck
from .get_args import get_args
from .path_to_x import path_to_filename, path_to_text
from . import pygments_util
from . import shakyo
from . import text_to_lines
from . import log
def get_example_lines(example_path,
                      example_text,
                      console,
                      *,
                      lexer_name,
                      style_name,
                      colorize,
                      decorate):
    """Split *example_text* into console-renderable lines.

    A Pygments lexer is guessed from the explicit name, the file name, or the
    text itself (in that order of preference inside guess_lexer).
    """
    return text_to_lines.text_to_lines(
        example_text,
        console,
        lexer=pygments_util.guess_lexer(
            lexer_name=lexer_name,
            filename=path_to_filename(example_path),
            text=example_text),
        style_name=style_name,
        colorize=colorize,
        decorate=decorate)
def main():
    """CLI entry point: load the example file and run the typing session."""
    args = get_args()
    # A tty is required for the interactive curses-style console.
    if not sys.stdout.isatty(): log.error("stdout is not a tty.")
    example_text = path_to_text(args.example_path)
    with ck.Console(asciize=args.asciize,
                    spaces_per_tab=args.spaces_per_tab,
                    background_rgb=args.background_rgb) as console:
        shakyo.Shakyo(console,
                      get_example_lines(args.example_path,
                                        example_text,
                                        console,
                                        lexer_name=args.lexer_name,
                                        style_name=args.style_name,
                                        colorize=args.colorize,
                                        decorate=args.decorate)).do()
if __name__ == "__main__":
main()
| StarcoderdataPython |
1628079 | <filename>infotv_test/tests/test_deck.py
import json
import pytest
from django.test.client import RequestFactory
from django.test.utils import override_settings
from django.utils.encoding import force_str
from infotv.views import InfoTvView
# Fixture payload: two decks ("default" with text/image/nownext slides,
# "testdeck" with one text slide), as posted by the infotv frontend.
EXAMPLE_DECK_DATA = {
    "decks": {
        "default": [
            {
                "duration": 1,
                "content": "# test",
                "type": "text",
                "id": "s24t7h1n0q"
            },
            {
                "duration": 1,
                "src": "https://placehold.it/304x220",
                "type": "image",
                "id": "s2534m3sqo"
            },
            {
                "duration": 1,
                "type": "nownext",
                "id": "s2533iqgbo"
            }
        ],
        "testdeck": [
            {
                "type": "text",
                "duration": 1,
                "id": "s29nhihhe8",
                "content": "slide in testdeck"
            }
        ]
    },
    "eep": None
}
def get_deck_post_request():
    """Build a POST request that submits EXAMPLE_DECK_DATA as a deck."""
    return RequestFactory().post("/", {"action": "post_deck", "data": json.dumps(EXAMPLE_DECK_DATA)})
@pytest.mark.django_db
def test_post_deck(rf, settings):
    """Posting a deck repeatedly yields monotonically increasing ids, and a
    subsequent GET returns the latest id with the exact posted payload."""
    settings.INFOTV_POLICY_CLASS = "infotv.policy.AnythingGoesPolicy"
    request = get_deck_post_request()
    last_deck_id = 0
    for x in range(3):
        response = InfoTvView.as_view()(request=request, event="dsfargeg")
        assert response.status_code == 200
        deck_id = json.loads(force_str(response.content))["id"]
        # Each re-post must allocate a newer deck id.
        assert deck_id > last_deck_id
        last_deck_id = deck_id
    response = InfoTvView.as_view()(request=rf.get("/", {"action": "get_deck"}), event="dsfargeg")
    deck_data = json.loads(force_str(response.content))
    assert deck_data["id"] == last_deck_id
    assert deck_data["data"] == EXAMPLE_DECK_DATA
@pytest.mark.django_db
def test_get_bogus_event_deck(rf):
    """An unknown event slug yields the sentinel deck id "missing"."""
    response = InfoTvView.as_view()(request=rf.get("/", {"action": "get_deck"}), event="dkfjstwr4iunm")
    assert json.loads(force_str(response.content))["id"] == "missing"
@pytest.mark.django_db
def test_post_deck_auth():
    """With the restrictive BasePolicy, posting a deck is rejected with 401."""
    request = get_deck_post_request()
    with override_settings(INFOTV_POLICY_CLASS="infotv.policy.BasePolicy"):
        response = InfoTvView.as_view()(request, event="dsfargeg")
        assert response.status_code == 401
| StarcoderdataPython |
1722050 | import logging
from flask import jsonify
from flask import render_template
from flask import request
import config
logger = logging.getLogger(__name__)
app = config.app
@app.route('/')
def index():
    """Serve the main page."""
    return render_template('index.html')
@app.route('/controller/')
def controller():
    """Serve the controller UI page."""
    return render_template('controller.html')
def run():
    """Start the Flask dev server on the configured address/port (threaded)."""
    app.run(host=config.WEB_ADDRESS, port=config.WEB_PORT, threaded=True)
| StarcoderdataPython |
1607999 | #!/usr/bin/python
#
# Copyright 2012 Software Freedom Conservancy
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import platform
import signal
import subprocess
import time
from selenium.common.exceptions import WebDriverException
from selenium.webdriver.common import utils
class Service(object):
    """
    Manages the lifecycle (start/stop) of a PhantomJS / GhostDriver process.
    """
    def __init__(self, executable_path, port=0, service_args=None, log_path=None):
        """
        Creates a new instance of the Service.

        :Args:
         - executable_path : Path to the PhantomJS binary
         - port : Port the service should listen on (0 = pick a free port)
         - service_args : Extra command line options to pass to PhantomJS
         - log_path : Path for the PhantomJS service log (default ghostdriver.log)
        """
        self.port = port
        self.path = executable_path
        self.service_args= service_args
        if self.port == 0:
            self.port = utils.free_port()
        # Copy the caller's list so appending below never mutates their object.
        if self.service_args is None:
            self.service_args = []
        else:
            self.service_args=service_args[:]
        # argv layout: [binary, *user_args, --webdriver=PORT]
        self.service_args.insert(0, self.path)
        self.service_args.append("--webdriver=%d" % self.port)
        self.process = None
        if not log_path:
            log_path = "ghostdriver.log"
        self._log = open(log_path, 'w')
    def __del__(self):
        # subprocess.Popen doesn't send signal on __del__;
        # we have to try to stop the launched process.
        self.stop()
    def start(self):
        """
        Starts PhantomJS with GhostDriver and waits until it accepts
        connections.

        :Exceptions:
         - WebDriverException : Raised when the process cannot be launched,
           or when the service is not connectable within ~30 seconds.
        """
        try:
            self.process = subprocess.Popen(self.service_args, stdin=subprocess.PIPE,
                                            close_fds=platform.system() != 'Windows',
                                            stdout=self._log, stderr=self._log)
        except Exception as e:
            raise WebDriverException("Unable to start phantomjs with ghostdriver.", e)
        # Poll the port once per second, up to 30 attempts.
        count = 0
        while not utils.is_connectable(self.port):
            count += 1
            time.sleep(1)
            if count == 30:
                raise WebDriverException("Can not connect to GhostDriver")
    @property
    def service_url(self):
        """
        URL of the running GhostDriver service (local WebDriver hub).
        """
        return "http://localhost:%d/wd/hub" % self.port
    def stop(self):
        """
        Closes the log file and terminates the child process, if any.
        Safe to call multiple times.
        """
        if self._log:
            self._log.close()
            self._log = None
        # If it's already dead, don't worry.
        if self.process is None:
            return
        # Ask the server to die cleanly: close stdin, send SIGTERM, reap.
        try:
            if self.process:
                self.process.stdin.close()
                self.process.send_signal(signal.SIGTERM)
                self.process.wait()
        except OSError:
            # kill may not be available under windows environment
            pass
| StarcoderdataPython |
1621526 | <filename>mayan/apps/mime_types/backends/file_command.py
from shutil import copyfileobj
import sh
from django.utils.translation import ugettext_lazy as _
from mayan.apps.dependencies.exceptions import DependenciesException
from mayan.apps.storage.utils import NamedTemporaryFile
from ..classes import MIMETypeBackend
from .literals import DEFAULT_FILE_PATH
class MIMETypeBackendFileCommand(MIMETypeBackend):
    """MIME type backend that shells out to the Unix ``file`` command."""
    def _init(self, copy_length=None, file_path=None):
        # copy_length: chunk size for copyfileobj (None = library default).
        self.file_path = file_path or DEFAULT_FILE_PATH
        self.copy_length = copy_length
        try:
            # Pre-bake the flags: --brief --mime-type for terse "type" output.
            self.command_file = sh.Command(path=self.file_path).bake(
                brief=True, mime_type=True
            )
        except sh.CommandNotFound:
            raise DependenciesException(
                _('file command not installed or not found.')
            )
    def _get_mime_type(self, file_object, mime_type_only):
        """Return (mime_type, mime_encoding) for *file_object*.

        The object is copied to a named temporary file because ``file``
        needs a filesystem path; the source is rewound before and after.
        """
        with NamedTemporaryFile() as temporary_file_object:
            file_object.seek(0)
            copyfileobj(
                fsrc=file_object, fdst=temporary_file_object,
                length=self.copy_length
            )
            file_object.seek(0)
            temporary_file_object.seek(0)
            # Output is "type;charset" when encoding is requested.
            output = self.command_file(
                temporary_file_object.name, mime_encoding=not mime_type_only
            ).split(';')
            file_mime_type = output[0]
            if mime_type_only:
                file_mime_encoding = 'binary'
            else:
                file_mime_encoding = output[1]
            return (file_mime_type, file_mime_encoding)
| StarcoderdataPython |
98352 | <gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (C) 2020 CERN.
#
# Invenio-Records-Resources is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see LICENSE file for more
# details.
"""Facets parameter interpreter API."""
import itertools
import operator
from elasticsearch_dsl import Q
from .base import ParamInterpreter
class FacetsParam(ParamInterpreter):
    """Evaluate facets."""

    def iter_aggs_options(self, options):
        """Yield (name, aggregation) pairs from the facet options."""
        return options.get("aggs", {}).items()

    def apply(self, identity, search, params):
        """Attach configured aggregations and requested post-filters to *search*."""
        options = self.config.facets_options

        # Register every configured aggregation (mutates search.aggs).
        for agg_name, agg in self.iter_aggs_options(options):
            search.aggs[agg_name] = agg() if callable(agg) else agg

        requested = params.pop("facets", {})
        factories = options.get("post_filters", {})

        # Only facets that are both requested and configured are honoured,
        # which keeps invalid facet names out of the query.
        term_queries = []
        for facet_key in set(requested) & set(factories):
            values = requested[facet_key]
            term_queries.append(factories[facet_key](values))
            params[facet_key] = values

        if term_queries:
            # OR-fold all facet queries into a single post filter.
            combined = term_queries[0]
            for extra in term_queries[1:]:
                combined = combined | extra
            search = search.post_filter(combined)
        return search
| StarcoderdataPython |
1720545 | <gh_stars>0
# encoding: utf-8
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class SecurityConfig(AppConfig):
    """Django app config for the workon security app.

    Example LOGGING wiring for the DisallowedHost handler:

        'handlers': {
            ...
            'workon_security_disallowed_hosts': {
                'level': 'ERROR',
                'class': 'workon.contrib.security.handler.DisallowedHostHandler',
            }
        },
        'loggers': {
            ...
            'django.security.DisallowedHost': {
                'handlers': ['workon_security_disallowed_hosts'],
                'level': 'ERROR',
                'propagate': False,
            }
        },
    """
    name = 'workon.contrib.security'
    label = 'workon_security'
    verbose_name = _(u"Securité")
    def ready(self):
        # Import models at ready() time, per Django app-loading rules.
        from . import models
        # Dead reference code kept from a previous manual-wiring approach:
        # logger = logging.getLogger('django.security.DisallowedHost')
        # logger.setLevel(logging.ERROR)
        # handler = DisallowedHostHandler()
        # handler.setLevel(logging.ERROR)
        # logger.addHandler(handler)
        # # handlers 'spoof_logfile': {
        # #     'level': 'ERROR',
        # #     'class': 'logging.FileHandler',
        # #     'filename': '/path/to/spoofed_requests.log',
        # # },
        # # loggers 'django.security.DisallowedHost': {
        # #     'handlers': ['spoof_logfile'],
        # #     'level': 'ERROR',
        # #     'propagate': False,
        # # },
59837 | <reponame>dhilipsiva/talks<filename>assets/2019-11-30/app.py
from flask import Flask, request
from flask_opentracing import FlaskTracer
from utils import get_config
from github_pb2 import Request
from gist_client import gist_stub
from repo_client import repo_stub
from account_client import account_stub
from commondb_client import commondb_stub
app = Flask(__name__)
@app.route("/")
def index():
return "Hello, World!"
@app.route("/account")
def account():
request_id = request.args.get('request_id')
req = Request(request_id=request_id)
commondb_stub.GetCommonData(req)
reply = account_stub.GetUserName(req)
return reply.msg
@app.route("/repo")
def repo():
request_id = request.args.get('request_id')
req = Request(request_id=request_id)
commondb_stub.GetCommonData(req)
reply = repo_stub.GetLatestCommit(req)
return reply.msg
@app.route("/gist")
def gist():
request_id = request.args.get('request_id')
req = Request(request_id=request_id)
commondb_stub.GetCommonData(req)
reply = gist_stub.GetPublicGist(req)
return reply.msg
def initialize_tracer():
    """Build the OpenTracing tracer for the 'api' service config."""
    config = get_config('api')
    return config.initialize_tracer()
flask_tracer = FlaskTracer(initialize_tracer, True, app)
| StarcoderdataPython |
3398338 | <filename>batchglm/train/numpy/base_glm/vars.py
import dask.array
import numpy as np
import scipy.sparse
import abc
class ModelVarsGlm:
    """
    Build variables to be optimized and their constraints.
    """
    constraints_loc: np.ndarray     # constraint matrix, location model
    constraints_scale: np.ndarray   # constraint matrix, scale model
    params: np.ndarray              # stacked [a_var; b_var] parameter matrix
    a_var: np.ndarray               # location-model parameters (property)
    b_var: np.ndarray               # scale-model parameters (property)
    converged: np.ndarray           # per-feature convergence flags
    npar_a: int                     # number of location-model rows in params
    dtype: str
    n_features: int
    def __init__(
            self,
            init_a: np.ndarray,
            init_b: np.ndarray,
            constraints_loc: np.ndarray,
            constraints_scale: np.ndarray,
            chunk_size_genes: int,
            dtype: str
    ):
        """
        :param init_a: nd.array (mean model size x features)
            Initialisation for all parameters of mean model.
        :param init_b: nd.array (dispersion model size x features)
            Initialisation for all parameters of dispersion model.
        :param chunk_size_genes: dask chunk size along the feature axis.
        :param dtype: Numeric precision to use.
        """
        self.constraints_loc = np.asarray(constraints_loc, dtype)
        self.constraints_scale = np.asarray(constraints_scale, dtype)
        # Clip initial values into the admissible parameter bounds.
        init_a_clipped = self.np_clip_param(np.asarray(init_a, dtype=dtype), "a_var")
        init_b_clipped = self.np_clip_param(np.asarray(init_b, dtype=dtype), "b_var")
        # Store both models stacked row-wise; rows [0, npar_a) belong to a_var.
        self.params = dask.array.from_array(np.concatenate(
            [
                init_a_clipped,
                init_b_clipped,
            ],
            axis=0
        ), chunks=(1000, chunk_size_genes))
        self.npar_a = init_a_clipped.shape[0]
        # Properties to follow gene-wise convergence.
        self.converged = np.repeat(a=False, repeats=self.params.shape[1])  # Initialise to non-converged.
        self.dtype = dtype
        self.n_features = self.params.shape[1]
        self.idx_train_loc = np.arange(0, init_a.shape[0])
        self.idx_train_scale = np.arange(init_a.shape[0], init_a.shape[0] + init_b.shape[0])
    @property
    def idx_not_converged(self):
        # Indices of features that have not converged yet.
        return np.where(np.logical_not(self.converged))[0]
    @property
    def a_var(self):
        # Location-model rows, re-clipped on every read.
        a_var = self.params[0:self.npar_a]
        return self.np_clip_param(a_var, "a_var")
    @a_var.setter
    def a_var(self, value):
        # Threshold new entry:
        value = self.np_clip_param(value, "a_var")
        # Write either new dask array or into numpy array:
        # (dask arrays are immutable, so a full compute + rewrap is required)
        if isinstance(self.params, dask.array.core.Array):
            temp = self.params.compute()
            temp[0:self.npar_a] = value
            self.params = dask.array.from_array(temp, chunks=self.params.chunksize)
        else:
            self.params[0:self.npar_a] = value
    @property
    def b_var(self):
        # Scale-model rows, re-clipped on every read.
        b_var = self.params[self.npar_a:]
        return self.np_clip_param(b_var, "b_var")
    @b_var.setter
    def b_var(self, value):
        # Threshold new entry:
        value = self.np_clip_param(value, "b_var")
        # Write either new dask array or into numpy array:
        if isinstance(self.params, dask.array.core.Array):
            temp = self.params.compute()
            temp[self.npar_a:] = value
            self.params = dask.array.from_array(temp, chunks=self.params.chunksize)
        else:
            self.params[self.npar_a:] = value
    def b_var_j_setter(self, value, j):
        """Set the scale-model parameters of a single feature column *j*."""
        # Threshold new entry:
        value = self.np_clip_param(value, "b_var")
        # Write either new dask array or into numpy array:
        if isinstance(self.params, dask.array.core.Array):
            temp = self.params.compute()
            temp[self.npar_a:, j] = value
            self.params = dask.array.from_array(temp, chunks=self.params.chunksize)
        else:
            self.params[self.npar_a:, j] = value
    @abc.abstractmethod
    def param_bounds(self, dtype):
        # Subclasses define the clipping bounds used by np_clip_param.
        pass
| StarcoderdataPython |
1615989 | #!/usr/bin/env python3
"""
Author : patarajarina
Date : 2019-02-11
Purpose: Rock the Casbah
"""
import os
import sys
# --------------------------------------------------
def main():
    """Print the numbers 1..NUM*NUM as a NUM-wide grid, 3 columns per cell.

    NUM is the single command-line argument and must be between 2 and 8
    inclusive; otherwise a usage/range message is printed and the process
    exits with status 1.
    """
    args = sys.argv[1:]

    if len(args) != 1:
        print('Usage: {} NUM'.format(os.path.basename(sys.argv[0])))
        sys.exit(1)

    number = int(args[0])

    # range(2, 9) accepts 2..8; the old message incorrectly said "1 and 9".
    if number not in range(2, 9):
        print('NUM ({}) must be between 2 and 8'.format(number))
        sys.exit(1)

    # The previous print line was a syntax error (`\n` outside a string with a
    # dangling `if`); this is the intended behaviour, matching the reference
    # loop that was left commented out above it.
    for j in range(1, number * number + 1):
        print('{:3}'.format(j), end='')
        if j % number == 0:
            print()
# --------------------------------------------------
main()  # module-level entry point (no __main__ guard in the original)
| StarcoderdataPython |
121996 | import tensorflow as tf
from absl import flags
from absl import app
from absl import logging
from tokenization import FullTokenizer
from tokenization_en import load_subword_vocab
from transformer import Transformer, FileConfig
FLAGS = flags.FLAGS
MODEL_DIR = "/Users/livingmagic/Documents/deeplearning/models/bert-nmt/zh-en_bert-tf2_L6-D256/"
flags.DEFINE_string("bert_config_file", MODEL_DIR + "bert_config.json", "The bert config file")
flags.DEFINE_string("bert_vocab_file", MODEL_DIR + "vocab.txt",
"The vocabulary file that the BERT model was trained on.")
flags.DEFINE_string("init_checkpoint", MODEL_DIR + "bert_nmt_ckpt", "")
flags.DEFINE_string("config_file", MODEL_DIR + "config.json", "The transformer config file except bert")
flags.DEFINE_string("vocab_file", MODEL_DIR + "vocab_en", "The english vocabulary file")
flags.DEFINE_integer("max_seq_length", 128, "Max length to sequence length")
flags.DEFINE_string("inp_sentence", None, "")
def create_padding_mask(seq):
    """Build a mask that is 1.0 wherever *seq* holds the padding id (0).

    Two broadcast axes are inserted so the result has shape
    (batch_size, 1, 1, seq_len), ready to be added to attention logits.
    """
    is_pad = tf.cast(tf.math.equal(seq, 0), tf.float32)
    return is_pad[:, tf.newaxis, tf.newaxis, :]
def create_look_ahead_mask(size):
    """Return a (size, size) mask with 1s strictly above the diagonal.

    A 1 marks a future position a decoder step must not attend to;
    in other words, the mask indicates which entries should not be used.
    """
    lower_triangle = tf.linalg.band_part(tf.ones((size, size)), -1, 0)
    return 1 - lower_triangle
def create_masks(inp, tar):
    """Build the decoder masks for one translation step.

    Returns (combined_mask, dec_padding_mask): the first hides both padding
    and future positions of the target (decoder's 1st attention block), the
    second hides padding of the encoder output (decoder's 2nd block).
    """
    dec_padding_mask = create_padding_mask(inp)

    # Element-wise maximum merges the target padding mask with the
    # look-ahead mask so either condition blocks attention.
    tar_len = tf.shape(tar)[1]
    combined_mask = tf.maximum(
        create_padding_mask(tar), create_look_ahead_mask(tar_len)
    )
    return combined_mask, dec_padding_mask
def encode_zh(tokenizer_zh, zh):
    """Tokenize *zh* and convert to ids, wrapped in BERT's [CLS]/[SEP] markers."""
    wrapped = ['[CLS]'] + tokenizer_zh.tokenize(zh) + ['[SEP]']
    return tokenizer_zh.convert_tokens_to_ids(wrapped)
def evaluate(transformer,
             tokenizer_zh,
             tokenizer_en,
             inp_sentence,
             max_seq_length):
    """Greedy-decode an English translation of the Chinese *inp_sentence*.

    Returns (output_ids, attention_weights); output_ids includes the start
    token and the tokens decoded before either the end token is produced or
    max_seq_length steps elapse.
    NOTE(review): attention_weights is unbound if max_seq_length == 0.
    """
    # normalize input sentence
    inp_sentence = encode_zh(tokenizer_zh, inp_sentence)
    encoder_input = tf.expand_dims(inp_sentence, 0)
    # as the target is english, the first word to the transformer should be the
    # english start token (id == vocab_size by convention here).
    decoder_input = [tokenizer_en.vocab_size]
    output = tf.expand_dims(decoder_input, 0)
    for i in range(max_seq_length):
        combined_mask, dec_padding_mask = create_masks(
            encoder_input, output)
        # predictions.shape == (batch_size, seq_len, vocab_size)
        predictions, attention_weights = transformer(encoder_input,
                                                     output,
                                                     False,
                                                     combined_mask,
                                                     dec_padding_mask)
        # select the last word from the seq_len dimension
        predictions = predictions[:, -1:, :]  # (batch_size, 1, vocab_size)
        predicted_id = tf.cast(tf.argmax(predictions, axis=-1), tf.int32)
        # return the result if the predicted_id is equal to the end token
        # (end token id == vocab_size + 1 by convention here)
        if tf.equal(predicted_id, tokenizer_en.vocab_size + 1):
            return tf.squeeze(output, axis=0), attention_weights
        # concatenate the predicted_id to the output which is given to the
        # decoder as its input on the next step.
        output = tf.concat([output, predicted_id], axis=-1)
    return tf.squeeze(output, axis=0), attention_weights
def main(_):
    """absl entry point: build tokenizers and model, load weights, translate."""
    tokenizer_zh = FullTokenizer(
        vocab_file=FLAGS.bert_vocab_file, do_lower_case=True)
    tokenizer_en = load_subword_vocab(FLAGS.vocab_file)
    # +2 reserves ids for the start (vocab_size) and end (vocab_size+1) tokens.
    target_vocab_size = tokenizer_en.vocab_size + 2
    config = FileConfig(FLAGS.config_file)
    transformer = Transformer(config=config,
                              target_vocab_size=target_vocab_size,
                              bert_config_file=FLAGS.bert_config_file)
    # Run one dummy forward pass so all variables are built before
    # load_weights restores the checkpoint.
    inp = tf.random.uniform((1, FLAGS.max_seq_length))
    tar_inp = tf.random.uniform((1, FLAGS.max_seq_length))
    fn_out, _ = transformer(inp, tar_inp,
                            True,
                            look_ahead_mask=None,
                            dec_padding_mask=None)
    transformer.load_weights(FLAGS.init_checkpoint)
    print(transformer.encoder.weights[0])
    result, _ = evaluate(transformer,
                         tokenizer_zh,
                         tokenizer_en,
                         FLAGS.inp_sentence,
                         FLAGS.max_seq_length)
    # Drop the synthetic start/end ids before decoding back to text.
    predicted_sentence = tokenizer_en.decode([i for i in result
                                              if i < tokenizer_en.vocab_size])
    print('Input: {}'.format(FLAGS.inp_sentence))
    print('Predicted translation: {}'.format(predicted_sentence))
if __name__ == "__main__":
flags.mark_flag_as_required("inp_sentence")
app.run(main)
| StarcoderdataPython |
1636055 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('timeslots', '0003_auto_20141102_0853'),
('common', '0002_report_last_sent'),
]
operations = [
migrations.AddField(
model_name='user',
name='constraints',
field=models.ManyToManyField(related_name='+', to='timeslots.Constraint'),
preserve_default=True,
),
]
| StarcoderdataPython |
1653245 | <gh_stars>0
import torch
import torch.nn as nn
import torch.nn.functional as F
__all__ = ['mymodel51']
class FocalLoss(nn.Module):
def __init__(self, alpha=1, gamma=0):
super(FocalLoss, self).__init__()
self.gamma = gamma
self.alpha = alpha
def get_attention(self, input, target):
prob = F.softmax(input, dim=-1)
prob = prob[range(target.shape[0]), target]
prob = 1 - prob
prob = prob ** self.gamma
return prob
def get_celoss(self, input, target):
ce_loss = F.log_softmax(input, dim=1)
ce_loss = -ce_loss[range(target.shape[0]), target]
return ce_loss
def forward(self, input, target):
attn = self.get_attention(input, target)
ce_loss = self.get_celoss(input, target)
loss = self.alpha * ce_loss * attn
return loss.mean()
floss1 = FocalLoss(alpha=0.25, gamma=2)
class AttnLoss(nn.Module):
def __init__(self, alpha=1):
super(AttnLoss, self).__init__()
self.alpha = alpha
def forward(self, input, target):
loss = torch.log(input)
loss = loss[range(target.shape[0]), target]
return loss.mean()
aloss1 = AttnLoss(alpha=0.25)
def get_focalloss(output_dict, gt_labels, **kwargs):
return floss1(output_dict['logits'], gt_labels.long()) + aloss1(output_dict['attn'], gt_labels.long())
| StarcoderdataPython |
1634044 | <reponame>kaicLimaOliveira/Task-app<gh_stars>0
from flask import Flask, Blueprint
from routers import userRoutes
from routers import pagesRoutes
app = Flask(__name__)
app.register_blueprint(pagesRoutes.pages)
app.register_blueprint(userRoutes.user)
@app.template_filter()
def pretty_date(dttm):
    """Jinja filter: render a datetime as MM/DD/YYYY."""
    fmt = "%m/%d/%Y"
    return dttm.strftime(fmt)
@app.template_filter()
def length(l):
    """Jinja filter: return the number of items in *l*."""
    count = len(l)
    return count
if __name__ == '__main__':
    # Development server only; use a proper WSGI server in production.
    app.run(host='localhost', port=4444, debug=True)
| StarcoderdataPython |
3334724 | """ Helpers for getting the locations of places """
__author__ = "<NAME>"
__copyright__ = "Copyright (c) 2016 Black Radley Limited."
import re # for regular expressions
import urllib # for url encoding
import urllib2 # for getting the gear from Wikipedia
import string
from random import randint
from time import sleep
data = None
headers = { 'User-Agent' : 'HeathMynd (+http://www.blackradley.com/contact-us/)' }
def get_wikipedia_location(wikipedia_link):
    """ Get the location (lat, lng) from the Wikipedia page if it is available

    Returns a dict {'lat': float, 'lng': float}; both are 0.0 when the
    article carries no coordinates.
    """
    wikipedia_link = urllib.quote_plus(wikipedia_link)
    kml_url = 'http://toolserver.org/~para/cgi-bin/kmlexport?article=' + wikipedia_link
    kml_request = urllib2.Request(kml_url, data, headers)
    kml_response = urllib2.urlopen(kml_request)
    kml = kml_response.read()
    # KML stores "lng,lat,altitude", e.g. <coordinates>-0.46408,52.13607,0</coordinates>
    coordinates = re.search('(?<=<coordinates>)(.*?)(?=</coordinates>)', kml)
    lat = lng = 0.0
    if coordinates is not None:  # identity check instead of != None
        # str.split instead of the long-deprecated string.split helper
        parts = coordinates.group().split(',')
        lng = float(parts[0])
        lat = float(parts[1])
    return {'lat': lat, 'lng': lng}
def get_google_location(name, county):
    """ Get the location (lat, lng) using a Google search

    Returns [lat, lng] as floats, or [0.0, 0.0] when no map link is found.
    NOTE(review): scrapes Google's HTML (fragile, likely against ToS), and
    the return shape (list) differs from get_wikipedia_location (dict).
    """
    # e.g. https://www.google.co.uk/search?q=A+La+Ronde,+Devon&num=1&hl=en&start=0&cr=countryUK%7CcountryGB
    query = name + ", " + county
    html_url = "http://www.google.com/search?q=%s&num=1&hl=en&start=0&cr=countryUK|countryGB" % (urllib.quote_plus(query))
    sleep(randint(2, 10)) # delay request so it doesn't look like an attack
    html_request = urllib2.Request(html_url, data, headers)
    html_response = urllib2.urlopen(html_request)
    html = html_response.read()
    # NOTE(review): the lat part of the pattern only allows \d+, so negative
    # latitudes would not match — fine for the UK, not in general.
    location = re.search('ll=\d+\.\d+,(-|)\d+\.\d+', html)
    if location is None: # then Google Search doesn't have a location
        location = [0.0,0.0]
    else:
        location = location.group(0)
        # strip the leading "ll=" and split into [lat, lng]
        location = location[3:].split(',')
        location = [float(i) for i in location]
    return location
def __uk_boundry_box():
    """ Return a string representing a boundary box around most of UK """
    # South-west corner: SV00 in the OS Grid (the origin)
    south_west = (49.766807, -7.557160)
    # North-east corner: somewhere in the North Sea
    north_east = (56.474628, 3.493652)
    corners = (south_west, north_east)
    return '|'.join(','.join(str(part) for part in corner) for corner in corners)
| StarcoderdataPython |
191049 | <reponame>hwangyoungjae/hwangyoungjae.github.io
# asyncio_lock.py
import asyncio
import functools
def unlock(lock: asyncio.Lock):
    """Release *lock*; scheduled via loop.call_later as a plain callback."""
    print("callback releasing lock")
    lock.release()
async def coro1(lock: asyncio.Lock):
    """Wait for *lock*, hold it briefly, release it, logging each step."""
    print("coro1 waiting for the lock")
    await lock.acquire()
    try:
        print("coro1 acquired lock")
    finally:
        lock.release()
    print("coro1 released lock")
async def coro2(lock: asyncio.Lock):
    """Same contention demo as coro1, using the context-manager form.

    The "released" message is printed just before the lock is actually
    released, matching the original acquire/try/finally ordering.
    """
    print("coro2 waiting for the lock")
    async with lock:
        print("coro2 acquired lock")
        print("coro2 released lock")
async def main(loop: asyncio.AbstractEventLoop):
    """Demo driver: hold the shared lock, schedule its release via a
    callback, then let two coroutines contend for it."""
    # Create and acquire the shared lock.
    lock = asyncio.Lock()
    print("acquiring the lock before starting coroutines")
    await lock.acquire()
    print(f"lock acquired: {lock.locked()}")
    # Schedule a callback to release the lock after 0.1s.
    loop.call_later(0.1, functools.partial(unlock, lock))
    # Run the coroutines that want to use the lock.
    # Wrap them in tasks: asyncio.wait() stopped accepting bare coroutines
    # in Python 3.11 (deprecated since 3.8).
    print("waiting for coroutines")
    await asyncio.wait(
        [asyncio.ensure_future(coro1(lock)), asyncio.ensure_future(coro2(lock))]
    )
# Drive the demo on the default event loop; close it even if main() fails.
event_loop = asyncio.get_event_loop()
try:
    event_loop.run_until_complete(main(event_loop))
finally:
    event_loop.close()
| StarcoderdataPython |
3311071 | <filename>carnival/utils.py<gh_stars>1-10
import typing
import os
def envvar(varname: str) -> str:
    """
    Fetch a variable from the process environment.

    Replacement for context_ref from carnival v3.

    :raises ValueError: if the variable is not present in the environment
    """
    if varname not in os.environ:
        # Typo fixed: "persent" -> "present".
        raise ValueError(f"{varname} is not present in environment")
    return os.environ[varname]
def get_class_full_name(klass: typing.Type[typing.Any]) -> str:
    """Return klass's dotted path, relative to the configured tasks module."""
    from carnival.cli import carnival_tasks_module

    if not klass.__module__:
        # No module path (e.g. built dynamically): just the bare class name.
        return klass.__name__

    full_name = f"{klass.__module__}.{klass.__name__}"
    prefix = f"{carnival_tasks_module}."
    if full_name.startswith(prefix):
        return full_name[len(prefix):]
    return full_name
| StarcoderdataPython |
149102 | import math
def adição(x, y):
    """Addition: return the sum of x and y."""
    soma = x + y
    return soma
def subtração(x, y):
    """Subtraction: return x minus y."""
    diferença = x - y
    return diferença
def multiplicação(x, y):
    """Multiplication: return the product of x and y."""
    produto = x * y
    return produto
def divisão(x, y):
    """True division: return x / y (raises ZeroDivisionError when y == 0)."""
    quociente = x / y
    return quociente
def potencia(x, y):
    """Exponentiation: return x raised to the power y."""
    resultado = x ** y
    return resultado
def raiz(x, y):
    """Return the square root of x.

    NOTE(review): ``y`` is accepted but ignored — the menu below always
    passes two operands, so the second one has no effect here.
    """
    return math.sqrt(x)
print("\n***** Python Calculator *****")
print('Escolha uma operação (1/2/3/4/5/6):\n')
print('1 - Soma')
print('2 - Subtração')
print('3 - Multiplicação')
print('4 - Divisão')
print('5 - Potência')
print('6 - Raiz quadrada')
operacao = input('Digite sua opção: 1/2/3/4/5/6\n')
num1 = float(input('Insira um número: '))
num2 = float(input('Insira um segundo número: '))
if operacao == '1':
print(num1, '+', num2, '=', adição(num1, num2))
elif operacao == '2':
print(num1, '-', num2, '=', subtração(num1, num2))
elif operacao == '3':
print(num1, 'x', num2, '=', multiplicação(num1, num2))
elif operacao == '4':
print(num1, '/', num2, '=', divisão(num1, num2))
elif operacao == '5':
print(potencia(num1, num2))
else:
print(raiz(num1, num2))
| StarcoderdataPython |
3289201 | <gh_stars>1-10
# users/app/api/utils/__init__.py
| StarcoderdataPython |
195861 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
@File : remove_invalid_question.py
@Time : 2021/1/26 下午10:43
@Author : <NAME>
@Contact : <EMAIL>
"""
import os
import json
import linecache
import ast
from tqdm import tqdm
import logging
# Timestamped DEBUG-level logging for the whole run.
logging.basicConfig(
    format='%(asctime)s | %(levelname)s | %(name)s | %(message)s',
    datefmt='%Y-%m-%d %H:%M:%S',
    level=logging.DEBUG,
)
logger = logging.getLogger("reformat")
base_dir = "/data1/data-xlx/semeval21_task4/SemEval2021-Task4"
# Alternative input/output pairs kept for reference; only the trail-data
# pair is currently active.
# input_file = os.path.join(base_dir, "training_data/Task_2_train_bktrans_google.jsonl")
# input_file = os.path.join(base_dir, "training_data/Task_2_dev_bktrans_google.jsonl")
input_file = os.path.join(base_dir, "trail_data/Task_2_Nonspecificity_trans_google.jsonl")
# output_file = os.path.join(base_dir, "training_data/Task_2_train_bktrans_google_valid.jsonl")
# output_file = os.path.join(base_dir, "training_data/Task_2_dev_bktrans_google_valid.jsonl")
output_file = os.path.join(base_dir, "trail_data/Task_2_Nonspecificity_trans_google_valid.jsonl")
examples = []
with open(input_file, 'r') as fin:
    for (i, line) in tqdm(enumerate(fin), desc="Loading: "):
        record = json.loads(line.strip())
        bktrans_article = record['bk_article']
        # Drop records whose back-translated article is missing or empty.
        if bktrans_article is None or len(bktrans_article) == 0:
            print(i)
            continue
        original_qst = record['question']
        bktrans_qst = record['bk_question']
        # Keep only records where the cloze placeholder survived translation.
        if "@placeholder" in original_qst and "@placeholder" in bktrans_qst:
            examples.append(record)
logger.info("Get original bktrans examples = [{}]".format(len(linecache.getlines(input_file))))
logger.info("Get valid bktrans examples = [{}]".format(len(examples)))
logger.info("Write valid bktrans examples to [{}]".format(output_file))
with open(output_file, 'w') as fout:
    for i, example in tqdm(enumerate(examples), desc="Writing: "):
        fout.write("{}\n".format(json.dumps(example)))
logger.info("Done.")
| StarcoderdataPython |
3287907 | <filename>opengl/gl/raw/gl_1_3.py
#BEWARE: automatically generated code
#This code was generated by /generate/__main__.py
from opengl.gl.raw.bindings import *
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def active_texture(texture):
'''
select active texture unit.
gl.active_texture selects which texture unit subsequent texture state calls
will affect. The number of texture units an implementation supports is
implementation dependent, but must be at least 80.
Args:
texture: which texture unit to make active.
'''
@accepts(t.float, t.boolean)
@returns(t.void)
@binds(dll)
def sample_coverage(value, invert):
'''
specify multisample coverage parameters.
Args:
value: a single floating-point sample coverage value.
invert: a single boolean value representing if the coverage masks should
be inverted.
'''
@accepts(t.enum, t.int, t.enum, t.sizei, t.sizei, t.sizei, t.int, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def compressed_tex_image3_d(target, level, internalformat, width, height, depth, border, imagesize, data):
'''
specify a three-dimensional texture image in a compressed format.
Args:
target: the target texture.
level: the level-of-detail number.
internalformat: the format of the compressed image data stored at
address data.
width: the width of the texture image.
height: the height of the texture image.
depth: the depth of the texture image.
border: this value must be 0.
imagesize: the number of unsigned bytes of image data starting at the
address specified by data.
data: a pointer to the compressed image data in memory.
'''
@accepts(t.enum, t.int, t.enum, t.sizei, t.sizei, t.int, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def compressed_tex_image2_d(target, level, internalformat, width, height, border, imagesize, data):
'''
specify a two-dimensional texture image in a compressed format.
Args:
target: the target texture.
level: the level-of-detail number.
internalformat: the format of the compressed image data stored at
address data.
width: the width of the texture image.
height: the height of the texture image.
border: this value must be 0.
imagesize: the number of unsigned bytes of image data starting at the
address specified by data.
data: a pointer to the compressed image data in memory.
'''
@accepts(t.enum, t.int, t.enum, t.sizei, t.int, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def compressed_tex_image1_d(target, level, internalformat, width, border, imagesize, data):
'''
specify a one-dimensional texture image in a compressed format.
Args:
target: the target texture.
level: the level-of-detail number.
internalformat: the format of the compressed image data stored at
address data.
width: the width of the texture image.
border: this value must be 0.
imagesize: the number of unsigned bytes of image data starting at the
address specified by data.
data: a pointer to the compressed image data in memory.
'''
@accepts(t.enum, t.int, t.int, t.int, t.int, t.sizei, t.sizei, t.sizei, t.enum, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def compressed_tex_sub_image3_d(target, level, xoffset, yoffset, zoffset, width, height, depth, format, imagesize, data):
'''
specify a three-dimensional texture subimage in a compressed format.
Args:
target: the target to which the texture is bound for
glcompressedtexsubimage3d function.
level: the level-of-detail number.
xoffset: a texel offset in the x direction within the texture array.
yoffset: a texel offset in the y direction within the texture array.
width: the width of the texture subimage.
height: the height of the texture subimage.
depth: the depth of the texture subimage.
format: the format of the compressed image data stored at address data.
imagesize: the number of unsigned bytes of image data starting at the
address specified by data.
data: a pointer to the compressed image data in memory.
'''
@accepts(t.enum, t.int, t.int, t.int, t.sizei, t.sizei, t.enum, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def compressed_tex_sub_image2_d(target, level, xoffset, yoffset, width, height, format, imagesize, data):
'''
specify a two-dimensional texture subimage in a compressed format.
Args:
target: the target to which the texture is bound for
glcompressedtexsubimage2d function.
level: the level-of-detail number.
xoffset: a texel offset in the x direction within the texture array.
yoffset: a texel offset in the y direction within the texture array.
width: the width of the texture subimage.
height: the height of the texture subimage.
format: the format of the compressed image data stored at address data.
imagesize: the number of unsigned bytes of image data starting at the
address specified by data.
data: a pointer to the compressed image data in memory.
'''
@accepts(t.enum, t.int, t.int, t.sizei, t.enum, t.sizei, t.void)
@returns(t.void)
@binds(dll)
def compressed_tex_sub_image1_d(target, level, xoffset, width, format, imagesize, data):
'''
specify a one-dimensional texture subimage in a compressed format.
Args:
target: the target, to which the texture is bound, for
glcompressedtexsubimage1d function.
level: the level-of-detail number.
xoffset: a texel offset in the x direction within the texture array.
width: the width of the texture subimage.
format: the format of the compressed image data stored at address data.
imagesize: the number of unsigned bytes of image data starting at the
address specified by data.
data: a pointer to the compressed image data in memory.
'''
@accepts(t.enum, t.int, t.void)
@returns(t.void)
@binds(dll)
def get_compressed_tex_image(target, level, img):
'''
return a compressed texture image.
gl.get_compressed_tex_image and gl.getn_compressed_tex_image return the
compressed texture image associated with target and lod into pixels.
gl.get_compressed_texture_image serves the same purpose, but instead of
taking a texture target, it takes the ID of the texture object. pixels
should be an array of bufSize bytes for gl.getn_compresed_tex_image and
gl.get_compressed_texture_image functions, and of
gl.TEXTURE_COMPRESSED_IMAGE_SIZE bytes in case of
gl.get_compressed_tex_image. If the actual data takes less space than
bufSize, the remaining bytes will not be touched.
Args:
target: the target to which the texture is bound for
glgetcompressedteximage and glgetncompressedteximage functions.
level: the level-of-detail number of the desired image.
img: returns the compressed texture image.
'''
@accepts(t.enum)
@returns(t.void)
@binds(dll)
def client_active_texture(texture):
'''
select active texture unit.
gl.client_active_texture selects the vertex array client state parameters to
be modified by gl.tex_coord_pointer, and enabled or disabled with
gl.enable_client_state or gl.disable_client_state, respectively, when called
with a parameter of gl.TEXTURE_COORD_ARRAY.
Args:
texture: which texture unit to make active.
'''
@accepts(t.enum, t.double)
@returns(t.void)
@binds(dll)
def multi_tex_coord1d(target, s):
pass
@accepts(t.enum, POINTER(t.double))
@returns(t.void)
@binds(dll)
def multi_tex_coord1dv(target, v):
pass
@accepts(t.enum, t.float)
@returns(t.void)
@binds(dll)
def multi_tex_coord1f(target, s):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def multi_tex_coord1fv(target, v):
pass
@accepts(t.enum, t.int)
@returns(t.void)
@binds(dll)
def multi_tex_coord1i(target, s):
pass
@accepts(t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def multi_tex_coord1iv(target, v):
pass
@accepts(t.enum, t.short)
@returns(t.void)
@binds(dll)
def multi_tex_coord1s(target, s):
pass
@accepts(t.enum, POINTER(t.short))
@returns(t.void)
@binds(dll)
def multi_tex_coord1sv(target, v):
pass
@accepts(t.enum, t.double, t.double)
@returns(t.void)
@binds(dll)
def multi_tex_coord2d(target, s, t):
pass
@accepts(t.enum, POINTER(t.double))
@returns(t.void)
@binds(dll)
def multi_tex_coord2dv(target, v):
pass
@accepts(t.enum, t.float, t.float)
@returns(t.void)
@binds(dll)
def multi_tex_coord2f(target, s, t):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def multi_tex_coord2fv(target, v):
pass
@accepts(t.enum, t.int, t.int)
@returns(t.void)
@binds(dll)
def multi_tex_coord2i(target, s, t):
pass
@accepts(t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def multi_tex_coord2iv(target, v):
pass
@accepts(t.enum, t.short, t.short)
@returns(t.void)
@binds(dll)
def multi_tex_coord2s(target, s, t):
pass
@accepts(t.enum, POINTER(t.short))
@returns(t.void)
@binds(dll)
def multi_tex_coord2sv(target, v):
pass
@accepts(t.enum, t.double, t.double, t.double)
@returns(t.void)
@binds(dll)
def multi_tex_coord3d(target, s, t, r):
pass
@accepts(t.enum, POINTER(t.double))
@returns(t.void)
@binds(dll)
def multi_tex_coord3dv(target, v):
pass
@accepts(t.enum, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def multi_tex_coord3f(target, s, t, r):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def multi_tex_coord3fv(target, v):
pass
@accepts(t.enum, t.int, t.int, t.int)
@returns(t.void)
@binds(dll)
def multi_tex_coord3i(target, s, t, r):
pass
@accepts(t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def multi_tex_coord3iv(target, v):
pass
@accepts(t.enum, t.short, t.short, t.short)
@returns(t.void)
@binds(dll)
def multi_tex_coord3s(target, s, t, r):
pass
@accepts(t.enum, POINTER(t.short))
@returns(t.void)
@binds(dll)
def multi_tex_coord3sv(target, v):
pass
@accepts(t.enum, t.double, t.double, t.double, t.double)
@returns(t.void)
@binds(dll)
def multi_tex_coord4d(target, s, t, r, q):
pass
@accepts(t.enum, POINTER(t.double))
@returns(t.void)
@binds(dll)
def multi_tex_coord4dv(target, v):
pass
@accepts(t.enum, t.float, t.float, t.float, t.float)
@returns(t.void)
@binds(dll)
def multi_tex_coord4f(target, s, t, r, q):
pass
@accepts(t.enum, POINTER(t.float))
@returns(t.void)
@binds(dll)
def multi_tex_coord4fv(target, v):
pass
@accepts(t.enum, t.int, t.int, t.int, t.int)
@returns(t.void)
@binds(dll)
def multi_tex_coord4i(target, s, t, r, q):
pass
@accepts(t.enum, POINTER(t.int))
@returns(t.void)
@binds(dll)
def multi_tex_coord4iv(target, v):
pass
@accepts(t.enum, t.short, t.short, t.short, t.short)
@returns(t.void)
@binds(dll)
def multi_tex_coord4s(target, s, t, r, q):
pass
@accepts(t.enum, POINTER(t.short))
@returns(t.void)
@binds(dll)
def multi_tex_coord4sv(target, v):
pass
@accepts(POINTER(t.float))
@returns(t.void)
@binds(dll)
def load_transpose_matrixf(m):
pass
@accepts(POINTER(t.double))
@returns(t.void)
@binds(dll)
def load_transpose_matrixd(m):
pass
@accepts(POINTER(t.float))
@returns(t.void)
@binds(dll)
def mult_transpose_matrixf(m):
pass
@accepts(POINTER(t.double))
@returns(t.void)
@binds(dll)
def mult_transpose_matrixd(m):
pass
TEXTURE0 = 0x84C0
TEXTURE1 = 0x84C1
TEXTURE2 = 0x84C2
TEXTURE3 = 0x84C3
TEXTURE4 = 0x84C4
TEXTURE5 = 0x84C5
TEXTURE6 = 0x84C6
TEXTURE7 = 0x84C7
TEXTURE8 = 0x84C8
TEXTURE9 = 0x84C9
TEXTURE10 = 0x84CA
TEXTURE11 = 0x84CB
TEXTURE12 = 0x84CC
TEXTURE13 = 0x84CD
TEXTURE14 = 0x84CE
TEXTURE15 = 0x84CF
TEXTURE16 = 0x84D0
TEXTURE17 = 0x84D1
TEXTURE18 = 0x84D2
TEXTURE19 = 0x84D3
TEXTURE20 = 0x84D4
TEXTURE21 = 0x84D5
TEXTURE22 = 0x84D6
TEXTURE23 = 0x84D7
TEXTURE24 = 0x84D8
TEXTURE25 = 0x84D9
TEXTURE26 = 0x84DA
TEXTURE27 = 0x84DB
TEXTURE28 = 0x84DC
TEXTURE29 = 0x84DD
TEXTURE30 = 0x84DE
TEXTURE31 = 0x84DF
ACTIVE_TEXTURE = 0x84E0
MULTISAMPLE = 0x809D
SAMPLE_ALPHA_TO_COVERAGE = 0x809E
SAMPLE_ALPHA_TO_ONE = 0x809F
SAMPLE_COVERAGE = 0x80A0
SAMPLE_BUFFERS = 0x80A8
SAMPLES = 0x80A9
SAMPLE_COVERAGE_VALUE = 0x80AA
SAMPLE_COVERAGE_INVERT = 0x80AB
TEXTURE_CUBE_MAP = 0x8513
TEXTURE_BINDING_CUBE_MAP = 0x8514
TEXTURE_CUBE_MAP_POSITIVE_X = 0x8515
TEXTURE_CUBE_MAP_NEGATIVE_X = 0x8516
TEXTURE_CUBE_MAP_POSITIVE_Y = 0x8517
TEXTURE_CUBE_MAP_NEGATIVE_Y = 0x8518
TEXTURE_CUBE_MAP_POSITIVE_Z = 0x8519
TEXTURE_CUBE_MAP_NEGATIVE_Z = 0x851A
PROXY_TEXTURE_CUBE_MAP = 0x851B
MAX_CUBE_MAP_TEXTURE_SIZE = 0x851C
COMPRESSED_RGB = 0x84ED
COMPRESSED_RGBA = 0x84EE
TEXTURE_COMPRESSION_HINT = 0x84EF
TEXTURE_COMPRESSED_IMAGE_SIZE = 0x86A0
TEXTURE_COMPRESSED = 0x86A1
NUM_COMPRESSED_TEXTURE_FORMATS = 0x86A2
COMPRESSED_TEXTURE_FORMATS = 0x86A3
CLAMP_TO_BORDER = 0x812D
CLIENT_ACTIVE_TEXTURE = 0x84E1
MAX_TEXTURE_UNITS = 0x84E2
TRANSPOSE_MODELVIEW_MATRIX = 0x84E3
TRANSPOSE_PROJECTION_MATRIX = 0x84E4
TRANSPOSE_TEXTURE_MATRIX = 0x84E5
TRANSPOSE_COLOR_MATRIX = 0x84E6
MULTISAMPLE_BIT = 0x20000000
NORMAL_MAP = 0x8511
REFLECTION_MAP = 0x8512
COMPRESSED_ALPHA = 0x84E9
COMPRESSED_LUMINANCE = 0x84EA
COMPRESSED_LUMINANCE_ALPHA = 0x84EB
COMPRESSED_INTENSITY = 0x84EC
COMBINE = 0x8570
COMBINE_RGB = 0x8571
COMBINE_ALPHA = 0x8572
SOURCE0_RGB = 0x8580
SOURCE1_RGB = 0x8581
SOURCE2_RGB = 0x8582
SOURCE0_ALPHA = 0x8588
SOURCE1_ALPHA = 0x8589
SOURCE2_ALPHA = 0x858A
OPERAND0_RGB = 0x8590
OPERAND1_RGB = 0x8591
OPERAND2_RGB = 0x8592
OPERAND0_ALPHA = 0x8598
OPERAND1_ALPHA = 0x8599
OPERAND2_ALPHA = 0x859A
RGB_SCALE = 0x8573
ADD_SIGNED = 0x8574
INTERPOLATE = 0x8575
SUBTRACT = 0x84E7
CONSTANT = 0x8576
PRIMARY_COLOR = 0x8577
PREVIOUS = 0x8578
DOT3_RGB = 0x86AE
DOT3_RGBA = 0x86AF | StarcoderdataPython |
65255 | <reponame>tranmanhdat/FastSpeech2
import re
import argparse
from string import punctuation
from scipy.io import wavfile
import torch
import yaml
import numpy as np
from torch.utils.data import DataLoader
from g2p_en import G2p
from pypinyin import pinyin, Style
from utils.model import get_model, get_vocoder
from utils.tools import to_device, synth_samples, synth_wav
from dataset import TextDataset
from text import text_to_sequence, vi_number_1, vi_abbreviation
import time
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
# device = torch.device("cpu")
def read_lexicon(lex_path):
    """Load a pronunciation lexicon mapping lowercased word -> phone list.

    Each line is "WORD PH1 PH2 ...". The first occurrence of a word wins;
    later duplicates are ignored.
    """
    lexicon = {}
    with open(lex_path, encoding='utf-8') as f:
        for line in f:
            tokens = re.split(r"\s+", line.strip("\n"))
            key = tokens[0].lower()
            if key not in lexicon:
                lexicon[key] = tokens[1:]
    return lexicon
g2p = G2p()
def preprocess_english(text, preprocess_config):
    """Convert raw text into a numpy array of phoneme IDs.

    Despite the name this is a Vietnamese-oriented pipeline: numbers and
    abbreviations are expanded to words via vi_number_1 / vi_abbreviation,
    words are mapped through the pronunciation lexicon, and anything still
    unknown falls back to the g2p_en grapheme-to-phoneme model.
    """
    text = text.rstrip(punctuation)
    # NOTE(review): the lexicon file is re-read on every call — consider caching.
    lexicon = read_lexicon(preprocess_config["path"]["lexicon_path"])
    phones = []
    # Expand digit sequences to words before tokenising.
    text = vi_number_1.normalize_number(text)
    words = re.split(r"([,;.\"\-\?\!\(\)\s+])", text)
    for w in words:
        if w.lower() in lexicon:
            phones += lexicon[w.lower()]
        else:
            # Signed numbers (e.g. "-5"): expand each to its spoken form.
            list_number_with_sign = vi_number_1.process_number_sign(w)
            if len(list_number_with_sign) != 0:
                for number_with_sign in list_number_with_sign:
                    try:
                        read_number_string = vi_number_1.process_number(int(number_with_sign))
                    except ValueError:
                        # Not a plain integer; try the abbreviation table instead.
                        str_abbr = vi_abbreviation.check_abbr(number_with_sign)
                        if str_abbr != '':
                            read_number_string = str_abbr
                        else:
                            read_number_string = ''
                    numbers_list = re.split(r"([,;.\-\?\!\s+])", read_number_string)
                    for num in numbers_list:
                        if num.lower() in lexicon:
                            phones += lexicon[num.lower()]
                continue
            # Known abbreviation: expand and look up each resulting word.
            str_abbr = vi_abbreviation.check_abbr(w)
            if str_abbr != '':
                w_abbr = re.split(r"([,;.\-\?\!\s+])", str_abbr)
                for abbr in w_abbr:
                    if abbr.lower() in lexicon:
                        phones += lexicon[abbr.lower()]
                continue
            # Fallback: grapheme-to-phoneme model, dropping space tokens.
            phones += list(filter(lambda p: p != " ", g2p(w)))
    # Wrap phones as "{p1 p2 ...}" and map leftover punctuation to "sp".
    phones = "{" + "}{".join(phones) + "}"
    phones = re.sub(r"\{[^\w\s]?\}", "{sp}", phones)
    phones = phones.replace("}{", " ")
    print("Raw Text Sequence: {}".format(text))
    print("Phoneme Sequence: {}".format(phones))
    sequence = np.array(
        text_to_sequence(
            phones, preprocess_config["preprocessing"]["text"]["text_cleaners"]
        )
    )
    return np.array(sequence)
# @torch.jit.script
def preprocess_vie(text: str, lexicon_path: str, cleaner: str):
    """Convert Vietnamese text into a numpy array of phoneme IDs.

    Lexicon-only lookup: out-of-vocabulary words are collected and printed
    as errors instead of going through a g2p fallback.
    """
    # Local shadow of the module-level `punctuation` import; same characters.
    punctuation = '!"#$%&\'()*+,-./:;<=>?@[\\]^_`{|}~'
    text = text.rstrip(punctuation)
    lexicon = read_lexicon(lexicon_path)
    # Annotations corrected: these are lists, not strings.
    phones: list = []
    errs: list = []
    words = re.split(r"([,;.\-\?\!\s+])", text)
    for w in words:
        if w.lower() in lexicon:
            phones += lexicon[w.lower()]
        else:
            # phones += list(filter(lambda p: p != " ", g2p(w)))
            errs.append(w.lower())
    print(f"Error words: {' '.join(errs)}")
    # Wrap phones as "{p1 p2 ...}" and map leftover punctuation to "sp".
    phones = "{" + "}{".join(phones) + "}"
    phones = re.sub(r"\{[^\w\s]?\}", "{sp}", phones)
    phones = phones.replace("}{", " ")
    print("Raw Text Sequence: {}".format(text))
    print("Phoneme Sequence: {}".format(phones))
    sequence = np.array(
        text_to_sequence(
            phones, [cleaner]
        )
    )
    return np.array(sequence)
def preprocess_mandarin(text, preprocess_config):
    """Convert Mandarin text into a numpy array of phoneme IDs.

    Text is romanised to numbered-tone pinyin (TONE3) and each syllable is
    looked up in the lexicon; unknown syllables become a short pause "sp".
    """
    lexicon = read_lexicon(preprocess_config["path"]["lexicon_path"])
    phones = []
    pinyins = [
        p[0]
        for p in pinyin(
            text, style=Style.TONE3, strict=False, neutral_tone_with_five=True
        )
    ]
    for p in pinyins:
        if p in lexicon:
            phones += lexicon[p]
        else:
            phones.append("sp")
    phones = "{" + " ".join(phones) + "}"
    print("Raw Text Sequence: {}".format(text))
    print("Phoneme Sequence: {}".format(phones))
    sequence = np.array(
        text_to_sequence(
            phones, preprocess_config["preprocessing"]["text"]["text_cleaners"]
        )
    )
    return np.array(sequence)
def synthesize(model, step, configs, vocoder, batchs, control_values):
    """Run inference over *batchs* and write synthesized samples to the
    configured result path.

    control_values is (pitch, energy, duration) scaling factors forwarded
    to the model. `step` is currently unused here.
    """
    preprocess_config, model_config, train_config = configs
    pitch_control, energy_control, duration_control = control_values
    _start = time.time()
    for batch in batchs:
        batch = to_device(batch, device)
        with torch.no_grad():
            # Forward
            output = model(
                *(batch[2:]),
                p_control=pitch_control,
                e_control=energy_control,
                d_control=duration_control
            )
            synth_samples(
                batch,
                output,
                vocoder,
                model_config,
                preprocess_config,
                train_config["path"]["result_path"],
            )
    print(f"Reference done after {time.time()-_start}")
def synthesize_wav(model, step, configs, vocoder, batchs, control_values):
    """Run inference over *batchs*, vocode the outputs, and return the list
    of produced wav file paths.

    control_values is (pitch, energy, duration) scaling factors forwarded
    to the model. `step` is currently unused here.
    """
    preprocess_config, model_config, train_config = configs
    pitch_control, energy_control, duration_control = control_values
    _start = time.time()
    wav_files = []
    for batch in batchs:
        batch = to_device(batch, device)
        with torch.no_grad():
            # Forward pass. (A previous revision passed the model through
            # torch.jit.trace here; trace returns a traced module — not the
            # model outputs — so synth_wav received the wrong object.)
            output = model(
                *(batch[2:]),
                p_control=pitch_control,
                e_control=energy_control,
                d_control=duration_control
            )
            wav_files += synth_wav(
                batch,
                output,
                vocoder,
                model_config,
                preprocess_config,
                train_config["path"]["result_path"],
            )
    print(f"Reference done after {time.time()-_start}")
    return wav_files
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--restore_step", type=int, required=True)
parser.add_argument(
"--mode",
type=str,
choices=["batch", "single", "single_wav"],
required=True,
help="Synthesize a whole dataset or a single sentence",
)
parser.add_argument(
"--source",
type=str,
default=None,
help="path to a source file with format like train.txt and val.txt, for batch mode only",
)
parser.add_argument(
"--text",
type=str,
default=None,
help="raw text to synthesize, for single-sentence mode only",
)
parser.add_argument(
"--speaker_id",
type=int,
default=0,
help="speaker ID for multi-speaker synthesis, for single-sentence mode only",
)
parser.add_argument(
"-p",
"--preprocess_config",
type=str,
required=True,
help="path to preprocess.yaml",
)
parser.add_argument(
"-m", "--model_config", type=str, required=True, help="path to model.yaml"
)
parser.add_argument(
"-t", "--train_config", type=str, required=True, help="path to train.yaml"
)
parser.add_argument(
"--pitch_control",
type=float,
default=1.0,
help="control the pitch of the whole utterance, larger value for higher pitch",
)
parser.add_argument(
"--energy_control",
type=float,
default=1.0,
help="control the energy of the whole utterance, larger value for larger volume",
)
parser.add_argument(
"--duration_control",
type=float,
default=1.0,
help="control the speed of the whole utterance, larger value for slower speaking rate",
)
args = parser.parse_args()
# Check source texts
if args.mode == "batch":
assert args.source is not None and args.text is None
if args.mode == "single":
assert args.source is None and args.text is not None
# Read Config
preprocess_config = yaml.load(
open(args.preprocess_config, "r"), Loader=yaml.FullLoader
)
model_config = yaml.load(open(args.model_config, "r"), Loader=yaml.FullLoader)
train_config = yaml.load(open(args.train_config, "r"), Loader=yaml.FullLoader)
configs = (preprocess_config, model_config, train_config)
# Get model
model = get_model(args, configs, device, train=False)
# wrapped_model = torch.jit.script(model)
# wrapped_model.save('script_model.pt')
# model = torch.jit.load("script_model.pt")
# Load vocoder
vocoder = get_vocoder(model_config, device)
# vocoder = torch.jit.script(vocoder)
# vocoder.save('script_vocoder.pt')
# vocoder = torch.jit.load('script_vocoder.pt')
# exit()
control_values = args.pitch_control, args.energy_control, args.duration_control
# Preprocess texts
if args.mode == "batch":
# Get dataset
_start = time.time()
dataset = TextDataset(args.source, preprocess_config)
batchs = DataLoader(
dataset,
batch_size=8,
collate_fn=dataset.collate_fn,
)
print(f"Loaded {len(dataset)} file after {time.time()-_start}")
synthesize(model, args.restore_step, configs, vocoder, batchs, control_values)
if args.mode == "single":
ids = raw_texts = [args.text[:100]]
speakers = np.array([args.speaker_id])
if preprocess_config["preprocessing"]["text"]["language"] == "en":
texts = np.array([preprocess_english(
args.text, preprocess_config)])
elif preprocess_config["preprocessing"]["text"]["language"] == "zh":
texts = np.array(preprocess_mandarin(
args.text, preprocess_config))
text_lens = np.array([len(texts)])
batchs = [(ids, raw_texts, speakers, texts, text_lens, max(text_lens))]
synthesize(model, args.restore_step, configs,
vocoder, batchs, control_values)
if args.mode == "single_wav":
ids = raw_texts = [args.text[:100]]
speakers = torch.tensor([args.speaker_id])
if preprocess_config["preprocessing"]["text"]["language"] == "en":
# texts = torch.tensor(preprocess_english(args.text, preprocess_config))
texts = torch.tensor(preprocess_vie(
args.text, './lexicon/viet-tts-lexicon.txt', 'vietnamese_cleaners'))
elif preprocess_config["preprocessing"]["text"]["language"] == "zh":
texts = torch.tensor(preprocess_mandarin(
args.text, preprocess_config))
# preprocess_vie.save('./script_preprocess_vie.pt')
text_lens = torch.tensor([len(texts[0])])
batchs = [(ids, raw_texts, speakers, texts, text_lens, max(text_lens))]
# synthesize_wav(model, args.restore_step, configs, vocoder, batchs, control_values)
from e2e import E2E
e2e_model = E2E('./script_model.pt', './script_vocoder.pt',
model_config, preprocess_config)
# e2e_model = torch.jit.script(e2e_model)
# e2e_model.save('./script_e2e.pt')
# e2e_model = torch.jit.load('./script_e2e.pt')
wav_files = e2e_model(to_device(batchs[0], device))
print(wav_files)
| StarcoderdataPython |
1792241 | from .brfunds import * | StarcoderdataPython |
109262 | <filename>stock_prediction.py
# -*- coding: utf-8 -*-
"""stock-prediction.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1amgI6VqbJRj8XSLlozTmme5ZPjL3Pi3B
"""
#pip install quandl
# Import required libraries
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import csv
from sklearn.linear_model import LinearRegression
from sklearn.svm import SVR
from sklearn.linear_model import Ridge
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import make_pipeline
from sklearn.neighbors import KNeighborsRegressor
import quandl
dates = []
adj_price = []
def get_data():
    """Download TSLA history from Quandl and append values to the
    module-level ``adj_price`` and ``dates`` lists (no clearing first)."""
    df = quandl.get('WIKI/TSLA')
    #print(df.head())
    # reset index to turn the Date index into a regular column
    df = df.reset_index()
    # collect adjusted close prices
    for row in df['Adj. Close']:
        adj_price.append(row)
    # collect the day-of-month component of each date
    for row in df['Date']:
        dates.append(int(row.strftime('%d')))
    return
def get_csv_data(filename):
    """Load dates and adjusted prices from a CSV into the module-level
    ``dates`` and ``adj_price`` lists (appends; does not clear first)."""
    with open(filename, 'r') as csv_file:
        csv_file_reader = csv.reader(csv_file)
        # skip the header row
        next(csv_file_reader)
        for row in csv_file_reader:
            # NOTE(review): for an ISO date like 2010-06-29, split('-')[0] is
            # the YEAR, while get_data() collects the day of month ('%d') —
            # confirm which component is intended ([2] would be the day).
            dates.append(int(row[0].split('-')[0]))
            adj_price.append(float(row[1]))
    return
def show_plot(dates, prices):
    """Fit a linear regression to (dates, prices) and display a scatter
    plot with the fitted line. Blocks until the window is closed."""
    lr = LinearRegression()
    # reshape into the (n_samples, 1) column vectors sklearn expects
    dates = np.reshape(dates, (len(dates), 1))
    prices = np.reshape(prices, (len(prices), 1))
    # train the model
    lr.fit(dates, prices)
    plt.scatter(dates, prices, color = 'yellow')
    plt.plot(dates, lr.predict(dates), color = 'blue', linewidth = 3)
    plt.show()
    return
get_csv_data('TSLA.csv')
show_plot(dates, adj_price)
def predict_price_linear_regression(dates, prices, x):
    """Fit a linear model on (dates, prices) and predict the price at *x*
    (a 2-D sample such as [[29]]).

    Returns (predicted_price, slope, intercept). Shows a blocking plot.
    """
    lr = LinearRegression()
    # reshape into (n_samples, 1) column vectors
    dates = np.reshape(dates, (len(dates), 1))
    prices = np.reshape(prices, (len(prices), 1))
    # train the model
    lr.fit(dates, prices)
    predicted_price = lr.predict(x)
    # Create a plot
    plt.scatter(dates, prices, color = 'black', label = 'Data')
    plt.plot(dates, lr.predict(dates), color = 'red', label = 'Linear Model')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Linear Regression')
    plt.legend()
    plt.show()
    return predicted_price[0][0], lr.coef_[0][0], lr.intercept_[0 ]
def predict_price_svr_regression(dates, prices, x):
    """Fit an RBF-kernel SVR on (dates, prices), plot it and predict the
    price at ``x`` (a 2-D list such as ``[[29]]``).

    NOTE(review): ``prices`` is passed as an (n, 1) column; sklearn's SVR
    expects a 1-D y and will ravel it with a warning -- confirm intended.
    """
    svr_rbf = SVR(kernel='rbf', C=1e3, gamma=0.1)
    # reshape to column vectors
    dates = np.reshape(dates, (len(dates), 1))
    prices = np.reshape(prices, (len(prices), 1))
    # train the model
    svr_rbf.fit(dates, prices)
    svr_predicted_price = svr_rbf.predict(x)
    # Create a plot
    plt.scatter(dates, prices, color = 'black', label = 'Data') # init data point
    plt.plot(dates, svr_rbf.predict(dates), color = 'red', label = 'RBF Model')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Support Vector Regression')
    plt.legend()
    plt.show()
    return svr_predicted_price[0]
def predict_price_poly_regression(dates, prices, x):
    """Fit a degree-2 polynomial (ridge-regularised) on (dates, prices),
    plot it and predict the price at ``x`` (a 2-D list such as ``[[29]]``)."""
    # Quadratic Regression 2
    poly2 = make_pipeline(PolynomialFeatures(2), Ridge())
    # reshape to column vectors
    dates = np.reshape(dates, (len(dates), 1))
    prices = np.reshape(prices, (len(prices), 1))
    #Train the model
    poly2.fit(dates, prices)
    #Predict
    poly2_prediction = poly2.predict(x)
    #Plot
    plt.scatter(dates, prices, color = 'black', label = 'Data')
    plt.plot(dates, poly2.predict(dates), color = 'red', label = 'Poly 2 Model')
    plt.xlabel('Date')
    plt.ylabel('Price')
    plt.title('Polynomial Regression 2')
    plt.legend()
    plt.show()
    return poly2_prediction[0][0]
def predict_price_knn_regression(dates, prices, x):
    """Fit a k=2 nearest-neighbours regressor on (dates, prices), plot it
    and predict the price at ``x`` (a 2-D list such as ``[[29]]``)."""
    # KNN Regressor
    knn = KNeighborsRegressor(n_neighbors = 2)
    # reshape to column vectors
    dates = np.reshape(dates, (len(dates), 1))
    prices = np.reshape(prices, (len(prices), 1))
    #Train the model
    knn.fit(dates, prices)
    #Predict
    knn_prediction = knn.predict(x)
    #plot
    plt.scatter(dates, prices, color = 'black', label = 'Data')
    plt.plot(dates, knn.predict(dates), color = 'red', label = 'KNN Model')
    plt.xlabel('Date')
    plt.ylabel('Price')
    # Fixed: the title previously read 'Polynomial Regression 2' (copy-paste).
    plt.title('KNN Regression')
    plt.legend()
    plt.show()
    return knn_prediction[0][0]
# Data was already loaded by the get_csv_data('TSLA.csv') call above; calling
# it again here appended every CSV row a *second* time to the module-level
# dates/adj_price lists (they are never cleared), so the redundant reload was
# removed.
# Linear Regression
lr_predicted_price, lr_coef, lr_const = predict_price_linear_regression(dates, adj_price, [[29]])
print(f'Predicted Price LR: {lr_predicted_price}')
print(f'COEF LR: {lr_coef}')
print(f'COST LR: {lr_const}')
# SVR Regression
svr_predicted_price = predict_price_svr_regression(dates, adj_price, [[29]])
print(f'Predicted Price SVR: {svr_predicted_price}')
# Polynomial Regression
poly2_predicted_price = predict_price_poly_regression(dates, adj_price, [[29]])
print(f'Predicted Price Poly 2: {poly2_predicted_price}')
# KNN Regression
knn_predicted_price = predict_price_knn_regression(dates, adj_price, [[29]])
print(f'Predicted Price KNN: {knn_predicted_price}')
print(f'Predicted Price KNN: {knn_predicted_price}') | StarcoderdataPython |
3348002 | <reponame>michsmit99/snapback
################################################################################
# _ ____ ___ _____ _ _ _ _ #
# / \ / ___|_ _| |_ _|__ ___ | | | _(_) |_ #
# / _ \| | | | | |/ _ \ / _ \| | |/ / | __| #
# / ___ \ |___ | | | | (_) | (_) | | <| | |_ #
# ____ /_/ \_\____|___|___|_|\___/ \___/|_|_|\_\_|\__| #
# / ___|___ __| | ___ / ___| __ _ _ __ ___ _ __ | | ___ ___ #
# | | / _ \ / _` |/ _ \ \___ \ / _` | '_ ` _ \| '_ \| |/ _ \/ __| #
# | |__| (_) | (_| | __/ ___) | (_| | | | | | | |_) | | __/\__ \ #
# \____\___/ \__,_|\___| |____/ \__,_|_| |_| |_| .__/|_|\___||___/ #
# |_| #
################################################################################
# #
# Copyright (c) 2015 Cisco Systems #
# All Rights Reserved. #
# #
# Licensed under the Apache License, Version 2.0 (the "License"); you may #
# not use this file except in compliance with the License. You may obtain #
# a copy of the License at #
# #
# http://www.apache.org/licenses/LICENSE-2.0 #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# License for the specific language governing permissions and limitations #
# under the License. #
# #
################################################################################
"""
Simple demonstration application configuring a basic ACI fabric
"""
from acitoolkit.acisession import Session
from acitoolkit.acitoolkit import Credentials, Tenant, AppProfile, EPG
from acitoolkit.acitoolkit import Context, BridgeDomain, Contract, FilterEntry
def main():
    """ Create 2 EPGs within the same Context and have
        1 EPG provide a contract to the other EPG.

        Builds the tenant/app/EPG/contract object tree with acitoolkit,
        logs in to the APIC and pushes the configuration; prints the JSON
        that was sent when the push succeeds.
    """
    description = ('Create 2 EPGs within the same Context and have'
                   '1 EPG provide a contract to the other EPG.')
    creds = Credentials('apic', description)
    args = creds.get()
    # Create the Tenant
    tenant = Tenant('aci-toolkit-demo')
    # Create the Application Profile
    app = AppProfile('my-demo-app', tenant)
    # Create the EPGs
    web_epg = EPG('web-frontend', app)
    db_epg = EPG('database-backend', app)
    # Create a Context and BridgeDomain
    # Place both EPGs in the Context and in the same BD
    context = Context('VRF-1', tenant)
    bd = BridgeDomain('BD-1', tenant)
    bd.add_context(context)
    web_epg.add_bd(bd)
    db_epg.add_bd(bd)
    # Define a contract with a single entry (MySQL: TCP destination port 3306)
    contract = Contract('mysql-contract', tenant)
    entry1 = FilterEntry('entry1',
                         applyToFrag='no',
                         arpOpc='unspecified',
                         dFromPort='3306',
                         dToPort='3306',
                         etherT='ip',
                         prot='tcp',
                         sFromPort='1',
                         sToPort='65535',
                         tcpRules='unspecified',
                         parent=contract)
    # Provide the contract from 1 EPG and consume from the other
    db_epg.provide(contract)
    web_epg.consume(contract)
    # Login to APIC and push the config
    # NOTE(review): the login response is not checked; a failed login only
    # surfaces when push_to_apic fails.
    session = Session(args.url, args.login, args.password)
    session.login()
    # Cleanup (uncomment the next line to delete the config)
    # tenant.mark_as_deleted()
    resp = tenant.push_to_apic(session)
    if resp.ok:
        # Print what was sent
        print('Pushed the following JSON to the APIC')
        print('URL: ' + str(tenant.get_url()))
        print('JSON: ' + str(tenant.get_json()))
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Allow Ctrl-C to end the demo quietly, without a traceback.
        pass
| StarcoderdataPython |
157965 | <reponame>ManthanKeim/code-attempt1<filename>code1/sample.py<gh_stars>0
print(__name__)
def cool():
    """Print a short compliment to stdout."""
    compliment = "You are Cool"
    print(compliment)
4819117 | import collections
import os
from statistics import mode
from typing import Any, Iterable, List, Tuple
import joblib
import numpy as np
from xarm_hand_control.processing.classifier_base import Classifier
class RandomForest(Classifier):
    """Hand-landmark classifier backed by a pre-trained model loaded with
    joblib (scikit-learn style: the model exposes ``predict``)."""
    # Loaded estimator.
    model: Any
    model_path: os.PathLike
    # List of class descriptors; each entry is indexable and has a "name" key.
    classes: list
    # Number of recent results kept for majority voting.
    buffer_size: int
    classification_buffer: collections.deque
    def __init__(self, buffer_size: int = 5):
        self.buffer_size = buffer_size
        self.classification_buffer = collections.deque(maxlen=buffer_size)
    def load_model(self, model_path: os.PathLike, classes: list) -> None:
        """Load the serialized model from disk and remember the class list."""
        self.model_path = model_path
        self.classes = classes
        self.model = joblib.load(self.model_path)
    def format_data(self, data: Iterable) -> List[np.ndarray]:
        """Flatten each hand's landmark (x, y) pairs into a 1-row feature array."""
        ret = []
        for hand_landmarks in data:
            hand_landmarks_xys = [
                [point.x, point.y] for point in hand_landmarks.landmark
            ]
            ret.append(
                np.array(
                    [
                        hand_landmarks_xys,
                    ]
                ).reshape(1, -1)
            )
        return ret
    def run_classification(self, data: List[np.ndarray]) -> Tuple[str]:
        """Predict a class name per hand and push the tuple onto the buffer."""
        ret = []
        for item in data:
            result_one = self.model.predict(item)[0]
            ret.append(self.classes[result_one]["name"])
        ret = tuple(ret)
        self.classification_buffer.appendleft(ret)
        return ret
    def get_most_common(self) -> Tuple[str]:
        """Return the most frequent classification tuple in the buffer."""
        return mode(self.classification_buffer)
| StarcoderdataPython |
4800046 | """urlconf for the base application"""
from django.conf.urls import url, patterns
# No URLs yet: an empty pattern list with view names resolved against base.views.
urlpatterns = patterns(
    "base.views",
)
| StarcoderdataPython |
3332815 | <gh_stars>0
import argparse
import sys
from pathlib import Path
from .photorename import Renamer
from .version import __version__
version = "1.0.0"
def main():
    """Parse command-line arguments and run the photo Renamer, or print the
    package version and exit when -v/--version is given."""
    parser = argparse.ArgumentParser(
        description='Bulk rename pictures in a directory')
    parser.add_argument('-i', '--input', dest='input', default='.',
                        help='input directory with the fotos which should be ordered')
    parser.add_argument('-n', '--name', dest='name', default='pic',
                        help='base name for pictures; the filename is extended with a number (e.g. pic-001.png)')
    parser.add_argument('-o', '--output', dest='output', default='.',
                        help="""output directory for the sorted pictures, if not used default is the current
                        directory""")
    parser.add_argument('-v', '--version', dest='version', action='store_true',
                        help="Show version")
    args = parser.parse_args()
    if args.version:
        # Version request short-circuits before any renaming happens.
        print(f"{__version__}")
        sys.exit(0)
    renamer = Renamer(args.input, args.name, args.output)
    print(renamer)
    renamer.do()
| StarcoderdataPython |
1744773 | <gh_stars>0
#-*- coding: utf-8 -*-
import datetime, random, math
class ProductModel:
    """List-item view model for a product: type label, optional image URL and
    mock day/week/month price-change figures."""

    # product_type code -> display label; anything else maps to ''.
    _TYPE_LABELS = {1: '농산물', 2: '수산물', 3: '축산물'}

    def __init__(self, product_id, product_type, product_name, img_url=None):
        self.id = product_id
        self.type = self._TYPE_LABELS.get(product_type, '')
        self.name = product_name
        self.img_url = img_url
        # Mock change figures, uniformly drawn integers in [-5, 4].
        self.change_day = self._random_change()
        self.change_week = self._random_change()
        self.change_month = self._random_change()

    @staticmethod
    def _random_change():
        # floor((U[0,1) - 0.5) * 10) yields an int in [-5, 4].
        return int(math.floor((random.random() - 0.5) * 10))

    def setSeason(self, season_start, season_end):
        """Store the season window as 'MM-DD' strings and flag whether today's
        date falls inside it (windows wrapping over new year are supported)."""
        today = datetime.date.today()
        current = today.month * 100 + today.day
        start = season_start.month * 100 + season_start.day
        end = season_end.month * 100 + season_end.day
        self.season_start = '%02d-%02d' % (season_start.month, season_start.day)
        self.season_end = '%02d-%02d' % (season_end.month, season_end.day)
        in_order = start <= end and start <= current <= end
        wrapped = start >= end and (current <= end or current >= start)
        self.season = in_order or wrapped
class CommentModel:
    """View model for a single user comment on a product."""

    def __init__(self, comment_id, user_email, comment_content, timestamp):
        # Map the constructor arguments onto the attribute names templates use.
        self.id, self.email = comment_id, user_email
        self.content, self.timestamp = comment_content, timestamp
class StoreModel:
    """View model for a store location (a map pin)."""

    def __init__(self, store_name, latitude, longitude):
        self.name = store_name
        self.latitude, self.longitude = latitude, longitude
class PriceChartModel:
    """Chart series for one product class: a '#RRGGBB' label color with cached
    decimal R/G/B components, plus the price values to plot.

    Fixed: removed leftover Python-2 debug ``print`` statements from
    setLabel_color (they also made the class unimportable on Python 3).
    """
    def __init__(self, product_class_id, product_class_name):
        # Neutral gray default until setLabel_color() is called.
        self.label_color = "#AAAAAA"
        self.label_color_r = int("AA", 16)
        self.label_color_g = int("AA", 16)
        self.label_color_b = int("AA", 16)
        self.product_class_id = product_class_id
        self.product_class_name = product_class_name
    def setPrice_values(self, price_values):
        """Store the series' y-axis values."""
        self.price_values = price_values
    def setLabel_color(self, label_color):
        """Store a '#RRGGBB' hex color and cache its decimal components."""
        self.label_color = label_color
        self.label_color_r = int(label_color[1:3], 16)
        self.label_color_g = int(label_color[3:5], 16)
        self.label_color_b = int(label_color[5:], 16)
| StarcoderdataPython |
3370642 | <gh_stars>10-100
#The MIT License (MIT)
#
#Copyright (c) 2017, <NAME>
#
#Permission is hereby granted, free of charge, to any person obtaining a copy of this software and
#associated documentation files (the "Software"), to deal in the Software without restriction,
#including without limitation the rights to use, copy, modify, merge, publish, distribute,
#sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is
#furnished to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all copies or
#substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT
#NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
#NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,
#DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
#OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
#Reads RUC-style PCRT constrained routing files, based on those from <NAME> "Routing Under Constraints" (FMCAD 2016).
#From README:
# ; 2D Grid definition
# G X Y; Comments
# ; Nets
# N vid1 vid2 ... vidn; Comments
# ; Constraints
# C vid1 vid2 ... vidn; Comments
# ; Disabled vertices
# D vid1; Comments
#Returns a tuple, ((GridWidth,GridHeight),(Tuples of vertices to route), (Tuples of vertices that cannot be used simultaneously))
def read(filename_or_stream):
    """Parse a RUC-style PCRT constrained-routing file.

    Accepts either a path or an already-open text stream (the stream is
    closed when parsing finishes). Directives:
      G X Y [45]  -- grid size, optional '45' enables diagonal routing
      N id...     -- a net: vertex ids to connect
      C id...     -- a constraint: vertices that cannot be used together
      D id        -- a disabled vertex
    A vertex id v maps to grid coordinates (v mod X, v // X).

    Returns ((X, Y), allow_diagonals, nets, constraints, disabled) where all
    vertex ids have been converted to (x, y) tuples.
    """
    if isinstance(filename_or_stream, str):
        stream = open(filename_or_stream, "r", encoding="utf-8")
    else:
        stream = filename_or_stream
    width = None
    height = None
    nets = []
    constraints = []
    disabled = []
    diagonals = False

    def to_vertex(vertex_id):
        # Requires the grid to have been declared first (a 'G' line).
        assert width is not None
        assert height is not None
        assert vertex_id >= 0
        row = vertex_id // width
        col = vertex_id - row * width
        assert 0 <= col <= width
        assert 0 <= row <= height
        return (col, row)

    try:
        for raw_line in stream:
            text = raw_line.rstrip()
            if text.startswith(";"):
                continue
            # Strip trailing comments introduced by ';'.
            text = text.partition(';')[0].strip()
            if not text:
                continue
            tokens = text.split()
            keyword = tokens[0]
            if keyword == "G":
                assert len(tokens) in (3, 4)
                width = int(tokens[1])
                height = int(tokens[2])
                if len(tokens) >= 4 and tokens[3] == '45':
                    # This file supports 45-degree (diagonal) routing.
                    diagonals = True
            elif keyword == "N":
                nets.append([to_vertex(int(t)) for t in tokens[1:]])
            elif keyword == "C":
                constraints.append([to_vertex(int(t)) for t in tokens[1:]])
            elif keyword == "D":
                assert len(tokens) == 2
                disabled.append(to_vertex(int(tokens[1])))
    finally:
        stream.close()
    return ((width, height), diagonals, nets, constraints, disabled)
if __name__ == '__main__':
import sys
print(read(sys.argv[1]))
| StarcoderdataPython |
4817091 | import sys
from datetime import datetime
from sqlalchemy import INTEGER, TIMESTAMP, VARCHAR, Column, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from . import config
reload(sys)
sys.setdefaultencoding('utf-8')
Base = declarative_base()
class Message(Base):
    """ORM mapping for the Message table."""
    __tablename__ = "Message"
    # Message body, up to 1024 characters.
    message = Column(VARCHAR(1024))
    # NOTE(review): the timestamp is the primary key, so two messages stored
    # within the same second would collide -- confirm this is intended.
    timestamp = Column(TIMESTAMP, primary_key=True)
    # Time-to-live, defaults to 10 (units not shown in this file).
    TTL = Column(INTEGER, default=10)
    def __init__(self, message, timestamp):
        self.message = message
        self.timestamp = timestamp
    def __repr__(self):
        return "<Message %s>" % self.message
class DB():
    """Thin wrapper owning a single SQLAlchemy session to the MySQL database
    described by the 'database' section of the config."""
    def __init__(self):
        db_config = config.get('database')
        engine = create_engine("mysql+mysqldb://{user}:{password}@{host}:{port}/{database}?charset=utf8".format(
            user=db_config['user'], password=db_config['password'], host=db_config[
                'host'], port=db_config['port'], database=db_config['database']
        ), encoding="utf-8")
        Session = sessionmaker(bind=engine)
        self.session = Session()
    def send_message(self, message):
        """Insert one Message row stamped with the current local time."""
        now = datetime.now()
        time = now.strftime("%Y-%m-%d %H:%M:%S")
        test = Message(message=message, timestamp=time)
        # rollback() first discards any earlier failed/pending state left on
        # the long-lived session before adding the new row.
        self.session.rollback()
        self.session.add(test)
        self.session.commit()
        # NOTE(review): the session is closed after the first send; further
        # sends rely on SQLAlchemy reopening it -- confirm intended.
        self.session.close()
| StarcoderdataPython |
189232 | # -*- coding: utf-8 -*-
def main():
    """Answer q queries: for point i, print the maximum Manhattan distance to
    any other point, using the Chebyshev (rotated-coordinate) trick."""
    import sys
    read_line = sys.stdin.readline
    n, q = map(int, read_line().split())
    # Rotate coordinates: u = x + y, v = x - y. Max Manhattan distance equals
    # the max Chebyshev distance in (u, v) space.
    u = [0] * n
    v = [0] * n
    for idx in range(n):
        a, b = map(int, read_line().split())
        u[idx] = a + b
        v[idx] = a - b
    u_lo, u_hi = min(u), max(u)
    v_lo, v_hi = min(v), max(v)
    for _ in range(q):
        k = int(read_line()) - 1
        print(max(u_hi - u[k], u[k] - u_lo, v_hi - v[k], v[k] - v_lo))
if __name__ == "__main__":
main()
| StarcoderdataPython |
1601250 | from xml.etree import ElementTree
from .value import Value, Pair
class ResponseParsingError(Exception):
pass
def parse_responses(str_elements):
    """
    Parse a string of concatenated XML elements into Response objects.

    In the ideal case ``str_elements`` holds several valid sibling XML
    elements, but it is not a proper XML document because they share no
    root; a synthetic <root> wrapper is added before parsing.
    """
    wrapped = "<root>" + str_elements + "</root>"
    root = ElementTree.fromstring(wrapped)
    return [Response.from_element(child) for child in root]
class Response:
    """Base class for parsed protocol responses (feedback or value)."""

    def is_feedback(self):
        """True when this response is a Feedback."""
        return isinstance(self, Feedback)

    def is_value(self):
        """True when this response is a ValueResp."""
        return isinstance(self, ValueResp)

    @classmethod
    def from_element(cls, e):
        """Dispatch on the element tag to the concrete response class."""
        dispatch = {"feedback": Feedback, "value": ValueResp}
        handler = dispatch.get(e.tag)
        assert handler is not None, 'unhandled xml element tag %s.' % e.tag
        return handler.from_element(e)
class Feedback(Response):
    """Feedback response carrying the state id it refers to."""

    def __init__(self, state_id):
        self.state_id = state_id

    @classmethod
    def from_element(cls, e):
        """Build a Feedback from an element whose first child is the state id."""
        first_child = e[0]
        assert first_child.tag == 'state_id', 'invalid feedback element that does not starts with a state_id'
        return cls(Value.from_element(first_child))
class ValueResp(Response):
    """Value response: either a successful payload ('good') or a failure
    ('fail') with an error message and optional source location."""

    def __init__(self, val, data, errormsg=None, errorstart=None, errorend=None):
        self.val = val
        self.data = data
        self.errormsg = errormsg
        self.errorstart = errorstart
        self.errorend = errorend

    def succeed(self):
        """Return True when the response carries a successful value."""
        return self.val == 'good'

    @classmethod
    def from_element(cls, e):
        """Build a ValueResp from a <value> element, branching on its 'val'
        attribute ('good' wraps the payload, 'fail' extracts the error)."""
        if e.attrib['val'] == 'good':
            # len(e) counts child elements; Element.getchildren() used here
            # previously was removed in Python 3.9.
            assert len(e) == 1
            return cls(
                e.attrib['val'],
                Value.from_element(e[0])
            )
        elif e.attrib['val'] == 'fail':
            return cls(
                e.attrib['val'],
                data=None,
                errormsg=e[1][0][0].text,
                # Optional error-location attributes; absent on some failures.
                errorstart=e.attrib.get('loc_s'),
                errorend=e.attrib.get('loc_e'),
            )
| StarcoderdataPython |
1725758 | ##
# Copyright 2021 IBM Corp. All Rights Reserved.
#
# SPDX-License-Identifier: Apache-2.0
##
from .model import Model
from .symbolic import (Proposition, Predicate, And, Or,
Implies, Bidirectional, Not, ForAll, Exists, Variable,
NeuralActivationClass)
from .utils import (truth_table, truth_table_dict, predicate_truth_table,
plot_graph, plot_loss, plot_params, fact_to_bool,
bool_to_fact)
from .constants import Fact, World, Direction, Join
# constants
# Short aliases so callers can use e.g. UPWARD instead of Direction.UPWARD.
UPWARD = Direction.UPWARD
DOWNWARD = Direction.DOWNWARD
TRUE = Fact.TRUE
FALSE = Fact.FALSE
UNKNOWN = Fact.UNKNOWN
CONTRADICTION = Fact.CONTRADICTION
AXIOM = World.AXIOM
OPEN = World.OPEN
CLOSED = World.CLOSED
Lukasiewicz = NeuralActivationClass.Lukasiewicz
LukasiewiczTransparent = NeuralActivationClass.LukasiewiczTransparent
OUTER = Join.OUTER
INNER = Join.INNER
OUTER_PRUNED = Join.OUTER_PRUNED
# Explicit public API of the package (controls `from ... import *`).
__all__ = [
    'Model',
    'Proposition', 'Predicate',
    'And', 'Or', 'Implies', 'Bidirectional', 'Not',
    'ForAll', 'Exists', 'Variable',
    'truth_table', 'truth_table_dict', 'predicate_truth_table',
    'fact_to_bool', 'bool_to_fact',
    'plot_graph', 'plot_loss', 'plot_params',
    'Direction',
    'UPWARD', 'DOWNWARD',
    'Fact', 'World',
    'OPEN', 'CLOSED', 'AXIOM',
    'TRUE', 'FALSE', 'UNKNOWN', 'CONTRADICTION',
    'Lukasiewicz', 'LukasiewiczTransparent',
    'Join',
    'OUTER', 'INNER', 'OUTER_PRUNED'
]
| StarcoderdataPython |
155769 | import numpy as np
import torch
import math
def TLift(in_score, gal_cam_id, gal_time, prob_cam_id, prob_time, num_cams, tau=100, sigma=200, K=10, alpha=0.2):
"""Function for the Temporal Lifting (TLift) method
TLift is a model-free temporal cooccurrence based score weighting method proposed in
<NAME> and <NAME>, "Interpretable and Generalizable Person Re-Identification with Query-Adaptive
Convolution and Temporal Lifting." In The European Conference on Computer Vision (ECCV), 23-28 August, 2020.
Inputs:
in_score: the similarity score of size [num_probs, num_gals] between the gallery and probe sets.
gal_cam_id: camera index for samples in the gallery set, starting from 0 and continuously numbered.
gal_time: time stamps of samples in the gallery set.
prob_cam_id: camera index for samples in the probe set, starting from 0 and continuously numbered.
prob_time: time stamps of samples in the probe set.
num_cams: the number of cameras.
tau: the interval threshold to define nearby persons. Default: 100.
sigma: the sensitivity parameter of the time difference. Default: 200.
K: parameter of the top K retrievals used to define the pivot set P. Default: 10.
alpha: regularizer for the multiplication fusion. Default: 0.2.
All the cam_id and time inputs are 1-dim vectors, and they are in the same order corresponding to
the first axis (probe) or second axis (gallery) of the in_score.
Outputs:
out_score: the refined score by TLift, with the same size as the in_score.
Comments:
The default alpha value works for the sigmoid or re-ranking matching scores. Otherwise, it is
suggested that your input scores are distributed in [0, 1], with an average score around 0.01-0.1
considering many negative matching pairs. To apply TLift directly on QAConv scores, please use
score = torch.sigmoid(score) instead of the scaled scores in qaconv.py.
Author:
<NAME>, reimplemented by <NAME>
<EMAIL>
Version:
V1.1
July 12, 2020
"""
out_score = torch.tensor(np.zeros_like(in_score))
if torch.cuda.is_available():
out_score = out_score.cuda()
if len(prob_time.shape) == 1:
prob_time = prob_time[np.newaxis, :]
prob_time_diff = prob_time - np.transpose(prob_time)
cooccur_mask = (abs(prob_time_diff) < tau)
g_sam_index = []
score = []
gal_time_diff = []
for g_cam in range(num_cams):
g_sam_index.append(np.where(gal_cam_id == g_cam)[0]) # camera id starting with 0.
score.append(in_score[:, g_sam_index[g_cam]])
frame_id = gal_time[g_sam_index[g_cam]]
if len(frame_id.shape) == 1:
frame_id = frame_id[np.newaxis, :]
gal_time_diff.append(
torch.tensor(frame_id - np.transpose(frame_id), dtype=out_score.dtype).to(out_score.device))
for p_cam in range(num_cams):
p_sam_index = np.where(prob_cam_id == p_cam)[0]
c_mask = cooccur_mask[p_sam_index][:, p_sam_index]
num_prob = len(p_sam_index)
for g_cam in range(num_cams):
# if p_cam == g_cam: # in some public datasets they still evaluate negative pairs in the same camera
# continue
prob_score = score[g_cam][p_sam_index, :]
for i in range(num_prob):
cooccur_index = np.where(c_mask[:, i] == True)[0]
cooccur_score = prob_score[cooccur_index, :]
sorted_score = np.sort(cooccur_score, axis=None)
if sorted_score.shape[0] > K:
thr = sorted_score[-K]
else:
thr = sorted_score[0]
mask_in_gal = np.where(cooccur_score >= thr)[1]
dt = gal_time_diff[g_cam][:, mask_in_gal]
weight = torch.mean(torch.exp(-1 * torch.pow(dt, 2).to(dtype=out_score.dtype) / math.pow(sigma, 2)),
dim=1)
out_score[p_sam_index[i], g_sam_index[g_cam]] = weight
out_score = out_score.cpu().numpy()
out_score = (out_score + alpha) * in_score
return out_score
if __name__ == '__main__':
    # Smoke test: random scores and camera/time metadata over 5 cameras.
    in_score = np.random.randn(50, 100)
    gal_cam_id = np.random.randint(0, 5, (100))
    gal_time = np.random.randint(0, 20, (100))
    prob_cam_id = np.random.randint(0, 5, (50))
    prob_time = np.random.randint(0, 20, (50))
    num_cams = 5
    TLift(in_score, gal_cam_id, gal_time, prob_cam_id, prob_time, num_cams)
| StarcoderdataPython |
14553 | from testing_config import BaseTestConfig
from application.models import User
from application.models import Chatroom
import json
from application.utils import auth
class TestMatch(BaseTestConfig):
    """API tests for suggestion queries and group creation."""

    test_group = {
        "name": "test_group",
        "tag": "Poker",
    }
    test_group2 = {
        "name": "test_group2",
        "tag": "Study",
    }
    tag_p = {"query_tag": "Poker"}
    tag_s = {"query_tag": "Study"}
    tag_o = {"query_tag": "Outdoor"}
    tag_l = {"query_tag": "Life"}
    tag_t = {"query_tag": "Test"}
    testrm_1 = {"room_id": 1}
    testrm_2 = {"room_id": 185}
    testrm_3 = {"room_id": 4}

    def test_get_suggestions(self):
        """/api/get_suggestions returns 200 for known and unknown tags."""
        res = self.app.post(
            "/api/get_suggestions",
            data=json.dumps(self.tag_p),
            content_type='application/json'
        )
        res1 = self.app.post(
            "/api/get_suggestions",
            data=json.dumps(self.tag_s),
            content_type='application/json'
        )
        res2 = self.app.post(
            "/api/get_suggestions",
            data=json.dumps(self.tag_o),
            content_type='application/json'
        )
        self.assertEqual(res.status_code, 200)
        self.assertEqual(res1.status_code, 200)
        self.assertEqual(res2.status_code, 200)
        res3 = self.app.post(
            "/api/get_suggestions",
            data=json.dumps(self.tag_t),
            content_type='application/json'
        )
        # Fixed: this previously re-asserted res2 instead of checking res3.
        self.assertEqual(res3.status_code, 200)

    def test_create_group(self):
        """Creating two groups returns sequential ids and HTTP 200."""
        res = self.app.post(
            "/api/create_group",
            data=json.dumps(self.test_group),
            content_type='application/json'
        )
        self.assertEqual(json.loads(res.data.decode("utf-8"))["results"], 2)
        self.assertEqual(res.status_code, 200)
        res = self.app.post(
            "/api/create_group",
            data=json.dumps(self.test_group2),
            content_type='application/json'
        )
        self.assertEqual(json.loads(res.data.decode("utf-8"))["results"], 3)
| StarcoderdataPython |
35502 | # -*- coding: utf-8 -*-
import matplotlib.colors as colorplt
import matplotlib.pyplot as plt
import numpy as np
from sktime.distances._distance import distance_alignment_path, pairwise_distance
gray_cmap = colorplt.LinearSegmentedColormap.from_list("", ["#c9cacb", "white"])
def _path_mask(cost_matrix, path, ax, theme=gray_cmap):
    """Render the cost matrix as a heat map on ``ax``, highlighting the
    alignment path.

    Cells on the path are set to 1.0, infinite-cost cells to 0.0 and all
    other cells to 0.25; every cell is annotated with its rounded cost.
    NOTE(review): both loops run over max(cost_matrix.shape), which assumes
    a square matrix -- a non-square input would raise IndexError.
    """
    plot_matrix = np.zeros_like(cost_matrix)
    max_size = max(cost_matrix.shape)
    for i in range(max_size):
        for j in range(max_size):
            if (i, j) in path:
                plot_matrix[i, j] = 1.0
            elif cost_matrix[i, j] == np.inf:
                plot_matrix[i, j] = 0.0
            else:
                plot_matrix[i, j] = 0.25

    for i in range(max_size):
        for j in range(max_size):
            c = cost_matrix[j, i]
            # Fixed: the annotation call was duplicated, drawing every label twice.
            ax.text(i, j, str(round(c, 2)), va="center", ha="center", size=10)

    ax.matshow(plot_matrix, cmap=theme)
def _pairwise_path(x, y, metric):
    """Fallback "alignment": the diagonal of the pairwise distance matrix.

    Returns (diagonal index pairs, trace of the matrix, the matrix itself);
    used for metrics that do not implement an alignment path.
    """
    pw_matrix = pairwise_distance(x, y, metric=metric)
    n_rows, n_cols = pw_matrix.shape
    diagonal = [(k, k) for k in range(min(n_rows, n_cols))]
    return diagonal, pw_matrix.trace(), pw_matrix
def _plot_path(
    x: np.ndarray,
    y: np.ndarray,
    metric: str,
    dist_kwargs: dict = None,
    title: str = "",
    plot_over_pw: bool = False,
):
    """Plot the alignment path of ``metric`` between series x and y over its
    cost matrix, with the two series drawn along the matrix edges.

    ``plot_over_pw`` draws the path over a plain pairwise-distance matrix
    instead of the metric's accumulated cost matrix.
    NOTE(review): axis limits use ``x.shape[0]`` for both series, which
    assumes x and y have equal length -- confirm callers guarantee this.
    """
    if dist_kwargs is None:
        dist_kwargs = {}
    try:
        path, dist, cost_matrix = distance_alignment_path(
            x, y, metric=metric, return_cost_matrix=True, **dist_kwargs
        )
        if metric == "lcss":
            # Shift LCSS path indices by one; the plot_over_pw branch below
            # likewise leaves the first row/column of the matrix empty.
            _path = []
            for tup in path:
                _path.append(tuple(x + 1 for x in tup))
            path = _path
        if plot_over_pw is True:
            if metric == "lcss":
                pw = pairwise_distance(x, y, metric="euclidean")
                cost_matrix = np.zeros_like(cost_matrix)
                cost_matrix[1:, 1:] = pw
            else:
                pw = pairwise_distance(x, y, metric="squared")
                cost_matrix = pw
    except NotImplementedError:
        # Metrics with no alignment path fall back to the diagonal pairing.
        path, dist, cost_matrix = _pairwise_path(x, y, metric)

    plt.figure(1, figsize=(8, 8))
    x_size = x.shape[0]
    # definitions for the axes
    left, bottom = 0.01, 0.1
    w_ts = h_ts = 0.2
    left_h = left + w_ts + 0.02
    width = height = 0.65
    bottom_h = bottom + height + 0.02
    rect_s_y = [left, bottom, w_ts, height]
    rect_gram = [left_h, bottom, width, height]
    rect_s_x = [left_h, bottom_h, width, h_ts]
    ax_gram = plt.axes(rect_gram)
    ax_s_x = plt.axes(rect_s_x)
    ax_s_y = plt.axes(rect_s_y)
    _path_mask(cost_matrix, path, ax_gram)
    ax_gram.axis("off")
    ax_gram.autoscale(False)
    # ax_gram.plot([j for (i, j) in path], [i for (i, j) in path], "w-",
    # linewidth=3.)
    ax_s_x.plot(np.arange(x_size), y, "b-", linewidth=3.0, color="#818587")
    ax_s_x.axis("off")
    ax_s_x.set_xlim((0, x_size - 1))
    ax_s_y.plot(-x, np.arange(x_size), "b-", linewidth=3.0, color="#818587")
    ax_s_y.axis("off")
    ax_s_y.set_ylim((0, x_size - 1))
    ax_s_x.set_title(title, size=10)
    return plt
def _plot_alignment(x, y, metric, dist_kwargs: dict = None, title: str = ""):
    """Plot series x and y and draw dashed lines between aligned points.

    Falls back to the diagonal pairing for metrics without an alignment path.
    """
    if dist_kwargs is None:
        dist_kwargs = {}
    try:
        path, dist, cost_matrix = distance_alignment_path(
            x, y, metric=metric, return_cost_matrix=True, **dist_kwargs
        )
    except NotImplementedError:
        path, dist, cost_matrix = _pairwise_path(x, y, metric)

    plt.figure(1, figsize=(8, 8))
    plt.plot(x, "b-", color="black")
    plt.plot(y, "g-", color="black")

    for positions in path:
        try:
            plt.plot(
                [positions[0], positions[1]],
                [x[positions[0]], y[positions[1]]],
                "--",
                color="#818587",
            )
        except Exception:
            # Some paths index past the series (e.g. LCSS's shifted offsets);
            # skip those pairs. Narrowed from a bare ``except`` so that
            # KeyboardInterrupt/SystemExit are no longer swallowed.
            continue
    plt.legend()
    plt.title(title)
    plt.tight_layout()
    return plt
if __name__ == "__main__":
x = np.array(
[
-0.7553383207,
0.4460987596,
1.197682907,
0.1714334808,
0.5639929213,
0.6891222874,
1.793828873,
0.06570866314,
0.2877381702,
1.633620422,
]
)
y = np.array(
[
0.01765193577,
1.536784164,
-0.1413292622,
-0.7609346135,
-0.1767363331,
-2.192007072,
-0.1933165696,
-0.4648166839,
-0.9444888843,
-0.239523623,
]
)
import os
def _save_plt(plt):
plt[0].savefig(f"{metric_path}/{plt[1]}")
plt[0].cla()
plt[0].clf()
if not os.path.exists("./plots"):
os.makedirs("./plots")
metrics = [
"euclidean",
"erp",
"edr",
"lcss",
"squared",
"dtw",
"ddtw",
"wdtw",
"wddtw",
"msm",
]
# metrics = ['lcss']
for metric in metrics:
metric_path = f"./plots/{metric}"
if not os.path.exists(metric_path):
os.makedirs(metric_path)
save_plt(
(
_plot_path(x, y, metric, {"epsilon": 1.0}),
f"{metric}_path_through_cost_matrix",
)
)
_save_plt(
(
_plot_path(x, y, metric, {"window": 0.2, "epsilon": 1.0}),
f"{metric}_path_through_20_cost_matrix",
)
)
if metric == "wdtw":
g_val = [0.2, 0.3]
for g in g_val:
file_save = str(g).split(".")
_save_plt(
(
_plot_path(x, y, metric, {"g": g}),
f"{metric}_path_through_g{file_save[1]}_cost_matrix",
)
)
_save_plt((_plot_alignment(x, y, metric), f"{metric}_alignment"))
_save_plt(
(_plot_alignment(x, y, metric, {"window": 0.2}), f"{metric}_alignment_20")
)
| StarcoderdataPython |
3283707 | <reponame>fossabot/tkterminal<gh_stars>10-100
# Copyright 2021 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
def _bind(cls=None, *ags, **kw):
"""Internal function.\n
Binds and unbinds sequences with any name given as className."""
cls = cls or kw.pop('cls', ags.pop(0))
if ags:
return [_bind(cls=cls, **i) for i in ags]
classname = kw['className'] + str(cls)
bindtags = list(cls.bindtags())
if classname in bindtags:
bindtags.remove(classname)
if kw.get('func'):
_bind(cls, className=kw['className'], sequence=kw['sequence'])
bindtags.append(classname)
cls.bindtags(tuple(bindtags))
return cls.bind_class(classname, sequence=kw['sequence'],
func=kw['func'], add=kw.get('add', '+'))
cls.bindtags(tuple(bindtags))
cls.unbind_class(classname, kw['sequence'])
def threaded(fn=None, **kw):
    """Decorator that runs the wrapped function in a (by default daemon) thread.

    Usage:
        @threaded              -> the call returns the started Thread object
        @threaded(join=True)   -> joins and returns the function's result
        @threaded(True)        -> shorthand for join=True (as documented)
    Keyword options: join (bool), daemon (bool, default True).
    """
    if fn is not None and not callable(fn):
        # Support the documented @threaded(True) form: a bare truthy flag
        # means join=True (the original silently dropped it).
        kw.setdefault('join', bool(fn))
        fn = None

    def _threaded(func):
        def thread_func(*args, **kwargs):
            result = {}

            def wrapper():
                # Store the result per-call (the original wrote it into the
                # shared decorator kwargs, which went stale and raced between
                # concurrent calls).
                result['return'] = func(*args, **kwargs)

            thread = threading.Thread(target=wrapper,
                                      daemon=kw.get('daemon', True))
            thread.start()
            if kw.get('join'):
                thread.join()
                return result.get('return', thread)
            return thread
        return thread_func

    if fn and callable(fn):
        return _threaded(fn)
    return _threaded
| StarcoderdataPython |
3382544 | # -*- coding: utf-8 -*-
import scrapy
from MusicCourse.items import CNweikeItem
import json
class CnweikeSpider(scrapy.Spider):
    """Scrapy spider that pages through cnweike.cn micro-lesson search JSON."""
    name = 'cnweike'
    allowed_domains = ['cnweike.cn']
    # Search endpoint returning JSON; the page number is appended to this URL.
    url = 'http://dasai.cnweike.cn/index.php?r=matchV4/search/GetJson&pageSize=10&type=weike&order=quality&keyword=&subject=7&pointOne=0&pointTwo=0&pointThree=0&typeID=51&page='
    custom_settings = {
        "ITEM_PIPELINES": {
            'MusicCourse.pipelines.CNweikePipeline': 300,
        }
    }
    # Current page number (1-based); capped at 45 pages below.
    offset = 1
    start_urls = [url + str(offset)]
    def parse(self, response):
        """Yield one CNweikeItem per course entry, then request the next page."""
        page_dict = json.loads(response.body)
        for course_item in page_dict["data"]:
            item = CNweikeItem()
            item['title'] = course_item['fdName']
            item['author'] = course_item['fdUser']
            item['play'] = course_item['fdPlay']
            item['time'] = course_item['fdCreate']
            item['vote'] = course_item['voteNum']
            item['collection'] = course_item['collectnum']
            item['comment'] = course_item['commnets']
            yield item
        if self.offset < 45:
            self.offset += 1
            # After each page is processed, advance the page counter, build the
            # next page's URL and let self.parse handle its Response.
            yield scrapy.Request(self.url + str(self.offset), callback = self.parse)
| StarcoderdataPython |
25653 | <reponame>ejkim1996/Unity-JSON-Manager<filename>JSONFormatter.py
import json
from tkinter import Tk
from tkinter.filedialog import askopenfilename
# Python script that allows user to select JSON file using TKinter and format it properly.
root = Tk()
filename = askopenfilename()
root.destroy() # Close the window
read = open(filename, 'r')
parsed = json.load(read)
write = open(filename, 'w')
newstr = json.dumps(parsed, indent = 3, sort_keys =True)
write.write(newstr) # Overwrite the old unformatted json file
read.close()
write.close()
| StarcoderdataPython |
52908 | <reponame>r3fang/MERlin<filename>merfishdecoder/util/imagereader.py
import hashlib
import numpy as np
import re
import tifffile
from typing import List
from merfishdecoder.util import dataportal
# The following code is adopted from github.com/ZhuangLab/storm-analysis and
# is subject to the following license:
#
# The MIT License
#
# Copyright (c) 2013 Zhuang Lab, Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
def infer_reader(filePortal: dataportal.FilePortal, verbose: bool = False):
    """Return a movie reader appropriate for *filePortal*, chosen by the
    file extension (.dax or .tif/.tiff). Raises IOError otherwise."""
    extension = filePortal.get_file_extension()
    if extension == '.dax':
        return DaxReader(filePortal, verbose=verbose)
    if extension in ('.tif', '.tiff'):
        if not isinstance(filePortal, dataportal.LocalFilePortal):
            # TODO implement tif reading from s3/gcloud
            raise IOError('Loading tiff files from %s is not yet implemented'
                          % type(filePortal))
        return TifReader(filePortal._fileName, verbose=verbose)
    raise IOError(
        "only .dax and .tif are supported (case sensitive..)")
class Reader(object):
    """
    The superclass containing those functions that
    are common to reading a STORM movie file.

    Subclasses should implement:
     1. __init__(self, filename, verbose = False)
        This function should open the file and extract the
        various key bits of meta-data such as the size in XY
        and the length of the movie.

     2. loadAFrame(self, frame_number)
        Load the requested frame and return it as np array.
    """

    def __init__(self, filename, verbose=False):
        super(Reader, self).__init__()
        self.image_height = 0
        self.image_width = 0
        self.number_frames = 0
        self.stage_x = 0
        self.stage_y = 0
        self.filename = filename
        self.fileptr = None
        self.verbose = verbose

    def __del__(self):
        self.close()

    def __enter__(self):
        return self

    def __exit__(self, etype, value, traceback):
        self.close()

    def average_frames(self, start=None, end=None):
        """
        Average multiple frames in a movie.
        """
        length = 0
        # np.float was a deprecated alias of the builtin float and was
        # removed in NumPy 1.20+; using float keeps identical behavior.
        average = np.zeros((self.image_height, self.image_width),
                           float)
        for [i, frame] in self.frame_iterator(start, end):
            if self.verbose and ((i % 10) == 0):
                print(" processing frame:", i, " of", self.number_frames)
            length += 1
            average += frame

        if length > 0:
            average = average / float(length)

        return average

    def close(self):
        # Idempotent; safe to call repeatedly (also called from __del__).
        if self.fileptr is not None:
            self.fileptr.close()
            self.fileptr = None

    def film_filename(self):
        """
        Returns the film name.
        """
        return self.filename

    def film_size(self):
        """
        Returns the film size.
        """
        return [self.image_width, self.image_height, self.number_frames]

    def film_location(self):
        """
        Returns the picture x,y location, if available.
        """
        if hasattr(self, "stage_x"):
            return [self.stage_x, self.stage_y]
        else:
            return [0.0, 0.0]

    def film_scale(self):
        """
        Returns the scale used to display the film when
        the picture was taken.
        """
        if hasattr(self, "scalemin") and hasattr(self, "scalemax"):
            return [self.scalemin, self.scalemax]
        else:
            return [100, 2000]

    def frame_iterator(self, start=None, end=None):
        """
        Iterator for going through the frames of a movie.
        """
        if start is None:
            start = 0
        if end is None:
            end = self.number_frames

        for i in range(start, end):
            yield [i, self.load_frame(i)]

    def hash_ID(self):
        """
        A (hopefully) unique string that identifies this movie.
        """
        # ndarray.tostring() was deprecated and later removed; tobytes()
        # returns the same raw bytes.
        return hashlib.md5(self.load_frame(0).tobytes()).hexdigest()

    def load_frame(self, frame_number):
        # Base implementation only validates the frame index; subclasses
        # perform the actual read.
        assert frame_number >= 0, \
            "Frame_number must be greater than or equal to 0, it is "\
            + str(frame_number)
        assert frame_number < self.number_frames, \
            "Frame number must be less than " + str(self.number_frames)

    def lock_target(self):
        """
        Returns the film focus lock target.
        """
        # hasattr(self, "lock_target") is always True because this method
        # itself has that name, so the original could return a bound method
        # when the metadata never set the attribute. Return the attribute
        # only when it is a plain value (e.g. the float set by DaxReader).
        value = getattr(self, "lock_target")
        if not callable(value):
            return value
        return 0.0
class DaxReader(Reader):
    """Reader for the Zhuang-lab custom .dax movie format.

    Pixel data lives in the .dax file as raw 16-bit values; metadata
    (frame size, frame count, endianness, stage position, ...) comes
    from a sibling .inf text file.
    """

    def __init__(self, filePortal: dataportal.FilePortal,
                 verbose: bool = False):
        super(DaxReader, self).__init__(
            filePortal.get_file_name(), verbose=verbose)
        self._filePortal = filePortal
        # Metadata is stored next to the movie in a .inf sidecar file.
        metadataFile = filePortal.get_sibling_with_extension('.inf')
        self._parse_inf(metadataFile.read_as_text().splitlines())

    def close(self):
        self._filePortal.close()

    def _parse_inf(self, inf_lines: List[str]) -> None:
        """Populate the movie metadata from the lines of the .inf file."""
        size_re = re.compile(r'frame dimensions = ([\d]+) x ([\d]+)')
        length_re = re.compile(r'number of frames = ([\d]+)')
        endian_re = re.compile(r' (big|little) endian')
        stagex_re = re.compile(r'Stage X = ([\d.\-]+)')
        stagey_re = re.compile(r'Stage Y = ([\d.\-]+)')
        lock_target_re = re.compile(r'Lock Target = ([\d.\-]+)')
        scalemax_re = re.compile(r'scalemax = ([\d.\-]+)')
        scalemin_re = re.compile(r'scalemin = ([\d.\-]+)')

        # Defaults; a warning is printed below if the size never appears.
        self.image_height = None
        self.image_width = None

        for line in inf_lines:
            # At most one pattern can match a given line, so short-circuit
            # with `continue` after each hit.
            match = size_re.match(line)
            if match:
                self.image_width = int(match.group(1))
                self.image_height = int(match.group(2))
                continue
            match = length_re.match(line)
            if match:
                self.number_frames = int(match.group(1))
                continue
            match = endian_re.search(line)
            if match:
                self.bigendian = 1 if match.group(1) == "big" else 0
                continue
            match = stagex_re.match(line)
            if match:
                self.stage_x = float(match.group(1))
                continue
            match = stagey_re.match(line)
            if match:
                self.stage_y = float(match.group(1))
                continue
            match = lock_target_re.match(line)
            if match:
                self.lock_target = float(match.group(1))
                continue
            match = scalemax_re.match(line)
            if match:
                self.scalemax = int(match.group(1))
                continue
            match = scalemin_re.match(line)
            if match:
                self.scalemin = int(match.group(1))

        # Fall back to a plausible size, but warn that it could not be
        # determined from the inf file.
        if not self.image_height:
            print("Could not determine image size, assuming 256x256.")
            self.image_height = 256
            self.image_width = 256

    def load_frame(self, frame_number):
        """Read frame *frame_number* and return it as a 2D uint16 array of
        shape (image_height, image_width)."""
        super(DaxReader, self).load_frame(frame_number)

        pixelsPerFrame = self.image_height * self.image_width
        startByte = 2 * pixelsPerFrame * frame_number
        endByte = startByte + 2 * pixelsPerFrame

        # Pixels are 16-bit; the byte order comes from the .inf file.
        # NOTE(review): assumes the .inf file contained an endian line
        # (self.bigendian unset otherwise) — same behavior as before.
        pixelType = np.dtype('uint16')
        if self.bigendian:
            pixelType = pixelType.newbyteorder('>')

        rawFrame = np.frombuffer(
            self._filePortal.read_file_bytes(startByte, endByte),
            dtype=pixelType)
        return np.reshape(rawFrame,
                          [self.image_height, self.image_width])
class TifReader(Reader):
    """
    TIF reader class.

    This is supposed to handle the following:

    1. A normal Tiff file with one frame/image per page.
    2. Tiff files with multiple frames on a single page.
    3. Tiff files with multiple frames on multiple pages.
    """

    def __init__(self, filename, verbose=False):
        super(TifReader, self).__init__(filename, verbose)

        # Cache of the most recently loaded page's pixel data, used to
        # avoid re-reading a multi-frame page for every frame on it.
        self.page_data = None
        self.page_number = -1

        # Save the filename
        self.fileptr = tifffile.TiffFile(filename)
        number_pages = len(self.fileptr.pages)

        # Single page Tiff file, which might be a "ImageJ Tiff"
        # with many frames on a page.
        if number_pages == 1:

            # Determines the size without loading the entire file.
            isize = self.fileptr.series[0].shape
            # Check if this is actually just a single frame tiff, if
            # it is we'll just load it into memory.
            if len(isize) == 2:
                self.frames_per_page = 1
                self.number_frames = 1
                self.image_height = isize[0]
                self.image_width = isize[1]
                self.page_data = self.fileptr.asarray()

            # Otherwise we'll memmap it in case it is really large.
            else:
                self.frames_per_page = isize[0]
                self.number_frames = isize[0]
                self.image_height = isize[1]
                self.image_width = isize[2]
                self.page_data = self.fileptr.asarray(out='memmap')

        # Multiple page Tiff file.
        #
        else:
            isize = self.fileptr.asarray(key=0).shape

            # Check for one frame per page.
            if len(isize) == 2:
                self.frames_per_page = 1
                self.number_frames = number_pages
                self.image_height = isize[0]
                self.image_width = isize[1]

            # Multiple frames per page.
            #
            # FIXME: No unit test for this kind of file.
            #
            else:
                self.frames_per_page = isize[0]
                self.number_frames = number_pages * isize[0]
                self.image_height = isize[1]
                self.image_width = isize[2]

        if self.verbose:
            print("{0:0d} frames per page, {1:0d} pages".format(
                self.frames_per_page, number_pages))

    def load_frame(self, frame_number, cast_to_int16=True):
        """Load a frame and return it as a 2D np array.

        NOTE(review): despite the parameter name, the cast below is to
        numpy.uint16 (unsigned), not int16.
        """
        super(TifReader, self).load_frame(frame_number)

        # All the data is on a single page.
        if self.number_frames == self.frames_per_page:
            if self.number_frames == 1:
                image_data = self.page_data
            else:
                image_data = self.page_data[frame_number, :, :]

        # Multiple frames of data on multiple pages.
        elif self.frames_per_page > 1:
            page = int(frame_number / self.frames_per_page)
            frame = frame_number % self.frames_per_page

            # This is an optimization for files with a large number of frames
            # per page. In this case tifffile will keep loading the entire
            # page over and over again, which really slows everything down.
            # Ideally tifffile would let us specify which frame on the page
            # we wanted.
            #
            # Since it was going to load the whole thing anyway we'll have
            # memory overflow either way, so not much we can do about that
            # except hope for small file sizes.
            #
            if page != self.page_number:
                self.page_data = self.fileptr.asarray(key=page)
                self.page_number = page
            image_data = self.page_data[frame, :, :]

        # One frame on each page.
        else:
            image_data = self.fileptr.asarray(key=frame_number)

        assert (len(
            image_data.shape) == 2), "Not a monochrome tif image! " + str(
            image_data.shape)

        if cast_to_int16:
            image_data = image_data.astype(np.uint16)

        return image_data
| StarcoderdataPython |
3233737 | <reponame>divindevaiah/e2xgrader
from traitlets import Unicode
import os
import nbformat
from .basemodel import BaseModel
class PresetModel(BaseModel):
    """Look up question and template preset notebooks that ship with the
    formgrader server extension."""

    task_preset_path = Unicode(
        os.path.join(
            os.path.dirname(__file__),
            "..",
            "server_extensions/formgrader/presets/questions/",
        )
    ).tag(config=True)
    template_preset_path = Unicode(
        os.path.join(
            os.path.dirname(__file__),
            "..",
            "server_extensions/formgrader/presets/template/",
        )
    ).tag(config=True)

    def list_presets(self, preset_path):
        """Return the sorted base names of the ``.ipynb`` presets found in
        *preset_path*, skipping checkpoint copies and non-files."""
        return sorted(
            os.path.splitext(entry)[0]
            for entry in os.listdir(preset_path)
            if ".ipynb_checkpoints" not in entry
            and os.path.isfile(os.path.join(preset_path, entry))
            and entry.endswith(".ipynb")
        )

    def get_preset(self, preset_path, preset_name):
        """Return the cells of the named preset notebook, or None when the
        notebook does not exist."""
        notebook_path = os.path.join(preset_path, "{}.ipynb".format(preset_name))
        if not os.path.isfile(notebook_path):
            return None
        return nbformat.read(notebook_path, as_version=4).cells

    def list_question_presets(self):
        """List the available question presets."""
        return self.list_presets(self.task_preset_path)

    def get_question_preset(self, preset_name):
        """Return the cells of the named question preset."""
        return self.get_preset(self.task_preset_path, preset_name)

    def list_template_presets(self):
        """List the available template presets."""
        return self.list_presets(self.template_preset_path)

    def get_template_preset(self, preset_name):
        """Return the cells of the named template preset."""
        return self.get_preset(self.template_preset_path, preset_name)
| StarcoderdataPython |
161536 | from ._anvil_designer import RowTemplate1Template
class RowTemplate1(RowTemplate1Template):
    # Anvil data-grid row template; one instance is created per row of the
    # bound data source (the row's data is available as self.item).
    def __init__(self, **properties):
        """Initialize the row and apply the designer-defined bindings."""
        # Set Form properties and Data Bindings.
        self.init_components(**properties)

        # Any code you write here will run when the form opens.
        # testing `item`
| StarcoderdataPython |
1652141 | <gh_stars>0
class RevasCalculations:
    '''Functions related to analyzing data go here.

    Placeholder: the analysis helpers have not been implemented yet.
    '''
    pass
3346561 | import itertools
import time
from collections import defaultdict
from datetime import datetime, timedelta
from typing import Callable, Dict, List, Optional, Sequence, Tuple, Union
from django.conf import settings
from django.db import connection
from django.http import HttpRequest, HttpResponse
from django.shortcuts import render
from django.template import loader
from django.utils.timezone import now as timezone_now
from jinja2.utils import Markup as mark_safe
from psycopg2.sql import SQL, Composable, Literal
from analytics.lib.counts import COUNT_STATS
from analytics.views.activity_common import (
dictfetchall,
format_date_for_activity_reports,
make_table,
realm_activity_link,
realm_stats_link,
remote_installation_stats_link,
)
from analytics.views.support import get_plan_name
from zerver.decorator import require_server_admin
from zerver.lib.request import has_request_variables
from zerver.lib.timestamp import timestamp_to_datetime
from zerver.models import Realm, UserActivityInterval, UserProfile, get_org_type_display_name
if settings.BILLING_ENABLED:
from corporate.lib.stripe import (
estimate_annual_recurring_revenue_by_realm,
get_realms_to_default_discount_dict,
)
def get_realm_day_counts() -> Dict[str, Dict[str, str]]:
    """Return, keyed by realm string_id, pre-rendered HTML table cells with
    the per-day human message counts for the last 8 days (bots and the
    zephyr_mirror / ZulipMonitoring clients are excluded)."""
    # age = whole days between the message's send date and today; 0 is today.
    query = SQL(
        """
        select
            r.string_id,
            (now()::date - date_sent::date) age,
            count(*) cnt
        from zerver_message m
        join zerver_userprofile up on up.id = m.sender_id
        join zerver_realm r on r.id = up.realm_id
        join zerver_client c on c.id = m.sending_client_id
        where
            (not up.is_bot)
        and
            date_sent > now()::date - interval '8 day'
        and
            c.name not in ('zephyr_mirror', 'ZulipMonitoring')
        group by
            r.string_id,
            age
        order by
            r.string_id,
            age
        """
    )
    cursor = connection.cursor()
    cursor.execute(query)
    rows = dictfetchall(cursor)
    cursor.close()

    # counts[string_id][age] = number of human messages sent `age` days ago.
    counts: Dict[str, Dict[int, int]] = defaultdict(dict)
    for row in rows:
        counts[row["string_id"]][row["age"]] = row["cnt"]

    result = {}
    for string_id in counts:
        # Index 0 is today (partial day); the min/max highlighting below
        # deliberately only considers the 7 complete days.
        raw_cnts = [counts[string_id].get(age, 0) for age in range(8)]
        min_cnt = min(raw_cnts[1:])
        max_cnt = max(raw_cnts[1:])

        def format_count(cnt: int, style: Optional[str] = None) -> str:
            # Closes over min_cnt/max_cnt of the current realm: lowest day
            # renders "bad", highest "good", everything else "neutral".
            if style is not None:
                good_bad = style
            elif cnt == min_cnt:
                good_bad = "bad"
            elif cnt == max_cnt:
                good_bad = "good"
            else:
                good_bad = "neutral"

            return f'<td class="number {good_bad}">{cnt}</td>'

        cnts = format_count(raw_cnts[0], "neutral") + "".join(map(format_count, raw_cnts[1:]))
        result[string_id] = dict(cnts=cnts)

    return result
def realm_summary_table(realm_minutes: Dict[str, float]) -> str:
    """Render the per-realm summary table (users, activity, revenue) as an
    HTML string.

    *realm_minutes* maps realm string_id -> minutes of user activity in the
    last 24 hours, as computed by user_activity_intervals().
    """
    now = timezone_now()

    # One row per realm, joined against several pre-aggregated analytics
    # RealmCount properties; coalesce(...) turns missing counts into 0.
    query = SQL(
        """
        SELECT
            realm.string_id,
            realm.date_created,
            realm.plan_type,
            realm.org_type,
            coalesce(wau_table.value, 0) wau_count,
            coalesce(dau_table.value, 0) dau_count,
            coalesce(user_count_table.value, 0) user_profile_count,
            coalesce(bot_count_table.value, 0) bot_count
        FROM
            zerver_realm as realm
            LEFT OUTER JOIN (
                SELECT
                    value _14day_active_humans,
                    realm_id
                from
                    analytics_realmcount
                WHERE
                    property = 'realm_active_humans::day'
                    AND end_time = %(realm_active_humans_end_time)s
            ) as _14day_active_humans_table ON realm.id = _14day_active_humans_table.realm_id
            LEFT OUTER JOIN (
                SELECT
                    value,
                    realm_id
                from
                    analytics_realmcount
                WHERE
                    property = '7day_actives::day'
                    AND end_time = %(seven_day_actives_end_time)s
            ) as wau_table ON realm.id = wau_table.realm_id
            LEFT OUTER JOIN (
                SELECT
                    value,
                    realm_id
                from
                    analytics_realmcount
                WHERE
                    property = '1day_actives::day'
                    AND end_time = %(one_day_actives_end_time)s
            ) as dau_table ON realm.id = dau_table.realm_id
            LEFT OUTER JOIN (
                SELECT
                    value,
                    realm_id
                from
                    analytics_realmcount
                WHERE
                    property = 'active_users_audit:is_bot:day'
                    AND subgroup = 'false'
                    AND end_time = %(active_users_audit_end_time)s
            ) as user_count_table ON realm.id = user_count_table.realm_id
            LEFT OUTER JOIN (
                SELECT
                    value,
                    realm_id
                from
                    analytics_realmcount
                WHERE
                    property = 'active_users_audit:is_bot:day'
                    AND subgroup = 'true'
                    AND end_time = %(active_users_audit_end_time)s
            ) as bot_count_table ON realm.id = bot_count_table.realm_id
        WHERE
            _14day_active_humans IS NOT NULL
            or realm.plan_type = 3
        ORDER BY
            dau_count DESC,
            string_id ASC
        """
    )

    # Each property's last successful fill time pins the query to the most
    # recently computed analytics snapshot.
    cursor = connection.cursor()
    cursor.execute(
        query,
        {
            "realm_active_humans_end_time": COUNT_STATS[
                "realm_active_humans::day"
            ].last_successful_fill(),
            "seven_day_actives_end_time": COUNT_STATS["7day_actives::day"].last_successful_fill(),
            "one_day_actives_end_time": COUNT_STATS["1day_actives::day"].last_successful_fill(),
            "active_users_audit_end_time": COUNT_STATS[
                "active_users_audit:is_bot:day"
            ].last_successful_fill(),
        },
    )
    rows = dictfetchall(cursor)
    cursor.close()

    # Fetch all the realm administrator users
    realm_owners: Dict[str, List[str]] = defaultdict(list)
    for up in UserProfile.objects.select_related("realm").filter(
        role=UserProfile.ROLE_REALM_OWNER,
        is_active=True,
    ):
        realm_owners[up.realm.string_id].append(up.delivery_email)

    for row in rows:
        row["date_created_day"] = row["date_created"].strftime("%Y-%m-%d")
        row["age_days"] = int((now - row["date_created"]).total_seconds() / 86400)
        # "New" realms are less than 12 weeks old.
        row["is_new"] = row["age_days"] < 12 * 7
        row["realm_owner_emails"] = ", ".join(realm_owners[row["string_id"]])

    # get messages sent per day
    counts = get_realm_day_counts()
    for row in rows:
        try:
            row["history"] = counts[row["string_id"]]["cnts"]
        except Exception:
            row["history"] = ""

    # estimate annual subscription revenue
    total_arr = 0
    if settings.BILLING_ENABLED:
        estimated_arrs = estimate_annual_recurring_revenue_by_realm()
        realms_to_default_discount = get_realms_to_default_discount_dict()

        for row in rows:
            row["plan_type_string"] = get_plan_name(row["plan_type"])

            string_id = row["string_id"]

            if string_id in estimated_arrs:
                row["arr"] = estimated_arrs[string_id]

            # effective_rate is the percentage of list price actually paid
            # after any default discount.
            if row["plan_type"] in [Realm.STANDARD, Realm.PLUS]:
                row["effective_rate"] = 100 - int(realms_to_default_discount.get(string_id, 0))
            elif row["plan_type"] == Realm.STANDARD_FREE:
                row["effective_rate"] = 0
            elif row["plan_type"] == Realm.LIMITED and string_id in realms_to_default_discount:
                row["effective_rate"] = 100 - int(realms_to_default_discount[string_id])
            else:
                row["effective_rate"] = ""

        total_arr += sum(estimated_arrs.values())

    for row in rows:
        row["org_type_string"] = get_org_type_display_name(row["org_type"])

    # augment data with realm_minutes
    total_hours = 0.0
    for row in rows:
        string_id = row["string_id"]
        minutes = realm_minutes.get(string_id, 0.0)
        hours = minutes / 60.0
        total_hours += hours
        row["hours"] = str(int(hours))
        try:
            row["hours_per_user"] = "{:.1f}".format(hours / row["dau_count"])
        except Exception:
            pass

    # formatting
    for row in rows:
        row["stats_link"] = realm_stats_link(row["string_id"])
        row["string_id"] = realm_activity_link(row["string_id"])

    # Count active sites
    def meets_goal(row: Dict[str, int]) -> bool:
        # A realm counts as an "active site" with at least 5 daily actives.
        return row["dau_count"] >= 5

    num_active_sites = len(list(filter(meets_goal, rows)))

    # create totals
    total_dau_count = 0
    total_user_profile_count = 0
    total_bot_count = 0
    total_wau_count = 0
    for row in rows:
        total_dau_count += int(row["dau_count"])
        total_user_profile_count += int(row["user_profile_count"])
        total_bot_count += int(row["bot_count"])
        total_wau_count += int(row["wau_count"])

    # Synthetic first row with the column totals.
    total_row = dict(
        string_id="Total",
        plan_type_string="",
        org_type_string="",
        effective_rate="",
        arr=total_arr,
        stats_link="",
        date_created_day="",
        realm_owner_emails="",
        dau_count=total_dau_count,
        user_profile_count=total_user_profile_count,
        bot_count=total_bot_count,
        hours=int(total_hours),
        wau_count=total_wau_count,
    )

    rows.insert(0, total_row)

    content = loader.render_to_string(
        "analytics/realm_summary_table.html",
        dict(
            rows=rows,
            num_active_sites=num_active_sites,
            utctime=now.strftime("%Y-%m-%d %H:%MZ"),
            billing_enabled=settings.BILLING_ENABLED,
        ),
    )
    return content
def user_activity_intervals() -> Tuple[mark_safe, Dict[str, float]]:
    """Compute per-user online durations over the last 24 hours.

    Returns a pair: pre-rendered HTML (a <pre> block listing durations per
    realm and user) and a dict mapping realm string_id -> total minutes of
    user activity in that realm.
    """
    day_end = timestamp_to_datetime(time.time())
    day_start = day_end - timedelta(hours=24)

    output = "Per-user online duration for the last 24 hours:\n"
    total_duration = timedelta(0)

    # Any interval overlapping the 24h window; the order_by matches the
    # groupby keys below — itertools.groupby only groups adjacent rows.
    all_intervals = (
        UserActivityInterval.objects.filter(
            end__gte=day_start,
            start__lte=day_end,
        )
        .select_related(
            "user_profile",
            "user_profile__realm",
        )
        .only(
            "start",
            "end",
            "user_profile__delivery_email",
            "user_profile__realm__string_id",
        )
        .order_by(
            "user_profile__realm__string_id",
            "user_profile__delivery_email",
        )
    )

    by_string_id = lambda row: row.user_profile.realm.string_id
    by_email = lambda row: row.user_profile.delivery_email

    realm_minutes = {}
    for string_id, realm_intervals in itertools.groupby(all_intervals, by_string_id):
        realm_duration = timedelta(0)
        output += f"<hr>{string_id}\n"
        for email, intervals in itertools.groupby(realm_intervals, by_email):
            duration = timedelta(0)
            for interval in intervals:
                # Clamp each interval to the 24h window before summing.
                start = max(day_start, interval.start)
                end = min(day_end, interval.end)
                duration += end - start

            total_duration += duration
            realm_duration += duration
            output += f" {email:<37}{duration}\n"

        realm_minutes[string_id] = realm_duration.total_seconds() / 60

    output += f"\nTotal duration: {total_duration}\n"
    output += f"\nTotal duration in minutes: {total_duration.total_seconds() / 60.}\n"
    output += f"Total duration amortized to a month: {total_duration.total_seconds() * 30. / 60.}"
    content = mark_safe("<pre>" + output + "</pre>")
    return content, realm_minutes
def ad_hoc_queries() -> List[Dict[str, str]]:
    """Run a fixed set of usage reports (mobile, desktop, integrations,
    remote servers) and return them as rendered HTML tables, each a dict
    with "title" and "content" keys."""

    def get_page(
        query: Composable, cols: Sequence[str], title: str, totals_columns: Sequence[int] = []
    ) -> Dict[str, str]:
        # Execute one report query and render it into an HTML table;
        # totals_columns lists the column indices to sum into a total row.
        cursor = connection.cursor()
        cursor.execute(query)
        rows = cursor.fetchall()
        rows = list(map(list, rows))
        cursor.close()

        def fix_rows(
            i: int, fixup_func: Union[Callable[[str], mark_safe], Callable[[datetime], str]]
        ) -> None:
            # Apply a formatting function in-place to column i of every row.
            for row in rows:
                row[i] = fixup_func(row[i])

        total_row = []
        for i, col in enumerate(cols):
            if col == "Realm":
                fix_rows(i, realm_activity_link)
            elif col in ["Last time", "Last visit"]:
                fix_rows(i, format_date_for_activity_reports)
            elif col == "Hostname":
                for row in rows:
                    row[i] = remote_installation_stats_link(row[0], row[i])
            if len(totals_columns) > 0:
                if i == 0:
                    total_row.append("Total")
                elif i in totals_columns:
                    total_row.append(str(sum(row[i] for row in rows if row[i] is not None)))
                else:
                    total_row.append("")

        if len(totals_columns) > 0:
            rows.insert(0, total_row)

        content = make_table(title, cols, rows)

        return dict(
            content=content,
            title=title,
        )

    pages = []

    ###

    # Per-user mobile app usage, by client, over the last two weeks.
    for mobile_type in ["Android", "ZulipiOS"]:
        title = f"{mobile_type} usage"

        query = SQL(
            """
            select
                realm.string_id,
                up.id user_id,
                client.name,
                sum(count) as hits,
                max(last_visit) as last_time
            from zerver_useractivity ua
            join zerver_client client on client.id = ua.client_id
            join zerver_userprofile up on up.id = ua.user_profile_id
            join zerver_realm realm on realm.id = up.realm_id
            where
                client.name like {mobile_type}
            group by string_id, up.id, client.name
            having max(last_visit) > now() - interval '2 week'
            order by string_id, up.id, client.name
            """
        ).format(
            mobile_type=Literal(mobile_type),
        )

        cols = [
            "Realm",
            "User id",
            "Name",
            "Hits",
            "Last time",
        ]

        pages.append(get_page(query, cols, title))

    ###

    title = "Desktop users"

    query = SQL(
        """
        select
            realm.string_id,
            client.name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            client.name like 'desktop%%'
        group by string_id, client.name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client.name
        """
    )

    cols = [
        "Realm",
        "Client",
        "Hits",
        "Last time",
    ]

    pages.append(get_page(query, cols, title))

    ###

    # Webhook/integration traffic grouped by realm; for incoming webhook
    # URLs the integration name is parsed out of the request path.
    title = "Integrations by realm"

    query = SQL(
        """
        select
            realm.string_id,
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by string_id, client_name
        having max(last_visit) > now() - interval '2 week'
        order by string_id, client_name
        """
    )

    cols = [
        "Realm",
        "Client",
        "Hits",
        "Last time",
    ]

    pages.append(get_page(query, cols, title))

    ###

    # Same data as the previous report, grouped by client instead of realm.
    title = "Integrations by client"

    query = SQL(
        """
        select
            case
                when query like '%%external%%' then split_part(query, '/', 5)
                else client.name
            end client_name,
            realm.string_id,
            sum(count) as hits,
            max(last_visit) as last_time
        from zerver_useractivity ua
        join zerver_client client on client.id = ua.client_id
        join zerver_userprofile up on up.id = ua.user_profile_id
        join zerver_realm realm on realm.id = up.realm_id
        where
            (query in ('send_message_backend', '/api/v1/send_message')
            and client.name not in ('Android', 'ZulipiOS')
            and client.name not like 'test: Zulip%%'
            )
        or
            query like '%%external%%'
        group by client_name, string_id
        having max(last_visit) > now() - interval '2 week'
        order by client_name, string_id
        """
    )

    cols = [
        "Client",
        "Realm",
        "Hits",
        "Last time",
    ]

    pages.append(get_page(query, cols, title))

    # Self-hosted servers reporting analytics and/or using the mobile push
    # notification service; columns 3 and 4 get a totals row.
    title = "Remote Zulip servers"

    query = SQL(
        """
        with icount as (
            select
                server_id,
                max(value) as max_value,
                max(end_time) as max_end_time
            from zilencer_remoteinstallationcount
            where
                property='active_users:is_bot:day'
                and subgroup='false'
            group by server_id
            ),
        remote_push_devices as (
            select server_id, count(distinct(user_id)) as push_user_count from zilencer_remotepushdevicetoken
            group by server_id
        )
        select
            rserver.id,
            rserver.hostname,
            rserver.contact_email,
            max_value,
            push_user_count,
            max_end_time
        from zilencer_remotezulipserver rserver
        left join icount on icount.server_id = rserver.id
        left join remote_push_devices on remote_push_devices.server_id = rserver.id
        order by max_value DESC NULLS LAST, push_user_count DESC NULLS LAST
        """
    )

    cols = [
        "ID",
        "Hostname",
        "Contact email",
        "Analytics users",
        "Mobile users",
        "Last update time",
    ]

    pages.append(get_page(query, cols, title, totals_columns=[3, 4]))

    return pages
@require_server_admin
@has_request_variables
def get_installation_activity(request: HttpRequest) -> HttpResponse:
    """Render the server-wide activity page: realm summary counts,
    per-user online durations, and the ad hoc usage reports."""
    duration_content, realm_minutes = user_activity_intervals()
    counts_content: str = realm_summary_table(realm_minutes)
    data = [
        ("Counts", counts_content),
        ("Durations", duration_content),
    ]
    data.extend((page["title"], page["content"]) for page in ad_hoc_queries())

    return render(
        request,
        "analytics/activity.html",
        context=dict(data=data, title="Activity", is_home=True),
    )
| StarcoderdataPython |
1708583 | <filename>msa_tools_old/preprocess_msa/untar.py
import tarfile
import os
def un_tar(file_path):
    """Extract the .tar archive at *file_path* into a directory named after
    the archive (base name without the .tar suffix), relative to the
    current working directory. The directory is created if missing.
    """
    # e.g. "/a/b/MSA_X_1.tar" -> "MSA_X_1"
    target_dir = os.path.splitext(os.path.basename(file_path.strip()))[0]
    # Context manager guarantees the archive is closed even if extraction
    # raises, unlike the original open()/close() pair.
    with tarfile.open(file_path) as tar:
        tar.extractall(path=target_dir)
filestr = """/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_6.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_1.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_8.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_0.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_4.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_3.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_2.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_5.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_3.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_4.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_5.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_2.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_8.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_1.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_6.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_7.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_0.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_1.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_6.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_7.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_0.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_3.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_4.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_5.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_2.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/MSA_AB384BL512_7.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_3.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_4.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_2.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_1.tar
/dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_0.tar"""
from joblib import Parallel, delayed

# Extract every archive listed in filestr concurrently, one task per path.
# 31 workers matches the number of archives, so all extractions start at once.
files = filestr.split('\n')
job_num = 31
parallel = Parallel(n_jobs=job_num, batch_size=1)
# un_tar returns None, so `data` is only useful as a completion barrier.
data = parallel(delayed(un_tar)(fold) for fold in files)
# files = """/dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_6.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_1.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_8.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_0.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_4.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_3.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_2.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB384BL512/MSA_AB384BL512_5.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_3.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_4.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_5.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_2.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_8.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_1.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_6.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_7.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA/AB256BL384/MSA_AB256BL384_0.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_1.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_6.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_7.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_0.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_3.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_4.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_5.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/AB128BL256/MSA_AB128BL256_2.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/MSA_AB384BL512_7.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_3.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_4.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_2.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_1.tar
# /dataset/ee84df8b/MSA_30T/MSA/MSA_2/BL128/MSA_BL128_0.tar"""
# for i in files.split('\n'):
# # print(i)
# fname = i.split('/')[-1][:-4]
# # tar xf MSA_BL128_4.tar -C MSA_BL128_4
# print(f"mkdir -p {fname}\ntar xf {i} -C {fname}")
| StarcoderdataPython |
3246809 | # -*- coding: utf-8 -*-
"""
==================
Benchmark Examples
==================
This submodule of benchpress consists of a broad range of benchmarks in different languages
"""
from __future__ import absolute_import
from . import util as util
| StarcoderdataPython |
124835 | """empty message
Revision ID: 09d3732eef24
Revises: <PASSWORD>
Create Date: 2020-03-12 15:13:32.832239
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '09d3732eef24'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Apply this revision: create the ``task`` scheduling table."""
    # Column/constraint list kept separate so the schema reads as a table;
    # unpacking reproduces the exact Alembic auto-generated call.
    table_parts = [
        sa.Column('id', sa.String(length=50), nullable=False),
        sa.Column('amount', sa.Numeric(precision=10, scale=2), nullable=True),
        sa.Column('trigger', sa.String(length=10), nullable=True),
        sa.Column('category', sa.String(length=100), nullable=True),
        sa.Column('type', sa.String(length=20), nullable=True),
        sa.Column('name', sa.String(length=100), nullable=True),
        sa.Column('trigger_kwargs', sa.String(length=200), nullable=True),
        sa.Column('user_id', sa.Integer(), nullable=True),
        sa.ForeignKeyConstraint(['user_id'], ['user.id'], ),
        sa.PrimaryKeyConstraint('id'),
    ]
    op.create_table('task', *table_parts)
def downgrade():
    """Revert this revision: drop the ``task`` table created by upgrade()."""
    op.drop_table('task')
| StarcoderdataPython |
85990 | <filename>scripts/SerialTransferTest/serialTransfer_loopback_test1.py
import time
from pySerialTransfer import pySerialTransfer as txfer
if __name__ == '__main__':
    # Loopback test: repeatedly pack a list, a string and a float into one
    # serial packet, send it, then read back and print the echoed values.
    print('loopback.py')
    try:
        # NOTE(review): port '/dev/ttyAMA1' is hard-coded — confirm it matches
        # the wiring on the target device.
        link = txfer.SerialTransfer('/dev/ttyAMA1')
        link.open()
        time.sleep(2)    # allow some time for the Arduino to completely reset
        while True:
            # Running byte offset into the outgoing payload buffer.
            send_size = 0
            ###################################################################
            # Send a list
            ###################################################################
            list_ = [1, 3]
            list_size = link.tx_obj(list_)
            send_size += list_size
            ###################################################################
            # Send a string — tx_obj returns the new end offset, so subtract
            # the previous offset to get this object's size in bytes.
            ###################################################################
            str_ = 'hello'
            str_size = link.tx_obj(str_, send_size) - send_size
            send_size += str_size
            ###################################################################
            # Send a float
            ###################################################################
            float_ = 5.234
            float_size = link.tx_obj(float_, send_size) - send_size
            send_size += float_size
            ###################################################################
            # Transmit all the data to send in a single packet
            ###################################################################
            link.send(send_size)
            print('Sent data, waiting for response')
            ###################################################################
            # Busy-wait for a response and report any errors while receiving
            # packets (negative status codes are error conditions).
            ###################################################################
            while not link.available():
                if link.status < 0:
                    if link.status == txfer.CRC_ERROR:
                        print('ERROR: CRC_ERROR')
                    elif link.status == txfer.PAYLOAD_ERROR:
                        print('ERROR: PAYLOAD_ERROR')
                    elif link.status == txfer.STOP_BYTE_ERROR:
                        print('ERROR: STOP_BYTE_ERROR')
                    else:
                        print('ERROR: {}'.format(link.status))
            ###################################################################
            # Parse response list (same offsets/sizes used when packing).
            ###################################################################
            rec_list_ = link.rx_obj(obj_type=type(list_),
                                    obj_byte_size=list_size,
                                    list_format='i')
            ###################################################################
            # Parse response string
            ###################################################################
            rec_str_ = link.rx_obj(obj_type=type(str_),
                                   obj_byte_size=str_size,
                                   start_pos=list_size)
            ###################################################################
            # Parse response float
            ###################################################################
            rec_float_ = link.rx_obj(obj_type=type(float_),
                                     obj_byte_size=float_size,
                                     start_pos=(list_size + str_size))
            ###################################################################
            # Display the received data
            ###################################################################
            print('SENT: {} {} {}'.format(list_, str_, float_))
            print('RCVD: {} {} {}'.format(rec_list_, rec_str_, rec_float_))
            print(' ')
    except KeyboardInterrupt:
        # Ctrl-C: close the port quietly if it was ever opened.
        try:
            link.close()
        except:
            pass
    except:
        # Any other failure: show the traceback, then still try to close.
        import traceback
        traceback.print_exc()
        try:
            link.close()
        except:
            pass
| StarcoderdataPython |
3334263 | <reponame>gismaps/PDF_Utils<gh_stars>0
'''
Module for PDF utilities.
'''
from pdf_utils import PDF
from pathlib import Path
from pdfrw import PdfReader, PdfWriter
#from pdfrw import IndirectPdfDict # for file metadata
class PDF(object):
    '''
    An object that represents a single PDF file.

    Contains utilities to solve basic problems, like reversing the
    order of the pages in a scan.

    **Note** - This 'PDF' class technically resides in a module of the
    same name. Since the pdf_utils package imports this class, you
    should skip the extra 'PDF' in the import, like this:

    ```
    from pdf_utils import PDF
    ```
    '''
    # TODO: Subset pages
    # TODO: Rotate pages
    # TODO: Catch PermissionError exception for locked files?
    # TODO: Get/set metadata (see pdfrw's PdfDict on reader.Info)
    # TODO: Add pages property & save method? By design these methods are
    #       self-contained and save immediately; chaining operations and
    #       saving once could be more efficient at scale.

    def __init__(self, file_path: str):
        '''
        Args:
            file_path (str): The path to the PDF file.

        Raises:
            OSError: If the file_path parameter does not exist.

        Example:
            from pdf_utils import PDF
            my_pdf = PDF('/path/to/file.pdf')
        '''
        self.__path = None
        self.path = file_path  # setter validates existence

    @property
    def path(self) -> str:
        '''
        The path to the PDF file.

        Raises:
            OSError: When setting, if the file is not found at the path.
        '''
        return self.__path

    @path.setter
    def path(self, file_path: str):
        '''
        Set the file path after verifying that it exists on disk.

        Raises:
            OSError: If the path set does not exist.
        '''
        if not Path(file_path).exists():
            raise OSError("Input path does not exist: {}".format(file_path))
        self.__path = file_path

    @property
    def page_count(self) -> int:
        '''Page count for the PDF document.'''
        return len(PdfReader(self.path).pages)

    def reverse(self, out_path: str = None) -> None:
        '''
        Reverse the page order (from last to first) of the PDF.

        Note:
            With default settings this overwrites this object's PDF file.

        Args:
            out_path: Optional output path; when omitted, this PDF's own
                file is overwritten.
        '''
        if not out_path:
            out_path = self.path
        outdata = PdfWriter(out_path)
        # reversed() replaces the old manual descending range() loop.
        for page in reversed(PdfReader(self.path).pages):
            outdata.addpage(page)
        outdata.write()

    def interleave(self, pdf: 'PDF', first: bool = True,
                   out_path: str = None) -> None:
        '''
        Interleave the pages from another PDF with this one.

        Use case is a two-sided paper doc scanned to separate PDFs for
        the front and back. Notes:
          - If one PDF is longer than the other, the remaining pages of the
            longer document are appended consecutively at the end.
          - By default the output overwrites this PDF object's file.

        Args:
            pdf: Another ``pdf_utils.PDF`` object with pages to interleave
                with this one.
            first: Optional bool, default=True. Should this object's pages
                come first (pages 1, 3, 5...) or second (pages 2, 4, 6...)?
            out_path: Optional output path; when omitted, this PDF's own
                file is overwritten.

        Example:
            my_pdf.interleave(PDF('./even_pages.pdf'), out_path='./combo.pdf')
        '''
        # BUGFIX: pdfrw's PdfWriter method is ``addpage`` (all lowercase);
        # the previous ``addPage`` calls raised AttributeError at runtime.
        pdf1_path = self.path   # pages at odd positions
        pdf2_path = pdf.path    # pages at even positions
        if not first:  # incoming file comes first, swap the roles
            pdf1_path, pdf2_path = pdf2_path, pdf1_path

        pdf1_pages = PdfReader(pdf1_path).pages
        pdf2_pages = PdfReader(pdf2_path).pages

        if not out_path:
            out_path = self.path
        outdata = PdfWriter(out_path)

        for i, page in enumerate(pdf1_pages):
            outdata.addpage(page)
            if i < len(pdf2_pages):
                outdata.addpage(pdf2_pages[i])
        # Out of pages in pdf1 but more remain in pdf2: append the tail.
        if len(pdf2_pages) > len(pdf1_pages):
            for page in pdf2_pages[len(pdf1_pages):]:
                outdata.addpage(page)
        outdata.write()
| StarcoderdataPython |
163548 |
#import director
from director import cameraview
from director import transformUtils
from director import visualization as vis
from director import objectmodel as om
from director.ikparameters import IkParameters
from director.ikplanner import ConstraintSet
from director import polarisplatformplanner
from director import robotstate
from director import segmentation
from director import sitstandplanner
from director.timercallback import TimerCallback
from director import visualization as vis
from director import planplayback
from director import lcmUtils
from director.uuidutil import newUUID
import os
import functools
import numpy as np
import scipy.io
import vtkAll as vtk
import bot_core as lcmbotcore
from director.tasks.taskuserpanel import TaskUserPanel
import director.tasks.robottasks as rt
from director import filterUtils
from director import ioUtils
import director
from numpy import array
class CourseModel(object):
    """Static model of the rehearsal course: a reference point cloud plus a
    set of named walking-target frames, all rigidly synced to the cloud."""

    # (attribute name, frame label, xyz position, wxyz quaternion) for each
    # walking-target frame, expressed relative to the point-cloud origin.
    _FRAME_SPECS = [
        ('valveWalkFrame', 'ValveWalk',
         array([-4.39364111, -0.51507392, -0.73125563]),
         array([0.93821625, 0., 0., -0.34604951])),
        ('drillPreWalkFrame', 'DrillPreWalk',
         array([-3.31840048, 0.36408685, -0.67413123]),
         array([0.93449475, 0., 0., -0.35597691])),
        ('drillWalkFrame', 'DrillWalk',
         array([-2.24553758, -0.52990939, -0.73255338]),
         array([0.93697004, 0., 0., -0.34940972])),
        ('drillWallWalkFarthestSafeFrame', 'DrillWallWalkFarthestSafe',
         array([-2.51306835, -0.92994004, -0.74173541]),
         array([-0.40456572, 0., 0., 0.91450893])),
        ('drillWallWalkBackFrame', 'DrillWallWalkBack',
         array([-2.5314524, -0.27401861, -0.71302976]),
         array([0.98691519, 0., 0., -0.16124022])),
        ('surprisePreWalkFrame', 'SurprisePreWalk',
         array([-1.16122318, 0.04723203, -0.67493468]),
         array([0.93163145, 0., 0., -0.36340451])),
        ('surpriseWalkFrame', 'SurpriseWalk',
         array([-0.5176186, -1.00151554, -0.70650799]),
         array([0.84226497, 0., 0., -0.53906374])),
        ('surpriseWalkBackFrame', 'SurpriseWalkBack',
         array([-0.69100097, -0.43713269, -0.68495922]),
         array([0.98625075, 0., 0., -0.16525575])),
        ('terrainPreWalkFrame', 'TerrainPreWalk',
         array([0.65827322, -0.08028796, -0.77370834]),
         array([0.94399977, 0., 0., -0.3299461])),
        ('stairsPreWalkFrame', 'StairsPreWalk',
         array([5.47126425, -0.09790393, -0.70504679]),
         array([1., 0., 0., 0.])),
    ]

    def __init__(self):
        pose = transformUtils.poseFromTransform(vtk.vtkTransform())
        # Load the reference point cloud and show it as a movable object.
        self.pointcloud = ioUtils.readPolyData(director.getDRCBaseDir() + '/software/models/rehearsal_pointcloud.vtp')
        self.pointcloudPD = vis.showPolyData(self.pointcloud, 'coursemodel', parent=None)
        segmentation.makeMovable(self.pointcloudPD, transformUtils.transformFromPose(array([0, 0, 0]), array([1.0, 0., 0., 0.0])))
        self.originFrame = self.pointcloudPD.getChildFrame()

        # Create every walking-target frame from the spec table, binding each
        # as an attribute so existing callers (self.valveWalkFrame, ...) work.
        for attr_name, label, position, quaternion in self._FRAME_SPECS:
            frame_transform = transformUtils.transformFromPose(position, quaternion)
            setattr(self, attr_name,
                    vis.updateFrame(frame_transform, label, scale=0.2, visible=True, parent=self.pointcloudPD))

        # Keep every frame rigidly attached to the point cloud's frame; the
        # target frames only receive updates (ignoreIncoming=True).
        self.frameSync = vis.FrameSync()
        self.frameSync.addFrame(self.originFrame)
        self.frameSync.addFrame(self.pointcloudPD.getChildFrame(), ignoreIncoming=True)
        for attr_name, _, _, _ in self._FRAME_SPECS:
            self.frameSync.addFrame(getattr(self, attr_name), ignoreIncoming=True)
| StarcoderdataPython |
3283570 | <reponame>NLeSC/eEcology-Annotation-WS
# Copyright 2013 Netherlands eScience Center
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from iso8601 import parse_date
from pyramid.view import view_config
logger = logging.getLogger(__package__)
@view_config(route_name="home", renderer="home.mako")
def home(request):
    """Render the home page; the template needs no variables."""
    return dict()
@view_config(route_name='trackers', renderer='json')
def trackers(request):
    """JSON view: list of tracker identifiers the user has access to."""
    cursor = request.db.cursor()
    return {'trackers': fetch_trackers(cursor)}
def fetch_trackers(cur):
    """Return the ids (device_info_serial) of trackers that have a track
    session, ordered ascending, as rows from the cursor."""
    query = """
    SELECT DISTINCT device_info_serial as id
    FROM gps.ee_tracker_limited
    JOIN gps.ee_track_session_limited USING (device_info_serial)
    ORDER BY device_info_serial
    """
    cur.execute(query)
    return [row for row in cur]
def fetch_track(cur, tracker_id, start, end, freq=20.0):
    """Fetch GPS fixes (with derived speed/direction columns) and the
    matching accelerometer bursts for one tracker in a time window.

    Args:
        cur: open database cursor.
        tracker_id: device_info_serial of the tracker.
        start, end: ISO-8601 timestamp strings bounding the window (inclusive).
        freq: accelerometer sampling frequency in Hz, used to convert the
            sample index into seconds. Generalised from the previously
            hard-coded 20.0 (the default keeps old behaviour).

    Returns:
        The cursor, positioned over the result rows ordered by date_time.
    """
    sql2 = """
    SELECT
    timezone('zulu', date_time) date_time
    , round(s.latitude::numeric, 5) lat
    , round(s.longitude::numeric, 5) lon
    , s.altitude
    , s.altitude altitude_asl
    , s.altitude - s.altitude_agl AS ground_elevation
    , s.temperature
    , round(s.speed_2d::numeric, 5) AS speed
    , round((
    ST_Length_Spheroid(ST_MakeLine(location, lag(location) over (order by device_info_serial, date_time)), 'SPHEROID["WGS 84",6378137,298.257223563]')
    /
    EXTRACT(EPOCH FROM (date_time - lag(date_time) over (order by device_info_serial, date_time)))
    )::numeric, 5) as tspeed
    , round(s.direction, 2) AS idirection
    , round(degrees(ST_Azimuth(lag(location) over (order by device_info_serial, date_time), location))::numeric, 2) tdirection
    , round(mod(s.direction - lag(s.direction) over (order by device_info_serial, date_time), 180.0), 2) AS delta_idirection
    , round(degrees(
    ST_Azimuth(location, lead(location) over (order by device_info_serial, date_time)) -
    ST_Azimuth(lag(location) over (order by device_info_serial, date_time), location)
    )::numeric %% 180.0, 2) AS delta_tdirection
    , aa.time_acceleration
    , aa.x_acceleration, aa.y_acceleration, aa.z_acceleration
    FROM
    gps.ee_tracking_speed_limited s
    LEFT JOIN
    (
    SELECT device_info_serial, date_time
    , array_agg(round(a.index/%s, 4) ORDER BY date_time, index) time_acceleration
    , array_agg(round(((x_acceleration-x_o)/x_s)::numeric, 4) ORDER BY date_time, index) x_acceleration
    , array_agg(round(((y_acceleration-y_o)/y_s)::numeric, 4) ORDER BY date_time, index) y_acceleration
    , array_agg(round(((z_acceleration-z_o)/z_s)::numeric, 4) ORDER BY date_time, index) z_acceleration
    FROM gps.ee_acceleration_limited a
    JOIN (
    SELECT
    DISTINCT device_info_serial
    , x_o, x_s
    , y_o, y_s
    , z_o, z_s
    FROM gps.ee_tracker_limited d
    ) tu USING (device_info_serial)
    WHERE
    device_info_serial = %s AND date_time BETWEEN %s AND %s
    GROUP BY device_info_serial, date_time
    ) aa USING (device_info_serial, date_time)
    WHERE
    device_info_serial = %s AND date_time BETWEEN %s AND %s
    AND userflag != 1 AND longitude IS NOT NULL
    ORDER BY date_time
    """
    logger.debug('Fetching track data for id:{0}, start:{1}, end:{2}'.format(tracker_id, start, end))
    # The window bounds appear twice: once for the acceleration sub-query and
    # once for the outer GPS query.
    cur.execute(sql2, (freq, tracker_id, start, end, tracker_id, start, end))
    return cur
@view_config(route_name='tracker', renderer='json')
def tracker(request):
    """JSON view: GPS + acceleration samples of one tracker in a time range."""
    cursor = request.db.cursor()
    device_id = int(request.matchdict['id'])
    begin = parse_date(request.matchdict['start']).isoformat()
    finish = parse_date(request.matchdict['end']).isoformat()
    return fetch_track(cursor, device_id, begin, finish)
| StarcoderdataPython |
3300078 | <gh_stars>10-100
class Facade:
    # Empty placeholder: no facade behaviour has been implemented yet.
    pass
84207 | import discord
from discord.ext import commands
async def fetchUser(client, user=None):
    """Resolve *user* to a Discord user object via the bot's API.

    Args:
        client: the running ``commands.Bot`` instance.
        user: a user id (``discord.User``-convertible) or None. When None,
            the bot's own account is fetched.

    Returns:
        The fetched ``discord.User``; when the lookup of an explicit *user*
        fails, the original argument is returned unchanged (best-effort).
    """
    if user is None:  # BUGFIX: was `== None`; identity check is correct
        return await client.fetch_user(client.user.id)
    try:
        # May raise NotFound/HTTPException for ids Discord does not know.
        return await client.fetch_user(user)
    except Exception:  # BUGFIX: was a bare `except:` (also caught SystemExit)
        # Preserve historical behaviour: fall back to the raw value.
        return user
async def pretRes(msg, content, color="0x000000", title=None, author=None):
    """Reply to *msg*, as an embed when the bot may send embeds here.

    Args:
        msg: the ``commands.Context`` (or message) being answered.
        content: reply text / embed description.
        color: embed colour as an int or a hex string such as "0x000000".
        title: currently unused (kept for interface compatibility).
        author: optional (name, icon_url) pair shown as the embed author.
    """
    can_embed = msg.author.permissions_in(msg.channel).embed_links
    # Normalise the colour: the historical default is the *string*
    # "0x000000", which discord.Embed would reject, so hex strings are
    # converted to ints here.
    if color is None:
        color = 0x000000
    elif isinstance(color, str):
        color = int(color, 16)
    if can_embed:
        # BUGFIX: this branch was dead code (`if(False):`) even though the
        # embed_links permission above was computed for exactly this purpose.
        embed = discord.Embed(description=content, color=color)
        if author is not None:
            embed.set_author(name=author[0], icon_url=author[1])
        await msg.reply(embed=embed)
    else:
        await msg.reply(content, mention_author=False)
3337805 | <filename>tayne/tayne.py
"""
A script that identified bots mf
"""
# coding: utf-8
# !/usr/bin/python3
# Author: <NAME>
# License: Please see the license file in this repo
# First Create Date: 28-June-2018
# Requirements: minimal. check requirements.txt and run pip/pip3 install -f requirements.txt
# imports section
import base64
import requests
import argparse
import markovify
from pprint import pprint
from statistics import mean
import markovgen
import pandas as pd
from twitter_scraper import get_tweets
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
from bs4 import BeautifulSoup
import datetime
# globals
__version__ = "0.1.0"
logo = """
TAYNE TAYNE TAYNE TTTTT AAAA YYY NN EE
┌ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─ ─
│
│ ***** ███
***** ***** │██
│ ■■■■■■■ ■ ■ *** *** ███
■ ■ ■ ■ ■ ■■■■ * ***** ** │██
│ ■ ■ ■■■■ ■■■■ ■ ***** ******* ███
■ ■■ ■■ ■ ■■ ■■■■■ ***** ****** * │██
│ ■ ■ ■■ ■ ■ ■ ***** * ███
■ ■ ■■■■ **** ** │██
│ ** ***** * ███
** ****** * │██
│ ** ** ███
** ** │██
│ ******* ███
│██
└ ─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─█─███
████████████████████████████████████████████████████
████████████████████████████████████████████████████
"""
itunes_url_endpoint = 'https://itunes.apple.com/search?term={}&country=us&entity={}'
that_url='aHR0cHM6Ly9tb2JpbGUudHdpdHRlci5jb20='
# arguments
# Command-line interface.
# NOTE(review): this runs at module import time, so merely importing this
# module parses sys.argv as a side effect — consider moving into main().
parser = argparse.ArgumentParser(description='collects and processes itunes data including ibook, application, and other store items with metadata, run "python3 test_itunize.py', formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-u', '--username', help='username to search', default='jamescampbell', required=False)
parser.add_argument('-n', '--no-logo', dest='logo_off', help='disables printing logo', action='store_true', default=False)
parser.add_argument('-t', '--test', help='prints out james campbell data', action='store_true', default=False)
parser.add_argument('-v', '--verbose', help='print more stuff', action='store_true')
args = parser.parse_args()
# functions section
def get_content(username):
    """Fetch up to 25 pages of tweets for *username*.

    Returns a list of tweet dicts, or the historical two-item error list
    when the username is empty/falsy.
    """
    if not username:
        return ['error', 'no username set properly']
    return list(get_tweets(username, pages=25))
def get_mean(jsondata):
    """Average the 'price' field across an iTunes API result set.

    Results without a 'price' key are skipped. (Uses statistics.mean —
    the old docstring incorrectly claimed numpy.)

    Args:
        jsondata: parsed API response with a 'results' list of dicts.

    Returns:
        float: mean of all available prices.

    Raises:
        ValueError: if no result carries a 'price' (previously this crashed
            with an IndexError/KeyError/StatisticsError depending on shape).
    """
    prices = [float(item['price']) for item in jsondata['results'] if 'price' in item]
    if not prices:
        raise ValueError("no 'price' field in any result")
    return mean(prices)
def get_other_mean(data):
    """Mean of a numeric sequence; None when the sequence is empty.

    BUGFIX: a single-element sequence used to fall through the
    `len(data) > 1` check and implicitly return None, which crashed callers
    doing arithmetic on the result (e.g. `get_other_mean(x) / 60` in main);
    it now returns that element's value.
    """
    if not data:
        return None
    return mean(data)
# main section
def main():
    """Main function that runs everything: prints the logo, scrapes tweets
    (or the hard-coded demo account), and optionally reports tweet-gap
    statistics in verbose mode."""
    if not args.logo_off:  # print or not print logo
        print(logo)
    if args.test:
        # Hard-coded demo: build a Markov model from @jamescampbell's tweets.
        #r = requests.get(base64.b64decode(that_url).decode('utf8')+'/jamescampbell')
        tweets = '\n'.join([t['text'] for t in get_tweets('jamescampbell', pages=20)])
        if args.verbose:
            text_model = markovify.Text(tweets)
            print(text_model.make_short_sentence(140))
            exit()
        #print(r.text)
        exit()
    else:
        tweets = get_content(args.username)
        if args.verbose:
            # Collect the gap (seconds) between consecutive tweets.
            tweetbreak = []
            print(f"Total found: {len(tweets)}")
            print(f"First tweet {tweets[0]['time']}")
            for idx, tweet in enumerate(tweets):
                timeone = tweet['time']
                try:
                    # The last tweet has no successor; fall back to its own
                    # timestamp (gap of zero).
                    timetwo = (tweets[idx+1]['time'])
                except:
                    timetwo = tweet['time']
                #print(timetwo)
                tdelta = timeone - timetwo
                #print(tdelta.total_seconds())
                tweetbreak.append(tdelta.total_seconds())
            # print(tweetbreak[0])
            print("Average time between tweets: {} minutes".format(get_other_mean(tweetbreak)/60))
            exit()
    # ------------------------------------------------------------------
    # NOTE(review): everything below looks like leftover iTunes-search code.
    # `request_response` is never defined (NameError if reached),
    # `args.print_me` / `args.output_table` are not declared by the argument
    # parser, and `args['search_term']` subscripts an argparse Namespace
    # (TypeError) — presumably it should read `args.search_term`.
    # Confirm intent before relying on any of this.
    # ------------------------------------------------------------------
    jsondata = request_response.json()
    # [trend['name'] for trend in the_data[0]['trends']]
    print()
    if args.print_me:  # if we are running a test or not
        print('json data:')
        pprint(jsondata)
        print('fields available:')
        for k, v in jsondata['results'][0].items():
            print(k)
        exit('thanks for trying')
    average_price = get_mean(jsondata)
    print("The average price of the \033[94m{0}\033[0m items matching search term\033[92m {1}\033[0m: ${2:.2f}".format(jsondata['resultCount'], args['search_term'], average_price))
    if args.output_table:  # if we want to output a table instead of json
        print(pd.DataFrame(jsondata['results'], columns=["price", "artistName", "trackName"]))
    else:
        with open('{}.json'.format(args['search_term']), 'w') as f:
            f.write(''.join(str(x) for x in [request_response.json()]))
        exit('file saved as {}.json'.format(args['search_term']))
if __name__ == "__main__":
main() | StarcoderdataPython |
1664007 | <reponame>apmcleod/harmonic-inference
"""Models that generate probability distributions over chord classifications of a given input."""
from abc import ABC, abstractmethod
from typing import Any, Collection, Dict, List, Tuple, Union
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from torch.utils.data import DataLoader
from tqdm import tqdm
import pytorch_lightning as pl
from harmonic_inference.data.chord import get_chord_vector_length
from harmonic_inference.data.data_types import ChordType, PieceType, PitchType
from harmonic_inference.data.datasets import ChordClassificationDataset
from harmonic_inference.data.note import get_note_vector_length
class ChordClassifierModel(pl.LightningModule, ABC):
    """
    The base type for all Chord Classifier Models, which take as input sets of frames from Pieces,
    and output chord probabilities for them.
    """

    def __init__(
        self,
        input_type: PieceType,
        input_pitch: PitchType,
        output_pitch: PitchType,
        reduction: Dict[ChordType, ChordType],
        use_inversions: bool,
        learning_rate: float,
        transposition_range: Union[List[int], Tuple[int, int]],
    ):
        """
        Create a new base ChordClassifierModel with the given input and output formats.

        Parameters
        ----------
        input_type : PieceType
            The type of piece that the input data is coming from.
        input_pitch : PitchType
            What pitch type the model is expecting for notes.
        output_pitch : PitchType
            The pitch type to use for outputs of this model.
        reduction : Dict[ChordType, ChordType]
            The reduction used for the output chord types.
        use_inversions : bool
            Whether to use different inversions as different chords in the output.
        learning_rate : float
            The learning rate.
        transposition_range : Union[List[int], Tuple[int, int]]
            Minimum and maximum bounds by which to transpose each note and chord of the
            dataset. Each __getitem__ call will return every possible transposition in this
            (min, max) range, inclusive on each side. The transpositions are measured in
            whatever PitchType is used in the dataset.
        """
        super().__init__()
        # Stored as attributes so checkpoint loading / dataset construction
        # can read the model's expected formats back.
        self.INPUT_TYPE = input_type
        self.INPUT_PITCH = input_pitch
        self.OUTPUT_PITCH = output_pitch
        self.reduction = reduction
        self.use_inversions = use_inversions
        self.transposition_range = transposition_range
        self.lr = learning_rate

    def get_dataset_kwargs(self) -> Dict[str, Any]:
        """
        Get a kwargs dict that can be used to create a dataset for this model with
        the correct parameters.

        Returns
        -------
        dataset_kwargs : Dict[str, Any]
            A keyword args dict that can be used to create a dataset for this model with
            the correct parameters.
        """
        return {
            "reduction": self.reduction,
            "use_inversions": self.use_inversions,
            "transposition_range": self.transposition_range,
        }

    def get_output(self, batch):
        # Inference helper: forward pass followed by a softmax over the
        # chord-class dimension, yielding per-chord probabilities.
        notes = batch["inputs"].float()
        notes_lengths = batch["input_lengths"]
        outputs = self(notes, notes_lengths)
        return F.softmax(outputs, dim=-1)

    def training_step(self, batch, batch_idx):
        notes = batch["inputs"].float()
        notes_lengths = batch["input_lengths"]
        targets = batch["targets"].long()
        outputs = self(notes, notes_lengths)
        # Targets of -1 are padding and are excluded from the loss.
        loss = F.cross_entropy(outputs, targets, ignore_index=-1)
        self.log("train_loss", loss)
        return loss

    def validation_step(self, batch, batch_idx):
        notes = batch["inputs"].float()
        notes_lengths = batch["input_lengths"]
        targets = batch["targets"].long()
        outputs = self(notes, notes_lengths)
        # Drop padded (-1) targets before computing metrics so accuracy is
        # measured over real chords only.
        mask = targets != -1
        outputs = outputs[mask]
        targets = targets[mask]
        if len(targets) > 0:
            acc = 100 * (outputs.argmax(-1) == targets).sum().float() / len(targets)
            loss = F.cross_entropy(outputs, targets, ignore_index=-1)
            self.log("val_loss", loss)
            self.log("val_acc", acc)

    def evaluate(self, dataset: ChordClassificationDataset):
        # Batch-weighted accuracy/loss over the whole dataset.
        # NOTE(review): runs without torch.no_grad() or self.eval() — confirm
        # callers wrap this appropriately before relying on the numbers.
        dl = DataLoader(dataset, batch_size=dataset.valid_batch_size)
        total = 0
        total_loss = 0
        total_acc = 0
        for batch in tqdm(dl, desc="Evaluating CCM"):
            notes = batch["inputs"].float()
            notes_lengths = batch["input_lengths"]
            targets = batch["targets"].long()
            batch_count = len(targets)
            outputs = self(notes, notes_lengths)
            loss = F.cross_entropy(outputs, targets)
            acc = 100 * (outputs.argmax(-1) == targets).sum().float() / len(targets)
            # Weight each batch's metrics by its size so the final numbers
            # are true per-example averages.
            total += batch_count
            total_loss += loss * batch_count
            total_acc += acc * batch_count
        return {
            "acc": (total_acc / total).item(),
            "loss": (total_loss / total).item(),
        }

    @abstractmethod
    def init_hidden(self, batch_size: int) -> Tuple[Variable, ...]:
        """
        Get initial hidden layers for this model.

        Parameters
        ----------
        batch_size : int
            The batch size to initialize hidden layers for.

        Returns
        -------
        hidden : Tuple[Variable, ...]
            A tuple of initialized hidden layers.
        """
        raise NotImplementedError()

    def configure_optimizers(self):
        # Adam with light weight decay; halve the LR when val_loss plateaus.
        optimizer = torch.optim.Adam(
            self.parameters(), lr=self.lr, betas=(0.9, 0.999), eps=1e-08, weight_decay=0.001
        )
        scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, factor=0.5)
        return [optimizer], [{"scheduler": scheduler, "monitor": "val_loss"}]
class SimpleChordClassifier(ChordClassifierModel):
    """
    The most simple chord classifier, with layers:
        1. Bi-LSTM
        2. Linear layer
        3. Dropout
        4. Linear layer
    """

    def __init__(
        self,
        input_type: PieceType,
        input_pitch: PitchType,
        output_pitch: PitchType,
        reduction: Dict[ChordType, ChordType] = None,
        use_inversions: bool = True,
        transposition_range: Union[List[int], Tuple[int, int]] = (0, 0),
        lstm_layers: int = 1,
        lstm_hidden_dim: int = 128,
        hidden_dim: int = 128,
        dropout: float = 0.0,
        learning_rate: float = 0.001,
    ):
        """
        Create a new SimpleChordClassifier.

        Parameters
        ----------
        input_type : PieceType
            The type of piece that the input data is coming from.
        input_pitch : PitchType
            What pitch type the model is expecting for notes.
        output_pitch : PitchType
            The pitch type to use for outputs of this model. Used to derive the output length.
        reduction : Dict[ChordType, ChordType]
            The reduction used for the output chord types.
        transposition_range : Union[List[int], Tuple[int, int]]
            Minimum and maximum bounds by which to transpose each note and chord of the
            dataset. Each __getitem__ call will return every possible transposition in this
            (min, max) range, inclusive on each side. The transpositions are measured in
            whatever PitchType is used in the dataset.
        use_inversions : bool
            Whether to use different inversions as different chords in the output. Used to
            derive the output length.
        lstm_layers : int
            The number of Bi-LSTM layers to use.
        lstm_hidden_dim : int
            The size of each LSTM layer's hidden vector.
        hidden_dim : int
            The size of the output vector of the first linear layer.
        dropout : float
            The dropout proportion of the first linear layer's output.
        learning_rate : float
            The learning rate.
        """
        super().__init__(
            input_type,
            input_pitch,
            output_pitch,
            reduction,
            use_inversions,
            learning_rate,
            transposition_range,
        )
        self.save_hyperparameters()

        # Input and output derived from pitch_type and use_inversions
        self.input_dim = get_note_vector_length(input_pitch)
        self.num_classes = get_chord_vector_length(
            output_pitch,
            one_hot=True,
            relative=False,
            use_inversions=use_inversions,
            pad=False,
            reduction=reduction,
        )

        # LSTM hidden layer and depth
        self.lstm_hidden_dim = lstm_hidden_dim
        self.lstm_layers = lstm_layers
        self.lstm = nn.LSTM(
            self.input_dim,
            self.lstm_hidden_dim,
            num_layers=self.lstm_layers,
            bidirectional=True,
            batch_first=True,
        )

        # Linear layers post-LSTM
        self.hidden_dim = hidden_dim
        self.dropout = dropout
        self.fc1 = nn.Linear(2 * self.lstm_hidden_dim, self.hidden_dim)  # 2 because bi-directional
        self.fc2 = nn.Linear(self.hidden_dim, self.num_classes)
        self.dropout1 = nn.Dropout(self.dropout)

    def init_hidden(self, batch_size: int) -> Tuple[Variable, Variable]:
        """
        Initialize the LSTM's hidden layer (h_0, c_0) for a given batch size.
        The first dimension is 2 * lstm_layers because the LSTM is
        bidirectional.

        Parameters
        ----------
        batch_size : int
            The batch size.
        """
        return (
            Variable(
                torch.zeros(
                    2 * self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device
                )
            ),
            Variable(
                torch.zeros(
                    2 * self.lstm_layers, batch_size, self.lstm_hidden_dim, device=self.device
                )
            ),
        )

    def forward(self, notes, lengths):
        # pylint: disable=arguments-differ
        batch_size = notes.shape[0]
        # Zero-length sequences would break pack_padded_sequence; clamp to 1.
        lengths = torch.clamp(lengths, min=1).cpu()
        h_0, c_0 = self.init_hidden(batch_size)

        # Pack so the LSTM skips padded timesteps.
        packed_notes = pack_padded_sequence(notes, lengths, enforce_sorted=False, batch_first=True)
        lstm_out_packed, (_, _) = self.lstm(packed_notes, (h_0, c_0))
        lstm_out_unpacked, lstm_out_lengths = pad_packed_sequence(lstm_out_packed, batch_first=True)

        # Split the bidirectional output into forward and backward halves
        # along the feature dimension.
        lstm_out_forward, lstm_out_backward = torch.chunk(lstm_out_unpacked, 2, 2)

        # Build a gather index so each sequence contributes its *last valid*
        # forward output (position length-1), not the padded tail.
        lstm_out_lengths_tensor = (
            lstm_out_lengths.unsqueeze(1).unsqueeze(2).expand((-1, 1, lstm_out_forward.shape[2]))
        ).to(self.device)
        last_forward = torch.gather(lstm_out_forward, 1, lstm_out_lengths_tensor - 1).squeeze()
        # The backward direction's "final" state lives at timestep 0.
        last_backward = lstm_out_backward[:, 0, :]
        lstm_out = torch.cat((last_forward, last_backward), 1)

        # Two-layer classification head with dropout in between.
        relu1 = F.relu(lstm_out)
        fc1 = self.fc1(relu1)
        relu2 = F.relu(fc1)
        drop1 = self.dropout1(relu2)
        output = self.fc2(drop1)

        return output
class TranspositionInvariantCNNClassifier(nn.Module):
    """
    A transposition invariant CNN takes as input some (batch x num_input_channels x
    pitch_vector_length) matrix and classifies it in a transpositional invariant way.

    The last dimension should go along some representation of "pitches" such that a circular
    convolution along this dimension will represent transpositions of the input representation.

    The output channels of the convolutional layer are then fed into identical copies of the same
    feed-forward network.

    Parameters
    ----------
    num_chord_types : int
        The number of chord types for the network to output per root.
    num_input_channels : int
        Input channels fed to the circular convolution.
    pitch_vector_length : int
        Length of the pitch dimension (also used as kernel size and padding).
    num_conv_channels : int
        Output channels of the convolutional layer.
    num_hidden : int
        The number of hidden layers to use.
    hidden_size : int
        The number of nodes in the input layer and each hidden layer.
    batch_norm : boolean
        True to include batch normalization after the activation function of
        the input layer and each hidden layer.
    dropout : float
        The percentage of nodes in the input layer and each hidden layer to
        dropout. This is applied after activation (and before batch normalization
        if batch_norm is True, although it is not recommended to use both).
    """

    def __init__(
        self,
        num_chord_types,
        num_input_channels=1,
        pitch_vector_length=12,
        num_conv_channels=10,
        num_hidden=1,
        hidden_size=100,
        batch_norm=False,
        dropout=0.0,
    ):
        super().__init__()
        # Convolutional layer (circular padding implements the transposition invariance)
        self.num_input_channels = num_input_channels
        self.pitch_vector_length = pitch_vector_length
        self.num_conv_channels = num_conv_channels
        self.conv = nn.Conv1d(
            self.num_input_channels,
            self.num_conv_channels,
            self.pitch_vector_length,
            padding=self.pitch_vector_length,
            padding_mode="circular",
        )
        # Parallel linear layers applied identically to every transposition
        self.num_chord_types = num_chord_types
        self.num_hidden = num_hidden
        self.hidden_size = hidden_size
        self.batch_norm = batch_norm
        self.dropout = dropout
        self.input = nn.Linear(num_conv_channels, hidden_size)
        self.linear = nn.ModuleList(
            [nn.Linear(hidden_size, hidden_size) for i in range(num_hidden)]
        )
        self.output = nn.Linear(hidden_size, num_chord_types)
        if batch_norm:
            self.batch_norms = nn.ModuleList(
                [nn.BatchNorm1d(hidden_size) for i in range(num_hidden + 1)]
            )
        else:
            # Placeholders keep zip() in forward() aligned when batch norm is off.
            self.batch_norms = nn.ModuleList([None] * (num_hidden + 1))
        self.dropouts = nn.ModuleList([nn.Dropout(dropout) for i in range(num_hidden + 1)])

    def forward(self, data):
        """Classify a (batch, pitch_vector_length) batch; returns (batch, 12 * num_chord_types)."""
        # Conv layer
        conv = F.relu(self.conv(data.unsqueeze(1)))
        # Parallel linear layers.
        # NOTE(review): 12 is hard-coded here (presumably the 12 pitch classes /
        # transpositions); this assumes the conv output length works out so that the
        # reshaped rows have num_conv_channels columns — confirm for non-default shapes.
        parallel_in = conv.reshape(conv.shape[0] * 12, -1)
        # Input layer
        parallel = self.dropouts[0](F.relu(self.input(parallel_in)))
        if self.batch_norms[0] is not None:
            parallel = self.batch_norms[0](parallel)
        # Hidden layers
        for layer, dropout, batch_norm in zip(self.linear, self.dropouts[1:], self.batch_norms[1:]):
            parallel = dropout(F.relu(layer(parallel)))
            if batch_norm is not None:
                parallel = batch_norm(parallel)
        # Output layer
        parallel_out = F.relu(self.output(parallel))
        # Final output combination.
        # Bug fix: use integer division — reshape() requires int dimensions, and
        # "/" produces a float (TypeError at runtime).
        output = parallel_out.reshape(parallel_out.shape[0] // 12, -1)
        return output
class TransformerEncoder(nn.Module):
    """
    Placeholder model meant to encode a given input into a defined chord
    representation. Not yet implemented: ``forward`` currently does nothing.
    """

    def __init__(self):
        super().__init__()
        # Placeholder flag kept until the real architecture is implemented.
        self.todo = True

    def forward(self, data):
        # pylint: disable=arguments-differ
        pass
class MusicScoreJointModel(nn.Module):
    """
    Combination of a chord encoder (e.g., TransformerEncoder) and a chord
    classifier (e.g., TranspositionInvariantCNNClassifier); the encoder's
    output is fed into the classifier.

    Parameters
    ----------
    encoder : nn.Module
        The chord encoder model.
    classifier : nn.Module
        The chord classifier model.
    """

    def __init__(self, encoder: nn.Module, classifier: nn.Module):
        super().__init__()
        self.encoder = encoder
        self.classifier = classifier

    def forward(self, data: torch.tensor, stages: Collection[int]) -> torch.tensor:
        # pylint: disable=arguments-differ
        """
        Forward pass one or both modules.

        Parameters
        ----------
        data : torch.tensor
            A batch-first representation of the input data for the forward pass.
        stages : list
            A list of what stages to perform. If 0 is in the list, use the encoder.
            If 1 is in the list, use the classifier.
        """
        # Stage 0 is the encoder, stage 1 the classifier; run whichever are requested.
        for stage_id, module in ((0, self.encoder), (1, self.classifier)):
            if stage_id in stages:
                data = module.forward(data)
        return data
| StarcoderdataPython |
1782887 | from builtins import super
from django.contrib import messages
from django.contrib.auth.decorators import user_passes_test
from django.contrib.auth.models import User
from django.contrib.messages.views import SuccessMessageMixin
from django.db import transaction
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render, redirect
from django.urls import reverse_lazy, reverse
from django.utils.decorators import method_decorator
from django.views.generic import CreateView, UpdateView, ListView, DetailView, DeleteView
from adminapp.forms import SubspaceFormSetUpdate, SubspaceFormSet, SetupForm, SetupUpdateForm
from adminapp.models import Setup, Dataset, Session, Iteration
from adminapp.services import DatasetService, SetupService, SessionService
class DatasetCreateView(CreateView):
    """
    View to create Dataset objects (superusers only).

    Attributes:
        model           the object to be created
        fields          fields to be shown to the user
        template_name   template to be used
    """
    model = Dataset
    fields = ["name", "description", "type", "feature_file", "raw_file"]
    template_name = 'dataset_create.html'

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *args, **kwargs):
        # Restrict dataset creation to superusers.
        return super(DatasetCreateView, self).dispatch(*args, **kwargs)

    def form_valid(self, form):
        """Save the dataset, extract its metadata, and flash a success message."""
        # super().form_valid() already saves the form and sets self.object; the
        # previous version called form.save() a second time (a redundant UPDATE)
        # and shadowed the builtin `id`.
        response = super(DatasetCreateView, self).form_valid(form)
        _dataset_service = DatasetService()
        _dataset_service.save_dataset_info(self.object.id)
        messages.success(self.request, 'Dataset has been created successfully!')
        return response
class DatasetUpdateView(SuccessMessageMixin, UpdateView):
    """
    Superuser-only view for editing the name and description of a Dataset.

    Attributes:
        model            the object to be edited
        fields           editable fields
        template_name    template to be used
        success_message  message flashed after a successful update
    """
    model = Dataset
    fields = ["name", "description"]
    template_name = "dataset_update.html"
    success_message = 'Dataset was updated successfully!'

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *request_args, **request_kwargs):
        # Only superusers may edit datasets.
        return super(DatasetUpdateView, self).dispatch(*request_args, **request_kwargs)
class DatasetListView(ListView):
    """
    Paginated listing of every Dataset (superusers only).

    Attributes:
        model                the object type being listed
        queryset             datasets to display
        template_name        template used to render the list
        context_object_name  name of the list in the template context
        paginate_by          number of datasets per page
    """
    model = Dataset
    queryset = Dataset.objects.all()
    template_name = 'dataset_list.html'
    context_object_name = 'datasets'
    paginate_by = 20

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *request_args, **request_kwargs):
        # Only superusers may browse datasets.
        return super(DatasetListView, self).dispatch(*request_args, **request_kwargs)
class DatasetDetailView(DetailView):
    """
    Superuser-only profile page for a single Dataset.

    Attributes:
        model                the object to be shown
        context_object_name  name of the dataset in the template context
        template_name        template to be used
    """
    model = Dataset
    context_object_name = 'dataset'
    template_name = 'dataset_profile.html'

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *request_args, **request_kwargs):
        # Only superusers may view dataset profiles.
        return super(DatasetDetailView, self).dispatch(*request_args, **request_kwargs)
class DatasetDeleteView(DeleteView):
    """
    Confirm and perform deletion of a Dataset (superusers only).

    Attributes:
        model            the object to be deleted
        template_name    confirmation template
        success_url      redirect target after deletion
        success_message  message flashed after deletion
    """
    model = Dataset
    template_name = 'dataset_delete.html'
    success_url = reverse_lazy('dataset-list')
    success_message = "Dataset %(name)s was removed successfully!"

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *request_args, **request_kwargs):
        # Only superusers may delete datasets.
        return super(DatasetDeleteView, self).dispatch(*request_args, **request_kwargs)

    def delete(self, request, *args, **kwargs):
        """Flash the success message (with the object's fields) before deleting."""
        doomed = self.get_object()
        messages.success(self.request, self.success_message % doomed.__dict__)
        return super(DatasetDeleteView, self).delete(request, *args, **kwargs)
class SetupCreateView(CreateView):
    """
    View to create Setup objects together with their subspace formset
    (superusers only).

    Attributes:
        model          the object to be created
        form_class     form used for the setup fields
        template_name  template to be used
        success_url    fallback redirect (get_success_url overrides it)
    """
    model = Setup
    form_class = SetupForm
    template_name = 'setup_create.html'
    success_url = reverse_lazy('setup-detail')

    def get_context_data(self, **kwargs):
        """
        Overrides the method to get the list of features for the subspace selection.

        :param kwargs:
        :return: list of features for the subspace selection.
        """
        data = super(SetupCreateView, self).get_context_data(**kwargs)
        if self.request.POST:
            data['subspace_formset'] = SubspaceFormSet(self.request.POST)
        else:
            data['subspace_formset'] = SubspaceFormSet()
        return data

    def form_valid(self, form):
        """
        Save the setup and its subspace formset atomically.
        (Removed an unused SetupService() instantiation from the original.)
        """
        context = self.get_context_data()
        subspace_formset = context['subspace_formset']
        with transaction.atomic():
            self.object = form.save()
            # NOTE(review): an invalid formset is silently skipped while the
            # setup itself is still saved — confirm this best-effort behavior
            # is intentional.
            if subspace_formset.is_valid():
                subspace_formset.instance = self.object
                subspace_formset.save()
        messages.success(self.request, 'Setup ' + self.object.name + ' has been created successfully!')
        return super(SetupCreateView, self).form_valid(form)

    def get_success_url(self):
        """Redirect to the newly created setup's profile page."""
        return self.object.get_absolute_url()

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *args, **kwargs):
        # Only superusers may create setups.
        return super(SetupCreateView, self).dispatch(*args, **kwargs)
class SetupUpdateView(SuccessMessageMixin, UpdateView):
    """
    Superuser-only view for editing an existing Setup and its subspace formset.

    Attributes:
        model            the object being edited
        form_class       form used for the setup fields
        is_update_view   marks this as an update view
        template_name    template to render
        success_message  message flashed after a successful save
        success_url      fallback redirect target (get_success_url overrides it)
    """
    model = Setup
    form_class = SetupUpdateForm
    is_update_view = True
    template_name = "setup_update.html"
    success_message = 'Setup was updated successfully!'
    success_url = reverse_lazy('setup-detail')

    def get_context_data(self, **kwargs):
        """
        Overrides the method to get the list of features for the subspace selection.

        :return: list of features for the subspace selection.
        """
        data = super(SetupUpdateView, self).get_context_data(**kwargs)
        # Bind the formset to POST data when present, otherwise show it unbound.
        formset_args = (self.request.POST,) if self.request.POST else ()
        data['subspace_formset'] = SubspaceFormSetUpdate(*formset_args, instance=self.get_object())
        return data

    def form_valid(self, form):
        """Save the setup and, when valid, its subspace formset atomically."""
        subspace_formset = self.get_context_data()['subspace_formset']
        with transaction.atomic():
            self.object = form.save()
            if subspace_formset.is_valid():
                subspace_formset.instance = self.object
                subspace_formset.save()
        return super(SetupUpdateView, self).form_valid(form)

    def get_success_url(self):
        """Redirect to the setup's profile page."""
        return self.object.get_absolute_url()

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *request_args, **request_kwargs):
        # Only superusers may edit setups.
        return super(SetupUpdateView, self).dispatch(*request_args, **request_kwargs)
class SetupListView(ListView):
    """
    Paginated listing of setups, optionally filtered by dataset (superusers only).

    Attributes:
        model                the object to be listed
        template_name        template to be used
        context_object_name  objects to be listed
        paginate_by          number of objects on each page
        queryset             which objects must be shown
    """
    model = Setup
    template_name = 'setup_list.html'
    context_object_name = 'setups'
    paginate_by = 20
    queryset = Setup.objects.all()

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *args, **kwargs):
        # Only superusers may browse setups.
        return super(SetupListView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        """List setups; honour an optional ``dataset_id`` query parameter."""
        dataset = str(request.GET.get('dataset_id', 'all'))
        self.extra_context = {}
        try:
            if dataset != 'all' and Dataset.objects.filter(id=dataset).count() > 0:
                self.queryset = Setup.objects.filter(dataset_id_id=dataset)
                self.extra_context = {"dataset_obj": Dataset.objects.get(id=dataset)}
        except Exception:
            # A malformed dataset_id (e.g. non-numeric) makes the ORM lookup
            # raise; deliberately fall back to listing every setup.
            pass
        # Datasets that have at least one setup, for the filter dropdown.
        self.extra_context["datasets"] = {setup.dataset_id for setup in Setup.objects.all()}
        # Bug fix: super() already binds self — the original passed self twice
        # (super(...).get(self, request, ...)), shifting `request` into *args.
        return super(SetupListView, self).get(request, *args, **kwargs)
class SetupDetailView(SuccessMessageMixin, DetailView):
    """
    Superuser-only profile page for a single Setup.

    Attributes:
        model                the object to be shown
        template_name        template to be used
        context_object_name  name of the setup in the template context
    """
    model = Setup
    template_name = 'setup_profile.html'
    context_object_name = 'setup'

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *args, **kwargs):
        # Only superusers may view setup profiles.
        return super(SetupDetailView, self).dispatch(*args, **kwargs)

    def post(self, request, pk):
        """
        Handles the buttons of the view that produce a POST request:
        - Start Experiment: Calls the _setup_service method set_final.
        - Clone: Calls the _setup_service method clone_setup.
        - Export: Calls _setup_service method export_all.
        Unknown actions now redirect back to the setup profile instead of
        returning None (which previously caused an HTTP 500).
        """
        _setup_service = SetupService()
        setup = self.get_object()
        action = request.POST.get("action")
        if action == "start_experiment":
            success_msg = _setup_service.set_final(setup.id)
            if success_msg == "success":
                messages.success(self.request,
                                 'Setup ' + setup.name + ' has been started.'
                                 ' Now you can invite participants from Create Experiment Page')
                return redirect("/adminapp/" + "experiments/new/" + "setup=" + str(setup.id))
            messages.warning(self.request, success_msg)
            return HttpResponseRedirect(setup.get_absolute_url())
        if action == "clone":
            clone = _setup_service.clone_setup(setup.id)
            messages.success(self.request,
                             'Setup ' + setup.name + ' has been successfully cloned.'
                             ' Please do not forget to change its name for consistency.')
            return HttpResponseRedirect(clone.get_absolute_url())
        if action == "send_invitation":
            return redirect("/adminapp/" + "experiments/new/" + "setup=" + str(setup.id))
        if action == "export_all":
            response = HttpResponse(_setup_service.export_all(setup.id), content_type='application/json')
            response['Content-Disposition'] = 'attachment; filename=result_setup' + str(setup.id) + '.json'
            return response
        # Robustness fix: always return an HTTP response.
        return HttpResponseRedirect(setup.get_absolute_url())
class SetupDeleteView(DeleteView):
    """
    Confirm and perform deletion of a Setup (superusers only).

    Attributes:
        model            the object to be deleted
        template_name    confirmation template
        success_url      redirect target after deletion
        success_message  message flashed after deletion
    """
    model = Setup
    template_name = 'setup_delete.html'
    success_url = reverse_lazy('setup-list')
    success_message = "Setup %(name)s was removed successfully!"

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *request_args, **request_kwargs):
        # Only superusers may delete setups.
        return super(SetupDeleteView, self).dispatch(*request_args, **request_kwargs)

    def delete(self, request, *args, **kwargs):
        """Flash the success message (with the object's fields) before deleting."""
        doomed = self.get_object()
        messages.success(self.request, self.success_message % doomed.__dict__)
        return super(SetupDeleteView, self).delete(request, *args, **kwargs)
class SetupInviteUsersView(ListView):
    """
    View for inviting participants to a finalized setup; creates inactive
    sessions for each invited user (superusers only).

    Attributes:
        model                users are the primary listed objects
        template_name        template to be used
        context_object_name  objects to be listed
        paginate_by          number of users to be shown
    """
    model = User
    template_name = 'session_create.html'
    context_object_name = 'users'
    paginate_by = 20
    # NOTE(review): this queryset targets Setup although model is User; it is
    # never used because get() builds its own context — confirm before removal.
    queryset = Setup.objects.all()

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *args, **kwargs):
        # Only superusers may send invitations.
        return super(SetupInviteUsersView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        """Render the invitation form with all non-admin users and final setups."""
        context = {
            'users': User.objects.filter(is_superuser=False),
            'setups': Setup.objects.filter(status="final"),
        }
        if 'setuppk' in kwargs:
            context['selected_setup'] = Setup.objects.get(id=kwargs['setuppk'])
        return render(request, 'session_create.html', context)

    def post(self, request, *args, **kwargs):
        """Create an inactive session (invitation) for every selected user."""
        session_service = SessionService()
        setup_id = request.POST['setup_id']
        user_ids = request.POST.getlist('user_ids')
        # Resolve usernames once for the feedback message; the original used two
        # separate loops over the same ids and a redundant `user_id = user` alias.
        usernames = [User.objects.get(id=user_id).username for user_id in user_ids]
        for user_id in user_ids:
            session_service.create_inactive_session_from_invitation(setup_id, user_id)
        messages.success(self.request,
                         "Invitation(s) has been successfully sent to: " + ', '.join(usernames))
        return HttpResponseRedirect(reverse('session-list'))
class SetupFinishedSessionsView(ListView):
    """
    List the finished sessions belonging to one setup (superusers only).

    Attributes:
        model                the object to be shown
        template_name        template to be used
        context_object_name  objects to be listed
        paginate_by          number of sessions per page
    """
    model = Session
    template_name = 'finished_sessions_list.html'
    context_object_name = 'sessions'
    paginate_by = 20

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *request_args, **request_kwargs):
        # Only superusers may review finished sessions.
        return super(SetupFinishedSessionsView, self).dispatch(*request_args, **request_kwargs)

    def get_queryset(self):
        """
        Return the finished sessions of the setup identified by the ``pk`` URL
        kwarg, via SessionService.get_finished_sessions_for_setup.
        """
        return SessionService().get_finished_sessions_for_setup(self.kwargs['pk'])
class SessionListView(ListView):
    """
    Show all session objects, optionally filtered by status (superusers only).

    Attributes:
        model                the object to be shown
        template_name        template to be used
        context_object_name  objects to be listed
        paginate_by          number of sessions per page
        queryset             which objects to be listed
    """
    model = Session
    template_name = 'session_list.html'
    context_object_name = 'sessions'
    paginate_by = 20
    queryset = Session.objects.all()

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *args, **kwargs):
        # Only superusers may browse sessions.
        return super(SessionListView, self).dispatch(*args, **kwargs)

    def get(self, request, *args, **kwargs):
        """List sessions; honour an optional ``status`` query parameter."""
        status = request.GET.get('status', 'all')
        if status != 'all':
            self.queryset = Session.objects.filter(status=status)
            self.extra_context = {"selected_status": status}
        # Bug fix: super() already binds self — the original passed self twice
        # (super(...).get(self, request, ...)), shifting `request` into *args.
        return super(SessionListView, self).get(request, *args, **kwargs)
class SessionDetailView(DetailView):
    """
    Superuser-only profile page for a single Session.

    Attributes:
        model                the object to be shown
        template_name        template to be used
        context_object_name  name of the session in the template context
    """
    model = Session
    template_name = 'session_detail.html'
    context_object_name = 'session'

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *args, **kwargs):
        # Only superusers may view session details.
        return super(SessionDetailView, self).dispatch(*args, **kwargs)

    def post(self, request, **kwargs):
        """
        Handles the buttons of the view that produce a POST request:
        - accept: mark the session accepted and redirect back.
        - export: download the session results as JSON.
        Unknown actions now redirect back to the session page instead of
        returning None (which previously caused an HTTP 500).
        """
        session_service = SessionService()
        session = self.get_object()
        action = request.POST.get('action')
        if action == "accept":
            session_service.set_accepted(session.id)
        elif action == "export":
            response = HttpResponse(session_service.export_session_results(session.id),
                                    content_type='application/json')
            response['Content-Disposition'] = 'attachment; filename=result_session' + str(session.id) + '.json'
            return response
        # Robustness fix: always return an HTTP response.
        return HttpResponseRedirect(session.get_absolute_url())
class SessionDeleteView(DeleteView):
    """
    Confirm and perform deletion of a Session (superusers only).

    Attributes:
        model            the object to be deleted
        template_name    confirmation template
        success_url      redirect target after deletion
        success_message  message flashed after deletion
    """
    model = Session
    template_name = 'session_delete.html'
    success_url = reverse_lazy('session-list')
    success_message = "Session was removed successfully!"

    @method_decorator(user_passes_test(lambda u: u.is_superuser))
    def dispatch(self, *request_args, **request_kwargs):
        # Only superusers may delete sessions.
        return super(SessionDeleteView, self).dispatch(*request_args, **request_kwargs)
| StarcoderdataPython |
1625230 | #!/usr/bin/env python3
# Copyright (c) 2020 Bitcoin Association
# Distributed under the Open BSV software license, see the accompanying file LICENSE.
#
# Test merkle proof requests and validation
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import connect_nodes, assert_equal, Decimal, assert_raises_rpc_error, sync_blocks, random, assert_greater_than
import os, shutil
class MerkleProofTest(BitcoinTestFramework):
    """
    Functional test for the merkle proof RPCs (getmerkleproof, getmerkleproof2,
    verifymerkleproof) and the node's on-disk merkle tree store.
    """

    def set_test_params(self):
        self.num_nodes = 2
        self.setup_clean_chain = True
        # On first node set preferred data file size to 30 kB
        # On second node -txindex is set
        self.extra_args = [["-preferredmerkletreefilesize=30720"], ["-txindex"]]

    def setup_network(self):
        self.setup_nodes()
        connect_nodes(self.nodes, 0, 1)
        self.sync_all()

    def setup_nodes(self):
        self.add_nodes(self.num_nodes, self.extra_args)
        # change rpc_timeout for node0 to avoid getting timeout on rpc generate when generating
        # large number of blocks.
        self.nodes[0].rpc_timeout = 300
        self.start_nodes()

    def check_equivalence(self, a, b):
        """Return True when a getmerkleproof result and a getmerkleproof2 result agree."""
        if a['target']['hash'] != b['target']:
            return False
        for ax, bx in zip(a['nodes'], b['nodes']):
            if ax != bx:
                return False
        return True

    def verify_merkle_proof(self, txid, blockhash, node):
        """Fetch proofs for txid in all RPC variants and cross-check them."""
        a1 = self.nodes[node].getmerkleproof(txid)
        a2 = self.nodes[node].getmerkleproof(txid, blockhash)
        b1 = self.nodes[node].getmerkleproof2("", txid)
        b2 = self.nodes[node].getmerkleproof2(blockhash, txid)
        assert self.nodes[node].verifymerkleproof(a1)
        assert self.nodes[node].verifymerkleproof(a2)
        assert(self.check_equivalence(a1, b1))
        assert(self.check_equivalence(a2, b2))
        # targetType "merkleroot": target must equal the proof's merkle root
        c1 = self.nodes[node].getmerkleproof2("", txid, False, "merkleroot")
        c2 = self.nodes[node].getmerkleproof2(blockhash, txid, False, "merkleroot")
        assert(c1["target"] == a1["target"]["merkleroot"])
        assert(c2["target"] == a2["target"]["merkleroot"])
        # targetType "hash": target must equal the block hash
        d1 = self.nodes[node].getmerkleproof2("", txid, False, "hash")
        d2 = self.nodes[node].getmerkleproof2(blockhash, txid, False, "hash")
        assert(d1["target"] == a1["target"]["hash"])
        assert(d2["target"] == a2["target"]["hash"])
        assert(d2["target"] == blockhash)
        # targetType "header": target must equal the serialized block header
        e1 = self.nodes[node].getmerkleproof2("", txid, False, "header")
        e2 = self.nodes[node].getmerkleproof2(blockhash, txid, False, "header")
        current_blockhash = d1["target"]
        blockheader_func = self.nodes[node].getblockheader(current_blockhash, False)
        blockheader_field = e1["target"]
        assert(blockheader_func == blockheader_field)
        blockheader_func = self.nodes[node].getblockheader(blockhash, False)
        blockheader_field = e2["target"]
        assert(blockheader_func == blockheader_field)

    # Calculate Merkle tree size in bytes
    def merkle_tree_size(self, number_of_transactions):
        """Return the size in bytes of a Merkle tree with the given number of leaves."""
        merkle_tree_size = 0
        # Bug fix: each level above the leaves has ceil(n/2) nodes, not floor(n/2);
        # the previous floor halving under-counted levels with an odd node count
        # (e.g. 3 leaves -> 3+2+1 = 6 nodes, not 3+1 = 4).
        while number_of_transactions > 1:
            merkle_tree_size += number_of_transactions
            number_of_transactions = (number_of_transactions + 1) // 2
        # The root (or the single leaf when the tree is trivial).
        merkle_tree_size += number_of_transactions
        # 32 bytes for each hash
        merkle_tree_size *= 32
        return merkle_tree_size

    def verify_stored_data(self, verifyData, node):
        """Re-verify merkle proofs for every recorded (blockhash -> txids) pair."""
        for verifyBlockHash in verifyData:
            verifyTransactions = verifyData[verifyBlockHash]
            for verifyTxid in verifyTransactions:
                self.verify_merkle_proof(verifyTxid, verifyBlockHash, node)

    def run_test(self):
        self.log.info("Mining 500 blocks...")
        self.nodes[0].generate(500)
        self.sync_all()
        assert_equal(self.nodes[1].getblockcount(), 500)
        assert_equal(self.nodes[1].getbalance(), 0)
        # Create and send two transactions
        tx1_in = self.nodes[0].listunspent().pop()
        tx1_out = tx1_in["amount"] - Decimal("0.01")
        tx1 = self.nodes[0].createrawtransaction([tx1_in], {self.nodes[1].getnewaddress(): tx1_out})
        txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
        tx2_in = self.nodes[0].listunspent().pop()
        tx2_out = tx2_in["amount"] - Decimal("0.01")
        tx2 = self.nodes[0].createrawtransaction([tx2_in], {self.nodes[1].getnewaddress(): tx2_out})
        txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
        # Try to get proof for one of the transactions - should fail because it is not yet in a block
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].getmerkleproof, txid1)
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].getmerkleproof2, "", txid1)
        # Mine a new block
        self.log.info("Mining 501st block...")
        self.nodes[0].generate(1)
        self.sync_all()
        height_of_block_501 = self.nodes[1].getblockcount()
        # Check some negative tests on verifymerkleproof
        assert_raises_rpc_error(-8, "\"flags\" must be a numeric value", self.nodes[0].verifymerkleproof, {'flags': '2'})
        assert_raises_rpc_error(-8, "verifymerkleproof only supports \"flags\" with value 2", self.nodes[0].verifymerkleproof, {'flags': 1})
        assert_raises_rpc_error(-8, "\"nodes\" must be a Json array", self.nodes[0].verifymerkleproof,
                                {'flags': 2,
                                 'index': 4,
                                 'txOrId': 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890',
                                 'target': {'merkleroot': '<KEY>'},
                                 'nodes': '*'})
        assert_raises_rpc_error(-8, "\"node\" must be a \"hash\" or \"*\"", self.nodes[0].verifymerkleproof,
                                {'flags': 2,
                                 'index': 4,
                                 'txOrId': 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890',
                                 'target': {'merkleroot': '<KEY>'},
                                 'nodes': [2]})
        assert_raises_rpc_error(-8, "node must be of length 64 (not 10)", self.nodes[0].verifymerkleproof,
                                {'flags': 2,
                                 'index': 4,
                                 'txOrId': 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890',
                                 'target': {'merkleroot': 'abcdef1234567890abcdef1234567890abcdef1234567890abcdef1234567890'},
                                 'nodes': ['*', 'abcdef1234']})
        # Get proof for 1st and 2nd transaction and verify that calculated roots are the same as block's merkle root
        hash_of_block_501 = self.nodes[0].getblockhash(height_of_block_501)
        self.verify_merkle_proof(txid1, hash_of_block_501, 0)
        self.verify_merkle_proof(txid2, hash_of_block_501, 0)
        # Create and send 3rd transaction
        tx_spent = self.nodes[1].listunspent().pop()
        tx3_out = tx_spent["amount"] - Decimal("0.01")
        tx3 = self.nodes[1].createrawtransaction([tx_spent], {self.nodes[0].getnewaddress(): tx3_out})
        txid3 = self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
        # Mine a new block
        self.log.info("Mining 502nd block...")
        self.nodes[0].generate(1)
        self.sync_all()
        # Get id of spent and unspent transaction
        txid_spent = tx_spent["txid"]
        txid_unspent = txid1 if txid_spent != txid1 else txid2
        # We can't find the block if transaction was spent because -txindex is not set on node[0]
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].getmerkleproof, txid_spent)
        assert_raises_rpc_error(-5, "Transaction not yet in block", self.nodes[0].getmerkleproof2, "", txid_spent)
        # We can get the proof if we specify proper block hash
        a = self.nodes[0].getmerkleproof(txid_spent, hash_of_block_501)
        b = self.nodes[0].getmerkleproof2(hash_of_block_501, txid_spent)
        assert self.nodes[0].verifymerkleproof(a)
        assert(self.check_equivalence(a, b))
        # We can't get the proof if we specify a non-existent block
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getmerkleproof, txid_spent, "1234567890abcdef1234567890abcdef")
        assert_raises_rpc_error(-5, "Block not found", self.nodes[0].getmerkleproof2, "<KEY>", txid_spent)
        # We can get the proof if the transaction is unspent
        self.verify_merkle_proof(txid_unspent, hash_of_block_501, 0)
        # We can get a proof of a spent transaction without block hash if node runs with -txindex (nodes[1] in this case)
        self.verify_merkle_proof(txid_spent, hash_of_block_501, 1)
        # Restart nodes
        self.log.info("Restarting nodes...")
        self.stop_nodes()
        self.start_nodes(self.extra_args)
        # Repeat tests after nodes restart
        self.verify_merkle_proof(txid_unspent, hash_of_block_501, 0)
        self.verify_merkle_proof(txid_spent, hash_of_block_501, 1)
        hash_of_block_502 = self.nodes[0].getblockhash(height_of_block_501 + 1)
        self.verify_merkle_proof(txid3, hash_of_block_502, 0)
        # Create more blocks to get utxos
        self.log.info("Mining additional 1500 blocks...")
        self.nodes[0].generate(1500)
        # NOTE(review): self.nodes[0:1] contains only node0, so this sync is a
        # no-op — possibly intended to be self.nodes[0:2]; confirm.
        sync_blocks(self.nodes[0:1])
        # Use all utxos and create more Merkle Trees
        # We create blocks with max 400 transactions (~25 kB for biggest Merkle Tree)
        self.log.info("Mining blocks with random transactions using all utxos...")
        utxos = self.nodes[0].listunspent()
        calculated_merkle_tree_disk_size = 0
        verifyData = {}
        while len(utxos) > 0:
            # Choose random number of transactions
            send_transactions = random.randint(1, 400)
            if len(utxos) < send_transactions:
                send_transactions = len(utxos)
            # Send transactions
            for i in range(send_transactions):
                tx_in = utxos.pop()
                tx_out = tx_in["amount"] - Decimal("0.01")
                tx = self.nodes[0].createrawtransaction([tx_in], {self.nodes[1].getnewaddress(): tx_out})
                txid = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx)["hex"])
            # Mine a block
            self.nodes[0].generate(1)
            sync_blocks(self.nodes[0:1])
            # Verify proofs of some random transactions in each block
            hash_of_this_block = self.nodes[0].getblockhash(self.nodes[0].getblockcount())
            transactions_of_this_block = self.nodes[0].getblock(hash_of_this_block, True)["tx"]
            calculated_merkle_tree_disk_size += self.merkle_tree_size(len(transactions_of_this_block))
            verifyData[hash_of_this_block] = transactions_of_this_block
        # Verify merkle proofs of all transactions in all blocks
        self.verify_stored_data(verifyData, 0)
        # Data files checks
        number_of_data_files = 0
        disk_size = 0
        node0_data_dir = os.path.join(self.options.tmpdir, "node0", "regtest", "merkle", "")
        for data_file in os.listdir(node0_data_dir):
            data_file_name = node0_data_dir + data_file
            if os.path.isfile(data_file_name):
                data_file_size = os.path.getsize(data_file_name)
                # No file should be bigger than 30 kB since no Merkle Tree takes more than 25 kB
                assert_greater_than(30 * 1024, data_file_size)
                disk_size += data_file_size
                number_of_data_files += 1
        # Verify that Merkle Tree disk size is at least the size of Merkle Trees we just stored
        # (strictly greater still holds: the store also contains trees of other queried blocks)
        assert_greater_than(disk_size, calculated_merkle_tree_disk_size)
        # Number of data files should be at least calculated_merkle_tree_disk_size/preferred_file_size
        assert_greater_than(number_of_data_files, calculated_merkle_tree_disk_size/(30 * 1024))
        # Delete index to test recreation of index when node is started again
        self.log.info("Restarting nodes to remove Merkle Trees index...")
        self.stop_nodes()
        node0_index_dir = os.path.join(node0_data_dir, "index", "")
        shutil.rmtree(node0_index_dir)
        self.start_nodes(self.extra_args)
        # Repeat merkle proof checks
        self.verify_stored_data(verifyData, 0)
        # Since index was recreated from data files, requesting existing merkle trees shouldn't create any new data
        new_disk_size = 0
        for data_file in os.listdir(node0_data_dir):
            data_file_name = node0_data_dir + data_file
            if os.path.isfile(data_file_name):
                new_disk_size += os.path.getsize(data_file_name)
        assert_equal(disk_size, new_disk_size)
# Entry point: run the functional test when this file is executed directly.
if __name__ == '__main__':
    MerkleProofTest().main()
| StarcoderdataPython |
1600845 | <gh_stars>0
import requests
from bs4 import BeautifulSoup
import pyquery  # NOTE(review): imported but never used below — confirm before removing
# Tutorial this snippet follows: http://www.cnblogs.com/Albert-Lee/p/6232745.html
# Spoof a Chrome browser via the User-Agent header so the site serves the normal page.
headers = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/54.0.2840.99 Safari/537.36'}
web_url = 'https://unsplash.com'
# Send a GET request to the target URL; returns a Response object.
r = requests.get(web_url, headers=headers)
# Collect every <a> tag on the page whose CSS class is cV68d.
all_a = BeautifulSoup(r.text, 'lxml').find_all('a', class_='cV68d')
for a in all_a:
    # Print the inline style attribute of each matched anchor.
    print(a['style'])
| StarcoderdataPython |
3351817 | """
GPS Keplerian elements => ECEF
<NAME>, Ph.D.
"""
from datetime import datetime, timedelta
import xarray
import numpy as np
def keplerian2ecef(sv: xarray.DataArray) -> tuple:
    """
    Convert broadcast Keplerian ephemeris elements to ECEF coordinates.

    based on:
    https://ascelibrary.org/doi/pdf/10.1061/9780784411506.ap03

    Parameters
    ----------
    sv : xarray.DataArray
        Broadcast ephemeris record(s) with the standard GPS fields
        (sqrtA, DeltaN, M0, Eccentricity, omega, Toe, GPSWeek, ...)
        indexed by 'time'.

    Returns
    -------
    tuple
        (X, Y, Z) ECEF coordinates.  GLONASS ('R') and SBAS ('S')
        records already carry ECEF positions and are passed through.
    """
    # GLONASS / SBAS ephemerides are already ECEF positions — no conversion.
    if 'sv' in sv and sv['sv'] in ('R', 'S'):
        return sv['X'], sv['Y'], sv['Z']
    sv = sv.dropna(dim='time', how='all')
    GM = 3.986005e14  # [m^3 s^-2]  Earth's gravitational parameter (GPS ICD value)
    omega_e = 7.292115e-5  # [rad s^-1]  Earth rotation rate
    # pi = 3.1415926535898 # definition
    A = sv['sqrtA']**2  # semi-major axis from the broadcast sqrt(A)
    n0 = np.sqrt(GM/A**3) # computed mean motion
    # T = 2*pi / n0 # Satellite orbital period
    n = n0 + sv['DeltaN'] # corrected mean motion
    # from GPS Week 0
    t0 = datetime(1980, 1, 6) + timedelta(weeks=sv['GPSWeek'][0].astype(int).item())
    tk = np.empty(sv['time'].size, dtype=float)
    # FIXME: so ugly...
    # time elapsed since reference epoch
    # seems to be a bug in MyPy, this line computes "correctly"
    for i, (t1, t2) in enumerate(zip(sv['time'], sv['Toe'])):
        # sv['time'] holds ns-resolution timestamps; Toe is seconds into the GPS week
        tsv = datetime.utcfromtimestamp(t1.item()/1e9)
        toe = timedelta(seconds=t2.values.astype(int).item()) + t0 # type: ignore # noqa
        tk[i] = (tsv - toe).total_seconds() # type: ignore # noqa
    Mk = sv['M0'] + n*tk # Mean Anomaly
    # FIXME: ok? — single fixed-point step of Kepler's equation (M + e*sin M);
    # approximate for large eccentricities.
    Ek = Mk + sv['Eccentricity'] * np.sin(Mk)  # FIXME: ok?
    # true anomaly from the eccentric anomaly
    nuK = 2 * np.arctan2(np.sqrt(1 + sv['Eccentricity']) *
                         np.sin(Ek/2), np.sqrt(1-sv['Eccentricity']) * np.cos(Ek/2))
    PhiK = nuK + sv['omega']  # argument of latitude
    # second-harmonic perturbation corrections: inclination, latitude, radius
    dik = sv['Cic']*np.cos(2*PhiK) + sv['Cis']*np.sin(2*PhiK)
    ik = sv['Io'] + sv['IDOT']*tk + dik # corrected inclination
    duk = sv['Cuc'] * np.cos(2*PhiK) + sv['Cus']*np.sin(2*PhiK)
    uk = PhiK + duk  # corrected argument of latitude
    drk = sv['Crc']*np.cos(2*PhiK) + sv['Crs']*np.sin(2*PhiK)
    rk = A*(1-sv['Eccentricity']*np.cos(Ek)) + drk  # corrected radius
    # position in the orbital plane
    Xk1 = rk * np.cos(uk)
    Yk1 = rk*np.sin(uk)
    # corrected longitude of ascending node (accounts for Earth rotation since Toe)
    OmegaK = sv['Omega0'] + (sv['OmegaDot'] - omega_e)*tk - omega_e*sv['Toe']
    # rotate orbital-plane coordinates into ECEF
    X = Xk1 * np.cos(OmegaK) - Yk1 * np.sin(OmegaK) * np.cos(ik)
    Y = Xk1*np.sin(OmegaK) + Yk1 * np.cos(OmegaK) * np.cos(ik)
    Z = Yk1*np.sin(ik)
    return X, Y, Z
| StarcoderdataPython |
165843 | # coding=utf-8
# ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from datetime import datetime
import os
from azure.containerregistry import (
ContainerRepositoryClient,
ContainerRegistryClient,
ContainerRegistryUserCredential,
TagProperties,
ContentPermissions,
RegistryArtifactProperties,
)
class ContainerRegistryTestClass(object):
    """Shared helpers for Azure Container Registry test cases.

    Client factories read credentials from the CONTAINERREGISTRY_USERNAME /
    CONTAINERREGISTRY_PASSWORD environment variables; assertion helpers
    validate service model objects field by field.
    """

    def create_registry_client(self, endpoint):
        """Return a ContainerRegistryClient for *endpoint* using env-var credentials."""
        return ContainerRegistryClient(
            endpoint=endpoint,
            credential=ContainerRegistryUserCredential(
                username=os.environ["CONTAINERREGISTRY_USERNAME"],
                # NOTE(review): restored from a redacted "<PASSWORD>" placeholder;
                # confirm the exact environment-variable name against the CI config.
                password=os.environ["CONTAINERREGISTRY_PASSWORD"],
            ),
        )

    def create_repository_client(self, endpoint, name):
        """Return a ContainerRepositoryClient for repository *name* at *endpoint*."""
        return ContainerRepositoryClient(
            endpoint=endpoint,
            repository=name,
            credential=ContainerRegistryUserCredential(
                username=os.environ["CONTAINERREGISTRY_USERNAME"],
                # BUG FIX: the original line was corrupted by secret redaction
                # ("password=<PASSWORD>[...]" — not valid Python); restored to the
                # same env-var lookup used in create_registry_client.
                password=os.environ["CONTAINERREGISTRY_PASSWORD"],
            ),
        )

    def assert_content_permission(self, content_perm, content_perm2):
        """Assert two ContentPermissions objects are equivalent field by field."""
        assert isinstance(content_perm, ContentPermissions)
        assert isinstance(content_perm2, ContentPermissions)
        # BUG FIX: the original compared content_perm against itself on every
        # field, so this helper could never fail; compare against content_perm2.
        assert content_perm.can_delete == content_perm2.can_delete
        assert content_perm.can_list == content_perm2.can_list
        assert content_perm.can_read == content_perm2.can_read
        assert content_perm.can_write == content_perm2.can_write

    def assert_tag(
        self,
        tag,
        created_on=None,
        digest=None,
        last_updated_on=None,
        content_permission=None,
        name=None,
        registry=None,
        repository=None,
    ):
        """Assert *tag* is a valid TagProperties and that every supplied
        keyword matches the corresponding attribute.

        Only keywords that are not None are checked.  *digest* is accepted
        for call-site symmetry but is currently unused here.
        """
        assert isinstance(tag, TagProperties)
        assert isinstance(tag.content_permissions, ContentPermissions)
        assert isinstance(tag.created_on, datetime)
        assert isinstance(tag.last_updated_on, datetime)
        if content_permission:
            # BUG FIX: the attribute is 'content_permissions' (plural), matching
            # the isinstance check above; the original read a nonexistent
            # singular 'content_permission' attribute and would AttributeError.
            self.assert_content_permission(tag.content_permissions, content_permission)
        if created_on:
            assert tag.created_on == created_on
        if last_updated_on:
            assert tag.last_updated_on == last_updated_on
        if name:
            assert tag.name == name
        if registry:
            assert tag.registry == registry
        if repository:
            assert tag.repository == repository

    def assert_registry_artifact(self, tag_or_digest, expected_tag_or_digest):
        """Assert the value is a RegistryArtifactProperties equal to the expected one."""
        assert isinstance(tag_or_digest, RegistryArtifactProperties)
        assert tag_or_digest == expected_tag_or_digest
1638654 | #!/usr/bin/env python
import os
import requests
import sched
import sys
import time
from GetWeather_Data import get_weather_data
from WeatherBot_Auth import authenticate
def arg_check():
    """Validate the command line: require at least one argument.

    On success this simply returns; otherwise it prints an error, shows the
    usage text and exits with status 1.
    """
    if len(sys.argv) >= 2:
        return
    print("Error: Not enough arguments.")
    from usage import usage
    usage()
    sys.exit(1)
def get_current_weather(request_type="conditions",
                        location="/q/zmw:12180.1.99999"):
    """
    Build the text (and optional icon image) for a current-weather tweet.

    request_type -- Weather Underground API feature (default "conditions")
    location     -- Weather Underground location query string

    Returns (twitter_api, icon_file, tweet_text).  icon_file is the path of
    the downloaded weather icon, or '' when the icon could not be fetched.
    """
    keys_file = sys.argv[1]
    twitter, auth, weather_key = authenticate(keys_file)
    data = get_weather_data(weather_key, request_type, location)

    obs = data["current_observation"]
    place = obs["display_location"]["full"]
    windchill = obs['windchill_string']
    temp = obs['temperature_string']
    obs_time = obs["observation_time"]
    wind = obs["wind_string"]
    icon_url = obs["icon_url"]

    # BUG FIX: only report an icon file when the download actually succeeded.
    # Previously 'temp.jpg' was returned even on a failed request, making
    # tweet() try to upload (and delete) a nonexistent file.
    icon_file = ''
    request = requests.get(icon_url, stream=True)
    if request.status_code == 200:
        icon_file = 'temp.jpg'
        with open(icon_file, 'wb') as image:
            for chunk in request:
                image.write(chunk)

    tweet_text = place + '\n' \
        + obs_time + '\n' \
        + temp + '\n' \
        + "Feels like " + windchill + '\n'\
        + wind + '\n'
    return twitter, icon_file, tweet_text
def tweet():
    """
    Post the current-weather status, attaching the icon image when available.
    """
    twitter_api, icon_file, tweet_text = get_current_weather()
    print(tweet_text)
    # BUG FIX: the original test `len(icon_file) > 0` was always true for the
    # constant 'temp.jpg' path, so a failed icon download still attempted an
    # update_with_media() on a missing file.  Verify the file really exists.
    if icon_file and os.path.isfile(icon_file):
        twitter_api.update_with_media(filename=icon_file, status=tweet_text)
        os.remove(icon_file)  # the icon is a temp file; clean it up after posting
    else:
        twitter_api.update(status=tweet_text)
def scheduled_tweet(minutes):
    """
    Wait *minutes* minutes, then post a single weather tweet.
    """
    scheduler = sched.scheduler(time.time, time.sleep)
    scheduler.enter(minutes * 60, 1, tweet, ())
    scheduler.run()
def run():
    """
    Program entry point: validate arguments, tweet once, and — when a
    repeat interval (in minutes) was supplied — keep tweeting forever.
    """
    arg_check()
    tweet()
    if len(sys.argv) <= 2:
        return
    interval = int(sys.argv[2])
    while True:
        scheduled_tweet(interval)
# ============================================================================ #
# Tweet once (and loop on an interval, if given) when run as a script.
if __name__ == "__main__":
    run()
| StarcoderdataPython |
3381809 | <filename>disas.py
#!/usr/bin/env python3
import sys
from cpu_8051 import *
from termcolor import colored
def help():
    """Print the command reference for the interactive prompt."""
    print("\t*** Intel 8051(basic) disassembler - coded by Fritz (@anarcheuz) ***\n\n")
    # Warning rendered in red via termcolor (imported at module level).
    print(colored("\tSoftware coded for 256k flash dump, append with 0xff if not the case to avoid any inconvenience! \n", 'red'))
    for usage_line in (
            '\tquit|q - quit',
            '\thelp|h - show this help\n',
            '\tx/f <addr> - disassemble until ret|reti is found',
            '\tx/i <addr> <count> - disassemble <count> instructions from <addr>',
            '\tx/x <unit> <addr> <count> - disassemble from <count> words from <addr> as <unit>\n\t -(Big endian display. Why ? Because easier to read from left to right :))\n',
            '\tx/s <addr> <count> - interpret <addr> as 0 terminated string',
            '\txref <addr> - Try to find all xrefs to <addr>',
            '\tfinds <str> - find all occurences of <str>',
            '\tfind <hexSequence> - find all occurences of the bytes <hexSequence> (eg: find 0a1032897f)'):
        print(usage_line)
def int_(s):
    """Parse *s* as a decimal or '0x'/'0X'-prefixed hexadecimal integer.

    Returns -1 when *s* cannot be parsed; callers throughout this tool use
    -1 as the parse-failure sentinel.
    """
    try:
        # BUG FIX: the original check was case-sensitive, so '0X10' fell
        # through to int('0X10') and was wrongly reported as unparseable.
        if '0x' in s.lower():
            return int(s, 16)
        return int(s)
    except ValueError:
        return -1
def prompt():
    """Interactive read-eval loop for the disassembler.

    Dispatches each line from stdin to the handlers pulled in from cpu_8051
    via the star import (disas, dump, strings, finds, find, xref).  int_()
    returns -1 on parse failure, and every branch treats -1 as the error
    sentinel.

    NOTE(review): the `int(action[N]) > 0` pre-checks raise ValueError on a
    non-numeric count before int_() can return its sentinel — confirm whether
    that crash on bad input is acceptable.
    """
    while 1:
        action = input("> ").split(' ')
        if action[0] == 'quit' or action[0] == 'q':
            break
        elif action[0] == 'help' or action[0] == 'h':
            help()
        elif action[0] == 'x/f':
            # x/f <addr>: disassemble until a ret/reti instruction is found
            if len(action) == 2:
                addr = int_(action[1])
                if addr == -1:
                    print("can't parse number")
                else :
                    disas(addr)
            else:
                print('Syntax error')
        elif action[0] == 'x/i':
            # x/i <addr> [count]: disassemble <count> instructions (default 30)
            if len(action) >= 2:
                addr = int_(action[1])
                cnt = 30
                if len(action) == 3 and int(action[2]) > 0:
                    cnt = int_(action[2])
                if addr == -1 or cnt == -1:
                    print("can't parse numbers")
                else:
                    disas(addr, cnt)
            else:
                print('Syntax error')
        elif action[0] == 'x/x':
            # x/x <unit> <addr> [count]: hex-dump <count> words of size <unit> (default 32)
            if len(action) >= 3:
                unit = int_(action[1])
                addr = int_(action[2])
                cnt = 32
                if len(action) == 4 and int(action[3]) > 0:
                    cnt = int_(action[3])
                if unit == -1 or addr == -1 or cnt == -1:
                    print("can't parse numbers")
                else:
                    dump(unit, addr, cnt)
            else:
                print('Syntax error')
        elif action[0] == 'x/s':
            # x/s <addr> [count]: interpret memory as 0-terminated string(s) (default 1)
            if len(action) >= 2:
                addr = int_(action[1])
                cnt = 1
                if len(action) == 3 and int(action[2]) > 0:
                    cnt = int_(action[2])
                if addr == -1 or cnt == -1:
                    print("can't parse numbers")
                else:
                    strings(addr, cnt)
            else:
                print('Syntax error')
        elif action[0] == 'finds':
            # finds <str>: print every offset where the string occurs (hex = decimal)
            if len(action) == 2:
                res = finds(action[1])
                for l in res:
                    print(hex(l) + ' = ' + str(l))
            else:
                print('Syntax error')
        elif action[0] == 'find':
            # find <hex>: byte-sequence search; the hex string must have even length
            if len(action) == 2 and (len(action[1])%2) == 0:
                res = find(action[1])
                for l in res:
                    print(hex(l) + ' = ' + str(l))
            else:
                print('Syntax error')
        elif action[0] == 'xref':
            # xref <addr>: list cross-references to <addr>
            if len(action) == 2:
                addr = int_(action[1])
                if addr == -1:
                    print("can't parse numbers")
                else:
                    xref(addr)
            else:
                print('Syntax error')
        elif len(action[0]) == 0:
            # blank line: just re-prompt
            continue
        else:
            print('Unknown command')
def main():
    """Load the flash dump named on the command line into the shared env,
    then start the interactive prompt.  Prints usage when misinvoked."""
    if len(sys.argv) != 2:
        print(sys.argv[0] + ' <file>')
        return
    env['file'] = sys.argv[1]
    try:
        env['data'] = open(env['file'], 'rb').read()
    except FileNotFoundError as e:
        print(e)
        return
    prompt()
# Launch the disassembler CLI when executed directly.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4835762 | from django import forms
from django.core.exceptions import ValidationError
from posts.models import Post
class PostForm(forms.ModelForm):
    """Model form for creating and editing Post objects.

    Exposes the standard editable fields and validates that any uploaded
    file is actually an image.
    """

    class Meta:
        # Use model
        model = Post
        # Show fields
        fields = ['title', 'snippet_text', 'body', 'image', 'status', 'publication_date', 'categories']

    def clean_image(self):
        """Reject uploads whose content type does not look like an image."""
        image = self.cleaned_data.get('image')
        if image is None:
            return image
        if 'image' not in image.content_type:
            raise ValidationError('The file is not an image')
        return image
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.