id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
3291702 | import numpy as np
import pandas as pd
def simple_aggregate(data,
                     drop_duplicates=True,
                     by='AFFINITY',
                     aggregation='mean',
                     weights=None,
                     half_life=None,
                     ):
    """Aggregate interaction rows into one score per (USERID, ITEMID) pair.

    Parameters
    ----------
    data : pandas.DataFrame
        Must contain 'USERID' and 'ITEMID'; optionally a `by` column with
        raw interaction values and a 'TIMESTAMP' column (required when
        `half_life` is given).
    drop_duplicates : bool
        Drop fully duplicated rows before aggregating.
    by : str
        Column holding the value to aggregate. If absent, every row counts
        as 1 and the aggregation falls back to a plain event count.
    aggregation : str or callable
        Aggregation understood by DataFrame.agg (e.g. 'mean', 'sum').
    weights : dict, optional
        Mapping applied to the `by` column via DataFrame.replace,
        e.g. {'view': 1, 'buy': 5}.
    half_life : float, optional
        Half-life in days for exponential time decay relative to the newest
        TIMESTAMP in the data.

    Returns
    -------
    pandas.DataFrame with columns USERID, ITEMID and `by`.
    """
    # Always work on a copy: the previous version wrote into the caller's
    # frame in place whenever drop_duplicates was False.
    data = data.drop_duplicates() if drop_duplicates else data.copy()
    if by in data.columns:
        if weights:
            data = data.replace({by: weights})
        if half_life:
            timestamps = pd.to_datetime(data['TIMESTAMP'])
            t_ref = timestamps.max()
            # Vectorised decay 0.5 ** (age_days / half_life): equivalent to
            # the previous per-row apply(), but runs in C instead of Python.
            decay = np.power(0.5, (t_ref - timestamps).dt.days / half_life)
            data[by] = data[by] * decay
        data = data.groupby(['USERID', 'ITEMID']).agg({by: aggregation}).reset_index()
    else:
        # No value column available: count events per pair instead.
        data[by] = 1
        data = data.groupby(['USERID', 'ITEMID']).agg({by: 'count'}).reset_index()
    return data
1620536 | import Levenshtein, json, logging, sys, traceback, time, copy, jieba
#from es_utils import ElasticObj
from correct import Corrector
from utils import is_name, clean_query
from company import is_company
def resolve_search(search):
    """Flatten an ES response into ([(candidate_query, freq), ...], total_freq)."""
    pairs = []
    total = 0
    hits = search['hits']['hits'] if search else None
    if hits:
        for hit in hits:
            source = hit['_source']
            freq = source['candidate_query_freq']
            pairs.append((source['candidate_query'], freq))
            total += freq
    return pairs, total
def customize_set(file_path):
    """Load a custom word set: one entry per line, blank lines skipped.

    Returns [] (and logs a warning) if the file cannot be read.
    """
    res = []
    try:
        # `with` guarantees the handle is closed; the previous version
        # left the file object open.
        with open(file_path, encoding='utf8') as f:
            res = [line.strip() for line in f if line.strip() != '']
    except Exception as e:
        # logging.warn() is a deprecated alias for logging.warning().
        logging.warning('customize_set_err=%s', repr(e))
    return res
def editdist(word1, word2):
    """Levenshtein similarity ratio (0..1) between the two words, spaces ignored."""
    left = str(word1).replace(" ", "")
    right = str(word2).replace(" ", "")
    return round(Levenshtein.ratio(left, right), 3)
#print(customize_set('./right'));exit()
class queryCorrect:
    """Query spelling-correction service.

    Wraps the project's Corrector and adds request handling, per-request
    timing/logging, and an exemption for person names (and, commented out,
    company names), which are never corrected.
    """
    def __init__(self):
        self.VERSION = 'query_correct_1'
        # Per-request debug info; dumped by on_correct_end().
        self.logs = {}
        #self.es_obj = ElasticObj("candidate_query")
        #self.right = customize_set("./right")
        #self.wrong = customize_set("./wrong")
        self.corrector = Corrector()
        logging.info('init queryCorrect ok, version=[%s]' % self.VERSION)
    def on_correct_begin(self):
        """Start the per-request timer."""
        logging.debug('on_correct_begin')
        self.t_begin = time.time()
    def on_correct_end(self):
        """Log the accumulated debug info plus elapsed time for this request."""
        logging.debug('on_correct_end')
        phead = '[on_correct_end] | log_info=%s | cost=%.3fs'
        logging.info(phead % (json.dumps(self.logs, ensure_ascii=False), (time.time()-self.t_begin)))
    def correct(self, text):
        """Run the underlying corrector.

        On any failure, fall back to returning the input text unchanged
        with an empty detail list.
        """
        try:
            corrected_sent, detail = self.corrector.correct(text)
            return corrected_sent, detail
        except Exception as e:
            logging.warning('correct_err=%s' % repr(e)); print(traceback.format_exc())
            return text, []
    def run(self, req_dict):
        """Service entry point.

        Takes the full request dict and returns a dict with original_query,
        corrected_query and detail; also fills self.logs with the corrector's
        intermediate state for debugging.
        """
        result = {}
        self.on_correct_begin()
        self.logs['req_dict'] = req_dict
        query = req_dict['request']['p']['query']
        result["original_query"], result["corrected_query"], result["detail"] = query, query, []
        query = clean_query(query)
        try:
            if is_name(query): #or is_company(query): # person/company names are not corrected
                result["corrected_query"], result["detail"] = query, []
            else:
                result["corrected_query"], result["detail"] = self.correct(query) # run the correction
            #print(json.dumps(result, ensure_ascii=False))
        except Exception as e:
            logging.warning('run_err=%s' % repr(e)); print(traceback.format_exc())
        # Attach the corrector's intermediate state for offline debugging.
        self.logs['result'] = result
        self.logs['senten2term'] = self.corrector.senten2term
        self.logs['query_entitys'] = self.corrector.query_entitys
        self.logs['maybe_errors'] = self.corrector.maybe_errors
        self.on_correct_end()
        #print(self.logs); exit()
        return result
    def correc(self, query):
        """Convenience variant of run(): takes a raw query string and returns
        only the corrected string (or the original when nothing was changed).

        NOTE(review): the name looks like a typo for `correct`, but it is the
        published API (called from __main__) — do not rename.
        """
        result = {}; original_query = copy.deepcopy(query)
        result["original_query"], result["corrected_query"], result["detail"] = query, query, []
        query = clean_query(query)
        try:
            if is_name(query): # or is_company(query): # person/company names are not corrected
                result["corrected_query"], result["detail"] = query, []
            else:
                result["corrected_query"], result["detail"] = self.correct(query) # run the correction
            # print(json.dumps(result, ensure_ascii=False))
        except Exception as e:
            logging.warning('run_err=%s' % repr(e)); print(traceback.format_exc())
        if result["detail"]: return result["corrected_query"]
        else: return original_query
if __name__ == '__main__':
    # The query to correct comes from argv[1]; fall back to a demo string.
    try:
        que = sys.argv[1]
    except IndexError:
        # Only a missing argument is expected here; the previous bare
        # `except:` also swallowed KeyboardInterrupt/SystemExit.
        que = "上海jvav开法工成师"
    req_dict = {"header": {}, "request": {"c": "", "m": "query_correct", "p": {"query": que}}}
    qc = queryCorrect()
    #print(qc.run(req_dict))
    print(qc.correc(que))
    #print(qc.ngrams.get("市场", 0))
| StarcoderdataPython |
1706996 | <reponame>cadia-lvl/spjall-post-processing<filename>extract.py<gh_stars>0
# Author: <NAME>
import requests
import json
import argparse
import re
import os
import shutil
class Extraction:
    """Extracts conversation transcripts from the Tiro API, validates them,
    and organises audio + metadata into per-conversation directories."""

    def __init__(self, urls, token):
        """urls: dict with 'transcripts_url' and 'samromur_url' keys.
        token: dict with an 'API_TOKEN' key used for bearer auth."""
        self.headers = {'Authorization': 'Bearer ' + token['API_TOKEN']}
        self.urls = urls
        self.transcripts = self.extract_transcripts()

    """
    General extraction and filtering
    """
    def extract_transcripts(self):
        """ Gets the transcripts from the Tiro API """
        page_size = {'pageSize': 1000}
        response = requests.get(self.urls['transcripts_url'],
                                params=page_size,
                                headers=self.headers)
        # If the user is not authenticated, or an error occurs, the payload
        # carries a 'message' key instead of a 'transcripts' list.
        if 'message' in response.json():
            print(response.json()['message'])
            # NOTE(review): on this error path `transcripts` is the raw error
            # dict, which filter_transcripts cannot iterate meaningfully —
            # confirm whether this path should abort instead.
            transcripts = response.json()
        else:
            transcripts = response.json()['transcripts']
        # filter_transcripts reads self.transcripts, so set it first, then
        # keep only the conversation transcripts.
        self.transcripts = transcripts
        transcripts = self.filter_transcripts("__spjallromur__")
        return transcripts

    def filter_transcripts(self, kw):
        """ Filters transcripts by keyword """
        return [obj for obj in self.transcripts if kw in obj['metadata']['keywords']]

    def get_progress(self):
        """ Returns the % of transcribed files """
        transcribed = self.filter_transcripts("TRANSCRIBED")
        return len(transcribed) / len(self.transcripts) * 100

    def hours_transcribed(self):
        """ Gets the total hours transcribed """
        transcribed = self.filter_transcripts("TRANSCRIBED")
        # Record unique (conversation, speaker) pairs so that multiple
        # transcriptions of the same side of a conversation aren't counted twice.
        unique_cs_pairs = []
        total_seconds = 0
        for t in transcribed:
            convo, speaker = self.get_subject_data(t)
            if (convo, speaker) not in unique_cs_pairs:
                unique_cs_pairs.append((convo, speaker))
                total_seconds += float(t['metadata']['recordingDuration'][:-1])
        total_hours = total_seconds / 60 / 60
        return total_hours

    """
    Transcript oriented processing
    """
    def get_transcript_by_id(self, transcript_id):
        """ Gets a transcript by id """
        response = requests.get(self.urls['transcripts_url'] + '/' + transcript_id, headers=self.headers)
        return response.json()

    def remove_ritari_keyword(self, transcript):
        """ Removes the 'ritari:' entries from the transcript keywords (in place). """
        keywords = transcript['metadata']['keywords']
        ritari = [kw for kw in keywords if kw.startswith('ritari:')]
        # Nothing to do if there is no ritari keyword.
        if len(ritari) == 0:
            return
        transcript['metadata']['keywords'] = [kw for kw in keywords if not kw.startswith('ritari:')]

    def get_subject_data(self, transcript):
        """ Gets subject data (convo and speaker) of a transcript """
        t_id = self.get_transcript_id(transcript)
        t_obj = self.get_transcript_by_id(t_id)
        # Subject looks like "<convo>_client_<speaker>.<ext>"; split on '_' or '.'.
        convo, _, speaker, _ = re.split(r'_|\.', t_obj['metadata']['subject'])
        return convo, speaker

    def get_transcript_id(self, transcript):
        """ Gets the id part of a transcript's '<collection>/<id>' name. """
        _, transcript_id = transcript['name'].split("/")
        return transcript_id

    def get_audio_file_from_uri(self, transcript, filepath):
        """ Downloads the audio file from the uri of a transcript """
        response = requests.get(transcript['uri'])
        with open('{}.wav'.format(filepath), 'wb') as f:
            f.write(response.content)

    def get_demographics(self, convo, speaker):
        """ Gets the JSON demographics of a speaker in a conversation """
        # BUG FIX: this previously read the module-level global `urls`
        # (defined only when run as a script); use the instance's url map.
        response = requests.get(self.urls['samromur_url'] + '/{}/{}_client_{}.json'.format(convo, convo, speaker))
        return response.json()

    def remove_reference_from_demo_data(self, demographics):
        """ Removes 'reference' from the demographics metadata (in place). """
        if 'reference' in demographics:
            del demographics['reference']

    def write_to_log(self, str, filename):
        """ Appends a message to a log text file.

        NOTE(review): the parameter name `str` shadows the builtin; kept for
        backward compatibility with keyword callers.
        """
        with open(filename, 'a+') as f:
            f.seek(0)
            contents = f.read(100)
            # If the file is not empty, start the message on a new line.
            if len(contents) > 0:
                f.write('\n')
            f.write(str)

    def clear_log(self, filename):
        """ Truncates the text file for new logging """
        open(filename, 'w').close()

    def make_conversation_directory(self):
        """ Creates a directory for each conversation, containing corresponding audio and json files. """
        print("Creating a directory for each conversation. This might take a moment...")
        convo_dir_log = 'conversations_dir_log.txt'
        validation_log = 'validation_log.txt'
        self.clear_log(convo_dir_log)
        self.clear_log(validation_log)
        keep_flag = False
        try:
            os.mkdir("conversations")
        # If the directory already exists, give the user the choice to clear
        # it or to keep existing files (kept files are never overwritten).
        except FileExistsError:
            print("Conversations directory already exists.")
            print("\tc - Clear conversations directory and start from scratch.")
            print("\tk - Keep existing conversations, and only add new ones.")
            print("\tq - Quit and cancel.")
            while True:
                option = input("Enter c to clear, or k to keep: ")
                if option == "c":
                    print("Clearing conversations directory...")
                    for dir in os.listdir("conversations"):
                        shutil.rmtree("conversations/" + dir)
                    break
                elif option == "k":
                    print("Keeping old files, and only adding new ones...")
                    keep_flag = True
                    break
                elif option == "q":
                    return
                else:
                    print("Please enter a valid option.")
        count = 0
        not_added = 0
        added = 0
        if keep_flag:
            kept = 0
        for t in self.transcripts:
            transcript_id = self.get_transcript_id(t)
            # Invalid transcripts are skipped (validation logs its own reasons).
            if not self.validate_transcript(t, validation_log):
                self.write_to_log("Transcript {} is invalid and was not added to directory.".format(transcript_id), convo_dir_log)
                not_added += 1
            # Unfinished transcripts are skipped.
            elif t in self.filter_transcripts("TODO") or t in self.filter_transcripts("INPROGRESS"):
                self.write_to_log("Transcript {} is unfinished and was not added to directory.".format(transcript_id), convo_dir_log)
                not_added += 1
            # Finished (transcribed or proofread) transcripts are filed away.
            elif t in self.filter_transcripts("TRANSCRIBED") or t in self.filter_transcripts("PROOFREAD"):
                # Fetch the full transcript by id to access the audio uri.
                transcript = self.get_transcript_by_id(transcript_id)
                convo, speaker = self.get_subject_data(transcript)
                t_demographics = self.get_demographics(convo, speaker)
                # Strip internal metadata before writing to disk.
                self.remove_ritari_keyword(transcript)
                self.remove_reference_from_demo_data(t_demographics)
                # Common path prefix for the .wav and the two json files.
                filepath = "conversations/{}/speaker_{}_convo_{}".format(convo, speaker, convo)
                try:
                    os.mkdir("conversations/{}".format(convo))
                    self.get_audio_file_from_uri(transcript, filepath)
                    write_json_to_file(t_demographics, filepath + "_demographics.json")
                    write_json_to_file(transcript, filepath + "_transcript.json")
                    added += 1
                # If a directory for this conversation exists, just add the files.
                except FileExistsError:
                    # Respect the user's choice not to overwrite kept files.
                    if keep_flag and (os.path.exists(filepath + "_demographics.json") or os.path.exists(filepath + "_transcript.json") or os.path.exists(filepath + ".wav")):
                        kept += 1
                    else:
                        self.get_audio_file_from_uri(transcript, filepath)
                        write_json_to_file(t_demographics, filepath + "_demographics.json")
                        write_json_to_file(transcript, filepath + "_transcript.json")
                        added += 1
                # If the conversation name contains a file path.
                except FileNotFoundError:
                    self.write_to_log("Could not create directory for {}. Transcript name contains a filepath.".format(convo), convo_dir_log)
                    not_added += 1
            else:
                self.write_to_log("Transcript {} has unapproved tags and was not added to directory.".format(transcript_id), convo_dir_log)
                not_added += 1
            count += 1
            print("{}/{} transcripts processed.".format(count, len(self.transcripts)))
        if keep_flag:
            print("{} existing files kept and not overwritten.".format(kept))
        if not_added > 0:
            print("{} transcripts were not added to a directory. Refer to {} and {} for further information.".format(not_added, convo_dir_log, validation_log))
        print("Completed. {} transcripts were added to their corresponding directory.".format(added))

    """
    Transcript validation
    """
    # (A batch validate_transcripts() helper used to live here as dead,
    # commented-out code; validation now happens per transcript inside
    # make_conversation_directory to save API round-trips.)
    def validate_transcript(self, transcript, log):
        """ Validates a single transcript. Returns False if invalid, True otherwise. """
        t_id = self.get_transcript_id(transcript)
        if transcript in self.filter_transcripts("INVALID"):
            # Use the caller-supplied log file (was hard-coded before).
            self.write_to_log("Transcript {} was tagged INVALID.".format(t_id), log)
            return False
        # Transcript duration validation
        if not self.validate_transcript_duration(transcript, log):
            return False
        # Demographics duration validation
        convo, speaker = self.get_subject_data(transcript)
        t_demo = self.get_demographics(convo, speaker)
        if not self.validate_transcript_demographics_duration(transcript, t_demo, log):
            return False
        # TODO: Other validation checks.
        return True

    def validate_transcript_duration(self, transcript, log):
        """ Validates the length of the transcript. Returns False if invalid, True otherwise. """
        t_id = self.get_transcript_id(transcript)
        t_obj = self.get_transcript_by_id(t_id)
        if t_obj['metadata']['recordingDuration'] is None:
            self.write_to_log("Transcript {} has recordingDuration set as null.".format(t_id), log)
            return False
        try:
            # This can only be validated for finished transcriptions.
            if transcript in self.filter_transcripts("TRANSCRIBED") or transcript in self.filter_transcripts("PROOFREAD"):
                audio_duration = float(t_obj['metadata']['recordingDuration'][:-1])
                # If the last segment's time stamp exceeds the audio duration.
                last_segment = t_obj['segments'][-1]
                if float(last_segment['endTime'][:-1]) > audio_duration:
                    # BUG FIX: the `log` argument was missing here (TypeError).
                    self.write_to_log("Transcript {} exceeds the audio duration. Last segment ends at: {}, audio duration: {}.".format(t_id, last_segment['endTime'], audio_duration), log)
                    return False
                # If the duration of the transcript exceeds that of the audio file.
                first_segment = t_obj['segments'][0]
                transcript_duration = float(last_segment['endTime'][:-1]) - float(first_segment['startTime'][:-1])
                if transcript_duration > audio_duration:
                    # BUG FIX: the `log` argument was missing here (TypeError).
                    self.write_to_log("Transcript {} exceeds the audio duration. Transcript duration: {}, audio duration: {}.".format(t_id, transcript_duration, audio_duration), log)
                    return False
            return True
        except TypeError as e:
            self.write_to_log("Transcript {} has segment timestamps with wrong type. Could not calculate transcript duration.".format(t_id, e), log)
            return False

    def validate_transcript_demographics_duration(self, transcript, t_demographics, log):
        """ Validates that the length of the transcript does not exceed the spjall demographics duration """
        # The Tiro transcript must always be shorter or equal to the demographics duration.
        t_id = self.get_transcript_id(transcript)
        t_obj = self.get_transcript_by_id(t_id)
        if t_obj['metadata']['recordingDuration'] is None:
            self.write_to_log("Transcript {} has recordingDuration set as null.".format(t_id), log)
            return False
        if float(t_obj['metadata']['recordingDuration'][:-1]) > float(t_demographics['duration_seconds']):
            self.write_to_log("Transcript {} duration exceeds the demographics duration.".format(t_id), log)
            return False
        return True
def load_json(json_file):
    """ Loads and returns data from a JSON file. """
    # `with` closes the handle even when json.load raises; the previous
    # version leaked the handle on a parse error.
    with open(json_file) as json_obj:
        return json.load(json_obj)
def write_json_to_file(json_object, filename):
    """ Serialises a JSON-compatible object to a file, pretty-printed. """
    with open(filename, 'w') as out:
        json.dump(json_object, out, indent=4)
if __name__ == '__main__':
    # CLI: extract.py <urls_file> <token_file>
    parser = argparse.ArgumentParser()
    parser.add_argument('urls_file')   # JSON file with 'transcripts_url' and 'samromur_url'
    parser.add_argument('token_file')  # JSON file with 'API_TOKEN'
    args = parser.parse_args()
    # NOTE(review): Extraction.get_demographics references a bare `urls`
    # name — verify it does not rely on this module-level global before
    # renaming this variable.
    urls = load_json(args.urls_file)
    token = load_json(args.token_file)
    extract = Extraction(urls, token)
    print("Recordings transcribed: {:.2f}%".format(extract.get_progress()))
    print("Total hours transcribed: {:.2f}".format(extract.hours_transcribed()))
    # extract.validate_transcripts()
    extract.make_conversation_directory()
| StarcoderdataPython |
1646228 | from decouple import config
import sys, os
from .analyze.analyzeSignal import calcPowers
from .analyze.pereiraChangeOfMean import pereiraLikelihood, getChangePoints, cleanLikelihoods
from .websocket import wsManager
from . import data as dataHp
import json
import numpy as np
from django.http import JsonResponse
def indicesInDoubleArray(array2, value, thres):
    """Return (outer, inner) indices of the entry in a nested list that is
    closest to `value`; only entries closer than `thres` qualify.
    Returns (-1, -1) when no entry qualifies."""
    best = (-1, -1)
    bestDist = float("inf")
    for outer, row in enumerate(array2):
        for inner, entry in enumerate(row):
            dist = abs(entry - value)
            if dist < thres and dist < bestDist:
                bestDist = dist
                best = (outer, inner)
    return best
def findEvents(power, thres, pre, post, voting, minDist, m):
    """Detect change points in a power signal using Pereira's
    change-of-mean likelihood followed by windowed voting."""
    # Per-sample likelihood of a change in the signal's mean.
    likelihoods = pereiraLikelihood(
        power,
        threshold=thres,
        preEventLength=pre,
        postEventLength=post,
        linearFactor=m,
        verbose=True,
    )
    # Collapse the likelihood curve into discrete change indices.
    return getChangePoints(power, likelihoods, windowSize=voting, minDist=minDist)
def findUniqueStates(power, changeIndices, thres, minDist):
    """Turn change indices into a sequence of states and cluster the states
    by their steady-state mean power.

    Returns a list of dicts with keys: index, endIndex, ssIndex, ssEndIndex,
    mean, stateID (states whose means differ by <= thres share a stateID).
    """
    # Mean power at or below this level is treated as line noise -> 0 W.
    LINE_NOISE = 1.0
    # Get state sequence from all state changes.
    # Handle start state (everything before the first change).
    stateSequence = [{'index': 0, 'endIndex': changeIndices[0] if len(changeIndices) > 0 else len(power)}]
    # Changes in between.
    for i, change in enumerate(changeIndices[:-1]):
        stateSequence.append({'index': change, 'endIndex': changeIndices[i+1]})
    # Handle end state (after the last change).
    if len(changeIndices) > 0: stateSequence.append({'index': changeIndices[-1], 'endIndex': len(power)-1})
    # Steady-state window after each state change: skip minDist samples of
    # transient at the start and half of minDist at the end.
    for i in range(len(stateSequence)):
        slice = power[ stateSequence[i]['index'] : stateSequence[i]['endIndex'] ]
        stateSequence[i]['ssIndex'] = int(stateSequence[i]['index']+minDist )
        stateSequence[i]['ssEndIndex'] = int(max(stateSequence[i]['endIndex']-minDist/2, stateSequence[i]['ssIndex']+1))
    # Mean value of each state's steady-state window.
    for i in range(len(stateSequence)):
        if stateSequence[i]['ssIndex'] is None or stateSequence[i]['ssEndIndex'] is None or stateSequence[i]['ssEndIndex'] - stateSequence[i]['ssIndex'] < 1:
            stateSequence[i]['mean'] = None
        else:
            stateSequence[i]['mean'] = np.mean(power[stateSequence[i]['ssIndex']:stateSequence[i]['ssEndIndex']])
            if stateSequence[i]['mean'] <= LINE_NOISE: stateSequence[i]['mean'] = 0
    # NOTE(review): if any state's mean is None, sorted() raises a TypeError
    # on Python 3 — verify ssEndIndex - ssIndex >= 1 always holds upstream.
    means = sorted([stateSequence[i]['mean'] for i in range(len(stateSequence))])
    print(means)
    cluster = 0
    clusters = [0]
    # lastMean = means[0]
    # for i in range(1, len(means)):
    #     if abs(lastMean-means[i]) > thres:
    #         lastMean = means[i]
    #         cluster += 1
    #         # lastMean = np.mean(np.array([means[i], lastMean]))
    #     clusters.append(cluster)
    # Cluster sorted means: a gap larger than thres starts a new cluster.
    for i in range(1, len(means)):
        if abs(means[i-1]-means[i]) > thres:
            cluster += 1
        clusters.append(cluster)
    # Assign each state the cluster id of its mean.
    for i in range(len(stateSequence)):
        stateSequence[i]["stateID"] = clusters[means.index(stateSequence[i]['mean'])]
    # Dead code kept for reference: merging consecutive identical states
    # ("self loops") was disabled.
    # if len(stateSequence) > 1:
    #     newStateSequence = []
    #     source = stateSequence[0]
    #     for i in range(len(stateSequence)-1):
    #         dest = stateSequence[i+1]
    #         if source["stateID"] == dest["stateID"]:
    #             source['endIndex'] = dest["endIndex"]
    #             source['ssEndIndex'] = dest["ssEndIndex"]
    #             # recalculate mean based on the length of the arrays
    #             source['mean'] = (source['mean'] * (source['endIndex'] - source['index']) + dest["mean"] * (dest['endIndex'] - dest['index']))/(dest['endIndex'] - source['index'])
    #         else:
    #             newStateSequence.append(source)
    #         if dest == stateSequence[-1]:
    #             newStateSequence.append(dest)
    #         source = dest
    #     stateSequence = newStateSequence
    return stateSequence
def autoLabel(request):
    """Django view: detect power events in the session's data and return
    auto-generated state labels as JSON.

    Expects a POST body like {"parameter": {...}} with optional keys
    sr, thres, pre, post, voting, minDist, linearCoeff.
    """
    # NOTE(review): `Http404` is neither raised nor imported here — this
    # line is a no-op expression; it should probably be `raise Http404`.
    if request.method != "POST": Http404
    response = {}
    data = json.loads(request.body)
    parameter = data["parameter"]
    sessionID = request.session.session_key
    sessionData = request.session.get('dataInfo', {})
    if sessionData["type"] == "fired":
        wsManager.sendStatus(sessionID, "Loading 50Hz power data...", percent=10)
        # NOTE(review): dataDict is only bound on this branch; other session
        # types would hit a NameError below — confirm intended.
        dataDict = dataHp.getSessionData(sessionID, sessionData)
    # Preference order for the power measure to label on.
    usablePower = ["s", "s_l1", "p", "p_l1"]
    usableKeys = list(set(usablePower) & set(dataDict["measures"]))
    if len(usableKeys) < 1:
        if "v" in dataDict["measures"] and "i" in dataDict["measures"]:
            pass  # NOTE(review): stray `pass`; likely leftover from editing.
            # Derive powers from raw voltage and current.
            p,q,s = calcPowers(dataDict["data"]["v"], dataDict["data"]["i"], dataDict["samplingrate"])
            power = s
            response["msg"] = "Calculated apparent power using Current and Voltage"
        else:
            response["msg"] = "Could not find power, or voltage and current in data. Name it as \"p\",\"s\" or \"v\",\"i\".\n"
            response["msg"] += "If you have electricity data of multiple supply legs, name it as \"<measure>_l1\", \"<measure>_l2\", ... accordingly."
            return JsonResponse(response)
    else:
        # Use the best available power measure (sorted order puts 's' last).
        power = list(dataDict["data"][sorted(usableKeys)[-1]])
    sr = dataDict["samplingrate"]
    # We only do this at a max samplingrate of 50 Hz.
    if sr > 50:
        wsManager.sendStatus(sessionID, text="Resampling to 50Hz...", percent=15)
        power, timestamps = dataHp.resampleDict(dataDict, sorted(usableKeys)[-1], 50, forceEvenRate=True)
        #power = dataHp.resample(power, sr, 50)
        # print(power)
        sr = 50
    # Optional user-requested samplingrate (or timestamp-driven resampling).
    newSr = None
    if "sr" in parameter: newSr = float(parameter["sr"])
    if newSr != None and newSr != -1 or "ts" in dataDict:
        if "ts" in dataDict and newSr is None: newSr = max(1/3.0, dataDict["samplingrate"])
        wsManager.sendStatus(sessionID, text="Resampling to "+ str(round(newSr, 2)) + "Hz...", percent=17)
        power, timestamps = dataHp.resampleDict(dataDict, sorted(usableKeys)[-1], newSr, forceEvenRate=True)
        #power = dataHp.resample(power, sr, newSr)
        sr = newSr
    # Detector parameters: user-supplied or defaults; time-valued parameters
    # are converted from seconds to samples and clamped to sane minimums.
    thres = 5.0
    if "thres" in parameter: thres = float(parameter["thres"])
    thres = max(thres, 0.1)
    pre = 1.0*sr
    if "pre" in parameter: pre = int(float(parameter["pre"])*sr)
    pre = max(pre, 2)
    post = 1.0*sr
    if "post" in parameter: post = int(float(parameter["post"])*sr)
    post = max(post, 2)
    voting = 2.0*sr
    if "voting" in parameter: voting = int(float(parameter["voting"])*sr)
    voting = max(voting, 1)
    minDist = 1.0*sr
    if "minDist" in parameter: minDist = int(float(parameter["minDist"])*sr)
    minDist = max(minDist, 1)
    m = 0.005
    if "linearCoeff" in parameter: m = float(parameter["linearCoeff"])
    print("sr: {}Hz, thres: {}W, pre: {}samples, post: {}:samples, voting: {}samples, minDist: {} samples, m:{}".format(sr, thres, pre, post, voting, minDist, m), flush=True)
    wsManager.sendStatus(sessionID, "Finding Events...", percent=20)
    changeIndices = findEvents(power, thres, pre, post, voting, minDist, m)
    wsManager.sendStatus(sessionID, "Clustering Events...", percent=70)
    stateSequence = findUniqueStates(power, changeIndices, thres, minDist)
    if len(changeIndices) == 0:
        response["msg"] = "No Changes found in signal..."
    if len(changeIndices) >= 200:
        response["msg"] = "Too many events found, you may want to change settings"
        changeIndices = []
    wsManager.sendStatus(sessionID, "Generating Labels...")
    # Convert change indices to timestamps relative to the recording start.
    ts = 0
    if "timestamp" in dataDict: ts = dataDict["timestamp"]
    # labels = [{"startTs": ts+(float(i/sr)), "label":""} for i in changeIndices]
    labels = [{"startTs": ts+(float(i["index"]/sr)), "label":"S" + str(i["stateID"])} for i in stateSequence]
    response["labels"] = labels
    return JsonResponse(response)
| StarcoderdataPython |
1780215 | # MIT License
#
# Copyright (c) 2022 TrigonDev
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from __future__ import annotations
import apgorm
from apgorm import Index, IndexType
from .models import (
aschannel,
guild,
member,
message,
override,
permrole,
posrole,
sb_message,
starboard,
user,
vote,
xprole,
)
class Database(apgorm.Database):
    """Application database: registers every model table and index with apgorm
    and caches the set of autostar channel ids."""

    def __init__(self):
        # Migration files live alongside the package source.
        super().__init__("starboard/database/migrations")
        # Channel ids that are autostar channels; populated in connect().
        self.asc: set[int] = set()

    async def connect(
        self, *, migrate: bool = False, **connect_kwargs
    ) -> None:
        """Connect to the database, optionally apply pending migrations,
        and warm the autostar-channel cache.

        Raises if there are migrations that have not been created yet.
        """
        await super().connect(**connect_kwargs)
        if self.must_create_migrations():
            raise Exception("There are uncreated migrations.")
        if migrate and await self.must_apply_migrations():
            print("Applying migrations...")
            await self.apply_migrations()
        print("Loading autostar channels...")
        self.asc = {
            a.channel_id
            for a in await aschannel.AutoStarChannel.fetch_query().fetchmany()
        }
        print("Autostar channels loaded.")

    # Table registrations: attribute name is the table name apgorm uses.
    guilds = guild.Guild
    users = user.User
    patrons = user.Patron
    members = member.Member
    starboards = starboard.Starboard
    overrides = override.Override
    permroles = permrole.PermRole
    permrole_starboards = permrole.PermRoleStarboard
    aschannels = aschannel.AutoStarChannel
    xproles = xprole.XPRole
    posroles = posrole.PosRole
    posrole_members = posrole.PosRoleMember
    messages = message.Message
    sb_messages = sb_message.SBMessage
    votes = vote.Vote

    # Secondary indexes, grouped by table.
    indexes = [
        # patrons
        Index(patrons, patrons.discord_id, IndexType.BTREE),
        # autostar channels
        Index(
            aschannels, (aschannels.guild_id, aschannels.name), IndexType.BTREE
        ),
        Index(aschannels, aschannels.channel_id, IndexType.BTREE),
        # guild
        Index(guilds, guilds.premium_end, IndexType.BTREE),
        # member
        Index(members, members.guild_id, IndexType.BTREE),
        Index(members, members.autoredeem_enabled, IndexType.BTREE),
        Index(members, members.xp, IndexType.BTREE),
        # overrides
        Index(
            overrides,
            (overrides.guild_id, overrides.name),
            IndexType.BTREE,
            unique=True,
        ),
        Index(overrides, overrides.starboard_id, IndexType.BTREE),
        # GIN index: channel_ids is queried by containment.
        Index(overrides, overrides.channel_ids, IndexType.GIN),
        # sbmessages
        Index(sb_messages, sb_messages.sb_message_id, unique=True),
        Index(sb_messages, sb_messages.last_known_point_count),
        Index(sb_messages, sb_messages.starboard_id, IndexType.BTREE),
        # permroles
        Index(permroles, permroles.guild_id, IndexType.BTREE),
        # posroles
        Index(
            posroles, (posroles.guild_id, posroles.max_members), unique=True
        ),
        # starboards
        Index(
            starboards, (starboards.guild_id, starboards.name), IndexType.BTREE
        ),
        Index(starboards, starboards.channel_id, IndexType.BTREE),
        # xproles
        Index(xproles, xproles.guild_id, IndexType.BTREE),
        # votes
        Index(votes, votes.starboard_id, IndexType.BTREE),
        Index(votes, votes.user_id, IndexType.BTREE),
        Index(votes, votes.message_id, IndexType.BTREE),
        Index(votes, votes.target_author_id, IndexType.BTREE),
        Index(votes, votes.is_downvote, IndexType.BTREE),
    ]
| StarcoderdataPython |
132771 | <filename>python/dynamic_graph/sot/torque_control/tests/test_magdwick.py
# -*- coding: utf-8 -*-
"""
Created on Tue Oct 3 14:01:08 2017
@author: adelpret
"""
from dynamic_graph.sot.torque_control.madgwickahrs import MadgwickAHRS

# Smoke test: feed a constant gravity-aligned accelerometer reading and
# near-zero gyro rates into the Madgwick AHRS entity, then print the
# estimated orientation quaternion after each step.
dt = 0.001  # sampling period in seconds
imu_filter = MadgwickAHRS('imu_filter')
imu_filter.init(dt)
imu_filter.setBeta(0.0)  # filter gain beta; 0 presumably disables accelerometer correction — confirm
for i in range(10):
    imu_filter.accelerometer.value = (0.0, 0.0, 9.8)   # gravity along +z (presumably m/s^2)
    imu_filter.gyroscope.value = (0.001, -1e-3, 1e-4)  # small angular rates
    imu_filter.imu_quat.recompute(i)
    print(imu_filter.imu_quat.value)
| StarcoderdataPython |
1691369 | <gh_stars>0
from GameElementBase import GameElementBase
from random import randrange
class MapArea(GameElementBase):
    """A single room/area on the map, built from a spreadsheet row.

    Items and nodes are spawned probabilistically from space-separated
    "name%chance" ("name%chance%initiallyActive" for nodes) cell values.
    """

    def __init__(self, position, row_data):
        self.position = position
        self.visitcount = 0
        self.inv = []
        self.nodes = []
        # BUG FIX: keymap used to be a class-level dict shared by every
        # MapArea instance, so rooms overwrote each other's node states.
        # It is now per-instance. (Any external access via MapArea.keymap
        # must go through getKeyMap() instead.)
        self.keymap = {}
        self.roomid = row_data["RoomID"]
        self.roomname = row_data["Name"]
        self.description = row_data["Description"]
        # Empty spreadsheet cells arrive as the string "nan".
        if str(row_data["Items"]) != "nan":
            items = str(row_data["Items"]).split(" ")
            for item in items:
                itemsize = item.split("%")
                # Spawn the item with the given percent chance.
                if randrange(100) < int(itemsize[1]):
                    self.inv.append(itemsize[0])
        if str(row_data["Nodes"]) != "nan":
            items = str(row_data["Nodes"]).split(" ")
            for item in items:
                itemsize = item.split("%")
                if randrange(100) < int(itemsize[1]):
                    self.nodes.append(itemsize[0])
                    # "<NODE>_ACTIVE" flag; "false" marks initially inactive.
                    self.keymap[itemsize[0].upper() + "_" + "ACTIVE"] = itemsize[2] != "false"

    def getKeyMap(self):
        """Return this room's node-activation flags ("<NODE>_ACTIVE" -> bool)."""
        return self.keymap

    def getRoomID(self):
        return self.roomid

    def getRoomName(self):
        return self.roomname

    def getDescription(self):
        # NOTE: this method was defined twice with identical bodies; the
        # duplicate has been removed.
        return self.description

    def getPositionIndex(self):
        """Return the "row_col" index string used to address this room."""
        return str(self.position[0]) + "_" + str(self.position[1])

    def getVisitCount(self):
        return self.visitcount

    def addVisit(self):
        self.visitcount += 1

    def hasObject(self, objectname):
        """True if the named item is in this room's inventory."""
        return objectname in self.inv

    def takeObject(self, objectname):
        self.inv.remove(objectname)

    def putObject(self, objectname):
        self.inv.append(objectname)

    def getItemDescription(self):
        """Render the room's item list ("a, b and c"), or 0 if there are no items."""
        from StaticController import StaticController
        if len(self.inv) == 0:
            return 0
        elif len(self.inv) == 1:
            itemstr = self.inv[0]
        else:
            # "a, b and c" — same output as the original manual loop.
            itemstr = ", ".join(self.inv[:-1]) + " and " + self.inv[-1]
        return StaticController.displayCD("room-item-description", {"itemstr": itemstr})

    def getNodeDescription(self):
        """Render a description line per node (active vs inactive), or 0 if none."""
        from StaticController import StaticController
        if len(self.nodes) == 0:
            return 0
        answerstring = ""
        print(self.nodes)
        for node in self.nodes:
            key = node.upper() + "_" + "ACTIVE"
            # Activation state is read from the global variable map.
            if key in StaticController.variableMap and StaticController.variableMap[key] == True:
                answerstring += StaticController.displayCD("room-active-node", {"nodename": node})
            else:
                answerstring += StaticController.displayCD("room-inactive-node", {"nodename": node})
        return answerstring
194093 | #!/usr/bin/env python
# Copyright (C) 2014 Open Data ("Open Data" refers to
# one or more of the following companies: Open Data Partners LLC,
# Open Data Research LLC, or Open Data Capital LLC.)
#
# This file is part of Hadrian.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
from titus.genpy import PFAEngine
from titus.errors import *
class TestLib1Map(unittest.TestCase):
def testGetLength(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: int
action:
- {map.len: [input]}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), 5)
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: int
action:
- {map.len: [input]}
''')
self.assertEqual(engine.action({}), 0)
def testGetKeys(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: array, items: string}
action:
- {map.keys: [input]}
''')
self.assertEqual(set(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5})), set(["a", "b", "c", "d", "e"]))
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: array, items: string}
action:
- {map.keys: [input]}
''')
self.assertEqual(engine.action({}), [])
def testGetValues(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: array, items: int}
action:
- {map.values: [input]}
''')
self.assertEqual(set(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5})), set([1, 2, 3, 4, 5]))
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: array, items: int}
action:
- {map.values: [input]}
''')
self.assertEqual(engine.action({}), [])
def testCheckContainsKey(self):
engine, = PFAEngine.fromYaml('''
input: string
output: boolean
action:
map.containsKey:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- input
''')
self.assertTrue(engine.action("a"))
self.assertFalse(engine.action("z"))
engine, = PFAEngine.fromYaml('''
input: string
output: boolean
action:
map.containsKey:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- params: [{x: string}]
ret: boolean
do: {"==": [x, input]}
''')
self.assertTrue(engine.action("a"))
self.assertFalse(engine.action("z"))
def testCheckContainsKey(self):
engine, = PFAEngine.fromYaml('''
input: int
output: boolean
action:
map.containsValue:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- input
''')
self.assertTrue(engine.action(1))
self.assertFalse(engine.action(9))
engine, = PFAEngine.fromYaml('''
input: int
output: boolean
action:
map.containsValue:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- params: [{x: int}]
ret: boolean
do: {"==": [x, input]}
''')
self.assertTrue(engine.action(1))
self.assertFalse(engine.action(9))
def testAddKeyValuePairs(self):
engine, = PFAEngine.fromYaml('''
input: string
output: {type: map, values: int}
action:
map.add:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- input
- 999
''')
self.assertEqual(engine.action("a"), {"a": 999, "b": 2, "c": 3, "d": 4, "e": 5})
self.assertEqual(engine.action("z"), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "z": 999})
engine, = PFAEngine.fromYaml('''
input: int
output: {type: map, values: int}
action:
map.add:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- input
''')
self.assertEqual(engine.action(1), {"BA==": 2, "Ag==": 1, "Bg==": 3, "Cg==": 5, "CA==": 4})
self.assertEqual(engine.action(999), {"BA==": 2, "Ag==": 1, "Bg==": 3, "Cg==": 5, "CA==": 4, "zg8=": 999})
def testRemoveKeys(self):
engine, = PFAEngine.fromYaml('''
input: string
output: {type: map, values: int}
action:
map.remove:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- input
''')
self.assertEqual(engine.action("a"), {"b": 2, "c": 3, "d": 4, "e": 5})
self.assertEqual(engine.action("z"), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5})
def testKeepOnlyCertainKeys(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: string}
output: {type: map, values: int}
action:
map.only:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- input
''')
self.assertEqual(engine.action(["b", "c", "e"]), {"b": 2, "c": 3, "e": 5})
self.assertEqual(engine.action(["b", "c", "e", "z"]), {"b": 2, "c": 3, "e": 5})
self.assertEqual(engine.action([]), {})
engine, = PFAEngine.fromYaml('''
input: {type: array, items: string}
output: {type: map, values: int}
action:
map.only:
- {value: {}, type: {type: map, values: int}}
- input
''')
self.assertEqual(engine.action(["b", "c", "e"]), {})
self.assertEqual(engine.action([]), {})
def testEliminateOnlyCertainKeys(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: string}
output: {type: map, values: int}
action:
map.except:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- input
''')
self.assertEqual(engine.action(["b", "c", "e"]), {"a": 1, "d": 4})
self.assertEqual(engine.action(["b", "c", "e", "z"]), {"a": 1, "d": 4})
self.assertEqual(engine.action([]), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5})
engine, = PFAEngine.fromYaml('''
input: {type: array, items: string}
output: {type: map, values: int}
action:
map.except:
- {value: {}, type: {type: map, values: int}}
- input
''')
self.assertEqual(engine.action(["b", "c", "e"]), {})
self.assertEqual(engine.action([]), {})
def testUpdateWithAnOverlay(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.update:
- {value: {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}, type: {type: map, values: int}}
- input
''')
self.assertEqual(engine.action({"b": 102, "c": 103, "z": 999}), {"a": 1, "b": 102, "c": 103, "d": 4, "e": 5, "z": 999})
def testSplit(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: array, items: {type: map, values: int}}
action:
map.split: input
''')
self.assertEqual(sorted(engine.action({"a": 1, "b": 2, "c": 3}), key=lambda x: list(x.keys())[0]),
sorted([{"a": 1}, {"b": 2}, {"c": 3}], key=lambda x: list(x.keys())[0]))
def testJoin(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: {type: map, values: int}}
output: {type: map, values: int}
action:
map.join: input
''')
self.assertEqual(sorted(engine.action([{"a": 1}, {"b": 2}, {"c": 3}])), sorted({"a": 1, "b": 2, "c": 3}))
def testNumericalArgmaxArgmin(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: string
action:
- {map.argmax: [{value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}]}
''')[0].action(None), "2")
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: string
action:
- {map.argmin: [{value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}]}
''')[0].action(None), "1")
def testObjectArgmaxArgmin(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: string
action:
- {map.argmax: [{value: {"0": "one", "1": "two", "2": "three", "3": "four", "4": "five", "5": "six", "6": "seven"}, type: {type: map, values: string}}]}
''')[0].action(None), "1")
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: string
action:
- {map.argmin: [{value: {"0": "one", "1": "two", "2": "three", "3": "four", "4": "five", "5": "six", "6": "seven"}, type: {type: map, values: string}}]}
''')[0].action(None), "4")
def testUserDefinedArgmaxArgmin(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: string
action:
- map.argmaxLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- {fcn: u.mylt}
fcns:
mylt:
params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), "1")
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: string
action:
- map.argmaxLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), "1")
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: string
action:
- map.argminLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- {fcn: u.mylt}
fcns:
mylt:
params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), "4")
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: string
action:
- map.argminLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), "4")
def testFindTop3NumericalArgmaxArgmin(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- {map.argmaxN: [{value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}, 3]}
''')[0].action(None), ["2", "6", "4"])
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- {map.argminN: [{value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}, 3]}
''')[0].action(None), ["1", "5", "3"])
def testFindTop3ObjectArgmaxArgmin(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- {map.argmaxN: [{value: {"0": "one", "1": "two", "2": "three", "3": "four", "4": "five", "5": "six", "6": "seven"}, type: {type: map, values: string}}, 3]}
''')[0].action(None), ["1", "2", "5"])
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- {map.argminN: [{value: {"0": "one", "1": "two", "2": "three", "3": "four", "4": "five", "5": "six", "6": "seven"}, type: {type: map, values: string}}, 3]}
''')[0].action(None), ["4", "3", "0"])
def testFindTop3UserDefinedArgmaxArgmin(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- map.argmaxNLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- 3
- {fcn: u.mylt}
fcns:
mylt:
params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), ["1", "5", "3"])
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- map.argmaxNLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- 3
- params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), ["1", "5", "3"])
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- map.argminNLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- 3
- {fcn: u.mylt}
fcns:
mylt:
params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), ["4", "0", "6"])
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: string}
action:
- map.argminNLT:
- {value: {"0": 5.5, "1": 2.2, "2": 7.7, "3": 4.4, "4": 6.6, "5": 2.2, "6": 7.6}, type: {type: map, values: double}}
- 3
- params: [{a: double}, {b: double}]
ret: boolean
do: {"<": [{m.abs: {"-": [a, 6.2]}}, {m.abs: {"-": [b, 6.2]}}]}
''')[0].action(None), ["4", "0", "6"])
def testToSet(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: int}
output: {type: map, values: int}
action:
- {map.toset: [input]}
''')
self.assertEqual(engine.action([1, 2, 3, 4, 5]), {"BA==": 2, "Ag==": 1, "Bg==": 3, "Cg==": 5, "CA==": 4})
def testFromSet(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: array, items: int}
action:
- {map.fromset: [input]}
''')
self.assertEqual(set(engine.action({"BA==": 2, "Ag==": 1, "Bg==": 3, "Cg==": 5, "CA==": 4})), set([1, 2, 3, 4, 5]))
engine, = PFAEngine.fromYaml('''
input: {type: map, values: string}
output: {type: array, items: string}
action:
- {map.fromset: [input]}
''')
self.assertEqual(set(engine.action({"BA==": "two", "Ag==": "one", "Bg==": "three", "Cg==": "five", "CA==": "four"})), set(["one", "two", "three", "four", "five"]))
def testIn(self):
engine, = PFAEngine.fromYaml('''
input: int
output: boolean
action:
map.in:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- input
''')
self.assertTrue(engine.action(2))
self.assertFalse(engine.action(0))
def testUnion(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: int}
action:
map.fromset:
map.union:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- {map.toset: {value: [4, 5, 6, 7, 8], type: {type: array, items: int}}}
''')
self.assertEqual(set(engine.action(None)), set([1, 2, 3, 4, 5, 6, 7, 8]))
def testIntersection(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: int}
action:
map.fromset:
map.intersection:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- {map.toset: {value: [4, 5, 6, 7, 8], type: {type: array, items: int}}}
''')
self.assertEqual(set(engine.action(None)), set([4, 5]))
def testDiff(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: int}
action:
map.fromset:
map.diff:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- {map.toset: {value: [4, 5, 6, 7, 8], type: {type: array, items: int}}}
''')
self.assertEqual(set(engine.action(None)), set([1, 2, 3]))
def testSymDiff(self):
engine, = PFAEngine.fromYaml('''
input: "null"
output: {type: array, items: int}
action:
map.fromset:
map.symdiff:
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
- {map.toset: {value: [4, 5, 6, 7, 8], type: {type: array, items: int}}}
''')
self.assertEqual(set(engine.action(None)), set([1, 2, 3, 6, 7, 8]))
def testSubset(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: int}
output: boolean
action:
map.subset:
- {map.toset: input}
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
''')
self.assertTrue(engine.action([1, 2, 3]))
self.assertFalse(engine.action([1, 2, 3, 999]))
self.assertFalse(engine.action([888, 999]))
def testDisjoint(self):
engine, = PFAEngine.fromYaml('''
input: {type: array, items: int}
output: boolean
action:
map.disjoint:
- {map.toset: input}
- {map.toset: {value: [1, 2, 3, 4, 5], type: {type: array, items: int}}}
''')
self.assertFalse(engine.action([1, 2, 3]))
self.assertFalse(engine.action([1, 2, 3, 999]))
self.assertTrue(engine.action([888, 999]))
def testMap(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: string}
output: {type: map, values: int}
action:
map.map:
- input
- params: [{x: string}]
ret: int
do: {parse.int: [x, 10]}
''')
self.assertEqual(engine.action({"a": "1", "b": "2", "c": "3", "d": "4", "e": "5"}), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5})
def testMapWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: string}
output: {type: map, values: int}
action:
map.mapWithKey:
- input
- params: [{key: string}, {value: string}]
ret: int
do:
if: {">": [key, {string: "c"}]}
then: {+: [{parse.int: [value, 10]}, 1000]}
else: {parse.int: [value, 10]}
''')
self.assertEqual(engine.action({"a": "1", "b": "2", "c": "3", "d": "4", "e": "5"}), {"a": 1, "b": 2, "c": 3, "d": 1004, "e": 1005})
def testFilter(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.filter:
- input
- params: [{x: int}]
ret: boolean
do: {"<": [x, 3]}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"a": 1, "b": 2})
def testFilterWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.filterWithKey:
- input
- params: [{key: string}, {value: int}]
ret: boolean
do: {"&&": [{"<": [value, 3]}, {"==": [key, {string: "a"}]}]}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"a": 1})
def testFilterMap(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.filterMap:
- input
- params: [{value: int}]
ret: [int, "null"]
do:
if: {"==": [{"%": [value, 2]}, 0]}
then: {"+": [value, 1000]}
else: null
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"b": 1002, "d": 1004})
def testFilterMapWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.filterMapWithKey:
- input
- params: [{key: string}, {value: int}]
ret: [int, "null"]
do:
if: {"&&": [{"==": [{"%": [value, 2]}, 0]}, {"==": [key, {string: "b"}]}]}
then: {"+": [value, 1000]}
else: null
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"b": 1002})
def testFlatMap(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.flatMap:
- input
- params: [{value: int}]
ret: {type: map, values: int}
do:
if: {">": [value, 2]}
then:
- let: {out: {value: {}, type: {type: map, values: int}}}
- set:
out:
map.add:
- out
- {s.int: value}
- value
- set:
out:
map.add:
- out
- {s.concat: [{s.int: value}, {s.int: value}]}
- value
- out
else:
{value: {}, type: {type: map, values: int}}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"3": 3, "4": 4, "5": 5, "33": 3, "44": 4, "55": 5})
def testFlatMapWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: {type: map, values: int}
action:
map.flatMapWithKey:
- input
- params: [{key: string}, {value: int}]
ret: {type: map, values: int}
do:
map.add:
- map.add:
- {value: {}, type: {type: map, values: int}}
- key
- value
- {s.concat: [key, key]}
- {+: [100, value]}
''')
self.assertEqual(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}), {"a": 1, "b": 2, "c": 3, "d": 4, "e": 5, "aa": 101, "bb": 102, "cc": 103, "dd": 104, "ee": 105})
def testZipMap(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmap:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- params: [{a: string}, {b: int}]
ret: string
do: {s.concat: [a, {s.int: b}]}
''')[0].action(None), {"0": "x101", "1": "y102", "2": "z103"})
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmap:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- {value: {"0": "a", "1": "b", "2": "c"}, type: {type: map, values: string}}
- params: [{a: string}, {b: int}, {c: string}]
ret: string
do: {s.concat: [{s.concat: [a, {s.int: b}]}, c]}
''')[0].action(None), {"0": "x101a", "1": "y102b", "2": "z103c"})
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmap:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- {value: {"0": "a", "1": "b", "2": "c"}, type: {type: map, values: string}}
- {value: {"0": true, "1": false, "2": true}, type: {type: map, values: boolean}}
- params: [{a: string}, {b: int}, {c: string}, {d: boolean}]
ret: string
do: {s.concat: [{s.concat: [{s.concat: [a, {s.int: b}]}, c]}, {if: d, then: {string: "-up"}, else: {string: "-down"}}]}
''')[0].action(None), {"0": "x101a-up", "1": "y102b-down", "2": "z103c-up"})
def testZipMapWithKey(self):
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmapWithKey:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- params: [{k: string}, {a: string}, {b: int}]
ret: string
do: {s.concat: [{s.concat: [k, a]}, {s.int: b}]}
''')[0].action(None), {"0": "0x101", "1": "1y102", "2": "2z103"})
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmapWithKey:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- {value: {"0": "a", "1": "b", "2": "c"}, type: {type: map, values: string}}
- params: [{k: string}, {a: string}, {b: int}, {c: string}]
ret: string
do: {s.concat: [{s.concat: [{s.concat: [k, a]}, {s.int: b}]}, c]}
''')[0].action(None), {"0": "0x101a", "1": "1y102b", "2": "2z103c"})
self.assertEqual(PFAEngine.fromYaml('''
input: "null"
output: {type: map, values: string}
action:
map.zipmapWithKey:
- {value: {"0": "x", "1": "y", "2": "z"}, type: {type: map, values: string}}
- {value: {"0": 101, "1": 102, "2": 103}, type: {type: map, values: int}}
- {value: {"0": "a", "1": "b", "2": "c"}, type: {type: map, values: string}}
- {value: {"0": true, "1": false, "2": true}, type: {type: map, values: boolean}}
- params: [{k: string}, {a: string}, {b: int}, {c: string}, {d: boolean}]
ret: string
do: {s.concat: [{s.concat: [{s.concat: [{s.concat: [k, a]}, {s.int: b}]}, c]}, {if: d, then: {string: "-up"}, else: {string: "-down"}}]}
''')[0].action(None), {"0": "0x101a-up", "1": "1y102b-down", "2": "2z103c-up"})
def testCorresponds(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: boolean
action:
map.corresponds:
- input
- {value: {"a": "1", "b": "2", "c": "3", "d": "4", "e": "5"}, type: {type: map, values: string}}
- params: [{x: int}, {y: string}]
ret: boolean
do: {"==": [x, {parse.int: [y, 10]}]}
''')
self.assertTrue(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}))
self.assertFalse(engine.action({"a": 111, "b": 2, "c": 3, "d": 4, "e": 5}))
def testCorrespondsWithKey(self):
engine, = PFAEngine.fromYaml('''
input: {type: map, values: int}
output: boolean
action:
map.correspondsWithKey:
- input
- {value: {"a": "1", "b": "2", "c": "3", "d": "4", "e": "5"}, type: {type: map, values: string}}
- params: [{k: string}, {x: int}, {y: string}]
ret: boolean
do:
if: {"==": [k, {string: "a"}]}
then: true
else: {"==": [x, {parse.int: [y, 10]}]}
''')
self.assertTrue(engine.action({"a": 1, "b": 2, "c": 3, "d": 4, "e": 5}))
self.assertTrue(engine.action({"a": 111, "b": 2, "c": 3, "d": 4, "e": 5}))
self.assertFalse(engine.action({"a": 1, "b": 222, "c": 3, "d": 4, "e": 5}))
| StarcoderdataPython |
28026 | """Constants for the Ridwell integration."""
import logging
DOMAIN = "ridwell"
LOGGER = logging.getLogger(__package__)
DATA_ACCOUNT = "account"
DATA_COORDINATOR = "coordinator"
SENSOR_TYPE_NEXT_PICKUP = "next_pickup"
| StarcoderdataPython |
1682122 | import os
import unittest
import cv2
import face_pose_dataset as fpdat
from face_pose_dataset.estimation import mtcnn
from face_pose_dataset.estimation.base import ddfa as ddfa
from face_pose_dataset.estimation.base import fsanet, hopenet
# DONE: Use Sandberg MTCNN
def _common_estimation(case, image, detector, estimator):
    """Shared detect → crop → estimate pipeline check.

    Runs ``detector`` on ``image``, crops faces with the MTCNN helper, feeds
    the first crop to ``estimator`` and asserts each stage produced output.
    ``case`` is the TestCase instance whose assertions are used.

    NOTE(review): the interactive matplotlib preview and debug print that used
    to live here blocked automated runs and were removed; behavior-relevant
    assertions are unchanged.
    """
    res = detector.run(image)
    case.assertIsNotNone(res)
    case.assertGreater(len(res), 0)

    det = mtcnn.extract_faces(
        image, res, estimator.img_size, margin=(0.4, 0.7, 0.4, 0.1)
    )
    case.assertIsNotNone(det)
    case.assertGreater(len(det), 0)
    case.assertEqual(det[0].shape, (*estimator.img_size, 3))

    ang = estimator.run(det[0])
    case.assertIsNotNone(ang)
class SSDFSATest(unittest.TestCase):
    """SSD face detection feeding the FSA-Net pose estimator."""

    def setUp(self):
        self.detector = fsanet.SSDDetector()
        self.sample_image = cv2.imread(
            os.path.join(fpdat.PROJECT_ROOT, "data", "test", "Mark_Zuckerberg.jpg")
        )
        self.assertIsNotNone(self.sample_image, "Test image not found.")
        self.assertEqual(self.sample_image.shape[2], 3)
        self.estimator = fsanet.FSAEstimator()

    def test_run(self):
        res = self.detector.run(self.sample_image)
        self.assertIsNotNone(res)
        self.assertEqual(res.shape[:2], (1, 1))
        self.assertEqual(res.shape[3], 7)

        det = fsanet.extract_faces(
            self.sample_image, res, self.estimator.img_size, threshold=0.2, margin=0.0
        )
        # Fixed: this used to re-assert `res`; the freshly computed crops
        # (`det`) are what this stage needs to validate.
        self.assertIsNotNone(det)
        self.assertEqual(det.shape[1:], (*self.estimator.img_size, 3))

        ang = self.estimator.run(det)
        self.assertIsNotNone(ang)
        self.assertEqual(ang.shape, (det.shape[0], 3))
class MtcnnFSATest(unittest.TestCase):
    """MTCNN face detection feeding the FSA-Net pose estimator."""

    def setUp(self):
        self.detector = mtcnn.MTCNN()
        self.sample_image = cv2.imread(
            os.path.join(fpdat.PROJECT_ROOT, "data", "test", "Mark_Zuckerberg.jpg")
        )
        self.assertIsNotNone(self.sample_image, "Test image not found.")
        self.assertEqual(self.sample_image.shape[2], 3)
        self.estimator = fsanet.FSAEstimator()

    def test_run(self):
        res = self.detector.run(self.sample_image, threshold=0.8)
        self.assertIsNotNone(res)
        self.assertGreater(len(res), 0)

        det = mtcnn.extract_faces(self.sample_image, res, self.estimator.img_size)
        # Fixed: this used to re-assert `res` instead of the crops.
        self.assertIsNotNone(det)
        self.assertGreater(len(det), 0)
        self.assertEqual(det[0].shape, (*self.estimator.img_size, 3))

        ang = self.estimator.run(det)
        self.assertIsNotNone(ang)
class HopeTest(unittest.TestCase):
    """MTCNN face detection feeding the Hopenet pose estimator."""

    def setUp(self):
        self.detector = mtcnn.MTCNN()
        self.estimator = hopenet.HopenetEstimator()
        self.sample_image = cv2.imread(
            os.path.join(fpdat.PROJECT_ROOT, "data", "test", "Mark_Zuckerberg.jpg")
        )
        self.assertIsNotNone(self.sample_image, "Test image not found.")
        self.assertEqual(self.sample_image.shape[2], 3)

    def test_run(self):
        res = self.detector.run(self.sample_image, threshold=0.8)
        self.assertIsNotNone(res)
        self.assertGreater(len(res), 0)

        det = mtcnn.extract_faces(
            self.sample_image, res, self.estimator.img_size, margin=(0.4, 0.7, 0.4, 0.1)
        )
        # Removed: an interactive plt.imshow/plt.show debug preview that
        # blocked automated test runs.
        self.assertIsNotNone(det)
        self.assertGreater(len(det), 0)
        self.assertEqual(det[0].shape, (*self.estimator.img_size, 3))

        ang = self.estimator.run(det[0])
        self.assertIsNotNone(ang)
class DDFATest(unittest.TestCase):
    """MTCNN face detection feeding the 3DDFA pose estimator (RGB input)."""

    def setUp(self):
        self.detector = mtcnn.MTCNN()
        self.estimator = ddfa.DdfaEstimator()
        self.sample_image = cv2.imread(
            os.path.join(fpdat.PROJECT_ROOT, "data", "test", "Mark_Zuckerberg.jpg")
        )
        # Fixed: assert the image loaded BEFORE converting it — cv2.cvtColor
        # raises an opaque error on None, hiding the real "file missing" cause.
        self.assertIsNotNone(self.sample_image, "Test image not found.")
        # cv2.imread returns BGR; this estimator pipeline is fed RGB.
        self.sample_image = cv2.cvtColor(self.sample_image, cv2.COLOR_BGR2RGB)
        self.assertEqual(self.sample_image.shape[2], 3)

    def test_run(self):
        res = self.detector.run(self.sample_image, threshold=0.8)
        self.assertIsNotNone(res)
        self.assertGreater(len(res), 0)

        det = mtcnn.extract_faces(
            self.sample_image,
            res,
            self.estimator.img_size,  # margin=(0.4, 0.7, 0.4, 0.1)
        )
        # Removed: an interactive plt.imshow/plt.show debug preview that
        # blocked automated test runs.
        self.assertIsNotNone(det)
        self.assertGreater(len(det), 0)
        self.assertEqual(det[0].shape, (*self.estimator.img_size, 3))

        ang = self.estimator.run(det[0])
        self.assertIsNotNone(ang)
| StarcoderdataPython |
1613124 | <reponame>RitujaPawas/ivy
# global
import numpy as np
from typing import Optional
import numpy.array_api as npa
# local
import ivy
try:
    from scipy.special import erf as _erf  # use SciPy's erf when available
except (ImportError, ModuleNotFoundError):
    _erf = None  # sentinel: SciPy missing, erf unsupported on this backend
def add(x1: np.ndarray, x2: np.ndarray) -> np.ndarray:
    """Element-wise sum computed through the array-API namespace.

    A non-ndarray second operand is first cast to ``x1``'s dtype.

    NOTE(review): unlike the other binary ops in this module, ``add`` takes
    no ``out`` parameter — confirm whether that asymmetry is intentional.
    """
    if not isinstance(x2, np.ndarray):
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.asarray(npa.add(npa.asarray(x1), npa.asarray(x2)))
def pow(x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Element-wise ``x1 ** x2`` with NumPy type promotion.

    When both operands carry a dtype they are cast to their common promoted
    type; a dtype-less second operand inherits ``x1``'s dtype. The result is
    written into ``out`` when given.
    """
    if hasattr(x1, "dtype") and hasattr(x2, "dtype"):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1 = np.asarray(x1).astype(common)
        x2 = np.asarray(x2).astype(common)
    elif not hasattr(x2, "dtype"):
        x2 = np.array(x2, dtype=x1.dtype)
    return np.power(x1, x2, out=out)
def bitwise_xor(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Element-wise XOR; operands are promoted to a common dtype first."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1 = x1.astype(common)
        x2 = x2.astype(common)
    else:
        # Scalar (or other non-ndarray) operand inherits x1's dtype.
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.bitwise_xor(x1, x2, out=out)
def exp(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Element-wise ``e**x`` (thin wrapper over ``np.exp``)."""
    return np.exp(x, out=out)


def expm1(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Element-wise ``e**x - 1``, accurate for small x (``np.expm1``)."""
    return np.expm1(x, out=out)


def bitwise_invert(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Element-wise bitwise NOT (``np.invert``)."""
    return np.invert(x, out=out)
def bitwise_and(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Element-wise AND; operands are promoted to a common dtype first."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1 = x1.astype(common)
        x2 = x2.astype(common)
    else:
        # Scalar (or other non-ndarray) operand inherits x1's dtype.
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.bitwise_and(x1, x2, out=out)
def equal(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Element-wise ``x1 == x2`` (``np.equal``)."""
    return np.equal(x1, x2, out=out)


def greater(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Element-wise ``x1 > x2`` (``np.greater``)."""
    return np.greater(x1, x2, out=out)


def greater_equal(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Element-wise ``x1 >= x2`` (``np.greater_equal``)."""
    return np.greater_equal(x1, x2, out=out)


def less_equal(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Element-wise ``x1 <= x2`` (``np.less_equal``)."""
    return np.less_equal(x1, x2, out=out)
def multiply(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Element-wise product with NumPy type promotion.

    When both operands carry a dtype they are cast to their common promoted
    type; a dtype-less second operand inherits ``x1``'s dtype.
    """
    if hasattr(x1, "dtype") and hasattr(x2, "dtype"):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1 = np.asarray(x1).astype(common)
        x2 = np.asarray(x2).astype(common)
    elif not hasattr(x2, "dtype"):
        x2 = np.array(x2, dtype=x1.dtype)
    return np.multiply(x1, x2, out=out)
def ceil(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Element-wise ceiling; integer arrays come back as an unchanged copy."""
    if "int" not in str(x.dtype):
        return np.ceil(x, out=out)
    # Already integral: ceiling is the identity, so just copy.
    ret = np.copy(x)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def floor(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Element-wise floor; integer arrays come back as an unchanged copy."""
    if "int" not in str(x.dtype):
        return np.floor(x, out=out)
    # Already integral: floor is the identity, so just copy.
    ret = np.copy(x)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def sign(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.sign(x, out=out)
def sqrt(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.sqrt(x, out=out)
def isfinite(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.isfinite(x, out=out)
def asin(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.arcsin(x, out=out)
def isinf(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
return np.isinf(x, out=out)
def asinh(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise inverse hyperbolic sine; writes into ``out`` when provided."""
    result = np.arcsinh(x, out=out)
    return result
def cosh(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise hyperbolic cosine; writes into ``out`` when provided."""
    result = np.cosh(x, out=out)
    return result
def log10(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise base-10 logarithm; writes into ``out`` when provided."""
    result = np.log10(x, out=out)
    return result
def log(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise natural logarithm; writes into ``out`` when provided."""
    result = np.log(x, out=out)
    return result
def log2(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise base-2 logarithm; writes into ``out`` when provided."""
    result = np.log2(x, out=out)
    return result
def log1p(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise ``log(1 + x)``; writes into ``out`` when provided."""
    result = np.log1p(x, out=out)
    return result
def isnan(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise NaN test; writes into ``out`` when provided."""
    result = np.isnan(x, out=out)
    return result
def less(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise ``x1 < x2``; writes into ``out`` when provided."""
    comparison = np.less(x1, x2, out=out)
    return comparison
def cos(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise cosine; writes into ``out`` when provided."""
    result = np.cos(x, out=out)
    return result
def logical_not(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise logical negation; writes into ``out`` when provided."""
    result = np.logical_not(x, out=out)
    return result
def divide(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise true division after matching operand dtypes; a non-array
    ``x2`` is coerced to ``x1``'s dtype."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1, x2 = x1.astype(common), x2.astype(common)
    else:
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.divide(x1, x2, out=out)
def acos(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise inverse cosine; writes into ``out`` when provided."""
    result = np.arccos(x, out=out)
    return result
def logical_xor(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise logical XOR; writes into ``out`` when provided."""
    result = np.logical_xor(x1, x2, out=out)
    return result
def logical_or(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise logical OR; writes into ``out`` when provided."""
    result = np.logical_or(x1, x2, out=out)
    return result
def logical_and(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise logical AND; writes into ``out`` when provided."""
    result = np.logical_and(x1, x2, out=out)
    return result
def acosh(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise inverse hyperbolic cosine; writes into ``out`` when provided."""
    result = np.arccosh(x, out=out)
    return result
def sin(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise sine; writes into ``out`` when provided."""
    result = np.sin(x, out=out)
    return result
def negative(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise numerical negation; writes into ``out`` when provided."""
    result = np.negative(x, out=out)
    return result
def not_equal(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise ``x1 != x2``; writes into ``out`` when provided."""
    comparison = np.not_equal(x1, x2, out=out)
    return comparison
def tanh(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise hyperbolic tangent; writes into ``out`` when provided."""
    result = np.tanh(x, out=out)
    return result
def floor_divide(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise floor division after matching operand dtypes; a non-array
    ``x2`` is coerced to ``x1``'s dtype."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1, x2 = x1.astype(common), x2.astype(common)
    else:
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.floor_divide(x1, x2, out=out)
def sinh(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise hyperbolic sine; writes into ``out`` when provided."""
    result = np.sinh(x, out=out)
    return result
def positive(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise unary plus (returns a copy); writes into ``out`` when provided."""
    result = np.positive(x, out=out)
    return result
def square(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise square; writes into ``out`` when provided."""
    result = np.square(x, out=out)
    return result
def remainder(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise modulo after matching operand dtypes; a non-array ``x2``
    is coerced to ``x1``'s dtype."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1, x2 = x1.astype(common), x2.astype(common)
    else:
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.remainder(x1, x2, out=out)
def round(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise rounding; integer arrays are returned as a copy unchanged."""
    if "int" not in str(x.dtype):
        return np.round(x, out=out)
    ret = np.copy(x)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def bitwise_or(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise bitwise OR after matching operand dtypes; a non-array
    ``x2`` is coerced to ``x1``'s dtype."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1, x2 = x1.astype(common), x2.astype(common)
    else:
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.bitwise_or(x1, x2, out=out)
def trunc(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise truncation toward zero; integer arrays are returned as a
    copy unchanged."""
    if "int" not in str(x.dtype):
        return np.trunc(x, out=out)
    ret = np.copy(x)
    if ivy.exists(out):
        return ivy.inplace_update(out, ret)
    return ret
def abs(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise absolute value; writes into ``out`` when provided."""
    result = np.absolute(x, out=out)
    return result
def subtract(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise difference, promoting to a common dtype when both operands
    carry one; a dtype-less ``x2`` is coerced to ``x1``'s dtype."""
    both_typed = hasattr(x1, "dtype") and hasattr(x2, "dtype")
    if both_typed:
        common = np.promote_types(x1.dtype, x2.dtype)
        x1 = x1.astype(common)
        x2 = x2.astype(common)
    elif not hasattr(x2, "dtype"):
        x2 = np.array(x2, dtype=x1.dtype)
    return np.subtract(x1, x2, out=out)
def logaddexp(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise ``log(exp(x1) + exp(x2))`` after matching operand dtypes;
    a non-array ``x2`` is coerced to ``x1``'s dtype."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1, x2 = x1.astype(common), x2.astype(common)
    else:
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.logaddexp(x1, x2, out=out)
def bitwise_right_shift(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise right shift of ``x1`` by ``x2`` bits after matching
    operand dtypes; a non-array ``x2`` is coerced to ``x1``'s dtype."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1, x2 = x1.astype(common), x2.astype(common)
    else:
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.right_shift(x1, x2, out=out)
def bitwise_left_shift(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise left shift of ``x1`` by ``x2`` bits after matching
    operand dtypes; a non-array ``x2`` is coerced to ``x1``'s dtype."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1, x2 = x1.astype(common), x2.astype(common)
    else:
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.left_shift(x1, x2, out=out)
def tan(x: np.ndarray, *, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise tangent; ``out`` is keyword-only."""
    result = np.tan(x, out=out)
    return result
def atan(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise inverse tangent; writes into ``out`` when provided."""
    result = np.arctan(x, out=out)
    return result
def atanh(x: np.ndarray, out: Optional[np.ndarray] = None) -> np.ndarray:
    """Elementwise inverse hyperbolic tangent; writes into ``out`` when provided."""
    result = np.arctanh(x, out=out)
    return result
def atan2(
    x1: np.ndarray, x2: np.ndarray, out: Optional[np.ndarray] = None
) -> np.ndarray:
    """Elementwise quadrant-aware ``arctan(x1 / x2)`` after matching operand
    dtypes; a non-array ``x2`` is coerced to ``x1``'s dtype."""
    if isinstance(x2, np.ndarray):
        common = np.promote_types(x1.dtype, x2.dtype)
        x1, x2 = x1.astype(common), x2.astype(common)
    else:
        x2 = np.asarray(x2, dtype=x1.dtype)
    return np.arctan2(x1, x2, out=out)
# Extra #
# ------#
def minimum(x1, x2, out: Optional[np.ndarray] = None):
    """Elementwise minimum of the two operands; writes into ``out`` when provided."""
    result = np.minimum(x1, x2, out=out)
    return result
def maximum(x1, x2, out: Optional[np.ndarray] = None):
    """Elementwise maximum of the two operands; writes into ``out`` when provided."""
    result = np.maximum(x1, x2, out=out)
    return result
def erf(x, out: Optional[np.ndarray] = None):
    """Elementwise error function via scipy's implementation; raises when
    scipy was not importable (module-level ``_erf`` is None)."""
    if _erf is not None:
        return _erf(x, out=out)
    raise Exception(
        "scipy must be installed in order to call ivy.erf with a numpy backend."
    )
| StarcoderdataPython |
from gym.envs.registration import register

# Register the two Carla driving environments with Gym so they can be
# instantiated via gym.make('CarlaGymEnv-v1') / gym.make('CarlaGymEnv-v2').
# (A stray dataset-id artifact fused onto the import line was removed.)
register(
    id='CarlaGymEnv-v1',
    entry_point='carla_gym.envs:CarlaGymEnv_v1')
register(
    id='CarlaGymEnv-v2',
    entry_point='carla_gym.envs:CarlaGymEnv_v2')
| StarcoderdataPython |
1628823 | # 2.2 Return Kth to Last:
# Implement an algorithm to find the kth to last element of a singly linked list
| StarcoderdataPython |
95827 | # Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import paddle.fluid as fluid
from simple_nets import init_data
def case1_fill_grad_vars():
    """Build a network whose second split output is unused, so its gradient
    variable must be filled during backward. Returns the mean loss."""
    image = fluid.layers.data(name='image', shape=[784], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    feature = fluid.layers.fc(input=image, size=20, act=None)
    used_half, unused_half = fluid.layers.split(
        feature, num_or_sections=[10, 10], dim=1)
    # `unused_half` is deliberately left dangling.
    loss = fluid.layers.cross_entropy(input=used_half, label=label)
    return fluid.layers.mean(loss)
def case2_prune_no_grad_branch():
    """Build a network with a cast round-trip on the label; the intermediate
    float label is not persistable, exercising branch pruning. Returns the
    mean loss."""
    image = fluid.layers.data(name='image', shape=[784], dtype='float32')
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    feature = fluid.layers.fc(input=image, size=10, act=None)
    # Round-trip cast: int64 -> float32 -> int64.
    label = fluid.layers.cast(
        fluid.layers.cast(label, dtype="float32"), dtype='int64')
    loss = fluid.layers.cross_entropy(input=feature, label=label)
    return fluid.layers.mean(loss)
def case3_prune_no_grad_branch2():
    """Label-only network (cast round-trip then one-hot) with no trainable
    path, exercising pruning of gradient-free branches."""
    label = fluid.layers.data(name='label', shape=[1], dtype='int64')
    label = fluid.layers.cast(
        fluid.layers.cast(label, dtype="float32"), dtype='int64')
    encoded = fluid.layers.one_hot(input=label, depth=100)
    return fluid.layers.mean(encoded)
def case4_with_no_grad_op_maker():
    """Network built from an op (gaussian_random) that has no grad op maker."""
    random_out = fluid.layers.gaussian_random(shape=[20, 30])
    return fluid.layers.mean(random_out)
class TestBackward(unittest.TestCase):
    """Runs one SGD step over each case network to exercise backward-pass
    variable filling and branch pruning on CPU."""

    def check_backward(self, model, feed_dict):
        """Build `model` in a fresh program pair and execute one train step."""
        cpu = fluid.CPUPlace()
        executor = fluid.Executor(cpu)
        main_prog = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            loss = model()
            fluid.optimizer.SGD(learning_rate=0.1).minimize(loss)
            executor.run(fluid.default_startup_program())
            executor.run(feed=feed_dict)

    def test_backward(self):
        img, label = init_data(2, img_shape=[784], label_range=9)
        feed = {'image': img, 'label': label}
        self.check_backward(case1_fill_grad_vars, feed)
        self.check_backward(case2_prune_no_grad_branch, feed)
        self.check_backward(case3_prune_no_grad_branch2, {'label': label})
        self.check_backward(case4_with_no_grad_op_maker, {})
if __name__ == '__main__':
    # Run the backward-pruning tests when executed as a script.
    unittest.main()
| StarcoderdataPython |
3203197 | <gh_stars>0
import wx
from ...lib import ButtonBase
class Button(ButtonBase):
    """Flat-styled button whose fill colour tracks hover/press state."""

    def Draw(self, dc):
        """Paint label, state-dependent background and a 3px border onto dc."""
        cfg = self.config
        dc.SetFont(cfg.get_font('small'))
        dc.SetTextForeground(cfg.get_color('text'))
        # Pressed takes precedence over hover, which takes precedence over idle.
        fill = cfg.get_color('button')
        if self._mouseIn:
            fill = cfg.get_color('highlight')
        if self._mouseDown:
            fill = cfg.get_color('select')
        dc.SetBackground(wx.Brush(fill))
        dc.Clear()
        w, h = self.GetClientSize()
        label_w, label_h = dc.GetTextExtent(self.label)
        # Center the label within the client area.
        dc.DrawText(self.label, (w - label_w)/2, (h - label_h)/2)
        dc.SetPen(wx.Pen(cfg.get_color('border'), width=3))
        dc.SetBrush(wx.TRANSPARENT_BRUSH)
        dc.DrawRectangle(0, 0, w, h)
class ControlPanel(wx.Panel):
    """Bottom control strip holding the 'Calculate' button for the matrix
    calculator view.

    Holds references to the two input matrices and the output matrix so a
    parent handler can act on the button event. (A dataset-separator artifact
    fused onto the final line of this class was removed; the code is otherwise
    unchanged.)
    """

    def __init__(self, parent, appConf, widgetConf, matrix_a, matrix_b, matrix_c):
        super().__init__(parent)
        self.parent = parent
        self.appConf = appConf
        self.widgetConf = widgetConf
        self.in_matrices = [matrix_a, matrix_b]
        self.out_matrix = matrix_c
        self.Display()
        self.Bind(wx.EVT_PAINT, self._OnPaint)
        self.Bind(wx.EVT_ERASE_BACKGROUND, self._OnEraseBackground)
        self.Bind(wx.EVT_BUTTON, self._OnButton)

    def Display(self):
        """Lay out the single Calculate button across the panel width."""
        self.sizer = wx.BoxSizer(wx.HORIZONTAL)
        butt1 = Button(self, self.appConf, 'Calculate', size=(100, 40))
        self.sizer.Add(butt1, 1, flag=wx.EXPAND|wx.BOTTOM|wx.LEFT|wx.RIGHT, border=5)
        self.SetSizer(self.sizer)
        self.Layout()

    def Draw(self, dc):
        """Fill the panel with the configured widget background colour."""
        dc.SetBackground(wx.Brush(self.appConf.get_color('color1', 'widget')))
        dc.Clear()

    def _OnButton(self, e):
        # Propagate so parent handlers also see the button event.
        e.Skip()

    def _OnEraseBackground(self, e):
        # Suppress the default erase to avoid flicker; painting is buffered.
        pass

    def _OnPaint(self, e):
        dc = wx.BufferedPaintDC(self)
        self.Draw(dc)
3394129 | <gh_stars>0
from __future__ import absolute_import
import json
import six
import tempfile
from datetime import timedelta
from django.core import mail
from django.core.urlresolvers import reverse
from django.utils import timezone
from sentry.data_export.base import ExportQueryType, ExportStatus, DEFAULT_EXPIRATION
from sentry.data_export.models import ExportedData
from sentry.models import File
from sentry.testutils import TestCase
from sentry.utils.http import absolute_uri
from sentry.utils.compat.mock import patch
class ExportedDataTest(TestCase):
    """Tests for the ExportedData model: status transitions, file lifecycle,
    and success/failure notification emails.

    NOTE(review): this module targets the Python 2 era (six,
    ``from __future__ import absolute_import``); some idioms below are not
    Python 3 safe -- see inline notes.
    """
    # Payload written into export files by the upload tests.
    TEST_STRING = "A bunch of test data..."
    def setUp(self):
        super(ExportedDataTest, self).setUp()
        self.user = self.create_user()
        self.organization = self.create_organization()
        self.data_export = ExportedData.objects.create(
            user=self.user, organization=self.organization, query_type=0, query_info={"env": "test"}
        )
        # Two File rows so finalize_upload can be exercised twice (the second
        # upload should delete the first file).
        self.file1 = File.objects.create(
            name="tempfile-data-export", type="export.csv", headers={"Content-Type": "text/csv"}
        )
        self.file2 = File.objects.create(
            name="tempfile-data-export", type="export.csv", headers={"Content-Type": "text/csv"}
        )
    def test_status_property(self):
        # Fresh export with no finish/expiry dates reports Early.
        assert self.data_export.status == ExportStatus.Early
        self.data_export.update(
            date_expired=timezone.now() + timedelta(weeks=2),
            date_finished=timezone.now() - timedelta(weeks=2),
        )
        # Finished and not yet expired -> Valid.
        assert self.data_export.status == ExportStatus.Valid
        self.data_export.update(date_expired=timezone.now() - timedelta(weeks=1))
        # Expiry date in the past -> Expired.
        assert self.data_export.status == ExportStatus.Expired
    def test_payload_property(self):
        # Payload is query_info plus an injected "export_type" key.
        assert isinstance(self.data_export.payload, dict)
        # NOTE(review): ``dict.keys() + list`` only works on Python 2; on
        # Python 3 this raises TypeError -- use list(...) + [...] instead.
        keys = self.data_export.query_info.keys() + ["export_type"]
        assert sorted(self.data_export.payload.keys()) == sorted(keys)
    def test_file_name_property(self):
        # File name is "<query-type>...<id>.csv".
        assert isinstance(self.data_export.file_name, six.string_types)
        file_name = self.data_export.file_name
        assert file_name.startswith(ExportQueryType.as_str(self.data_export.query_type))
        assert file_name.endswith(six.text_type(self.data_export.id) + ".csv")
    def test_format_date(self):
        # None dates format to None; real dates format to a string.
        assert ExportedData.format_date(self.data_export.date_finished) is None
        assert isinstance(ExportedData.format_date(self.data_export.date_added), six.binary_type)
    def test_delete_file(self):
        # Empty call should have no effect
        assert self.data_export.file is None
        self.data_export.delete_file()
        assert self.data_export.file is None
        # Real call should delete the file
        assert File.objects.filter(id=self.file1.id).exists()
        self.data_export.update(file=self.file1)
        assert isinstance(self.data_export.file, File)
        self.data_export.delete_file()
        assert not File.objects.filter(id=self.file1.id).exists()
        # The ExportedData should be unaffected
        assert ExportedData.objects.filter(id=self.data_export.id).exists()
        assert ExportedData.objects.get(id=self.data_export.id).file is None
    def test_delete(self):
        # Deleting the export row cascades to its attached file.
        self.data_export.finalize_upload(file=self.file1)
        assert ExportedData.objects.filter(id=self.data_export.id).exists()
        assert File.objects.filter(id=self.file1.id).exists()
        self.data_export.delete()
        assert not ExportedData.objects.filter(id=self.data_export.id).exists()
        assert not File.objects.filter(id=self.file1.id).exists()
    def test_finalize_upload(self):
        # With default expiration
        with tempfile.TemporaryFile() as tf:
            # NOTE(review): TemporaryFile defaults to binary mode; writing a
            # str here is Python 2 only (Python 3 needs bytes).
            tf.write(self.TEST_STRING)
            tf.seek(0)
            self.file1.putfile(tf)
            self.data_export.finalize_upload(file=self.file1)
            assert self.data_export.file.getfile().read() == self.TEST_STRING
        assert self.data_export.date_finished is not None
        assert self.data_export.date_expired is not None
        assert self.data_export.date_expired == self.data_export.date_finished + DEFAULT_EXPIRATION
        # With custom expiration
        with tempfile.TemporaryFile() as tf:
            tf.write(self.TEST_STRING + self.TEST_STRING)
            tf.seek(0)
            self.file2.putfile(tf)
            self.data_export.finalize_upload(file=self.file2, expiration=timedelta(weeks=2))
            assert self.data_export.file.getfile().read() == self.TEST_STRING + self.TEST_STRING
        # Ensure the first file is deleted
        assert not File.objects.filter(id=self.file1.id).exists()
        assert self.data_export.date_expired == self.data_export.date_finished + timedelta(weeks=2)
    def test_email_success(self):
        # Shouldn't send if ExportedData is incomplete
        with self.tasks():
            self.data_export.email_success()
        assert len(mail.outbox) == 0
        # Should send one email if complete
        self.data_export.finalize_upload(file=self.file1)
        with self.tasks():
            self.data_export.email_success()
        assert len(mail.outbox) == 1
    @patch("sentry.utils.email.MessageBuilder")
    def test_email_success_content(self, builder):
        # The success email must link to the export-details page and include
        # the formatted expiry date.
        self.data_export.finalize_upload(file=self.file1)
        with self.tasks():
            self.data_export.email_success()
        expected_url = absolute_uri(
            reverse(
                "sentry-data-export-details", args=[self.organization.slug, self.data_export.id]
            )
        )
        expected_email_args = {
            "subject": "Your data is ready.",
            "context": {
                "url": expected_url,
                "expiration": ExportedData.format_date(date=self.data_export.date_expired),
            },
            "type": "organization.export-data",
            "template": "sentry/emails/data-export-success.txt",
            "html_template": "sentry/emails/data-export-success.html",
        }
        builder.assert_called_with(**expected_email_args)
    def test_email_failure(self):
        # A failure email is sent and the export row is removed.
        with self.tasks():
            self.data_export.email_failure(self.TEST_STRING)
        assert len(mail.outbox) == 1
        assert not ExportedData.objects.filter(id=self.data_export.id).exists()
    @patch("sentry.utils.email.MessageBuilder")
    def test_email_failure_content(self, builder):
        # The failure email must carry the creation date, the error message
        # and a pretty-printed copy of the original query payload.
        with self.tasks():
            self.data_export.email_failure(self.TEST_STRING)
        expected_email_args = {
            "subject": "We couldn't export your data.",
            "context": {
                "creation": ExportedData.format_date(date=self.data_export.date_added),
                "error_message": self.TEST_STRING,
                "payload": json.dumps(self.data_export.payload, indent=2, sort_keys=True),
            },
            "type": "organization.export-data",
            "template": "sentry/emails/data-export-failure.txt",
            "html_template": "sentry/emails/data-export-failure.html",
        }
        builder.assert_called_with(**expected_email_args)
| StarcoderdataPython |
195362 | # -*- coding: utf-8 -*-
"""
Created on Wed Dec 18 13:50:29 2019
@author: Joshua
"""
# Load libraries
import pandas as pd
pd.__version__
import os
import Python.Data_Preprocessing.config.config as cfg
import Python.Data_Preprocessing.Stage_4.demographics.session_level_wordcount as slw
from tqdm import tqdm
def z_score_per_dev_fold(df, col_name):
    """Z-score `col_name` against the precomputed train mean/std from config.

    :param df: DataFrame with a `video_id` column and the metric column
    :param col_name: metric column name; looks up 'd_<col_name>_mu' and
        'd_<col_name>_sd' in cfg.parameters_cfg
    :return: DataFrame with 'Video_ID' (video_id prefix before '_') and
        '<col_name>_z'

    Replaces the original ``DataFrame.append`` onto an empty frame -- that
    API was deprecated and removed in pandas 2.0 -- with a direct build of
    the result frame (the appended frame was the whole result anyway).
    """
    train_mean = cfg.parameters_cfg['d_' + col_name + '_mu']
    train_sd = cfg.parameters_cfg['d_' + col_name + '_sd']
    z_scores = (df[col_name] - train_mean) / train_sd
    result = pd.DataFrame()
    result['Video_ID'] = df.video_id.apply(lambda v: v.split('_')[0])
    result[col_name + '_z'] = z_scores
    return result
def get_z_bucket(z_series):
    """Map each z-score to a bucket label.

    Strictly beyond +/-1 -> 'high'/'low'; strictly beyond +/-2 ->
    'very high'/'very low'; everything else (including NaN) stays None.
    Returns a DataFrame with columns 'z' and 'z_bucket'.
    """
    frame = pd.DataFrame(z_series)
    frame.columns = ['z']

    def bucket(z):
        # Most extreme thresholds first so they win over the +/-1 bands.
        if z > 2:
            return 'very high'
        if z > 1:
            return 'high'
        if z < -2:
            return 'very low'
        if z < -1:
            return 'low'
        return None

    frame['z_bucket'] = frame['z'].apply(bucket)
    return frame
def run_dataframe(video_name_1, video_name_2, parallel_run_settings):
    '''
    Compute the session-level word counts for the video pair and load the
    resulting CSV back as a DataFrame.
    :return: dataframe of metrics
    '''
    # Generates Stage_4/Demographics/session_level_wordcount.csv on disk.
    slw.run_computing_wordcount(video_name_1, video_name_2, parallel_run_settings)
    csv_path = os.path.join(
        parallel_run_settings['csv_path'],
        video_name_1 + '_' + video_name_2,
        'Stage_4',
        'Demographics',
        'session_level_wordcount.csv',
    )
    return pd.read_csv(csv_path)
def get_blob(variable, fxn, speaker, data):
    '''
    Build a per-video text "blob" describing how a speaker's word metric
    compares to the training distribution.

    :param variable: metric base name being analyzed (e.g. 'word')
    :param fxn: aggregation suffix: 'count', 'uniquecount' or 'countprop'
    :param speaker: speaker label used to filter `data`
    :param data: session-level wordcount DataFrame with a 'speaker' column
    :return: DataFrame with Video_ID, z-score, bucket and a 'blob' column
        (None where the metric is within one standard deviation)

    Fixes the original's chained indexed assignment
    (``df_copy.blob.iloc[i] = ...``), which pandas does not guarantee to
    write through, and guards against NaN buckets which would have crashed
    the string concatenation.
    '''
    col = variable + '_' + fxn
    var = z_score_per_dev_fold(df=data.loc[(data['speaker'] == speaker)],
                               col_name=col)
    var = pd.concat([var, get_z_bucket(var[col + '_z'])], axis=1)
    # Human-readable prefix for the metric being described.
    if fxn == 'count':
        text = 'number of '
    elif fxn == 'uniquecount':
        text = 'unique number of '
    elif fxn == 'countprop':
        text = 'proportion of number of '
    else:
        text = ''
    variable_text = 'words'
    print('Generating ', text+' '+variable_text+' by '+speaker+' ')
    df_copy = var.copy()
    df_copy['blob'] = None
    blob_col = df_copy.columns.get_loc('blob')
    for i in tqdm(range(len(var))):
        bucket = var.iloc[i]['z_bucket']
        # Skip rows without a bucket (z within +/-1) and NaN buckets.
        if bucket is None or pd.isna(bucket):
            continue
        # Positional single-cell write avoids chained assignment.
        df_copy.iloc[i, blob_col] = (text + ' ' + variable_text + ' by '
                                     + speaker + ' ' + bucket + ' ')
    print(df_copy)
    return df_copy
def get_all_blob(video_name_1, video_name_2, parallel_run_settings):
    """Aggregate talkativeness blobs for both configured speakers over all
    wordcount metrics into one sentence per Video_ID.

    Fixes the original's chained indexed assignment
    (``talkativeness_blob.blob.iloc[i] = ...``) by writing through a single
    positional indexer.
    """
    data = run_dataframe(video_name_1, video_name_2, parallel_run_settings)
    talkativeness_blob = pd.DataFrame()
    speakers = [cfg.parameters_cfg['speaker_1'], cfg.parameters_cfg['speaker_2']]
    for speaker in speakers:
        for fxn in ['count', 'countprop', 'uniquecount']:
            for variable_text in ['word']:
                blob = get_blob(variable_text, fxn, speaker, data)
                talkativeness_blob = pd.concat([talkativeness_blob,
                                                blob], axis=0)
    talkativeness_blob.fillna(value='', inplace=True)
    # Join all per-metric fragments into a single blob per video.
    talkativeness_blob = talkativeness_blob.groupby(['Video_ID'])[
        'blob'].apply(lambda x: ''.join(x)).reset_index()
    blob_col = talkativeness_blob.columns.get_loc('blob')
    for i in tqdm(range(len(talkativeness_blob))):
        fragment = talkativeness_blob.iloc[i]['blob']
        if fragment:
            # Swap the trailing space for a sentence terminator.
            talkativeness_blob.iloc[i, blob_col] = fragment[:-1] + '. '
    talkativeness_blob = talkativeness_blob.sort_values(by=['Video_ID'])
    return talkativeness_blob
if __name__ == '__main__':
    # NOTE(review): get_all_blob requires a third `parallel_run_settings`
    # argument; this call raises TypeError as written. Confirm the intended
    # settings source before running this module as a script.
    talkativeness_blob = get_all_blob(video_name_1='Ses01F_F',
                                      video_name_2='Ses01F_M')
| StarcoderdataPython |
3260289 | <gh_stars>0
from __future__ import unicode_literals
import datetime
from django.db import models
from carros.users.models import User
# Create your models here.
class Alert(models.Model):
    """A saved car-search alert: category/brand/model plus optional year,
    price, location and mileage filters for a given user.

    The year and price dropdown choices are generated programmatically
    instead of being spelled out tuple-by-tuple; the resulting
    (value, label) pairs are identical to the original hand-written lists.
    """

    # Sentinel values used by the dropdowns ("no selection").
    val = "-1"
    default_year = "0"

    # Years offered, newest first: 2017 down to 1900; value equals label.
    _YEARS = [str(y) for y in range(2017, 1899, -1)]
    CHOICES_YEAR_DESDE = tuple([(default_year, 'Desde')] + [(y, y) for y in _YEARS])
    CHOICES_YEAR_HASTA = tuple([(default_year, 'Hasta')] + [(y, y) for y in _YEARS])
    del _YEARS

    # Price points (in COP), kept as named attributes for external use.
    priceTodos = 'Todos'
    price5000000 = '5000000'
    price10000000 = '10000000'
    price15000000 = '15000000'
    price20000000 = '20000000'
    price25000000 = '25000000'
    price30000000 = '30000000'
    price35000000 = '35000000'
    price40000000 = '40000000'
    price45000000 = '45000000'
    price50000000 = '50000000'
    price55000000 = '55000000'
    price60000000 = '60000000'
    price70000000 = '70000000'
    price80000000 = '80000000'
    price100000000 = '100000000'
    price120000000 = '120000000'
    price_masde = '-1'    # "more than the top price"
    price_menosde = '-1'  # "less than the bottom price"

    # Ordered price values shared by both dropdowns.
    _PRICES = [price5000000, price10000000, price15000000, price20000000,
               price25000000, price30000000, price35000000, price40000000,
               price45000000, price50000000, price55000000, price60000000,
               price70000000, price80000000, price100000000, price120000000]
    # Label is the value with dots as thousands separators, e.g. '5.000.000'.
    _PRICE_PAIRS = []
    for _p in _PRICES:
        _PRICE_PAIRS.append((_p, '{:,}'.format(int(_p)).replace(',', '.')))
    del _p
    CHOICES_PRICE_DESDE = tuple(
        [(val, "Desde"), (price_menosde, "Menos de 5.000.000")] + _PRICE_PAIRS)
    CHOICES_PRICE_HASTA = tuple(
        [(val, "Hasta")] + _PRICE_PAIRS + [(price_masde, "Mas de 120.000.000")])
    del _PRICES
    del _PRICE_PAIRS

    user = models.ForeignKey(User)
    main_category = models.CharField(max_length=10)
    brand = models.CharField(max_length=10, blank=True, null=True)
    model = models.CharField(max_length=10, blank=True, null=True)
    year_min = models.CharField(max_length=4, choices=CHOICES_YEAR_DESDE, default=default_year, blank=True, null=True)
    year_max = models.CharField(max_length=4, choices=CHOICES_YEAR_HASTA, default=default_year, blank=True, null=True)
    price_min = models.CharField(max_length=10, choices=CHOICES_PRICE_DESDE, default=val, blank=True, null=True)
    price_max = models.CharField(max_length=10, choices=CHOICES_PRICE_HASTA, default=val, blank=True, null=True)
    location = models.CharField(max_length=30, blank=True, null=True)
    mileage = models.CharField(max_length=30, blank=True, null=True)

    def __unicode__(self):
        # Python 2 string representation (module uses unicode_literals).
        return "%s %s" % (self.brand, self.model)
class Match(models.Model):
    """A car listing fetched from an external marketplace, matched against
    one or more saved alerts.

    (A dataset-separator artifact fused onto the final line of this class
    was removed; the fields and behavior are otherwise unchanged.)
    """

    # A listing can satisfy several alerts at once.
    alerts = models.ManyToManyField(Alert)
    # Identifier of the listing in the external marketplace.
    external_id = models.CharField(max_length=140)
    title = models.CharField(max_length=200, blank=True, null=True)
    category_id = models.CharField(max_length=30, blank=True, null=True)
    price = models.DecimalField(max_digits=11, decimal_places=2, blank=True, null=True)
    start_time = models.CharField(max_length=40, blank=True, null=True)
    stop_time = models.CharField(max_length=40, blank=True, null=True)
    permalink = models.URLField(max_length=500, blank=True, null=True)
    thumbnail = models.URLField(max_length=500, blank=True, null=True)
    last_updated = models.CharField(max_length=40, blank=True, null=True)
    year = models.CharField(max_length=4, blank=True, null=True)
    mileage = models.CharField(max_length=30, blank=True, null=True)

    def __unicode__(self):
        # Python 2 representation, e.g. "<title> encontrado a <price>".
        return "%s encontrado a %s" % (self.title, self.price)
3317526 | <reponame>thiagofreitascarneiro/Python-avancado-Geek-University
'''
JSON ePickle
JSON -> JavaScript Object Notation
API ->São meios de comunicação entre os serviços oferecidos por empresas
(Twitter, Facebook, Youtube...) e terceiros(nós desenvolvedores).
import json
ret = json.dumps(['produto', {'Playstation 4': ('2TB', 'Novo', '220V', 2340)}])
print(type(ret))
print(ret)
import json
class Gato:
def __init__(self, nome, raca):
self.__nome = nome
self.__raca = raca
@property
def nome(self):
return self.__nome
@property
def raca(self):
return self.__raca
felix = Gato('Felix', 'Vira-Lata')
print(felix.__dict__)
ret = json.dumps(felix.__dict__)
print(ret)
Integrando o Json com o Pickle
pip install jsonpickle
import jsonpickle
class Gato:
def __init__(self, nome, raca):
self.__nome = nome
self.__raca = raca
@property
def nome(self):
return self.__nome
@property
def raca(self):
return self.__raca
felix = Gato('Felix', 'Vira-Lata')
ret = jsonpickle.encode(felix)
print(ret)
# Escrevendo o arquivo json/pickle
import jsonpickle
class Gato:
def __init__(self, nome, raca):
self.__nome = nome
self.__raca = raca
@property
def nome(self):
return self.__nome
@property
def raca(self):
return self.__raca
felix = Gato('Felix', 'Vira-Lata')
with open('felix.json', 'w') as arquivo:
ret = jsonpickle.encode(felix)
arquivo.write(ret)
'''
# Lendo o arquivo json/pickle
import jsonpickle
class Gato:
    """Cat record exposing read-only `nome` (name) and `raca` (breed)."""

    def __init__(self, nome, raca):
        # Stored under private names; exposed read-only via properties.
        self._nome = nome
        self._raca = raca

    @property
    def nome(self):
        return self._nome

    @property
    def raca(self):
        return self._raca
# Demo: decode a previously written jsonpickle file back into a Gato.
felix = Gato('Felix', 'Vira-Lata')
# Read the serialized object written by the earlier encode example.
with open('felix.json', 'r') as arquivo:
    conteudo = arquivo.read()
    ret = jsonpickle.decode(conteudo)
# `ret` is a reconstructed Gato instance with the original attributes.
print(ret)
print(type(ret))
print(ret.nome)
print(ret.raca)
| StarcoderdataPython |
3273541 | # This is a _very simple_ example of a web service that recognizes faces in uploaded images.
#
# The result is returned as json. For example:
#
# $ curl -XPOST -F "file=@obama2.jpg" http://127.0.0.1:5001
#
# Returns:
#
# {
# "face_found_in_image": true,
# "is_picture_of_obama": true
# }
#
# This example is based on the Flask file upload example: http://flask.pocoo.org/docs/0.12/patterns/fileuploads/
# NOTE: This example requires flask to be installed! You can install it with pip:
# $ pip3 install flask
import face_recognition
from flask import Flask, jsonify, request, redirect
import pickle
from flask_cors import CORS, cross_origin
import json
# Image extensions accepted for upload by the recognition endpoint.
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
def allowed_file(filename):
    """Return True if `filename` has an extension in ALLOWED_EXTENSIONS."""
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1].lower()
    return extension in ALLOWED_EXTENSIONS
@app.route('/FaceRecognition_api', methods=['GET', 'POST'])
@cross_origin(origin='*')
def upload_image():
    """Handle an image upload and run face recognition on it.

    POST: expects a multipart 'file' field. Returns the JSON recognition
    result for allowed image types, a plain-text error for a missing or
    empty upload, or falls through to the form for disallowed extensions.
    GET: returns a minimal HTML upload form.

    Removed debug prints and a leftover early ``return`` that bypassed the
    ``allowed_file`` extension check, which had made the validation below
    unreachable.
    """
    if request.method == 'POST':
        if 'file' not in request.files:
            return "file upload unsuccessfull"
        file = request.files['file']
        if file.filename == '':
            return "file value is null"
        if file and allowed_file(file.filename):
            # The image file seems valid! Detect faces and return the result.
            return detect_faces_in_image(file)
    # If no valid image file was uploaded, show the file upload form:
    return '''
    <!doctype html>
    <title>Is this a picture of Obama?</title>
    <h1>Upload a picture and see if it's a picture of Obama!</h1>
    <form method="POST" enctype="multipart/form-data">
      <input type="file" name="file">
      <input type="submit" value="Upload">
    </form>
    '''
def detect_faces_in_image(file_stream):
    """Run face recognition on an uploaded image and return a JSON response.

    Loads known-face encodings from 'encodings.pickle', compares them against
    the first face found in *file_stream*, and responds with whether a face
    was found plus the best-matching user name ("" when none matched).
    """
    # Known encodings + names, precomputed offline with face_recognition.
    # NOTE(review): pickle is only safe because this file is produced locally;
    # never load untrusted pickle data.
    # BUG FIX: the file handle was previously opened without being closed.
    with open('encodings.pickle', "rb") as encodings_file:
        data = pickle.loads(encodings_file.read())
    # Load the uploaded image file
    img = face_recognition.load_image_file(file_stream)
    # Get face encodings for any faces in the uploaded image
    unknown_face_encodings = face_recognition.face_encodings(img)
    face_found = False
    name = ""
    if unknown_face_encodings:
        face_found = True
        # Compare the first detected face against every known encoding.
        match_results = face_recognition.compare_faces(
            data["encodings"], unknown_face_encodings[0], tolerance=0.4)
        # (The old unused 'is_obama' flag and its unguarded match_results[0]
        # access -- an IndexError when no encodings exist -- were removed;
        # neither affected the returned payload.)
        if True in match_results:
            # Vote: keep the known name that matched most often.
            matched_idxs = [i for (i, b) in enumerate(match_results) if b]
            counts = {}
            for i in matched_idxs:
                matched_name = data["names"][i]
                counts[matched_name] = counts.get(matched_name, 0) + 1
            name = max(counts, key=counts.get)
    # Return the result as json
    result = {
        "face_found_in_image": face_found,
        'username': name
    }
    return jsonify(result)
if __name__ == "__main__":
    # Enable CORS for all routes so browser clients on other origins can call
    # the API, then start Flask's development server on all interfaces.
    cors = CORS(app)
    app.config['CORS_HEADERS'] = 'Content-Type'
    app.run(host='0.0.0.0', port=5001, debug=True)
| StarcoderdataPython |
3345492 | <reponame>maciek-slon/DisCODe<gh_stars>1-10
#! /usr/bin/env python
# Copyright (c) 2010 Warsaw Univeristy of Technology
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import sys
import os
import re
import argparse
def replace_words(text, word_dic):
    """Replace every occurrence of each key of *word_dic* in *text* with the
    key's associated value and return the resulting string."""
    escaped_keys = [re.escape(key) for key in word_dic]
    pattern = re.compile('|'.join(escaped_keys))
    return pattern.sub(lambda match: word_dic[match.group(0)], text)
def configure_file(in_name, out_name, word_dic):
    """
    take template file, replace words according to given dictionary and write
    result to another file
    """
    # with-blocks close the handles even on error; the previous version also
    # shadowed the builtin ``str`` with the file contents.
    # Read template file
    with open(in_name, "r") as fin:
        contents = fin.read()
    # call the function and get the changed text
    contents = replace_words(contents, word_dic)
    # write changed text back out
    with open(out_name, "w") as fout:
        fout.write(contents)
# Absolute path of directory DisCODe was installed in
# NOTE(review): "@CMAKE_INSTALL_PREFIX@" is expected to be substituted by
# CMake's configure_file at install time -- this script is generated, not run
# verbatim from the repository.
DISCODE_PATH="@CMAKE_INSTALL_PREFIX@"
DISCODE_DCL_DIR=os.environ['DISCODE_DCL_DIR']
parser = argparse.ArgumentParser()
parser.add_argument("DCL", help="name of DCL to be created")
args = parser.parse_args()
#if len(sys.argv) != 2:
#    # stop the program and print an error message
#    sys.exit("Usage: " + sys.argv[0] + " PATH_TO_DCL")
dcl_name = args.DCL
fullpath = os.path.join(DISCODE_DCL_DIR, dcl_name)
# Create directory if it doesn't exist
if not os.path.exists(fullpath):
    os.makedirs(fullpath)
# Check if provided directory contains CMakeLists.txt file
# which means, that it is probably already an DCL
if os.path.exists(fullpath + "/CMakeLists.txt"):
    sys.exit("There is already DCL named " + dcl_name + " in current location!")
print "Creating DCL:" , dcl_name , "in" , fullpath
#===============================================================================
# Preparing README file
#===============================================================================
# README gets an underlined title made from the DCL name.
readme_header = dcl_name + " - DisCODe Component Library"
readme_header += "\n" + "=" * len(readme_header)
readme_dic = {
    'TEMPLATE_ARG_DCL_NAME' : readme_header
}
configure_file(DISCODE_PATH+'/share/DisCODe/Templates/README.md', fullpath+'/README.md', readme_dic)
#===============================================================================
# Preparing CMakeLists.txt file
#===============================================================================
cmakelists_dic = {
    'TEMPLATE_ARG_DCL_NAME' : dcl_name
}
configure_file(DISCODE_PATH+'/share/DisCODe/Templates/CMakeLists.txt', fullpath+'/CMakeLists.txt', cmakelists_dic)
#===============================================================================
# Preparing 'src' directory. If it already exists, then all components
# inside are converted and inserted to CMakeLists.txt
#===============================================================================
# create src directory if it doesn't exist
srcpath = os.path.join(fullpath, "src")
if not os.path.exists(srcpath):
    os.makedirs(srcpath)
# create CMakeLists.txt in src directory
srccmakepath = os.path.join(srcpath, "CMakeLists.txt")
if not os.path.exists(srccmakepath):
    configure_file(DISCODE_PATH+'/share/DisCODe/Templates/src/CMakeLists.txt', srccmakepath, cmakelists_dic)
# create DCLConfig.txt in src directory
srcconfigpath = os.path.join(srcpath, dcl_name+"Config.cmake.in")
if not os.path.exists(srcconfigpath):
    configure_file(DISCODE_PATH+'/share/DisCODe/Templates/src/DCLConfig.cmake.in', srcconfigpath, cmakelists_dic)
# create Types directory if it doesn't exist
srctypespath = os.path.join(srcpath, "Types")
if not os.path.exists(srctypespath):
    os.makedirs(srctypespath)
# create CMakeLists.txt in Components directory
srctypescmakepath = os.path.join(srctypespath, "CMakeLists.txt")
if not os.path.exists(srctypescmakepath):
    configure_file(DISCODE_PATH+'/share/DisCODe/Templates/src/Types/CMakeLists.txt', srctypescmakepath, cmakelists_dic)
# create Components directory if it doesn't exist
srccomponentspath = os.path.join(srcpath, "Components")
if not os.path.exists(srccomponentspath):
    os.makedirs(srccomponentspath)
# create CMakeLists.txt in Components directory
srccomponentscmakepath = os.path.join(srccomponentspath, "CMakeLists.txt")
if not os.path.exists(srccomponentscmakepath):
    configure_file(DISCODE_PATH+'/share/DisCODe/Templates/src/Components/CMakeLists.txt', srccomponentscmakepath, cmakelists_dic)
# prepare dictionary for components conversion
# (renames legacy INSTALL_* macros to the unified INSTALL_COMPONENT)
cmakelists_dic = {
    'INSTALL_PROCESSOR' : 'INSTALL_COMPONENT',
    'INSTALL_SOURCE' : 'INSTALL_COMPONENT',
    'INSTALL_SINK' : 'INSTALL_COMPONENT',
    'INSTALL_PROXY' : 'INSTALL_COMPONENT'
}
cmakefile = open(srccomponentscmakepath, "a")
# iterate through all subdirectories in Components folder
for f in os.listdir(srccomponentspath):
    newpath = os.path.join(srccomponentspath, f)
    if os.path.isdir(newpath):
        cmakepath = os.path.join(newpath, "CMakeLists.txt")
        if os.path.exists(cmakepath):
            print "Converting component: " + f
            configure_file(cmakepath, cmakepath, cmakelists_dic)
            cmakefile.write("\nADD_COMPONENT("+f+")\n")
cmakefile.close()
#===============================================================================
# Print message and finish
#===============================================================================
print dcl_name , "DCL created."
print "Please edit README file and provide all necessary information."
print "You can create new components by executing create_component command."
| StarcoderdataPython |
3350218 | <gh_stars>10-100
from .utils import get_test_data_path
from ..abbr import findall, expandall, compressall, clean_str
from glob import glob
from os.path import join
import json
def test_findall():
    """findall() on each raw fixture must match its stored dict_*.json."""
    for raw_path in glob(join(get_test_data_path(), 'raw*.txt')):
        expected_path = raw_path.replace('raw_', 'dict_').replace('.txt', '.json')
        with open(raw_path, 'rb') as raw_file:
            raw_text = raw_file.read()
        found = findall(clean_str(raw_text))
        found = {key: val for key, val in found.items() if val is not None}
        with open(expected_path, 'r') as expected_file:
            expected = json.load(expected_file)
        assert found == expected
def test_expandall():
    """expandall() on each raw fixture must match the expanded_* fixture."""
    for raw_path in glob(join(get_test_data_path(), 'raw*.txt')):
        expanded_path = raw_path.replace('raw', 'expanded')
        with open(raw_path, 'rb') as raw_file:
            raw_text = raw_file.read()
        with open(expanded_path, 'rb') as expanded_file:
            expected_text = expanded_file.read()
        assert expandall(clean_str(raw_text)) == clean_str(expected_text)
def test_compressall():
    """compressall() on each raw fixture must match the compressed_* fixture."""
    for raw_path in glob(join(get_test_data_path(), 'raw*.txt')):
        compressed_path = raw_path.replace('raw', 'compressed')
        with open(raw_path, 'rb') as raw_file:
            raw_text = raw_file.read()
        with open(compressed_path, 'rb') as compressed_file:
            compressed_text = compressed_file.read()
        assert compressall(clean_str(raw_text)) == clean_str(compressed_text)
| StarcoderdataPython |
3215815 | BASE_URL = 'https://nova-dveri.ru/'
# Mobile Chrome-on-iOS user agent string, presumably sent so the site serves
# its normal mobile pages to this client -- confirm against the request code.
USER_AGENT = ('Mozilla/5.0 (iPhone; CPU iPhone OS 14_7 like Mac OS X) '
              'AppleWebKit/605.1.15 (KHTML, like Gecko) CriOS/92.0.4515.90 '
              'Mobile/15E148 Safari/604.1')
# Path component of the site's HTML sitemap page (relative to BASE_URL).
HTML_SITE_MAP = 'sitemap2'
# Default request headers.
HEADERS = {'User-Agent': USER_AGENT}
| StarcoderdataPython |
4822408 | <gh_stars>0
# https://github.com/nestordeharo/mysql-python-class/blob/master/mysql_python.py
import MySQLdb
from config import *
from collections import OrderedDict
import datetime
class MysqlPython(object):
    """
    #https://github.com/nestordeharo/mysql-python-class/blob/master/mysql_python.py
    Python Class for connecting with MySQL server and accelerate development project using MySQL
    Extremely easy to learn and use, friendly construction.
    """
    # Class-level slots backing the singleton connection wrapper.
    __instance = None
    __host = None
    __user = None
    __password = None
    __database = None
    __session = None
    __connection = None
    # Singleton: reuse one instance per process.
    # NOTE(review): passing *args/**kwargs to object.__new__ raises TypeError
    # on Python 3 -- confirm the target interpreter (the py2-style print in
    # __open below suggests Python 2).
    def __new__(cls, *args, **kwargs):
        if not cls.__instance or not cls.__database:
            cls.__instance = super(MysqlPython, cls).__new__(cls,*args,**kwargs)
        return cls.__instance
    ## End def __new__
    # NOTE(review): "<PASSWORD>" is an anonymization placeholder and is not
    # valid Python -- restore the real default (imported from config) before
    # this module can run.
    def __init__(self, host=DB_IP, user=DB_user, password=<PASSWORD>, database=DB_name):
        self.__host = host
        self.__user = user
        self.__password = password
        self.__database = database
        # Debug print of connection parameters (includes the password!).
        print("#####__init__ {}, {}, {}, {}".format(self.__host, self.__user, self.__password, self.__database))
    ## End def __init__
    # Open a fresh connection + cursor. Errors are only printed, so a failed
    # connect leaves __session unset/stale for the caller to trip over.
    def __open(self):
        try:
            cnx = MySQLdb.connect(self.__host, self.__user, self.__password, self.__database)
            self.__connection = cnx
            self.__session = cnx.cursor()
        except MySQLdb.Error as e:
            print "error in connecting...Error %d: %s" % (e.args[0],e.args[1])
    ## End def __open
    def __close(self):
        self.__session.close()
        self.__connection.close()
    ## End def __close
    # SELECT <*args columns> FROM <table> [WHERE <where>]; kwargs values bind
    # to the %s placeholders inside the WHERE clause.
    # NOTE(review): table/column/where fragments are interpolated directly into
    # the SQL text -- safe only for trusted, hard-coded identifiers.
    def select(self, table, where=None, *args, **kwargs):
        result = None
        query = 'SELECT '
        keys = args
        values = tuple(kwargs.values())
        l = len(keys) - 1
        for i, key in enumerate(keys):
            query += "`"+key+"`"
            if i < l:
                query += ","
        ## End for keys
        # NOTE(review): no space before FROM (yields "`col`FROM tbl"); MySQL
        # tolerates this after a closing backtick but it is fragile.
        query += 'FROM %s' % table
        if where:
            query += " WHERE %s" % where
        ## End if where
        self.__open()
        self.__session.execute(query, values)
        number_rows = self.__session.rowcount
        number_columns = len(self.__session.description)
        # Multi-column result sets come back as row tuples; single-column
        # results are flattened to bare values.
        if number_rows >= 1 and number_columns > 1:
            result = [item for item in self.__session.fetchall()]
        else:
            result = [item[0] for item in self.__session.fetchall()]
        self.__close()
        return result
    ## End def select
    # UPDATE <table> SET k=%s,... WHERE <where>; positional *args supply the
    # WHERE placeholders, appended after the SET values. Returns rowcount.
    def update(self, table="tblCrawler", where=None, *args, **kwargs):
        query = "UPDATE %s SET " % table
        keys = kwargs.keys()
        values = tuple(kwargs.values()) + tuple(args)
        l = len(keys) - 1
        for i, key in enumerate(keys):
            query += "`"+key+"` = %s"
            if i < l:
                query += ","
            ## End if i less than 1
        ## End for keys
        query += " WHERE %s" % where
        self.__open()
        self.__session.execute(query, values)
        self.__connection.commit()
        # Obtain rows affected
        update_rows = self.__session.rowcount
        self.__close()
        return update_rows
    ## End function update
    # INSERT via kwargs (named columns) or bare *args (positional VALUES).
    # Returns the auto-increment id of the inserted row.
    def insert(self, table='tblCrawler', *args, **kwargs):
        values = None
        query = "INSERT INTO %s " % table
        if kwargs:
            keys = kwargs.keys()
            values = tuple(kwargs.values())
            query += "(" + ",".join(["`%s`"] * len(keys)) % tuple (keys) + ") VALUES (" + ",".join(["%s"]*len(values)) + ")"
        elif args:
            values = args
            query += " VALUES(" + ",".join(["%s"]*len(values)) + ")"
        self.__open()
        self.__session.execute(query, values)
        self.__connection.commit()
        self.__close()
        return self.__session.lastrowid
    ## End def insert
    # DELETE FROM <table> [WHERE <where>]; *args bind the WHERE placeholders.
    def delete(self, table="tblCrawler", where=None, *args):
        query = "DELETE FROM %s" % table
        if where:
            query += ' WHERE %s' % where
        values = tuple(args)
        self.__open()
        self.__session.execute(query, values)
        self.__connection.commit()
        # Obtain rows affected
        delete_rows = self.__session.rowcount
        self.__close()
        return delete_rows
    ## End def delete
    # Run an arbitrary SQL string; *args are (key, value) pairs whose values
    # fill the query's %s placeholders in the given order.
    def select_advanced(self, sql, *args):
        od = OrderedDict(args)
        query = sql
        values = tuple(od.values())
        self.__open()
        self.__session.execute(query, values)
        number_rows = self.__session.rowcount
        number_columns = len(self.__session.description)
        if number_rows >= 1 and number_columns > 1:
            result = [item for item in self.__session.fetchall()]
        else:
            result = [item[0] for item in self.__session.fetchall()]
        self.__close()
        return result
    ## End def select_advanced
## End class
def getFeedid(vendor=None):
    """Build a feed id: DDMMYYYY for today, or DDMMYYYY_<vendor> when given."""
    stamp = datetime.datetime.now().strftime("%d%m%Y")
    if vendor is None:
        return stamp
    return stamp + '_' + vendor
def readQuery():
    """Fetch the failed/total item counters for today's myntra feed and print them."""
    db = MysqlPython()
    where_clause = 'feedid = %s '
    result = db.select('tblCrawler', where_clause,
                       'failed_item', 'total_item',
                       feedid=getFeedid("myntra"))
    print("update result: {}".format(result))
def updateQuery():
    """Set failed_item = 5 for the row matching the module-level ``feedid``.

    NOTE(review): ``feedid`` is declared global but never assigned in this
    module, so calling this raises NameError unless a caller defines it first.
    """
    global feedid
    db = MysqlPython()
    result = db.update('tblCrawler', 'feedid = %s ', feedid, failed_item=5)
    print("update result: {}".format(result))
def readItem(item):
    """Return the value of counter column *item* for the global ``feedid`` row."""
    global feedid
    db = MysqlPython()
    rows = db.select('tblCrawler', 'feedid = %s ', item, feedid=feedid)
    print("result: {}".format(rows[0]))
    return rows[0]
def updateItem(item):
    """Increment counter column *item* by one for the global ``feedid`` row."""
    global feedid
    new_value = readItem(item) + 1
    known_items = ('failed_item', 'duplicate_item', 'incomplete_item',
                   'successful_item', 'total_item')
    if item in known_items:
        db = MysqlPython()
        result = db.update('tblCrawler', 'feedid = %s ', feedid,
                           **{item: new_value})
    # Unknown items skip the update, so the print below raises
    # UnboundLocalError -- same behaviour as the original if/elif chain.
    print("update result: {}".format(result))
def insertFeedId(feedid):
    """Insert a fresh tblCrawler row for *feedid* (counter columns start NULL)."""
    MysqlPython().insert(feedid=feedid)
#CREATE TABLE tblCrawler(Id INT NOT NULL AUTO_INCREMENT,feedid VARCHAR(40) DEFAULT NULL,total_item INT DEFAULT NULL,duplicate_item INT DEFAULT NULL,incomplete_item INT DEFAULT NULL,failed_item INT DEFAULT NULL,successful_item INT DEFAULT NULL,PRIMARY KEY (Id));
# Manual smoke test: bump the failed_item counter for the current feed.
# NOTE(review): updateItem reads the module global ``feedid``, which is never
# assigned here, so running this module directly raises NameError -- confirm
# the intended entry point.
if __name__ == '__main__':
    #insertQuery()
    #updateQuery()
    #readItem('failed_item')
    updateItem('failed_item')
| StarcoderdataPython |
1776198 | import sys
from rokuon.application import Application
if __name__ == "__main__":
    # Build the application and exit with run()'s status code so shell
    # callers can detect failure.
    app = Application()
    sys.exit(app.run(sys.argv))
| StarcoderdataPython |
1656181 | import re
import glob
from json import dumps
from os.path import curdir, abspath, join, splitext, isfile
from os import walk
# RFC 2119 requirement keywords, as regex fragments.
# NOTE(review): order matters to first-match callers (find_rfc_2119_keyword):
# "MUST" precedes "MUST NOT", so "**MUST NOT**" text reports as "MUST" --
# confirm this is intended.
rfc_2119_keywords_regexes = [
    r"MUST",
    r"REQUIRED",
    r"SHALL",
    r"MUST NOT",
    r"SHALL NOT",
    r"SHOULD",
    r"RECOMMENDED",
    r"SHOULD NOT",
    r"NOT RECOMMENDED",
    r"MAY",
    r"OPTIONAL",
]
def get_ignored_path_globs(root):
    """Read root/.specignore and return its glob patterns.

    Blank lines and '#' comment lines are skipped, and surrounding whitespace
    is stripped. Returns [] when no .specignore file exists.
    """
    spec_path = join(root, ".specignore")
    if not isfile(spec_path):
        return []
    with open(spec_path, 'r') as spec_file:
        return [
            stripped
            for stripped in (line.strip() for line in spec_file)
            if stripped and not stripped.startswith('#')
        ]
def get_ignored_paths(root):
    """Expand the .specignore globs of *root* into a set of ignored files.

    Glob matches that are files are recorded directly; matches that are
    directories contribute every .md file found beneath them, recursively.
    """
    matched = set()
    for pattern in get_ignored_path_globs(root):
        matched.update(glob.glob(pattern, recursive=True))
    ignored = set()
    for path in matched:
        if isfile(path):
            ignored.add(join(root, path))
        else:
            ignored.update(glob.glob(join(root, path, "**/*.md"), recursive=True))
    return ignored
def find_markdown_file_paths(root):
    """Return every .md file under *root* that is not ignored by .specignore."""
    ignored = get_ignored_paths(root)
    markdown_paths = []
    for dir_path, _, names in walk(root):
        for name in names:
            full_path = join(dir_path, name)
            if full_path in ignored:
                continue
            if splitext(full_path)[1] == ".md":
                markdown_paths.append(full_path)
    return markdown_paths
def clean_content(content):
    """Transmute markdown blockquote content into a single plain-text line."""
    # Keep only blockquote ('>') lines; startswith('>') already excludes blanks.
    quoted = [ln for ln in content.splitlines() if ln.strip().startswith('>')]
    text = '\n'.join(quoted)
    # Strip the bold markers around RFC 2119 keywords (**MUST** -> MUST).
    for keyword_pattern in rfc_2119_keywords_regexes:
        text = re.sub(f"\\*\\*{keyword_pattern}\\*\\*", keyword_pattern, text)
    # Collapse the quote markers and line breaks into spaces.
    return re.sub(r"\n?>\s*", " ", text.strip()).strip()
def find_rfc_2119_keyword(content):
    """Return the first bolded RFC 2119 keyword present in *content*, else None."""
    for keyword_pattern in rfc_2119_keywords_regexes:
        if re.search(f"\\*\\*{keyword_pattern}\\*\\*", content):
            return keyword_pattern
    return None
def parsed_content_to_heirarchy(parsed_content):
    """Turn (level, headline, content) triples into a tree of requirements.

    *level* is the markdown heading marker ('#', '##', ...); its length gives
    the nesting depth. A stack of open headlines decides whether each entry is
    a sibling, a parent, or a child of the previous one.
    """
    content_tree = []
    headline_stack = []
    node = lambda l,h,c: {'level': l, 'headline': h, 'content': c, 'children': []}
    for level, headline, content in parsed_content:
        # NOTE(review): the except below prints and continues, silently
        # swallowing IndexError from the pop() calls on malformed heading
        # sequences -- confirm that lossy behaviour is intended.
        try:
            if len(headline_stack) == 0: # top-most node
                cur = node(level, headline, content)
                content_tree.append(cur)
                headline_stack.insert(0, [level, headline, cur])
            elif len(headline_stack[0][0]) >= len(level): # Sibling or parent node
                # Parent case pops one extra frame before the sibling pop.
                if len(headline_stack[0][0]) > len(level): # parent, right?
                    headline_stack.pop(0)
                headline_stack.pop(0)
                if len(headline_stack) == 0:
                    parent = content_tree
                else:
                    parent = headline_stack[0][2]['children']
                cur = node(level, headline, content)
                parent.append(cur)
                headline_stack.insert(0, [level, headline, cur])
            elif len(level) > len(headline_stack[0][0]): # child node
                # TODO: emit warning if headlines are too deep
                cur = node(level, headline, content)
                parent = headline_stack[0][2]
                parent['children'].append(cur)
                headline_stack.insert(0, [level, headline, cur])
            else:
                headline_stack.pop(0)
        except Exception as k:
            print(k);
    # Specify a root so we know that everything is a node all the way down.
    root = node(0, '', '')
    root['children'] = content_tree
    return content_tree_to_spec(root)
def gen_node(ct):
    """Turn one content node into a requirements node, or None when its
    headline names no requirement/condition."""
    match = re.search(r'(?P<req>(requirement|condition)[^\n]+)',
                      ct['headline'], re.IGNORECASE)
    if match is None:
        return None
    req_id = match.groups()[0]
    return {
        'id': req_id,
        'clean id': re.sub(r"[^\w]", "_", req_id.lower()),
        'content': clean_content(ct['content']),
        'RFC 2119 keyword': find_rfc_2119_keyword(ct['content']),
        'children': [],
    }
def content_tree_to_spec(ct):
    """Recursively convert a content tree into requirement node(s).

    Returns a dict when this node is itself a requirement, a list when only
    descendants are requirements, or None when the subtree holds none.
    """
    # A converted child is None (drop it), a dict (keep it), or a list
    # (splice its elements into this level).
    children = []
    for converted in (content_tree_to_spec(child) for child in ct['children']):
        if converted is None:
            continue
        if isinstance(converted, list):
            children.extend(converted)
        else:
            children.append(converted)
    current = gen_node(ct)
    if current is not None:
        current['children'] = children
        return current
    return children if children else None
def parse(markdown_file_path):
    """Parse one markdown file into its requirements hierarchy."""
    heading_re = re.compile(
        r'^(?P<level>#+)(?P<headline>[^\n]+)(?P<rest>[^#]*)', re.MULTILINE)
    with open(markdown_file_path, "r") as markdown_file:
        sections = heading_re.findall(markdown_file.read())
    return parsed_content_to_heirarchy(sections)
def write_json_specifications(requirements):
    """Write each {markdown path: sections} entry to a sibling .json file."""
    for md_path, sections in requirements.items():
        json_path = splitext(md_path)[0] + ".json"
        with open(json_path, "w") as json_file:
            json_file.write(dumps(sections, indent=4))
if __name__ == "__main__":
    # Walk the current directory, parse every non-ignored markdown file, and
    # dump each non-empty requirements tree next to its source as .json.
    for markdown_file_path in find_markdown_file_paths(
        join(abspath(curdir))
    ):
        result = parse(markdown_file_path)
        if result:
            with open(
                "".join([splitext(markdown_file_path)[0], ".json"]), "w"
            ) as json_file:
                json_file.write(dumps(result, indent=4))
| StarcoderdataPython |
93758 | <gh_stars>1-10
# Exit/status codes grouped by scenario. Presumably consumed by an agent
# troubleshooting tool (names reference OMS/OMI/SCX components) -- confirm
# against the caller. Codes must stay stable; they form the tool's contract.
# General Errors
NO_ERROR = 0
USER_EXIT = 1
ERR_SUDO_PERMS = 100
ERR_FOUND = 101
ERR_PYTHON_PKG = 154
# Warnings
WARN_FILE_PERMS = 115
WARN_LOG_ERRS = 126
WARN_LOG_WARNS = 127
WARN_LARGE_FILES = 151
# Installation Errors
ERR_BITS = 102
ERR_OS_VER = 103
ERR_OS = 104
ERR_FINDING_OS = 105
ERR_FREE_SPACE = 106
ERR_PKG_MANAGER = 107
ERR_OMSCONFIG = 108
ERR_OMI = 109
ERR_SCX = 110
ERR_OMS_INSTALL = 111
ERR_OLD_OMS_VER = 112
ERR_GETTING_OMS_VER = 113
ERR_FILE_MISSING = 114
ERR_CERT = 116
ERR_RSA_KEY = 117
ERR_FILE_EMPTY = 118
ERR_INFO_MISSING = 119
ERR_PKG = 152
# Connection Errors
ERR_ENDPT = 120
ERR_GUID = 121
ERR_OMS_WONT_RUN = 122
ERR_OMS_STOPPED = 123
ERR_OMS_DISABLED = 124
ERR_FILE_ACCESS = 125
# Heartbeat Errors
ERR_HEARTBEAT = 128
ERR_MULTIHOMING = 129
ERR_INTERNET = 130
ERR_QUERIES = 131
# High CPU / Memory Usage Errors
ERR_OMICPU = 141
ERR_OMICPU_HOT = 142
ERR_OMICPU_NSSPEM = 143
ERR_OMICPU_NSSPEM_LIKE = 144
ERR_SLAB = 145
ERR_SLAB_BLOATED = 146
ERR_SLAB_NSSSOFTOKN = 147
ERR_SLAB_NSS = 148
ERR_LOGROTATE_SIZE = 149
ERR_LOGROTATE = 150
# Syslog Errors
ERR_SYSLOG_WKSPC = 132
ERR_PT = 133
ERR_PORT_MISMATCH = 134
ERR_PORT_SETUP = 135
ERR_SERVICE_CONTROLLER = 136
ERR_SYSLOG = 137
ERR_SERVICE_STATUS = 138
# Custom Log Errors
ERR_CL_FILEPATH = 139
ERR_CL_UNIQUENUM = 140
ERR_BACKEND_CONFIG = 153
3302516 | <filename>tilty_dashboard/__init__.py
# -*- coding: utf-8 -*-
""" The main method, handles all initialization """
import logging
import os
from datetime import datetime, timedelta
from flask import Flask, render_template, session
from flask_bootstrap import Bootstrap
from flask_cors import CORS
from flask_socketio import SocketIO, emit
from sqlalchemy import and_
from werkzeug.contrib.fixers import ProxyFix
from flask_session import Session
from tilty_dashboard.model import Tilt, db
logging.basicConfig(level=logging.DEBUG)
log = logging.getLogger(__name__)
# Module-level Flask app with server-side (filesystem) sessions; Socket.IO is
# told not to manage its own session so handlers share the Flask one.
app = Flask(__name__)
app.config['SESSION_TYPE'] = 'filesystem'
Session(app)
socketio = SocketIO(app, manage_session=False)
def init_webapp(config):
    """ Initialize the web application.

    Args:
        config: mapping with a ['webapp']['database_uri'] entry used as the
            SQLAlchemy connection string.

    Returns:
        The configured Flask ``app``.
    """
    # Trust X-Forwarded-* headers when running behind a reverse proxy.
    app.wsgi_app = ProxyFix(app.wsgi_app)
    app.config['LOG_FILE'] = os.environ.get('LOG_FILE', '/app/tilty.log')
    app.config['SQLALCHEMY_DATABASE_URI'] = config['webapp']['database_uri']
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    # NOTE(review): 'abc123' is an insecure fallback secret; require SECRET_KEY
    # to be set in production deployments.
    app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', 'abc123')
    app.config['TILT_CONFIG'] = os.environ.get(
        'TILT_CONFIG',
        '/etc/tilty/tilty.ini'
    )
    CORS(app, supports_credentials=True)
    Bootstrap(app)
    # Bind the shared SQLAlchemy handle to this app and create missing tables.
    db.app = app
    db.init_app(app)
    db.create_all()
    return app
@socketio.on('save device config')
def save_device_config(message):
    """ Save the device config into the config file """
    # NOTE(review): writes socket-supplied text straight to TILT_CONFIG with no
    # validation or authentication -- any connected client can rewrite the
    # device configuration; confirm this is acceptable for the deployment.
    with open(app.config['TILT_CONFIG'], 'w') as file:
        file.write(message['data']['config'])
@app.route('/device_config', methods=['GET', 'POST'])
def device_config():
    """Render the device configuration page with the current config file text."""
    with open(app.config['TILT_CONFIG'], 'r') as config_file:
        current_config = config_file.read()
    return render_template('device_config.html', tilty_config=current_config)
@socketio.on('save dashboard settings')
def save_dashboard_settings(message):
    """ Save the settings into the cookie """
    # Persist the client's dashboard preferences in the server-side session.
    session["settings"] = message['settings']
@app.route('/dashboard_settings', methods=['GET', 'POST'])
def dashboard_settings():
    """ Dashboard Settings Page. """
    # Pre-fill the form from session-stored preferences; gravity_offset is the
    # only one with a non-None default (-0.001).
    return render_template(
        'dashboard_settings.html',
        gravity_meas=session.get('settings', {}).get('gravity_meas'),
        gravity_offset=session.get('settings', {}).get(
            'gravity_offset',
            -0.001
        ),
        temp_meas=session.get('settings', {}).get('temp_meas'),
    )
@app.route('/')
def index():
    """A landing page.
    Nothing too interesting here.
    """
    # Same session-backed preferences as the settings page, so the dashboard
    # renders with the user's chosen units/offsets.
    return render_template(
        'index.html',
        gravity_meas=session.get('settings', {}).get('gravity_meas'),
        gravity_offset=session.get('settings', {}).get(
            'gravity_offset',
            -0.001
        ),
        temp_meas=session.get('settings', {}).get('temp_meas'),
    )
@socketio.on('refresh')
def refresh():
    """ Query The DB and refresh the socket """
    # Latest reading per device (grouped by mac) within the last 24 hours,
    # emitted back to the requesting client as serialized rows.
    since = datetime.now() - timedelta(days=1)
    last_pulse = db.session.query(  # pylint:disable=E1101
        Tilt.color,
        Tilt.gravity,
        Tilt.temp,
        Tilt.mac,
        Tilt.timestamp,
        db.func.max(Tilt.timestamp)  # pylint:disable=E1101
    ).group_by(Tilt.mac).subquery()
    # Join back on (mac, max timestamp) to fetch the full newest row per mac.
    _data = Tilt.query.join(
        last_pulse,
        and_(
            Tilt.mac == last_pulse.c.mac,
            Tilt.timestamp == last_pulse.c.timestamp
        )
    ).filter(Tilt.timestamp > since).all()
    _tilt_data = [d.serialize() for d in _data]
    emit('refresh', {'data': _tilt_data})
@app.route('/logs', methods=['GET'])
def logs():
    """ load the logs endpoint """
    # Page shell only; the log text itself arrives via the 'logs' socket event.
    return render_template(
        'logs.html',
    )
@socketio.on('logs')
def render_logs():
    """ Render the logs from the log file """
    # NOTE(review): reads the entire log file into memory on every request --
    # fine for small logs; consider tailing if the file can grow large.
    with open(app.config['LOG_FILE']) as f:
        emit('logs', {'data': f.read()})
| StarcoderdataPython |
59961 | from django.http import HttpResponse, HttpResponseRedirect
from django.urls import reverse
from django.db.models import Q
from .models import (
Deck,
Grave,
Hand,
Duel,
Trigger,
Lock,
)
from pprint import pprint
from .battle_det import battle_det,battle_det_return_org_ai
from .duel import DuelObj
from time import time
def lock_lock(room_number, lock,request):
    # Try to take the per-room advisory lock. Returns "OK" when this request
    # acquired it; otherwise returns an HttpResponse ("waiting"/"error") or an
    # immediate AI battle rendering for the caller to pass straight through.
    duel = Duel.objects.filter(id=room_number).get()
    # Guests are identified by a cookie "ID" matching the duel's stored guest
    # ids; -1 disables the comparison when the guest slot is inactive.
    if duel.guest_flag is False:
        ID1 = -1
    else:
        ID1 = duel.guest_id
    if duel.guest_flag2 is False:
        ID2 = -1
    else:
        ID2 = duel.guest_id2
    if "ID" in request.COOKIES :
        ID = request.COOKIES["ID"]
    else:
        ID = ""
    # NOTE(review): the three room branches below are near-duplicates; only
    # room 1 honours guest cookies, while rooms 2 and 3 reject users that are
    # not duel members -- confirm whether that asymmetry is intentional.
    if room_number == 1:
        # Lock held and fresh (< 20s): someone else is processing this room.
        if lock.lock_1 is True and time() - lock.time_1 < 20:
            if duel.is_ai is False:
                return HttpResponse("waiting")
            duelobj = DuelObj(room_number)
            duelobj.duel = duel
            duelobj.room_number = room_number
            duelobj.in_execute = False
            decks = Deck.objects.all()
            graves = Grave.objects.all()
            hands = Hand.objects.all()
            user_1 = duel.user_1
            user_2 = duel.user_2
            if request.user != user_1 and request.user != user_2:
                if (ID1 == ID and duel.guest_flag) or (ID2 == ID and duel.guest_flag2):
                    pass
                else:
                    return HttpResponse("error")
            if request.user == user_1 or (ID1 == ID and duel.guest_flag):
                duelobj.user = 1
                user = 1
                other_user = 2
            if request.user == user_2 or (ID2 == ID and duel.guest_flag2):
                duelobj.user = 2
                user = 2
                other_user = 1
            duelobj.init_all(user, other_user, room_number)
            return battle_det_return_org_ai(
                duelobj, decks, graves, hands, user, other_user, choices, room_number
            )
        else:
            # Acquire: mark the room locked and timestamp it for staleness.
            lock.lock_1 = True
            lock.time_1 = time()
            lock.save()
    elif room_number == 2:
        if lock.lock_2 is True and time() - lock.time_2 < 20:
            if duel.is_ai is False:
                return HttpResponse("waiting")
            duelobj = DuelObj(room_number)
            duelobj.duel = duel
            duelobj.room_number = room_number
            duelobj.in_execute = False
            decks = Deck.objects.all()
            graves = Grave.objects.all()
            hands = Hand.objects.all()
            user_1 = duel.user_1
            user_2 = duel.user_2
            if request.user != user_1 and request.user != user_2:
                return HttpResponse("error")
            if request.user == user_1:
                duelobj.user = 1
                user = 1
                other_user = 2
            if request.user == user_2:
                duelobj.user = 2
                user = 2
                other_user = 1
            duelobj.init_all(user, other_user, room_number)
            return battle_det_return_org_ai(
                duelobj, decks, graves, hands, user, other_user, choices, room_number
            )
        else:
            lock.lock_2 = True
            lock.time_2 = time()
            lock.save()
    elif room_number == 3:
        if lock.lock_3 is True and time() - lock.time_3 < 20:
            if duel.is_ai is False:
                return HttpResponse("waiting")
            duelobj = DuelObj(room_number)
            duelobj.duel = duel
            duelobj.room_number = room_number
            duelobj.in_execute = False
            decks = Deck.objects.all()
            graves = Grave.objects.all()
            hands = Hand.objects.all()
            user_1 = duel.user_1
            user_2 = duel.user_2
            if request.user != user_1 and request.user != user_2:
                return HttpResponse("error")
            if request.user == user_1:
                duelobj.user = 1
                user = 1
                other_user = 2
            if request.user == user_2:
                duelobj.user = 2
                user = 2
                other_user = 1
            duelobj.init_all(user, other_user, room_number)
            return battle_det_return_org_ai(
                duelobj, decks, graves, hands, user, other_user, choices, room_number
            )
        else:
            lock.lock_3 = True
            lock.time_3 = time()
            lock.save()
    return "OK"
def choices(request):
    # View: the acting player tries to launch a trigger in the given room.
    # Acquires the room lock, validates that the requester (member or guest
    # cookie) is the appointed player, invokes the trigger, then releases the
    # lock and renders the updated battle state.
    room_number = int(request.POST["room_number"])
    trigger_id = request.POST["trigger_id"]
    lock = Lock.objects.get()
    lock_flag = lock_lock(room_number, lock,request)
    duel = Duel.objects.filter(id=room_number).get()
    if duel.guest_flag is False:
        ID1 = -1
    else:
        ID1 = duel.guest_id
    if duel.guest_flag2 is False:
        ID2 = -1
    else:
        ID2 = duel.guest_id2
    if "ID" in request.COOKIES :
        ID = request.COOKIES["ID"]
    else:
        ID = ""
    # Lock not acquired: tell humans to wait, but let AI duels re-render the
    # current state instead of blocking.
    if lock_flag != "OK":
        if duel.is_ai == False:
            return HttpResponse("waiting")
        else:
            duelobj = DuelObj(room_number)
            duelobj.duel = duel
            duelobj.room_number = room_number
            duelobj.in_execute = False
            decks = Deck.objects.all()
            graves = Grave.objects.all()
            hands = Hand.objects.all()
            user_1 = duel.user_1
            user_2 = duel.user_2
            if request.user != user_1 and request.user != user_2:
                if (ID1 == ID and duel.guest_flag) or (ID2 == ID and duel.guest_flag2):
                    pass
                else:
                    return HttpResponse("error")
            if request.user == user_1 or(ID1 == ID and duel.guest_flag is True):
                duelobj.user = 1
                user = 1
                other_user = 2
            if request.user == user_2 or(ID2 == ID and duel.guest_flag2 is True):
                duelobj.user = 2
                user = 2
                other_user = 1
            duelobj.init_all(user, other_user, room_number)
            return battle_det_return_org_ai(
                duelobj, decks, graves, hands, user, other_user, choices, room_number
            )
    # Spectators (neither member nor matching guest) are bounced to watch mode.
    if duel.user_1 != request.user and duel.user_2 != request.user:
        if (ID1 == ID and duel.guest_flag) or (ID2 == ID and duel.guest_flag2):
            pass
        else:
            free_lock(room_number, lock)
            return HttpResponseRedirect(reverse("tcgcreator:watch_battle"))
    if duel.user_1 == request.user or ( ID1 == ID and duel.guest_flag is True):
        user = 1
        other_user = 2
    elif duel.user_2 == request.user or (ID2 == ID and duel.guest_flag2 is True):
        user = 2
        other_user = 1
    duelobj = DuelObj(room_number)
    duelobj.duel = duel
    duelobj.user = user
    duelobj.room_number = room_number
    decks = Deck.objects.all()
    graves = Grave.objects.all()
    hands = Hand.objects.all()
    duelobj.init_all(user, other_user, room_number)
    duelobj.check_eternal_effect(
        decks, graves, hands, duel.phase, duel.user_turn, user, other_user
    )
    # Mid-cost resolution: no new trigger may be launched.
    if duel.in_cost is True:
        free_lock(room_number, lock)
        return HttpResponse("error")
    # Only the appointed player may act; on success un-mute, persist state,
    # release the lock and render. Always release the lock before returning.
    if duel.user_1 == request.user or ( ID1 == ID and duel.guest_flag is True):
        if duel.appoint != 1:
            free_lock(room_number, lock)
            return HttpResponse("error")
        duelobj.user = 1
        user = 1
        other_user = 2
        if choices_det(duelobj, trigger_id, request, user) != -1:
            duelobj.duel.mute = False
            duelobj.save_all(user, other_user, room_number)
            free_lock(room_number, lock)
            return battle_det(request, duelobj)
        else:
            free_lock(room_number, lock)
            return HttpResponse("error")
    elif duel.user_2 == request.user or (ID2 == ID and duel.guest_flag2 is True):
        if duel.appoint != 2:
            free_lock(room_number, lock)
            return HttpResponse("error")
        duelobj.user = 2
        user = 2
        other_user = 1
        if choices_det(duelobj, trigger_id, request, user) != -1:
            duelobj.duel.mute = False
            duelobj.save_all(user, other_user, room_number)
            free_lock(room_number, lock)
            return battle_det(request, duelobj)
        else:
            free_lock(room_number, lock)
            return HttpResponse("error")
    free_lock(room_number, lock)
    return HttpResponse("error")
def choices_det(duelobj, trigger_id, request, user):
    """Fetch the trigger by id and invoke it if it may launch now.

    Returns the result of invoke_trigger on success, -1 otherwise.
    """
    other_user = 2 if user == 1 else 1
    trigger = Trigger.objects.all().get(id=trigger_id)
    duel = duelobj.duel
    launchable = trigger is not None and duelobj.check_launch_trigger(
        trigger, duel.phase, duel.user_turn, user, other_user, user
    )
    if not launchable:
        return -1
    return duelobj.invoke_trigger(trigger, "", "", "", duelobj.user, "")
def free_lock(room_number, lock):
    """Release the per-room lock flag on *lock* and persist the change.

    Only rooms 1-3 have lock columns; any other room number is a no-op,
    matching the original branch table.
    """
    if room_number in (1, 2, 3):
        setattr(lock, "lock_%d" % room_number, False)
        lock.save()
| StarcoderdataPython |
23541 | import tensorflow as tf
from os import path
import numpy as np
from scipy import misc
from styx_msgs.msg import TrafficLight
import cv2
import rospy
import tensorflow as tf
class CarlaModel(object):
    """Traffic-light state classifier backed by a frozen TF object-detection graph.

    The TensorFlow session and tensor handles are created lazily on the first
    predict() call so construction stays cheap.
    """
    def __init__(self, model_checkpoint):
        self.sess = None                 # lazily created in predict()
        self.checkpoint = model_checkpoint
        self.prob_thr = 0.90             # minimum detection score to accept a box
        self.TRAFFIC_LIGHT_CLASS = 10    # COCO class id for "traffic light"
        self.image_no = 10000            # running counter used in debug image filenames
        tf.reset_default_graph()
    def predict(self, img):
        """Return TrafficLight.RED/GREEN/YELLOW/UNKNOWN for RGB frame *img*."""
        if self.sess == None:
            # Lazily load the frozen inference graph and cache tensor handles.
            gd = tf.GraphDef()
            gd.ParseFromString(tf.gfile.GFile(self.checkpoint, "rb").read())
            tf.import_graph_def(gd, name="object_detection_api")
            self.sess = tf.Session()
            g = tf.get_default_graph()
            self.image = g.get_tensor_by_name("object_detection_api/image_tensor:0")
            self.boxes = g.get_tensor_by_name("object_detection_api/detection_boxes:0")
            self.scores = g.get_tensor_by_name("object_detection_api/detection_scores:0")
            self.classes = g.get_tensor_by_name("object_detection_api/detection_classes:0")
        img_h, img_w = img.shape[:2]
        self.image_no = self.image_no+1
        cv2.imwrite("full_"+str(self.image_no)+".png", img)
        # Scan a few overlapping sub-windows of the frame: lights are small,
        # so detection works better on crops than on the full image.
        for h0 in [img_h//3, (img_h//3)-150]:
            for w0 in [0, img_w//3, img_w*2//3]:
                grid = img[h0:h0+img_h//3+50, w0:w0+img_w//3, :] # grid
                pred_boxes, pred_scores, pred_classes = self.sess.run([self.boxes, self.scores, self.classes],
                    feed_dict={self.image: np.expand_dims(grid, axis=0)})
                pred_boxes = pred_boxes.squeeze()
                pred_scores = pred_scores.squeeze() # descending order of confidence
                pred_classes = pred_classes.squeeze()
                traffic_light = None
                h, w = grid.shape[:2]
                cv2.imwrite("grid_"+str(self.image_no)+"_"+str(h0)+"_"+str(w0)+".png",grid)
                rospy.loginfo("w,h is %s,%s",h0,w0)
                for i in range(pred_boxes.shape[0]):
                    box = pred_boxes[i]
                    score = pred_scores[i]
                    if score < self.prob_thr: continue
                    if pred_classes[i] != self.TRAFFIC_LIGHT_CLASS: continue
                    # Boxes are normalized [ymin, xmin, ymax, xmax]; scale to pixels.
                    x0, y0 = box[1] * w, box[0] * h
                    x1, y1 = box[3] * w, box[2] * h
                    x0, y0, x1, y1 = map(int, [x0, y0, x1, y1])
                    x_diff = x1 - x0
                    y_diff = y1 - y0
                    xy_ratio = x_diff/float(y_diff)
                    rospy.loginfo("image_no is %s", self.image_no)
                    rospy.loginfo("x,y ratio is %s",xy_ratio)
                    rospy.loginfo("score is %s",score)
                    # Traffic lights are tall and narrow: reject wide boxes.
                    if xy_ratio > 0.48: continue
                    area = np.abs((x1-x0) * (y1-y0)) / float(w*h)
                    rospy.loginfo("area is %s",area)
                    if area <= 0.001: continue
                    traffic_light = grid[y0:y1, x0:x1]
                    rospy.loginfo("traffic light given")
                    # select first -most confidence
                    if traffic_light is not None: break
                if traffic_light is not None: break
            if traffic_light is None:
                pass
            else:
                rospy.loginfo("w,h is %s,%s",h0,w0)
                rospy.loginfo("x,y ratio is %s",xy_ratio)
                rospy.loginfo("score is %s",score)
                cv2.imwrite("light_"+str(self.image_no)+".png",traffic_light)
                #cv2.imwrite("full_"+str(self.image_no)+".png", img)
                #cv2.imwrite("grid_"+str(self.image_no)+".png",grid)
                #self.image_no = self.image_no+1
                # Classify color by where the brightest pixels sit vertically:
                # red lamp at the top, green at the bottom, yellow in between.
                brightness = cv2.cvtColor(traffic_light, cv2.COLOR_RGB2HSV)[:,:,-1]
                hs, ws = np.where(brightness >= (brightness.max()-30))
                hs_mean = hs.mean()
                tl_h = traffic_light.shape[0]
                if hs_mean / tl_h < 0.4:
                    rospy.loginfo("image"+str(self.image_no-1)+" is RED")
                    return TrafficLight.RED
                elif hs_mean / tl_h >= 0.55:
                    rospy.loginfo("image"+str(self.image_no-1)+" is GREEN")
                    return TrafficLight.GREEN
                else:
                    rospy.loginfo("image"+str(self.image_no-1)+" is YELLOW")
                    return TrafficLight.YELLOW
        return TrafficLight.UNKNOWN
| StarcoderdataPython |
1620895 | #! /usr/bin/env python2
import sys
import time
import os
#----------------------------------------------------------------------
# flow control
#----------------------------------------------------------------------
def flow_control(command, hz):
    """Run *command* in a shell and relay its stdout at most *hz* lines/second.

    Lines arriving faster than the target rate are delayed; if output falls
    far behind schedule, the schedule is resynchronized rather than bursting.
    Returns 0.  (Python 2 code: uses `long`.)
    """
    import subprocess
    hz = (hz < 10) and 10 or hz  # clamp the rate to a minimum of 10 lines/sec
    #sys.stdout.write('%d: --> %s\n'%(hz, command))
    sys.stdout.flush()
    p = subprocess.Popen(
        command,
        shell = True,
        stdin = subprocess.PIPE,
        stderr = subprocess.STDOUT,
        stdout = subprocess.PIPE)
    stdout = p.stdout
    p.stdin.close()
    count = 0
    ts = long(time.time() * 1000000)  # next permitted emit time, in microseconds
    period = 1000000 / hz             # microseconds between lines at the target rate
    tt = time.time()
    while True:
        text = stdout.readline()
        if text == '':
            # EOF: child closed its stdout.
            break
        text = text.rstrip('\n\r')
        current = long(time.time() * 1000000)
        if current < ts:
            # Ahead of schedule: sleep until the next emit slot.
            delta = (ts - current)
            time.sleep(delta * 0.001 * 0.001)
        elif ts < current - period * 10:
            # More than 10 periods behind: resync instead of bursting output.
            ts = current
        ts += period
        sys.stdout.write(text + '\n')
        sys.stdout.flush()
    #sys.stdout.write('endup %ld seconds\n'%long(time.time() - tt))
    sys.stdout.flush()
    return 0
#----------------------------------------------------------------------
# main program
#----------------------------------------------------------------------
def main(args):
    """Entry point: args[1] is the command line to run under flow control."""
    args = [n for n in args]
    if len(args) < 2:
        print 'usage: %s HZ command'%args[0]
        return 1
    # NOTE(review): despite the usage string, the rate comes from the
    # VIM_LAUNCH_HZ environment variable (default 50), not from argv.
    hz = int(os.environ.get('VIM_LAUNCH_HZ', '50'))
    flow_control(args[1], hz)
    return 0
#----------------------------------------------------------------------
# main program
#----------------------------------------------------------------------
if __name__ == '__main__':
    # Delegate to main() with the raw argv (argv[0] is the script name).
    main(sys.argv)
| StarcoderdataPython |
1618577 | <reponame>wmvanvliet/psychic
import numpy as np
from .basenode import BaseNode
from ..dataset import DataSet
from .spatialfilter import sym_whitening, cov0
from ..utils import get_samplerate
from scipy import signal
class SlowSphering(BaseNode):
    """Node that whitens EEG data with a slowly-varying, locally estimated transform."""
    def __init__(self, isi=10, reest=.5):
        '''
        Define a SlowSphering node, with inter-stimulus interval isi in seconds
        which is reestimated every reest seconds.
        '''
        self.isi = isi
        self.reest = reest
        BaseNode.__init__(self)
    def train_(self, d):
        # Design a 4th-order low-pass IIR filter that smooths the covariance
        # estimates; the cutoff (1/isi) is expressed relative to the Nyquist
        # frequency of the re-estimation rate (1/reest / 2).
        self.samplerate = get_samplerate(d)
        nyq = ((1./self.reest) / 2.)
        self.cutoff = (1./self.isi) / nyq
        self.log.debug('set cutoff: %.3f' % self.cutoff)
        self.fil = signal.iirfilter(4, self.cutoff, btype='low')
    def apply_(self, d):
        # Re-estimate the whitening transform every `reest` seconds of samples.
        data = slow_sphere(d.data, self.fil, int(self.reest * self.samplerate))
        return DataSet(data, default=d)
def slow_sphere(samples, ba, wstep):
    '''
    Applies a symmetrical whitening transform to samples, based on locally
    estimated covariance matrices. ba is a (b, a) FIR or IIR filter that
    determines the type of smoothing; wstep determines how much the window
    shifts before re-estimation of the whitening transform.

    The actual calculation is performed as follows:
    1) a local covariance is estimated for segments of wstep length
    2) the local covariances are forward filtered with (b, a)
    3) each segment is individually whitened with a symmetrical whitening
       transform

    The filter (b, a) should be designed to match wstep.
    '''
    # (Parameter was a 2to3 "xxx_todo_changeme" tuple artifact; it is still
    # accepted positionally exactly as before.)
    b, a = ba
    samples = np.atleast_2d(samples)
    # One covariance matrix per non-overlapping window of wstep samples.
    sigs = np.asarray([cov0(samples[:, i:i+wstep]) for i in
        range(0, samples.shape[1], wstep)])
    # Smooth the covariance estimates over time (along the window axis).
    sigs = signal.lfilter(b, a, sigs, axis=0)
    ws = [sym_whitening(s) for s in sigs]
    # Whiten each window with its own smoothed transform and re-join.
    return np.hstack([np.dot(W.T, samples[:, i * wstep:(i+1) * wstep])
        for i, W in enumerate(ws)])
| StarcoderdataPython |
3308747 | <reponame>Nub-Team/MLP-Classifier<gh_stars>0
from Network import Network
import os
import numpy as np
Name = '1'
# Ensure the output directory exists before the network writes results there.
if not os.path.exists('out'):
    os.makedirs('out')
Path = os.path.join(os.getcwd(), 'out')

# Input-feature mask: a 1 keeps the corresponding feature column (0-3).
Neuron_in_topo = [0, 0, 0, 1]
Neuron_in = 1            # number of input neurons (must equal the mask's 1-count)
Neuron_hidden = 4        # hidden-layer size
Learning_r = 0.1         # learning rate
Moment = 0.5             # momentum term
Bias = 1
Epoches = 1000
Error_measure_frequency = 1
Topology = [Neuron_in, Neuron_hidden, 3]
Activation_fun_topology = ['sigmoid', 'sigmoid', 'sigmoid']


def _select_columns(rows, mask):
    """Keep the feature columns enabled in *mask*, plus the label column (4)."""
    selected = []
    for row in rows:
        features = [row[i] for i in range(4) if mask[i] == 1]
        features.append(row[4])
        selected.append(features)
    return selected


fromFile = np.loadtxt(fname='/content/ANN_classification/training_data_1.csv', delimiter=' ')
X1 = fromFile[:, 0:5]
fromFile = np.loadtxt(fname='/content/ANN_classification/test_data.csv', delimiter=' ')
X3 = fromFile[:, 0:5]

X2 = _select_columns(X1, Neuron_in_topo)
# BUG FIX: the test set was previously built from the *training* rows (X1),
# leaving the loaded test data (X3) entirely unused; evaluate on X3 instead.
X4 = _select_columns(X3, Neuron_in_topo)

net = Network(Topology, Activation_fun_topology, Moment, Learning_r, Bias, Epoches, Error_measure_frequency)
net.train(X2, Name, Path, True)
net.test(X4, Name, Path, True)
3273950 | from django import forms
from .models import *
from django.forms import ModelForm
from .choices import *
class CreateCurriculumForm(forms.ModelForm):
    """ModelForm for creating a Curriculum: free-text name plus a faculty choice."""
    CurriculumName = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),label='Curriculum Name:')
    FacultyName = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Faculty Name:',choices = FacultyNameChoices)
    class Meta:
        model = Curriculum
        fields = 'CurriculumName', 'FacultyName'
class CreateCourseOutlineForm(forms.ModelForm):
    """ModelForm for the course-outline header: name, code, faculty and curriculum."""
    CourseName = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),label='Course Name')
    CourseCode = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),label='Course Code:')
    FacultyName = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Faculty Name:',choices = FacultyNameChoices)
    # Curriculum is a FK, so the choices come from the Curriculum table.
    CourseCurriculumID = forms.ModelChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Curriculum Name:',queryset=Curriculum.objects.all())
    class Meta:
        model = CourseOutline
        fields = 'CourseName', 'CourseCode','FacultyName','CourseCurriculumID'
class CreateCourseOutlineForm1(forms.ModelForm):
    """Section 1 of the outline: credits, category, prerequisites and revision date."""
    NumberOfCredit = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Number of Credit:',choices = Credits)
    CategoryOfCourse = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Category of course:',choices = CourseCategory)
    Prerequisite = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),label='Pre-Requisite:')
    Corequisite = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),label='Co-Requisite:')
    Place = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),label='Place:')
    # Defaults to "now" at form-render time via timezone.now.
    DateofCourseDevelopmentorRevision = forms.DateTimeField(widget = forms.DateTimeInput(attrs={'class':'form-control'}),label='Date of course Development or Revision:',initial =timezone.now)
    class Meta:
        model = CourseOutlineSection1
        fields = 'NumberOfCredit', 'CategoryOfCourse', 'Prerequisite', 'Corequisite', 'Place', 'DateofCourseDevelopmentorRevision'
class CreateCourseOutlineForm2(forms.ModelForm):
    """Section 2 of the outline: course aims and objectives."""
    CourseAims = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control','rows':'4'}),label='Course Aims')
    ObjofCourse = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control','rows':'4'}),label='Objective of Course')
    class Meta:
        model = CourseOutlineSection2
        fields = 'CourseAims', 'ObjofCourse'
class CreateCourseOutlineForm3(forms.ModelForm):
    """Section 3 of the outline: description plus the per-activity hour budget."""
    CourseDesc = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control','rows':'4'}),label='Course Description')
    NumofLectureHrs = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Number of Lecture Hours',choices=HoursChoices)
    NumofAddLectureHrs = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Number of Additional Lecture Hours',choices=HoursChoices)
    NumofLabHrs = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Number of Lab Hours',choices=HoursChoices)
    NumofSSHrs = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Number of Self Study Hours',choices=HoursChoices)
    NumofAdvHrs = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Number of Advising Hours',choices=HoursChoices)
    class Meta:
        model = CourseOutlineSection3
        fields = 'CourseDesc', 'NumofLectureHrs', 'NumofAddLectureHrs', 'NumofLabHrs', 'NumofSSHrs', 'NumofAdvHrs'
class CreateCourseOutlineForm4(forms.ModelForm):
    """Section 4 of the outline: one course topic with its description."""
    CourseTopic = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}),label='Course Topics')
    CourseTopicDesc = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control','rows':'4'}),label='Course Topics Description')
    class Meta:
        model = CourseOutlineSection4
        fields = 'CourseTopic', 'CourseTopicDesc'
class CreateCourseOutlineForm5(forms.ModelForm):
    """Section 5 of the outline: one evaluation component and its weight (%)."""
    EvaluationType = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Evaluation type',choices=EvaluationTypes)
    EvaluationPercentage = forms.CharField(widget=forms.TextInput(attrs={'class':'form-control'}))
    class Meta:
        model = CourseOutlineSection5
        fields = 'EvaluationType', 'EvaluationPercentage'
class CreateCourseOutlineForm6(forms.ModelForm):
    """Section 6 of the outline: one learning resource and its description."""
    ResourcesTypes = forms.ChoiceField(widget=forms.Select(attrs={'class':'form-control'}),label='Resource Type',choices=ResourcesTypes)
    ResourcesDescription = forms.CharField(widget=forms.Textarea(attrs={'class':'form-control','rows':'4'}),label='Resource Description')
    class Meta:
        model = CourseOutlineSection6
        fields = 'ResourcesTypes', 'ResourcesDescription'
| StarcoderdataPython |
1770291 | from flask import Flask, render_template, request, g, flash, redirect, url_for
import openaq
from .models import DB, Record
from .forms import SelectCityForm
def create_app():
    """Create and configure an instance of the Flask application."""
    app = Flask(__name__)
    # NOTE(review): hard-coded secret key -- acceptable for local dev only;
    # should come from configuration/environment in any deployed setting.
    app.secret_key = 'super secret key'
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite3'
    app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
    DB.init_app(app)
    # Create route to the home page
    @app.route('/')
    def root():
        #data = fetch_data()
        return render_template(
            'base.html', title='Home', form=SelectCityForm())
    @app.route('/run', methods=(['GET', 'POST']))
    def run():
        # Show the records currently stored for the selected city.
        data = fetch_data()
        city = get_city()
        return render_template(
            'index.html', title='Home', data=data, city=city,
            form=SelectCityForm())
    # Create route to allow user to refresh data in the database
    @app.route('/refresh', methods=(['GET', 'POST']))
    def refresh():
        """Pull fresh data from Open AQ and replace existing data."""
        if request.method == 'POST':
            city = set_city(request.form['city'])
        else:
            return redirect(url_for('run'))
        # Wipe and fully repopulate the database with fresh measurements.
        DB.drop_all()
        DB.create_all()
        params={'city': city, 'parameter': 'pm25'} # Can configure from selection
        push_data_to_db(
            params=params,
            data=get_openaq_data(
                city=params['city'],
                parameter=params['parameter']
            ),
        )
        DB.session.commit()
        return redirect(url_for('run'))
    # The factory must return the configured app -- do not delete this return.
    return app
    # (end of create_app)
# Create local instances to share across functions
def get_api():
    """Return an openaq.OpenAQ client memoized on the request context (flask.g)."""
    if 'api' not in g:
        g.api = openaq.OpenAQ()
    return g.api
# Helper functions
def get_openaq_data(city, parameter):
    """Fetch Open AQ measurements and return (date-utc, value) tuples.

    Returns None when the API responds with a non-200 status.
    NOTE(review): callers (push_data_to_db) do not handle the None case.
    """
    api = get_api()
    results = Results(city=city, parameter=parameter)
    status, results.data = api.measurements(city=city, parameter=parameter)
    if status == 200:
        return results.get_data(('date.utc', 'value'))
def push_data_to_db(params, data):
    """Stage one Record row per (datetime, value) tuple; caller commits."""
    # hard coded to only date date_time and val right now
    print('adding data to db')
    for record in data:
        db_record = Record(datetime=record[0], value=record[1])
        DB.session.add(db_record)
def fetch_data():
    """Return a query of stored records with a measurement value of at least 10."""
    return Record.query.filter(Record.value >= 10)
def set_city(city):
    """Record *city* on the request context and return it.

    The previous implementation only assigned g.city when it was unset, then
    returned the possibly-different local value -- so a repeated call within
    one context silently kept a stale g.city while reporting the new one.
    Always assigning keeps g.city and the return value consistent.
    """
    g.city = city
    return city
def get_city():
    """Return the city stored on the request context, or a fallback label."""
    if 'city' in g:
        return g.city
    return 'City not Found'
# Data class
class Results():
    """Container for one Open AQ query (city, parameter) and its raw payload."""
    def __init__(self, city, parameter, data=None):
        self.city = city
        self.parameter = parameter
        self.data = data
    def get_data(self, tuple_of_columns=None):
        """Project raw API records onto *tuple_of_columns*, one tuple per record.

        Columns whose name contains 'date' are resolved via strip_date to the
        record's UTC timestamp; all others are plain key lookups.
        """
        assert type(tuple_of_columns) == tuple
        rows = []
        for record in self.data['results']:
            row = tuple(
                self.strip_date(record['date'], 'utc') if 'date' in column
                else record[column]
                for column in tuple_of_columns
            )
            rows.append(row)
        return rows
    def strip_date(self, date_data, date_type):
        """Pull the *date_type* entry (e.g. 'utc') out of a raw date dict."""
        return date_data[date_type]
| StarcoderdataPython |
16199 | <filename>python/cython_build.py
from distutils.core import setup
from distutils.extension import Extension
from Cython.Build import cythonize
import sys
# Match Cython's generated code semantics to the interpreter running the build.
python_version = sys.version_info[0]
setup(
    name='batch_jaro_winkler',
    # NOTE(review): distutils is deprecated since Python 3.10; consider
    # migrating to setuptools when the build environment allows.
    ext_modules=cythonize([Extension('batch_jaro_winkler', ['cbatch_jaro_winkler.pyx'])], language_level=python_version)
)
1656763 | <filename>wordservice/tests/wordservice/test_wordservice.py
# -*- coding: utf-8 -*-
__author__ = "<NAME>"
__copyright__ = "<NAME>"
__license__ = "mit"
import os
from unittest import TestCase, mock
from wordservice import WordService
class TrieTest(TestCase):
    """Tests for WordService lookups against the bundled resource word list."""
    # Resources directory shipped next to this test module.
    _resources_dir = os.path.normpath(os.path.join(os.path.dirname(__file__), 'resources'))
    @classmethod
    def setUpClass(cls) -> None:
        # Patch the class-level resources dir so WordService loads test data,
        # then build one shared service instance for all tests.
        with mock.patch.object(WordService, '_resources_dir', cls._resources_dir):
            cls._word_service = WordService()
    def test_not_found(self):
        """search() is False for a word absent from the resources."""
        found = self._word_service.search('wordthatdoesnotexist')
        self.assertEqual(False, found)
    def test_found(self):
        """search() is True for a word present in the resources."""
        found = self._word_service.search('word')
        self.assertEqual(True, found)
    def test_starts_with_not_found(self):
        """starts_with() yields nothing for a prefix with no matches."""
        found = self._word_service.starts_with('wordthatdoesnotexist')
        self.assertEqual(0, len(found))
    def test_starts_with_found(self):
        """starts_with() yields at least one match for a common prefix."""
        found = self._word_service.starts_with('word')
        assert len(found)
    def test_starts_with_maxfound(self):
        """starts_with() honours the max_found cap."""
        found = self._word_service.starts_with('w', max_found=10)
        self.assertEqual(10, len(found))
| StarcoderdataPython |
117843 | import logging
import numpy as np
import pandas
import constants
import pygeoutil.util as util
from preprocess_IAM import IAM
class GCAM(IAM):
    """
    Class for GCAM: preprocesses GCAM land-use netCDF/CSV inputs into
    harmonized per-country and per-grid-cell netCDF outputs.
    (Python 2 code: uses print statements, xrange, and pandas .ix.)
    """
    def __init__(self, path_nc):
        IAM.__init__(self, 'GCAM', path_nc)
        self.land_var = 'landcoverpercentage'  # variable name inside GCAM netCDFs
        # Input file paths
        self.path_pas = constants.gcam_dir + constants.PASTURE[0]
        self.path_frt = constants.gcam_dir + constants.FOREST[0]
        self.path_urb = constants.gcam_dir + constants.URBAN[0]
        self.path_wh = constants.gcam_dir + constants.WOOD_HARVEST
        self.path_ftz = constants.gcam_dir + constants.FERT_DATA
        # Output directories
        self.gcam_out_fl = constants.out_dir+constants.GCAM_OUT
        self.perc_crops_fl = constants.out_dir+constants.GCAM_CROPS
    def AEZ_to_national_GCAM(self, data_source = 'wh', out_nc_name = ''):
        """
        Convert GCAM region-level tabular data into a per-country annual netCDF.

        :param data_source: 'wh' for wood harvest, 'ftz' for fertilizer data
        :param out_nc_name: base name for both the output file and its variable
        :return: None; writes <out_nc_name>_<start>_<end>.nc to constants.out_dir
        """
        # Create a dictionary mapping GCAM AEZ's to regions
        if data_source == 'wh':
            df = util.open_or_die(self.path_wh)
        elif data_source == 'ftz': # fertilizer data
            df = util.open_or_die(self.path_ftz)
        # Insert columns so that we have data for each year
        idx = 1
        for yr in xrange(constants.GCAM_START_YR + 1, constants.GCAM_END_YR):
            # Skip years for which we already have data i.e. multiples of constants.GCAM_STEP_YR
            if yr%constants.GCAM_STEP_YR != 0:
                df.insert(constants.SKIP_GCAM_COLS + idx, str(yr), np.nan)
            # idx advances every year so insert positions stay chronological
            # relative to the pre-existing step-year columns.
            idx += 1
        # Extract columns with information on GCAM regions
        gcam_df = df[['region', 'subsector']]
        # Fill in missing values by linear interpolation across the year columns
        df = df.ix[:, str(constants.GCAM_START_YR):str(constants.GCAM_END_YR)]
        df = df.interpolate(axis = 1)
        # Concatenate region info back onto the interpolated year columns
        df = pandas.concat([gcam_df, df], axis=1, join='inner')
        # Extract "Russia", "Central Asia", "EU-12", and "Europe-Eastern" into a single larger region with region code 33
        merg_df = df.loc[df['region'].isin(['Russia', 'Central Asia', 'EU-12', 'Europe-Eastern'])]
        # Create a new row with data for USSR or region code 33
        new_row = ['USSR', 'subsector']
        new_row.extend(merg_df.ix[:, 2:].sum().tolist())
        # Add newly created row to dataframe
        df.loc[len(df.index)] = np.array(new_row)
        # Group dataframe by region
        df = df.groupby('region').sum()
        # Remove the subsector column since it interferes with netCDF creation later
        df.drop('subsector', axis=1, inplace=True)
        # Read in GCAM region country mapping:
        # ISO code -> [modified region id, region name, country-to-region ratio]
        xdf = util.open_or_die(constants.gcam_dir + constants.GCAM_MAPPING)
        map_xdf = xdf.parse("Sheet1")
        df_dict = dict((z[0],list(z[1:])) for z in zip(map_xdf['country ISO code'], map_xdf['Modified GCAM Regions'],
                                                       map_xdf['GCAM REGION NAME'], map_xdf['country-to-region WH ratios']))
        # Create WH output netCDF
        onc = util.open_or_die(constants.out_dir + out_nc_name + '_' + str(constants.GCAM_START_YR) + '_' +
                               str(constants.GCAM_END_YR) + '.nc', 'w')
        # dimensions
        onc.createDimension('country_code', len(df_dict.keys()))
        onc.createDimension('time', constants.GCAM_END_YR - constants.GCAM_START_YR + 1)
        # variables
        country_code = onc.createVariable('country_code', 'i4', ('country_code',))
        time = onc.createVariable('time', 'i4', ('time',))
        data = onc.createVariable(out_nc_name, 'f4', ('country_code', 'time',))
        # Metadata
        country_code.long_name = 'country_code'
        country_code.units = 'index'
        country_code.standard_name = 'country_code'
        time.units = 'year as %Y.%f'
        time.calendar = 'proleptic_gregorian'
        if data_source == 'wh':
            data.units = 'MgC'
            data.long_name = 'wood harvest carbon'
        elif data_source == 'ftz':
            print 'TODO!!'
        # Assign data
        time[:] = np.arange(constants.GCAM_START_YR, constants.GCAM_END_YR + 1)
        country_code[:] = sorted(df_dict.keys())
        for idx, ctr in enumerate(country_code[:]):
            # Get GCAM region corresponding to country
            gcam_reg = df_dict.get(ctr)[1] # GCAM region identifier
            gcam_mul = df_dict.get(ctr)[2] # GCAM country-to-region WH ratios
            try:
                # @TODO: Need to finalize woodharvest calculation
                # @TODO: Generalize for data other than wood harvest
                data[idx, :] = df.ix[gcam_reg].values.astype(float) * 0.225 * constants.BILLION * gcam_mul
                # @TODO: Multiply by 1.3 to account for slash fraction
            except:
                # Region missing from the table: fall back to all-zero series.
                data[idx, :] = np.zeros(len(time[:]))
        onc.close()
    def create_GCAM_croplands(self, nc):
        """
        Accumulate all GCAM crop-category fractions into one array.

        :param nc: Empty 3D numpy array (yrs,ny,nx)
        :return nc: 3D numpy array containing SUM of all GCAM cropland percentages
        """
        # Iterate over all crop categories and add the self.land_var data
        for i in range(len(constants.CROPS)):
            print('Processing: ' + constants.CROPS[i])
            logging.info('Processing: ' + constants.CROPS[i])
            ds = util.open_or_die(constants.gcam_dir+constants.CROPS[i])
            for j in range(len(self.time)):
                nc[j,:,:] += ds.variables[self.land_var][j,:,:].data
            ds.close()
        # @TODO: Test whether sum of all self.land_var in a given year is <= 1.0
        return nc
    def create_nc_perc_croplands(self, sum_nc, shape):
        """
        Create netcdf file with each crop category represented as fraction of cropland
        area and not total grid cell area
        :param sum_nc: netCDF file containing 'cropland' which is the fraction of
        cell area occupied by all croplands combined
        :param shape: Tuple containing dimensions of netCDF (yrs, ny, nx)
        :return: None; writes self.perc_crops_fl
        """
        print 'Creating cropland nc'
        logging.info('Creating cropland nc')
        inc = util.open_or_die(sum_nc)
        onc = util.open_or_die(self.perc_crops_fl, 'w')
        onc.description = 'crops_as_fraction_of_croplands'
        # dimensions
        onc.createDimension('time',shape[0])
        onc.createDimension('lat', shape[1])
        onc.createDimension('lon', shape[2])
        # variables
        time = onc.createVariable('time', 'i4', ('time',))
        latitudes = onc.createVariable('lat', 'f4', ('lat',))
        longitudes = onc.createVariable('lon', 'f4', ('lon',))
        # Metadata
        latitudes.units = 'degrees_north'
        latitudes.standard_name = 'latitude'
        longitudes.units = 'degrees_east'
        longitudes.standard_name = 'longitude'
        # Assign time
        time[:] = self.time
        # Assign lats/lons
        latitudes[:] = self.lat
        longitudes[:] = self.lon
        # Assign data: each crop becomes its share of the total cropland fraction.
        for i in range(len(constants.CROPS)):
            print '\t'+constants.CROPS[i]
            onc_var = onc.createVariable(constants.CROPS[i], 'f4', ('time', 'lat', 'lon',),fill_value=np.nan)
            onc_var.units = 'percentage'
            ds = util.open_or_die(constants.gcam_dir+constants.CROPS[i])
            # Iterate over all years
            for j in range(shape[0]):
                onc_var[j,:,:] = ds.variables[self.land_var][j,:,:].data / inc.variables['cropland'][j,:,:]
            ds.close()
        # @TODO: Copy metadata from original GCAM netcdf
        onc.close()
    def write_GCAM_nc(self, isum_perc, shape):
        """
        Combine cropland, pasture, forest and urban fractions into one netCDF.

        :param isum_perc: Sum of self.land_var values for all crop classes
        :param shape: Tuple containing dimensions of netCDF (yrs, ny, nx)
        :return: Nothing, side-effect is to create self.gcam_out_fl with one
        percentage variable per land-use class
        """
        print 'Creating GCAM file'
        logging.info('Creating GCAM file')
        # Read in netCDF datasets
        ids_pas = util.open_or_die(self.path_pas)
        ids_frt = util.open_or_die(self.path_frt)
        ids_urb = util.open_or_die(self.path_urb)
        iam_nc = util.open_or_die(self.gcam_out_fl, perm = 'w')
        iam_nc.description = 'GCAM'
        # dimensions
        iam_nc.createDimension('time',shape[0])
        iam_nc.createDimension('lat', shape[1])
        iam_nc.createDimension('lon', shape[2])
        # variables
        time = iam_nc.createVariable('time', 'i4', ('time',))
        latitudes = iam_nc.createVariable('lat', 'f4', ('lat',))
        longitudes = iam_nc.createVariable('lon', 'f4', ('lon',))
        crp = iam_nc.createVariable('cropland', 'f4', ('time', 'lat', 'lon',),fill_value=np.nan)
        pas = iam_nc.createVariable('pasture', 'f4', ('time', 'lat', 'lon',),fill_value=np.nan)
        frt = iam_nc.createVariable('forest', 'f4', ('time', 'lat', 'lon',),fill_value=np.nan)
        urb = iam_nc.createVariable('urban', 'f4', ('time', 'lat', 'lon',),fill_value=np.nan)
        # Metadata
        crp.units = 'percentage'
        pas.units = 'percentage'
        frt.units = 'percentage'
        urb.units = 'percentage'
        latitudes.units = 'degrees_north'
        latitudes.standard_name = 'latitude'
        longitudes.units = 'degrees_east'
        longitudes.standard_name = 'longitude'
        # Assign time
        time[:] = self.time
        # Assign lats/lons
        latitudes[:] = self.lat
        longitudes[:] = self.lon
        # Assign data to new netCDF file, one year slice at a time
        for i in range(len(self.time)):
            crp[i,:,:] = isum_perc[i,:,:]
            pas[i,:,:] = ids_pas.variables[self.land_var][i,:,:].data
            frt[i,:,:] = ids_frt.variables[self.land_var][i,:,:].data
            urb[i,:,:] = ids_urb.variables[self.land_var][i,:,:].data
        # @TODO: Copy metadata from original GCAM netcdf
        ids_pas.close()
        ids_frt.close()
        ids_urb.close()
        iam_nc.close()
| StarcoderdataPython |
159055 | import abc
from contextlib import contextmanager
import datetime
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy import Column, String, ForeignKey, Float, Integer, DateTime, Enum
from . import requests
from . import storage
RequestsBase = declarative_base()
class SQLAlchemyStorage(abc.ABC):
    """Base class owning an SQLAlchemy engine and the schema of one declarative base."""
    def __init__(self, sqlalchemy_connection_string, base):
        from sqlalchemy import create_engine
        self.declarative_base = base
        self.engine = create_engine(sqlalchemy_connection_string)
        # Ensure all tables of the declarative base exist up front.
        self.declarative_base.metadata.create_all(self.engine)
    def wipe_database(self):
        """Drop and recreate every table known to the declarative base."""
        self.declarative_base.metadata.drop_all(self.engine)
        self.declarative_base.metadata.create_all(self.engine)
    # taken from: http://docs.sqlalchemy.org/en/latest/orm/session_basics.html
    @contextmanager
    def session_scope(self):
        """Provide a transactional scope around a series of operations."""
        from sqlalchemy.orm import sessionmaker
        session = sessionmaker(bind=self.engine)()
        try:
            yield session
            session.commit()
        except:
            # Bare except is deliberate here: roll back on *any* exception
            # (including KeyboardInterrupt) and always re-raise.
            session.rollback()
            raise
        finally:
            session.close()
class Request(RequestsBase):
    """Base ORM row shared by all request kinds (joined-table inheritance root)."""
    __tablename__ = 'requests'
    # Externally supplied request identifier.
    id = Column(String, nullable=False, primary_key=True, unique=True)
    # Identifier of the person who made the request.
    person_id = Column(String, nullable=False)
class IWantRequest(Request):
    """ORM row for an "I want to do activity X" request, joined to `requests`."""
    __tablename__ = 'iwantrequests'
    # Shares the primary key with the base `requests` row.
    id = Column(String, ForeignKey("requests.id"),
                primary_key=True, unique=True)
    deadline = Column(DateTime, nullable=False)
    activity_start = Column(DateTime, nullable=False)
    # Duration of the activity (units defined by the caller).
    activity_duration = Column(Float, nullable=False)
    activity = Column(String, nullable=False)
    # Result row that groups/resolves this request, if any.
    resolved_by = Column(Integer, ForeignKey("results.id"))
    def toIWantRequest(self):
        """Convert this ORM row into the plain requests.IWantRequest value object."""
        result = requests.IWantRequest(
            self.person_id, self.activity, self.deadline,
            self.activity_start, self.activity_duration)
        result.id = self.id
        result.resolved_by = self.resolved_by
        return result
class Result(RequestsBase):
    """ORM row representing the resolution state shared by a group of requests."""
    __tablename__ = 'results'
    id = Column(Integer, primary_key=True, unique=True, autoincrement=True)
    # Earliest deadline among the associated requests.
    deadline = Column(DateTime)
    status = Column(Enum(requests.Status))
    def toResult(self, requests_ids):
        """Convert to the plain requests.Result value object for *requests_ids*."""
        result = requests.Result(
            self.id, requests_ids, self.deadline)
        result.status = self.status
        return result
# notification_status
class SqlAlchemyRequestStorage(SQLAlchemyStorage, storage.RequestStorage):
    """RequestStorage backed by SQLAlchemy (Request/IWantRequest/Result tables)."""
    def __init__(self, backend_url):
        SQLAlchemyStorage.__init__(self, backend_url, RequestsBase)
    def store_request(self, request):
        """Persist *request*; only IWantRequest instances are supported."""
        if isinstance(request, requests.IWantRequest):
            return self._store_activity_request(request)
        else:
            raise ValueError(f"Can't store requests of type {type(request)}.")
    def _store_activity_request(self, request):
        """Insert the request row together with a fresh pending Result row."""
        with self.session_scope() as session:
            new_result_id = self._create_result(request)
            request_to_add = IWantRequest(
                id=request.id, person_id=request.person_id, deadline=request.deadline,
                activity=request.activity, activity_start=request.activity_start,
                activity_duration=request.activity_duration, resolved_by=new_result_id)
            session.add(request_to_add)
        request.resolved_by = new_result_id
        return request
    def get_activity_requests(self, activity=None):
        """Return all stored requests, optionally filtered by *activity*."""
        result = []
        with self.session_scope() as session:
            query_results = (
                session.query(Request, IWantRequest)
                .filter(Request.id == IWantRequest.id)
            )
            if activity is not None:
                query_results = query_results.filter(
                    IWantRequest.activity == activity)
            result = [record.toIWantRequest()
                      for base, record in query_results.all()]
        return result
    def remove_activity_request(self, request_id, person_id):
        """Delete the (unique) request and refresh its Result's status/deadline."""
        with self.session_scope() as session:
            query_results = (
                session.query(IWantRequest)
                .filter(IWantRequest.id == request_id, IWantRequest.person_id == person_id)
            )
            all_results = query_results.all()
            assert len(all_results) == 1
            request_to_delete = all_results[0]
            session.delete(request_to_delete)
            concerned_result_id = request_to_delete.resolved_by
        # Recompute after the delete has been committed by session_scope.
        self.__update_result_status(concerned_result_id)
        self._update_result_deadline(concerned_result_id)
    def __update_result_status(self, result_id):
        """Recompute a Result's status from the requests still attached to it."""
        with self.session_scope() as session:
            query_results = (
                session.query(IWantRequest)
                .filter(IWantRequest.resolved_by == result_id)
            )
            requests_of_the_same_result = [req.toIWantRequest()
                                           for req in query_results.all()]
        with self.session_scope() as session:
            result_obj = self._get_result_object(session, result_id)
            # _update_result_status is provided by storage.RequestStorage.
            self._update_result_status(result_obj, requests_of_the_same_result)
    def resolve_requests(self, requests_ids):
        """Merge the Results of the given requests into one and return its id."""
        with self.session_scope() as session:
            query_results = (
                session.query(IWantRequest)
                .filter(IWantRequest.id.in_(requests_ids))
            )
            resolved_by = None
            all_results = query_results.all()
            assert len(all_results) > 1
            result_objs = [self._get_result_object(session, req.resolved_by)
                           for req in all_results]
            involved_result_ids = [result.id for result in result_objs]
            # _find_fitting_result_id is provided by storage.RequestStorage.
            resolved_by = self._find_fitting_result_id(result_objs)
            for record in all_results:
                record.resolved_by = resolved_by
        # Refresh status of every Result that lost or gained requests.
        for involved_id in involved_result_ids:
            self.__update_result_status(involved_id)
        self._update_result_deadline(resolved_by)
        return resolved_by
    def get_requests_of_result(self, result_id):
        """Return the value-object requests currently attached to *result_id*."""
        with self.session_scope() as session:
            query_results = (
                session.query(IWantRequest)
                .filter(IWantRequest.resolved_by == result_id)
            )
            result = [record.toIWantRequest()
                      for record in query_results.all()]
        return result
    def get_result(self, result_id):
        """Return the Result value object including the ids of its requests."""
        with self.session_scope() as session:
            query_results = (
                session.query(IWantRequest)
                .filter(IWantRequest.resolved_by == result_id)
            )
            requests_ids = {req.id for req in query_results.all()}
            result = self._get_result_object(session, result_id).toResult(requests_ids)
        return result
    def _get_result_object(self, session, result_id):
        """Return the ORM Result row for *result_id*, or None."""
        query_results = (
            session.query(Result)
            .filter(Result.id == result_id)
        )
        result = query_results.first()
        return result
    def get_requests_by_deadline_proximity(self, deadline, time_buffer_in_seconds):
        """Return requests whose deadline falls inside the buffer window."""
        time_end = deadline
        time_start = time_end - datetime.timedelta(seconds=time_buffer_in_seconds)
        with self.session_scope() as session:
            query_results = (
                session.query(IWantRequest)
                .filter(IWantRequest.deadline > time_start)
                .filter(IWantRequest.deadline < time_end)
            )
            result = [record.toIWantRequest()
                      for record in query_results.all()]
        return result
    def get_results_by_deadline_proximity(self, deadline, time_buffer_in_seconds):
        """Return Result value objects whose deadline falls inside the buffer window."""
        time_end = deadline
        time_start = time_end - datetime.timedelta(seconds=time_buffer_in_seconds)
        with self.session_scope() as session:
            query_results = (
                session.query(Result)
                .filter(Result.deadline > time_start)
                .filter(Result.deadline < time_end)
            )
            # BUG FIX: Result rows have no `.Result` attribute; convert via
            # toResult(), which expects the *ids* of the attached requests
            # (mirrors get_result above). The old code raised AttributeError.
            result = [record.toResult({req.id for req in
                                       self.get_requests_of_result(record.id)})
                      for record in query_results.all()]
        return result
    def _create_result(self, request):
        """Insert a new PENDING Result row for *request* and return its id."""
        # The context manager doesn't work for some reason
        from sqlalchemy.orm import sessionmaker
        session = sessionmaker(bind=self.engine)()
        result = Result(status=requests.Status.PENDING, deadline=request.deadline)
        session.add(result)
        session.commit()
        result_id = result.id
        session.close()
        return result_id
    def _update_result_deadline(self, result_id):
        """Set the Result's deadline to the earliest deadline of its requests."""
        with self.session_scope() as session:
            query_results = (
                session.query(IWantRequest.deadline)
                .filter(IWantRequest.resolved_by == result_id)
                .order_by(IWantRequest.deadline.asc())
            )
            result_object = self._get_result_object(session, result_id)
            if result_object.status == requests.Status.INVALID:
                # Invalid results keep their recorded deadline untouched.
                return
            result_object.deadline = query_results.first()[0]
| StarcoderdataPython |
# Competitive-programming solution: for each of T test cases, cat A is at x,
# cat B is at y, the mouse is at z. The strictly nearer cat catches the
# mouse; on a tie the mouse escapes ("Mouse C").
# FIX: stray dataset-id prefix ("4829208 | ") fused onto the first line made
# the script a SyntaxError; removed.
t = int(input())
for _ in range(t):
    x, y, z = map(int, input().split(' '))
    print(['Cat A', 'Cat B', 'Mouse C'][0 if abs(x - z) < abs(y - z) else 1 if abs(x - z) > abs(y - z) else 2])
| StarcoderdataPython |
3299462 | # Copyright 2021 IBM Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from swagger_client.api.application_settings_api import ApplicationSettingsApi
from swagger_client.api.catalog_service_api import CatalogServiceApi
from swagger_client.api.component_service_api import ComponentServiceApi
from swagger_client.api.credential_service_api import CredentialServiceApi
from swagger_client.api.dataset_service_api import DatasetServiceApi
from swagger_client.api.health_check_api import HealthCheckApi
from swagger_client.api.inference_service_api import InferenceServiceApi
from swagger_client.api.model_service_api import ModelServiceApi
from swagger_client.api.notebook_service_api import NotebookServiceApi
from swagger_client.api.pipeline_service_api import PipelineServiceApi
| StarcoderdataPython |
65056 | """
Google Cloud Emulators
======================
Allows to spin up google cloud emulators, such as PubSub.
"""
from .pubsub import PubSubContainer # noqa
| StarcoderdataPython |
1668847 | #!/usr/bin/env python
"""
############################
Incident Package Data Module
############################
"""
# -*- coding: utf-8 -*-
#
# rtk.incident.Incident.py is part of The RTK Project
#
# All rights reserved.
# Copyright 2007 - 2017 <NAME> <EMAIL>rew.rowland <AT> reliaqual <DOT> com
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
# PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER
# OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# Import other RTK modules.
try:
import Utilities
except ImportError:
import rtk.Utilities as Utilities
__author__ = '<NAME>'
__email__ = '<EMAIL>'
__organization__ = 'ReliaQual Associates, LLC'
__copyright__ = 'Copyright 2007 - 2015 Andrew "Weibullguy" Rowland'
class Model(object):                       # pylint: disable=R0902, R0904
    """
    The Incident data model contains the attributes and methods for an
    Incident. The attributes of an Incident model are:
    :ivar revision_id: default value: None
    :ivar incident_id: default value: None
    :ivar incident_category: default value: 0
    :ivar incident_type: default value: 0
    :ivar short_description: default value: ''
    :ivar detail_description: default value: ''
    :ivar criticality: default value: 0
    :ivar detection_method: default value: 0
    :ivar remarks: default value: ''
    :ivar status: default value: 0
    :ivar test: default value: ''
    :ivar test_case: default value: ''
    :ivar execution_time: default value: 0.0
    :ivar unit_id: default value: 0
    :ivar cost: default value: 0.0
    :ivar incident_age: default value: 0.0
    :ivar hardware_id: default value: 0
    :ivar software_id: default value: 0
    :ivar request_by: default value: ''
    :ivar request_date: default value: 0
    :ivar reviewed: default value: False
    :ivar review_by: default value: ''
    :ivar review_date: default value: 0
    :ivar approved: default value: False
    :ivar approve_by: default value: ''
    :ivar approve_date: default value: 0
    :ivar closed: default value: False
    :ivar close_by: default value: ''
    :ivar close_date: default value: 0
    :ivar life_cycle: default value: ''
    :ivar analysis: default value: ''
    :ivar accepted: default value: False
    :ivar int relevant :default value: False
    :ivar int chargeable :default value: False
    """
    # NOTE(review): several documented defaults above drift from the values
    # actually set in __init__ (e.g. unit_id is '' not 0, request_by is 0
    # not '', life_cycle is 0 not ''); trust __init__, not the docstring.
    def __init__(self):
        """
        Method to initialize an Incident data model instance.
        """
        # Define private dictionary attributes.
        # Define private list attributes.
        # Define private scalar attributes.
        # Define public dictionary attributes.
        # Define public list attributes.
        # Presumably one relevancy answer per analysis question, -1 meaning
        # "not yet answered" (20 questions) — TODO confirm against the GUI.
        self.lstRelevant = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1,
                            -1, -1, -1, -1, -1, -1, -1]
        # Same convention for the 10 chargeability questions.
        self.lstChargeable = [-1, -1, -1, -1, -1, -1, -1, -1, -1, -1]
        # Define public scalar attributes.
        self.revision_id = None
        self.incident_id = None
        self.incident_category = 0
        self.incident_type = 0
        self.short_description = ''
        self.detail_description = ''
        self.criticality = 0
        self.detection_method = 0
        self.remarks = ''
        self.status = 0
        self.test = ''
        self.test_case = ''
        self.execution_time = 0.0
        self.unit_id = ''
        self.cost = 0.0
        self.incident_age = 0
        self.hardware_id = 0
        self.software_id = 0
        self.request_by = 0
        self.request_date = 0
        self.reviewed = False
        self.review_by = 0
        self.review_date = 0
        self.approved = False
        self.approve_by = 0
        self.approve_date = 0
        self.closed = False
        self.close_by = 0
        self.close_date = 0
        self.life_cycle = 0
        self.analysis = ''
        self.accepted = False
        self.relevant = -1
        self.chargeable = -1
class Incident(object):
    """
    The Incident data controller provides an interface between the Incident
    data model and an RTK view model. A single Incident controller can
    manage one or more Incident data models. The attributes of a
    Incident data controller are:
    :ivar _dao: the :class:`rtk.dao.DAO` to use when communicating with the RTK
                Project database.
    :ivar int _last_id: the last Incident ID used.
    :ivar dict dicIncidents: Dictionary of the Incident data models managed.
                             Key is the Incident ID; value is a pointer to the
                             Incident data model instance.
    """
    def __init__(self):
        """
        Method to initialize an Incident data controller instance.
        """
        # Define private dictionary attributes.
        # Define private list attributes.
        # Define private scalar attributes.
        self._dao = None
        self._last_id = None
        # Define public dictionary attributes.
        self.dicIncidents = {}
        # Define public list attributes.
        # Define public scalar attributes.
    def request_incidents(self, dao, revision_id, load_all=False, query=None):
        """
        Method to read the RTK Project database and load all the Incidents
        associated with the selected Revision. For each Incident returned:
        #. Retrieve the inputs from the RTK Project database.
        #. Create a Incident data model instance.
        #. Set the attributes of the data model instance from the returned
           results.
        #. Add the instance to the dictionary of Incidents being managed
           by this controller.
        :param rtk.DAO dao: the Data Access object to use for communicating
                            with the RTK Project database.
        :param int revision_id: the Revision ID to select the tasks for.
        :param bool load_all: indicates whether or not to load incidents for
                              all revisions.
        :param str query: the query used to retrieve a filtered set of
                          incidents.
        :return: (_results, _error_code)
        :rtype: tuple
        """
        self._dao = dao
        self._last_id = self._dao.get_last_id('rtk_incident')[0]
        if query is None:
            if not load_all:
                _query = "SELECT * FROM rtk_incident \
                          WHERE fld_revision_id={0:d}".format(revision_id)
            else:
                _query = "SELECT * FROM rtk_incident"
        else:
            _query = query
        (_results, _error_code, __) = self._dao.execute(_query, commit=False)
        # execute() may return a non-sequence on error, hence the TypeError
        # guard instead of checking _error_code.
        try:
            _n_incidents = len(_results)
        except TypeError:
            _n_incidents = 0
        for i in range(_n_incidents):
            _incident = Model()
            # NOTE(review): set_attributes is not defined on the Model class
            # visible in this file — presumably inherited/patched elsewhere;
            # verify before relying on it.
            _incident.set_attributes(_results[i])
            self.dicIncidents[_incident.incident_id] = _incident
        return(_results, _error_code)
    def add_incident(self, revision_id):
        """
        Method to add a new Incident to the RTK Program's database.
        :param int revision_id: the Revision ID to add the new Incident to.
        :return: (_results, _error_code)
        :rtype: tuple
        """
        try:
            _short_description = "New Incident " + str(self._last_id + 1)
        except TypeError:                   # No tasks exist.
            _short_description = "New Incident 1"
        # NOTE(review): %-interpolated SQL; _short_description is internally
        # generated so injection risk is low here, but parameterized queries
        # would be safer and consistent.
        _query = "INSERT INTO rtk_incident \
                  (fld_revision_id, fld_short_description) \
                  VALUES (%d, '%s')" % (revision_id, _short_description)
        (_results, _error_code, __) = self._dao.execute(_query, commit=True)
        # If the new test was added successfully to the RTK Project database:
        #   1. Retrieve the ID of the newly inserted task.
        #   2. Create a new Incident model instance.
        #   4. Set the attributes of the new Incident model instance.
        #   5. Add the new Incident model to the controller dictionary.
        if _results:
            self._last_id = self._dao.get_last_id('rtk_incident')[0]
            _incident = Model()
            # 719163/719164 are proleptic-Gregorian ordinals (1970-01-01 and
            # 1970-01-02) — presumably the "unset" default dates; TODO confirm.
            _incident.set_attributes((revision_id, self._last_id, 0, 0,
                                      _short_description, '', 0, 0, '', 0, '',
                                      '', 0.0, 0, 0.0, 0.0, 0, 0, 0, 719163,
                                      False, 0, 719163, False, 0, 719164,
                                      False, 0, 719163, '', '', False))
            self.dicIncidents[_incident.incident_id] = _incident
        return(_results, _error_code)
    def save_incident(self, incident_id):
        """
        Method to save the Incident model information to the open RTK Program
        database.
        :param int incident_id: the ID of the Incident task to save.
        :return: (_results, _error_code)
        :rtype: tuple
        """
        _incident = self.dicIncidents[incident_id]
        # Positional format args {2}..{63} map one-to-one onto the attribute
        # list passed to format() below — keep both in sync when adding
        # fields. NOTE(review): string attributes are interpolated directly
        # into the SQL; a single quote in e.g. remarks will break the
        # statement — consider parameterized queries.
        _query = "UPDATE rtk_incident \
                  SET fld_incident_category={2:d}, fld_incident_type={3:d}, \
                      fld_short_description='{4:s}', \
                      fld_long_description='{5:s}', fld_criticality={6:d}, \
                      fld_detection_method={7:d}, fld_remarks='{8:s}', \
                      fld_status={9:d}, fld_test_found='{10:s}', \
                      fld_test_case='{11:s}', fld_execution_time={12:f}, \
                      fld_unit='{13:s}', fld_cost={14:f}, \
                      fld_incident_age={15:d}, fld_hardware_id={16:d}, \
                      fld_sftwr_id={17:d}, fld_request_by={18:d}, \
                      fld_request_date={19:d}, fld_reviewed={20:d}, \
                      fld_reviewed_by={21:d}, fld_reviewed_date={22:d}, \
                      fld_approved={23:d}, fld_approved_by={24:d}, \
                      fld_approved_date={25:d}, fld_complete={26:d}, \
                      fld_complete_by={27:d}, fld_complete_date={28:d}, \
                      fld_life_cycle={29:d}, fld_analysis='{30:s}', \
                      fld_accepted={31:d}, fld_relevant_1={32:d}, \
                      fld_relevant_2={33:d}, fld_relevant_3={34:d}, \
                      fld_relevant_4={35:d}, fld_relevant_5={36:d}, \
                      fld_relevant_6={37:d}, fld_relevant_7={38:d}, \
                      fld_relevant_8={39:d}, fld_relevant_9={40:d}, \
                      fld_relevant_10={41:d}, fld_relevant_11={42:d}, \
                      fld_relevant_12={43:d}, fld_relevant_13={44:d}, \
                      fld_relevant_14={45:d}, fld_relevant_15={46:d}, \
                      fld_relevant_16={47:d}, fld_relevant_17={48:d}, \
                      fld_relevant_18={49:d}, fld_relevant_19={50:d}, \
                      fld_relevant_20={51:d}, fld_relevant={52:d}, \
                      fld_chargeable_1={53:d}, fld_chargeable_2={54:d}, \
                      fld_chargeable_3={55:d}, fld_chargeable_4={56:d}, \
                      fld_chargeable_5={57:d}, fld_chargeable_6={58:d}, \
                      fld_chargeable_7={59:d}, fld_chargeable_8={60:d}, \
                      fld_chargeable_9={61:d}, fld_chargeable_10={62:d}, \
                      fld_chargeable={63:d} \
                  WHERE fld_revision_id={0:d} \
                  AND fld_incident_id={1:d}".format(
                      _incident.revision_id, _incident.incident_id,
                      _incident.incident_category, _incident.incident_type,
                      _incident.short_description,
                      _incident.detail_description, _incident.criticality,
                      _incident.detection_method, _incident.remarks,
                      _incident.status, _incident.test, _incident.test_case,
                      _incident.execution_time, _incident.unit_id,
                      _incident.cost, _incident.incident_age,
                      _incident.hardware_id, _incident.software_id,
                      _incident.request_by, _incident.request_date,
                      _incident.reviewed, _incident.review_by,
                      _incident.review_date, _incident.approved,
                      _incident.approve_by, _incident.approve_date,
                      _incident.closed, _incident.close_by,
                      _incident.close_date, _incident.life_cycle,
                      _incident.analysis, _incident.accepted,
                      _incident.lstRelevant[0], _incident.lstRelevant[1],
                      _incident.lstRelevant[2], _incident.lstRelevant[3],
                      _incident.lstRelevant[4], _incident.lstRelevant[5],
                      _incident.lstRelevant[6], _incident.lstRelevant[7],
                      _incident.lstRelevant[8], _incident.lstRelevant[9],
                      _incident.lstRelevant[10], _incident.lstRelevant[11],
                      _incident.lstRelevant[12], _incident.lstRelevant[13],
                      _incident.lstRelevant[14], _incident.lstRelevant[15],
                      _incident.lstRelevant[16], _incident.lstRelevant[17],
                      _incident.lstRelevant[18], _incident.lstRelevant[19],
                      _incident.relevant, _incident.lstChargeable[0],
                      _incident.lstChargeable[1], _incident.lstChargeable[2],
                      _incident.lstChargeable[3], _incident.lstChargeable[4],
                      _incident.lstChargeable[5], _incident.lstChargeable[6],
                      _incident.lstChargeable[7], _incident.lstChargeable[8],
                      _incident.lstChargeable[9], _incident.chargeable)
        (_results, _error_code, __) = self._dao.execute(_query, commit=True)
        return(_results, _error_code)
| StarcoderdataPython |
1627577 | <filename>tests/tests.py<gh_stars>1-10
import os
import pandas
import pytest
import pygeneactiv
import simplejson as json
__DIR__ = os.path.dirname(__file__)
def test_headers():
    """Parsed GENEActiv headers must match the cached reference headers."""
    dataset = pygeneactiv.read(os.path.join(__DIR__, 'right_wrist.csv'))
    # Round-trip through JSON to squash NaNs the same way the cache was built.
    actual = json.loads(json.dumps(dataset.headers, ignore_nan=True))
    with open(os.path.join(__DIR__, 'headers.json')) as fo:
        expected = json.load(fo)
    assert actual == expected
def test_get_data():
    """Stream the file in chunks and compare aggregate stats to the cache."""
    geneactiv_file = os.path.join(__DIR__, 'right_wrist.csv')
    stats_file = os.path.join(__DIR__, 'stats.json')
    ds = pygeneactiv.read(geneactiv_file)
    samples = 0
    sums = pandas.DataFrame()
    # build stats
    for chunk in ds.get_data(chunksize=100):
        if sums.empty:
            sums = chunk.sum(axis=0)
        else:
            sums = sums.add(chunk.sum(axis=0))
        samples += chunk.shape[0]
    # read cached stats
    with open(stats_file, 'r') as fo:
        cache = json.load(fo)
    # BUG FIX: `assert pytest.approx(a, b)` only checks the truthiness of the
    # approx object, so the old assertions could never fail. The correct
    # idiom is `actual == pytest.approx(expected)`.
    assert sums['x'] / samples == pytest.approx(cache['mean']['x'])
    assert sums['y'] / samples == pytest.approx(cache['mean']['y'])
    assert sums['z'] / samples == pytest.approx(cache['mean']['z'])
    assert sums['lux'] / samples == pytest.approx(cache['mean']['lux'])
    assert sums['button'] / samples == pytest.approx(cache['mean']['button'])
    assert sums['thermistor'] / samples == pytest.approx(cache['mean']['thermistor'])
    assert samples == cache['samples']
| StarcoderdataPython |
96695 | <filename>ssm/init_state_distns.py
from functools import partial
from warnings import warn
import autograd.numpy as np
import autograd.numpy.random as npr
from autograd.scipy.special import logsumexp
from autograd.misc.optimizers import sgd, adam
from autograd import grad
from ssm.util import ensure_args_are_lists
class InitialStateDistribution(object):
    """Learnable initial distribution over K discrete states, stored as
    unnormalized log-probabilities log_pi0."""
    def __init__(self, K, D, M=0):
        # K: number of discrete states, D: observation dim, M: input dim.
        self.K, self.D, self.M = K, D, M
        # Start uniform: log(1/K) for every state.
        self.log_pi0 = -np.log(K) * np.ones(K)
    @property
    def params(self):
        # Parameters exposed as a tuple for the generic optimizers.
        return (self.log_pi0,)
    @params.setter
    def params(self, value):
        self.log_pi0 = value[0]
    @property
    def initial_state_distn(self):
        # Normalized probabilities (softmax of the stored log weights).
        return np.exp(self.log_pi0 - logsumexp(self.log_pi0))
    @property
    def log_initial_state_distn(self):
        # Normalized log-probabilities.
        return self.log_pi0 - logsumexp(self.log_pi0)
    @ensure_args_are_lists
    def initialize(self, datas, inputs=None, masks=None, tags=None):
        # No data-dependent initialization needed for this distribution.
        pass
    def permute(self, perm):
        """
        Permute the discrete latent states.
        """
        self.log_pi0 = self.log_pi0[perm]
    def log_prior(self):
        # Flat (improper) prior: contributes nothing to the objective.
        return 0
    def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
        # Closed-form M-step: average expected state occupancy at t=0 over
        # all sequences; 1e-8 keeps log() finite for never-visited states.
        pi0 = sum([Ez[0] for Ez, _, _ in expectations]) + 1e-8
        self.log_pi0 = np.log(pi0 / pi0.sum())
class FixedInitialStateDistribution(InitialStateDistribution):
    """Initial state distribution held fixed at a user-supplied pi0
    (uniform if none given); never updated during fitting.

    FIX: a stray dataset separator fused onto the final `pass` line made
    this class a SyntaxError; removed.
    """
    def __init__(self, K, D, pi0=None, M=0):
        super(FixedInitialStateDistribution, self).__init__(K, D, M=M)
        if pi0 is not None:
            # Handle the case where user passes a numpy array of (K, 1) instead of (K,)
            pi0 = np.squeeze(np.array(pi0))
            assert len(pi0) == K, "Array passed as pi0 is of the wrong length"
            # 1e-16 keeps the log finite for zero-probability states.
            self.log_pi0 = np.log(pi0 + 1e-16)
    def m_step(self, expectations, datas, inputs, masks, tags, **kwargs):
        # Don't change the distribution
        pass
3327087 | from simglucose.simulation.user_interface import simulate
from simglucose.controller.base import Controller, Action
class MyController(Controller):
    """Minimal example controller: records the latest observation and
    always returns a zero-insulin action."""
    def __init__(self, init_state):
        self.init_state = init_state
        self.state = init_state
    def policy(self, observation, reward, done, **info):
        '''
        Every controller must have this implementation!
        ----
        Inputs:
        observation - a namedtuple defined in simglucose.simulation.env;
                      currently it only carries the CGM blood-glucose reading.
        reward      - current reward returned by the environment
        done        - True when the episode is over, False otherwise
        info        - extra keyword arguments from the environment, e.g.
                      patient_name and sample_time
        ----
        Output:
        action - an Action namedtuple with two entries: basal, bolus
        '''
        # Remember the latest observation, then administer nothing.
        self.state = observation
        zero_dose = Action(basal=0, bolus=0)
        return zero_dose
    def reset(self):
        '''
        Restore the controller to its initial state (required hook).
        '''
        self.state = self.init_state
# Run the interactive simulation driven by the zero-insulin controller.
ctrller = MyController(0)
simulate(controller=ctrller)
| StarcoderdataPython |
4812985 | <gh_stars>0
__author__ = '10bestman'
| StarcoderdataPython |
177611 | <filename>guidance_and_support/models.py
"""Model definitions for the guidance_and_support app."""
from django.db import models
from wagtail.admin.edit_handlers import FieldPanel
from wagtail.core.fields import StreamField, RichTextField
from wagtail.core.models import Page
from wagtail.images.edit_handlers import ImageChooserPanel
from home.models import AbstractContentPage, AbstractIndexPage, DefaultPageHeaderImageMixin, IATIStreamBlock
from .mixins import ContactFormMixin
class GuidanceAndSupportPage(DefaultPageHeaderImageMixin, AbstractContentPage):
    """A base for the Guidance and Support page."""
    # Only creatable directly under the site home page.
    parent_page_types = ['home.HomePage']
    subpage_types = [
        'guidance_and_support.GuidanceGroupPage',
        # 'guidance_and_support.KnowledgebaseIndexPage',
        'guidance_and_support.SupportPage',
    ]
    @property
    def guidance_groups(self):
        """Get all GuidanceGroupPage objects that have been published."""
        guidance_groups = GuidanceGroupPage.objects.child_of(self).live()
        return guidance_groups
class GuidanceGroupPage(AbstractContentPage):
    """A base for Guidance Group pages."""
    subpage_types = ['guidance_and_support.GuidanceGroupPage', 'guidance_and_support.GuidancePage']
    # Image shown on the parent guidance-and-support listing page.
    section_image = models.ForeignKey(
        'wagtailimages.Image', null=True, blank=True,
        on_delete=models.SET_NULL, related_name='+',
        help_text='This is the image that will be displayed for this page on the main guidance and support page. Ignore if this page is being used as a sub-index page.'
    )
    section_summary = StreamField(IATIStreamBlock(required=False), null=True, blank=True, help_text='A small amount of content to appear on the main page (e.g. bullet points). Ignore if this page is being used as a sub-index page.')
    button_link_text = models.TextField(max_length=255, null=True, blank=True, help_text='The text to appear on the button of the main guidance and support page. Ignore if this page is being used as a sub-index page.')
    content_editor = StreamField(IATIStreamBlock(required=False), null=True, blank=True, help_text='The content to appear on the page itself, as opposed to "section summary" which appears on the parent page.')
    @property
    def guidance_groups(self):
        """Get all objects that are children of the instantiated GuidanceGroupPage.
        Note:
            These can be other guidance group pages or single guidance pages.
        """
        # "count" presumably drives an "N items" badge in the template —
        # verify against the template before changing.
        guidance_groups = Page.objects.child_of(self).specific().live()
        guidance_group_list = [{"page": page, "count": len(page.get_children())} for page in guidance_groups]
        return guidance_group_list
    translation_fields = AbstractContentPage.translation_fields + ["section_summary", "button_link_text"]
    multilingual_field_panels = [
        ImageChooserPanel('section_image'),
    ]
class GuidancePage(ContactFormMixin, AbstractContentPage):
    """A base for a single guidance page."""
    # Leaf page: no further children allowed.
    subpage_types = []
# class KnowledgebaseIndexPage(AbstractIndexPage):
# """A base for a Knowledgebase index page."""
# subpage_types = ['guidance_and_support.KnowledgebasePage']
# class KnowledgebasePage(AbstractContentPage):
# """A base for a single Knowledgebase page."""
# subpage_types = []
class CommunityPage(DefaultPageHeaderImageMixin, AbstractIndexPage):
    """A base for the Community page."""
    parent_page_types = ['home.HomePage']
    subpage_types = []
    text_box = models.TextField(max_length=255, null=True, blank=True, help_text='A small ammount of text describing the community page.')
    button_link_text = models.CharField(max_length=255, null=True, blank=True, help_text='The text to appear on the button of the community page.')
    button_url = models.URLField(null=True, blank=True, help_text='The url for the community page being linked')
    translation_fields = AbstractIndexPage.translation_fields + ["text_box", "button_link_text"]
    content_panels = AbstractIndexPage.content_panels + [
        # NOTE(review): 'heading' is not declared on this model — presumably
        # inherited from AbstractIndexPage; verify before editing panels.
        FieldPanel('heading'),
        FieldPanel('button_link_text'),
        FieldPanel('button_url'),
        FieldPanel('text_box')
    ]
class SupportPage(DefaultPageHeaderImageMixin, ContactFormMixin, AbstractContentPage):
    """Model to define the overall fields for the support page."""
    parent_page_types = ['guidance_and_support.GuidanceAndSupportPage']
    subpage_types = []
    # Rich text listing non-form support channels (email, forums, etc.).
    alternative_content = RichTextField(
        features=['h3', 'link', 'ul'],
        help_text='Content to describe alternative ways of receiving support',
    )
    translation_fields = AbstractContentPage.translation_fields + [
        'alternative_content',
    ]
| StarcoderdataPython |
24329 | from dataclasses import dataclass, field
from typing import Dict
import perde
import pytest
from util import FORMATS, FORMATS_EXCEPT
"""rust
#[derive(Serialize, Debug, new)]
struct Plain {
a: String,
b: String,
c: u64,
}
add!(Plain {"xxx".into(), "yyy".into(), 3});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_plain(m):
    """A plain dataclass round-trips through every serialization format."""
    @dataclass
    class Plain:
        a: str
        b: str
        c: int
    m.repack_type(Plain)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "camelCase")]
struct RenameAll {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAll {"xxx".into(), "yyy".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all(m):
    """rename_all="camelCase" renames every field symmetrically."""
    @perde.attr(rename_all="camelCase")
    @dataclass
    class RenameAll:
        pen_pineapple: str
        apple_pen: str
    m.repack_type(RenameAll)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllSerialize", rename_all = "PascalCase")]
struct RenameAllSerializeOutput {
pen_pineapple: String,
apple_pen: String,
}
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllSerialize")]
struct RenameAllSerializeInput {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAllSerializeInput {"--".into(), "==".into()});
add!(RenameAllSerializeOutput {"--".into(), "==".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all_serialize(m):
    """rename_all_serialize applies the case rule on output only."""
    @perde.attr(rename_all_serialize="PascalCase")
    @dataclass
    class RenameAllSerialize:
        pen_pineapple: str
        apple_pen: str
    # Deserialize the snake_case fixture, then expect PascalCase output.
    d = m.unpack_data("RenameAllSerializeInput", astype=RenameAllSerialize)
    v = m.dumps(d)
    e = m.data("RenameAllSerializeOutput")
    assert v == e
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllDeserialize")]
struct RenameAllDeserializeOutput {
pen_pineapple: String,
apple_pen: String,
}
#[derive(Serialize, Debug, new)]
#[serde(rename = "RenameAllDeserialize", rename_all = "SCREAMING_SNAKE_CASE")]
struct RenameAllDeserializeInput {
pen_pineapple: String,
apple_pen: String,
}
add!(RenameAllDeserializeInput {"--".into(), "==".into()});
add!(RenameAllDeserializeOutput {"--".into(), "==".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_all_deserialize(m):
    """rename_all_deserialize applies the case rule on input only."""
    @perde.attr(rename_all_deserialize="SCREAMING_SNAKE_CASE")
    @dataclass
    class RenameAllDeserialize:
        pen_pineapple: str
        apple_pen: str
    # Deserialize the SCREAMING_SNAKE_CASE fixture; output stays snake_case.
    d = m.unpack_data("RenameAllDeserializeInput", astype=RenameAllDeserialize)
    v = m.dumps(d)
    e = m.data("RenameAllDeserializeOutput")
    assert v == e
"""rust
#[derive(Serialize, Debug, new)]
struct DenyUnknownFields {
x: String,
y: i64,
z: i64,
q: String,
}
add!(DenyUnknownFields {"aaaaa".into(), 1, -2, "unknown".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_deny_unknown_fields(m):
    """Unknown keys are ignored by default, rejected with deny_unknown_fields."""
    @dataclass
    class NoDenyUnknownFields:
        x: str
        y: int
        z: int
    @perde.attr(deny_unknown_fields=True)
    @dataclass
    class DenyUnknownFields:
        x: str
        y: int
        z: int
    # The fixture carries an extra field "q": the lax type silently drops it...
    e = m.unpack_data("DenyUnknownFields", astype=NoDenyUnknownFields)
    assert e == NoDenyUnknownFields("aaaaa", 1, -2)
    # ...while the strict type must raise.
    with pytest.raises(Exception) as e:
        m.unpack_data("DenyUnknownFields", astype=DenyUnknownFields)
    print(f"{e}")
"""rust
#[derive(Serialize, Debug, new)]
struct Rename {
a: String,
#[serde(rename = "x")]
b: String,
c: u64,
}
add!(Rename {"xxx".into(), "yyy".into(), 3});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename(m):
    """Per-field rename via field metadata overrides the serialized name."""
    @dataclass
    class Rename:
        a: str
        b: str = field(metadata={"perde_rename": "x"})
        c: int
    m.repack_type(Rename)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "camelCase")]
struct RenameAllRename {
pen_pineapple: String,
#[serde(rename = "pen_pen")]
apple_pen: String,
}
add!(RenameAllRename {"xxx".into(), "yyy".into()});
"""
@pytest.mark.parametrize("m", FORMATS)
def test_rename_in_rename_all(m):
    """An explicit per-field rename wins over the class-level rename_all rule."""
    @perde.attr(rename_all="camelCase")
    @dataclass
    class RenameAllRename:
        pen_pineapple: str
        apple_pen: str = field(metadata={"perde_rename": "pen_pen"})
    m.repack_type(RenameAllRename)
"""rust
#[derive(Serialize, Debug, new)]
struct NestedRenameChild {
a: String,
#[serde(rename = "d")]
b: String,
}
#[derive(Serialize, Debug, new)]
struct NestedRename {
x: String,
#[serde(rename = "w")]
y: NestedRenameChild,
z: i64,
}
add!(NestedRename
{"xxx".into(),
NestedRenameChild::new("ppp".into(), "qqq".into()),
1111}
except "toml");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("toml"))
def test_nested_rename(m):
    """Field renames apply inside nested dataclasses too (toml excluded)."""
    @dataclass
    class NestedRenameChild:
        a: str
        b: str = field(metadata={"perde_rename": "d"})
    @dataclass
    class NestedRename:
        x: str
        y: NestedRenameChild = field(metadata={"perde_rename": "w"})
        z: int
    m.repack_type(NestedRename)
"""rust
#[derive(Serialize, Debug, new)]
#[serde(rename_all = "UPPERCASE")]
struct NestedRenameAllChild {
a: String,
b: String,
}
#[derive(Serialize, Debug, new)]
struct NestedRenameAll {
x: String,
y: NestedRenameAllChild,
z: i64,
}
add!(NestedRenameAll
{"xxx".into(),
NestedRenameAllChild::new("ppp".into(), "qqq".into()),
1111}
except "toml");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("toml"))
def test_nested_rename_all(m):
    """A nested dataclass keeps its own rename_all rule (toml excluded)."""
    @perde.attr(rename_all="UPPERCASE")
    @dataclass
    class NestedRenameAllChild:
        a: str
        b: str
    @dataclass
    class NestedRenameAll:
        x: str
        y: NestedRenameAllChild
        z: int
    m.repack_type(NestedRenameAll)
"""rust
#[derive(Serialize, Debug, new)]
struct FlattenChild {
a: String,
b: String,
}
#[derive(Serialize, Debug, new)]
struct Flatten {
x: String,
#[serde(flatten)]
y: FlattenChild,
z: i64,
}
add!(Flatten
{"xxx".into(),
FlattenChild::new("ppp".into(), "qqq".into()),
1111}
except "msgpack");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_flatten(m):
    """A flattened nested dataclass is inlined into the parent (no msgpack)."""
    @dataclass
    class FlattenChild:
        a: str
        b: str
    @dataclass
    class Flatten:
        x: str
        y: FlattenChild = field(metadata={"perde_flatten": True})
        z: int
    m.repack_type(Flatten)
"""rust
#[derive(Serialize, Debug, new)]
struct DictFlatten {
x: String,
y: i64,
#[serde(flatten)]
z: IndexMap<String, String>,
}
add!(DictFlatten {"hey".into(), -103223,
{
let mut m = IndexMap::new();
m.insert("pp".into(), "q1".into());
m.insert("ppp".into(), "q2".into());
m.insert("pppp".into(), "q3".into());
m
}}
except "msgpack");
"""
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_dict_flatten(m):
    """A flattened Dict field captures all remaining keys (no msgpack)."""
    @dataclass
    class DictFlatten:
        x: str
        y: int
        z: Dict[str, str] = field(metadata={"perde_flatten": True})
    m.repack_type(DictFlatten)
"""rust
#[derive(Serialize, Debug, new)]
struct Flatten2 {
x: String,
a: i64,
b: i64,
}
add!(Flatten2 { "haa".into(), 11, 33 });
"""
@pytest.mark.parametrize("m", FORMATS)
def test_flatten2(m):
    """Round trip with a flattened child whose fields merge into the parent."""
    @dataclass
    class Flatten2Child:
        a: int
        b: int
    @dataclass
    class Flatten2:
        x: str
        y: Flatten2Child = field(metadata={"perde_flatten": True})
    m.repack_type(Flatten2)
"""rust
#[derive(Serialize, Debug, new)]
struct DictFlatten2 {
x: String,
y: i64,
pp: String,
ppp: String,
pppp: String,
}
add!(DictFlatten2 {
"hey".into(), -103223,
"q1".into(), "q2".into(), "q3".into()
});
"""
# Hopefully support msgpack.
@pytest.mark.parametrize("m", FORMATS_EXCEPT("msgpack"))
def test_dict_flatten2(m):
    """Flattened dict round trip against the fully-flat fixture (no msgpack)."""
    @dataclass
    class DictFlatten2:
        x: str
        y: int
        z: Dict[str, str] = field(metadata={"perde_flatten": True})
    m.repack_type(DictFlatten2)
| StarcoderdataPython |
991 | <gh_stars>0
import urllib2
import json
import time
from core.helpers.decorator import Cached
from core.helpers.config import config
from core.helpers.logger import log, LogLevel
@Cached
def __request(request):
    """Perform a GET against the fanart.tv API and return the parsed JSON.

    Responses are memoized by the @Cached decorator, keyed on the URL.
    """
    # Mask the API key when logging the outgoing URL.
    log('Send Fanart Request: ' + request.replace(config.fanart.api_key, 'XXX'), 'DEBUG')
    headers = {'Accept': 'application/json'}
    _request = urllib2.Request(request, headers=headers)
    response_body = urllib2.urlopen(_request).read()
    result = json.loads(response_body)
    return result
def _get(video_type, movie_id, output_format='JSON'):
    """Build the fanart.tv request URL and fetch it, retrying on HTTP errors.

    Makes up to 10 attempts, sleeping 2s after each failure; gives up
    silently (returns None) if all attempts fail.
    """
    url = '{0}{1}/{2}/{3}/{4}'.format(config.fanart.url_base, video_type,
                                      config.fanart.api_key, movie_id,
                                      output_format)
    for _ in range(10):
        try:
            return __request(url)
        except urllib2.HTTPError:
            log('Ooops.. FanartTV Error - Try again', LogLevel.Warning)
            time.sleep(2)
def get_movie(tmdb_id):
    """Fetch fanart metadata for a movie by its TMDb id."""
    return _get(video_type='movie', movie_id=tmdb_id)
def get_show(tvdb_id):
    """Fetch fanart metadata for a TV series by its TheTVDB id.

    FIX: a stray dataset separator fused onto the return line made this
    function a SyntaxError; removed.
    """
    return _get(video_type='series', movie_id=tvdb_id)
4833606 | <filename>util.py<gh_stars>0
import os, struct, math
import numpy as np
import torch
from glob import glob
import data_util
import shlex
import subprocess
import torch.nn.functional as F
def backproject(ux, uy, depth, intrinsic):
    """Unproject pixel coordinates (ux, uy) with *depth* into homogeneous
    camera-space points, stacked along dim 0 as (X, Y, Z, 1)."""
    fx, fy = intrinsic[0][0], intrinsic[1][1]
    cx, cy = intrinsic[0][2], intrinsic[1][2]
    x_cam = depth * ((ux - cx) / fx)
    y_cam = depth * ((uy - cy) / fy)
    return torch.stack([x_cam, y_cam, depth, torch.ones_like(depth)], dim=0)
def parse_intrinsics(filepath, trgt_sidelength, invert_y=False):
    """Parse a camera-intrinsics text file and rescale to *trgt_sidelength*.

    Expected line order: "f cx cy _", grid barycenter, near plane, scale,
    "height width", and an optional world2cam flag -- TODO confirm against
    the files the capture pipeline actually writes.

    Returns (full_intrinsic 4x4 numpy array, grid_barycenter tensor, scale,
    near_plane, world2cam_poses bool).
    """
    # Get camera intrinsics
    with open(filepath, 'r') as file:
        f, cx, cy, _ = map(float, file.readline().split())
        grid_barycenter = torch.Tensor(list(map(float, file.readline().split())))
        near_plane = float(file.readline())
        scale = float(file.readline())
        height, width = map(float, file.readline().split())
        try:
            world2cam_poses = int(file.readline())
        except ValueError:
            # Flag line missing or non-numeric: treat poses as cam2world.
            world2cam_poses = None
    if world2cam_poses is None:
        world2cam_poses = False
    world2cam_poses = bool(world2cam_poses)
    # Rescale principal point and focal length to the target resolution.
    cx = cx / width * trgt_sidelength
    cy = cy / height * trgt_sidelength
    f = trgt_sidelength / height * f
    fx = f
    if invert_y:
        fy = -f
    else:
        fy = f
    # Build the intrinsic matrices
    full_intrinsic = np.array([[fx, 0., cx, 0.],
                               [0., fy, cy, 0],
                               [0., 0, 1, 0],
                               [0, 0, 0, 1]])
    return full_intrinsic, grid_barycenter, scale, near_plane, world2cam_poses
def resize2d(img, size):
    """Resize *img* to the spatial dims of *size* (an NCHW shape tuple)
    using adaptive average pooling."""
    target_hw = size[2:]
    return F.adaptive_avg_pool2d(img, target_hw)
def compute_warp_idcs(cam_1_intrinsic,
                      cam_2_intrinsic,
                      img_1_pose,
                      img_1_depth,
                      img_2_pose,
                      img_2_depth):
    """Find source pixels of image 1 that warp to valid, unoccluded pixels
    of image 2, given both depth maps, poses and intrinsics.

    Returns (stacked (x, y) source indices, target pixel coordinates), or
    None when no pixel survives the bounds/occlusion tests. All inputs are
    squeezed and moved to CUDA; assumes square depth maps -- TODO confirm.
    """
    cam_1_intrinsic = cam_1_intrinsic.squeeze().cuda()
    cam_2_intrinsic = cam_2_intrinsic.squeeze().cuda()
    img_1_pose = img_1_pose.squeeze().cuda()
    img_2_pose = img_2_pose.squeeze().cuda()
    img_1_depth = img_1_depth.squeeze().cuda()
    img_2_depth = img_2_depth.squeeze().cuda()
    # Get the new size
    side_length = img_1_depth.shape[0]
    # Get camera coordinates of pixels in camera 1
    pixel_range = torch.arange(0, side_length)
    xx, yy = torch.meshgrid([pixel_range, pixel_range])
    xx = xx.contiguous().view(-1).float().cuda()
    yy = yy.contiguous().view(-1).float().cuda()
    img_1_cam_coords = backproject(yy,
                                   xx,
                                   img_1_depth.contiguous().view(-1),
                                   cam_1_intrinsic)
    # Convert to world coordinates
    world_coords = torch.mm(img_1_pose, img_1_cam_coords)
    # Convert to cam 2 coordinates
    trgt_coords = torch.mm(torch.inverse(img_2_pose), world_coords)
    trgt_coords = torch.mm(cam_2_intrinsic, trgt_coords)
    # Get the depths in the target camera frame
    transformed_depths = trgt_coords[2, :].clone()
    # z-divide.
    trgt_coords /= trgt_coords[2:3, :] + 1e-9
    trgt_idcs = torch.round(trgt_coords[:2]).long()
    # Mask out everything outside the image boundaries
    mask_img_bounds = (torch.ge(trgt_idcs[0], 0) *
                       torch.ge(trgt_idcs[1], 0))
    mask_img_bounds = (mask_img_bounds *
                       torch.lt(trgt_idcs[0], side_length) *
                       torch.lt(trgt_idcs[1], side_length))
    if not mask_img_bounds.any():
        print('Nothing in warped image')
        return None
    valid_trgt_idcs = trgt_idcs[:, mask_img_bounds]
    gt_depths = img_2_depth[valid_trgt_idcs[1, :], valid_trgt_idcs[0, :]]
    # A warped pixel counts as unoccluded when its reprojected depth matches
    # the target view's depth map to within 0.05 depth units.
    not_occluded = (torch.abs(gt_depths.detach() - transformed_depths[mask_img_bounds].detach()) < 0.05)
    # not_occluded = gt_depths < 1000.
    if not not_occluded.any():
        print('Nothing unoccluded')
        return None
    # Get the final coordinates
    valid_xx = xx[mask_img_bounds][not_occluded].long()
    valid_yy = yy[mask_img_bounds][not_occluded].long()
    valid_trgt_coords = trgt_coords[:, mask_img_bounds][:, not_occluded]
    return torch.stack([valid_xx, valid_yy], dim=0), valid_trgt_coords
def concat_pose(feature_map, pose):
    """Append *pose*, broadcast to 64x64 spatially, as extra channels of
    *feature_map*. Assumes a 64x64 feature map -- TODO confirm."""
    squeezed = pose.squeeze()
    pose_planes = squeezed[None, :, None, None].repeat(1, 1, 64, 64)
    return torch.cat([feature_map, pose_planes], dim=1)
def num_divisible_by_2(number):
    """Return the exponent of 2 in *number* (how many times 2 divides it).

    Raises ValueError for 0: the previous version looped forever because
    0 % 2 == 0 on every iteration.
    """
    if number == 0:
        raise ValueError("0 is divisible by 2 arbitrarily many times")
    count = 0
    while not number % 2:
        number = number // 2
        count += 1
    return count
def compute_view_directions(intrinsic,
                            cam2world,
                            img_height_width,
                            voxel_size,
                            frustrum_depth=1,
                            near_plane=np.sqrt(3) / 2):
    """Unit view-direction vectors for each pixel and frustrum depth slice.

    The homogeneous coordinate is set to 0, so cam2world applies rotation
    only (these are directions, not points). Returns a tensor of shape
    (3, width, height, frustrum_depth).
    """
    xx, yy, zz = torch.meshgrid([torch.arange(0, img_height_width[1]),
                                 torch.arange(0, img_height_width[0]),
                                 torch.arange(0, frustrum_depth)])
    coords = torch.stack([xx, yy, zz, torch.zeros_like(xx)], dim=0).float()
    # Depth slices start at the near plane and advance by voxel_size.
    coords[2] *= voxel_size
    coords[2] += near_plane
    # Pixel -> normalized camera coordinates via the intrinsics.
    coords[0] = (coords[0] - intrinsic[0][2]) / intrinsic[0][0]
    coords[1] = (coords[1] - intrinsic[1][2]) / intrinsic[1][1]
    coords[:2] *= coords[2]
    coords = coords.view(4, -1)
    world_coords = torch.mm(cam2world, coords)[:3]
    # Normalize to unit directions.
    world_coords /= world_coords.norm(2, dim=0, keepdim=True)
    world_coords = world_coords.view(3, img_height_width[1], img_height_width[0], frustrum_depth)
    return world_coords
# util for saving tensors, for debug purposes
def write_array_to_file(tensor, filename):
    """Serialize a numpy array to *filename*: a header of the first three
    dimensions as native uint64 values, followed by the raw element bytes."""
    d0, d1, d2 = tensor.shape[0], tensor.shape[1], tensor.shape[2]
    with open(filename, 'wb') as f:
        f.write(struct.pack('QQQ', d0, d1, d2))
        tensor.tofile(f)
def read_lines_from_file(filename):
    """Return the lines of *filename* without trailing newline characters.

    The file must already exist. The handle is now closed deterministically
    via ``with`` -- the previous version leaked it until garbage collection.
    """
    assert os.path.isfile(filename)
    with open(filename) as f:
        return f.read().splitlines()
# create camera intrinsics
def make_intrinsic(fx, fy, mx, my):
    """Build a 4x4 pinhole intrinsic matrix from focal lengths (fx, fy)
    and principal point (mx, my)."""
    K = torch.eye(4)
    K[0, 0], K[1, 1] = fx, fy
    K[0, 2], K[1, 2] = mx, my
    return K
# create camera intrinsics
def adjust_intrinsic(intrinsic, intrinsic_image_dim, image_dim):
    """Rescale *intrinsic* in place from *intrinsic_image_dim* to *image_dim*.

    Returns the (mutated) matrix; a no-op when the dimensions already match.
    """
    if intrinsic_image_dim == image_dim:
        return intrinsic
    resize_width = int(math.floor(image_dim[1] * float(intrinsic_image_dim[0]) / float(intrinsic_image_dim[1])))
    # Focal lengths scale with the resize factors.
    focal_x_factor = float(resize_width) / float(intrinsic_image_dim[0])
    focal_y_factor = float(image_dim[1]) / float(intrinsic_image_dim[1])
    intrinsic[0, 0] *= focal_x_factor
    intrinsic[1, 1] *= focal_y_factor
    # Principal point: account for cropping here (pixel-center convention).
    center_x_factor = float(image_dim[0] - 1) / float(intrinsic_image_dim[0] - 1)
    center_y_factor = float(image_dim[1] - 1) / float(intrinsic_image_dim[1] - 1)
    intrinsic[0, 2] *= center_x_factor
    intrinsic[1, 2] *= center_y_factor
    return intrinsic
def get_sample_files(samples_path):
    """List the names of the '.sample' files directly inside *samples_path*."""
    return [entry for entry in os.listdir(samples_path)
            if entry.endswith('.sample')]
def get_sample_files_for_scene(scene, samples_path):
    """List '.sample' file names in *samples_path* that start with *scene*."""
    matches = [entry for entry in os.listdir(samples_path)
               if entry.startswith(scene) and entry.endswith('.sample')]
    print('found ', len(matches), ' for ', os.path.join(samples_path, scene))
    return matches
def cond_mkdir(path):
    """Create *path* (and any missing parents) if it does not already exist.

    Uses ``exist_ok`` to avoid the check-then-create race in the previous
    version: another process could create the directory between the
    os.path.exists test and the os.makedirs call.
    """
    os.makedirs(path, exist_ok=True)
def load_pose(filename):
    """Load a 4x4 pose matrix from a whitespace-separated text file.

    Each of the 4 lines must hold at least 4 numbers; extra tokens on a line
    are ignored (matching the previous behaviour). Fixed: the unused
    ``pose = torch.Tensor(4, 4)`` allocation was removed and the file handle
    is now closed deterministically.
    """
    assert os.path.isfile(filename)
    with open(filename) as f:
        lines = f.read().splitlines()
    assert len(lines) == 4
    rows = [line.split(" ")[:4] for line in lines]
    return torch.from_numpy(np.asarray(rows).astype(np.float32))
def expand_to_feature_map(torch_tensor, img_size):
    """Tile an (N, C) tensor into an (N, C, H, W) feature map of *img_size*."""
    height, width = img_size[0], img_size[1]
    return torch_tensor[:, :, None, None].repeat(1, 1, height, width)
def normalize(img):
    """Rescale *img* linearly into [0, 1].

    NOTE(review): divides by (max - min); a constant image yields inf/NaN --
    presumably callers never pass one. TODO confirm.
    """
    lo, hi = img.min(), img.max()
    return (img - lo) / (hi - lo)
def write_image(writer, name, img, iter):
    """Log *img* (NHWC) to *writer* under *name*, normalized to [0, 1]."""
    nchw = img.permute([0, 3, 1, 2])
    writer.add_image(name, normalize(nchw), iter)
def print_network(net):
    """Print the total number of trainable parameters of *net*."""
    params = sum(np.prod(p.size()) for p in net.parameters() if p.requires_grad)
    print("%d" % params)
def custom_load(model, path, discriminator=None):
    """Restore *model* (and optionally *discriminator*) from a checkpoint
    produced by ``custom_save``."""
    checkpoint = torch.load(path)
    model.load_state_dict(checkpoint['model'])
    if discriminator:
        discriminator.load_state_dict(checkpoint['discriminator'])
def custom_save(model, path, discriminator=None):
    """Write *model* (and optionally *discriminator*) state dicts to *path*."""
    checkpoint = {'model': model.state_dict()}
    if discriminator:
        checkpoint['discriminator'] = discriminator.state_dict()
    torch.save(checkpoint, path)
def get_nearest_neighbors_pose(train_pose_dir, test_pose_dir, sampling_pattern='skip_2', metric='cos'):
    """For every test pose, return the index (into the full, unsubsampled
    train set) of its nearest train pose by normalized camera position.

    sampling_pattern: 'all', or 'skip_<n>' to keep every (n+1)-th train pose.
    metric: 'cos' (cosine similarity) or 'l2' (Euclidean distance).
    """
    step = 1 if sampling_pattern == 'all' else int(sampling_pattern.split('_')[-1]) + 1
    all_train_files = sorted(glob(os.path.join(train_pose_dir, '*.txt')))
    idcs = list(range(len(all_train_files)))[::step]
    train_pose_files = all_train_files[::step]
    test_pose_files = sorted(glob(os.path.join(test_pose_dir, '*.txt')))

    def unit_positions(files):
        # Camera centers (translation column), normalized to unit length.
        positions = np.stack([data_util.load_pose(p)[:3, 3] for p in files], axis=0)
        return positions / np.linalg.norm(positions, axis=1, keepdims=True)

    train_poses = unit_positions(train_pose_files)
    test_poses = unit_positions(test_pose_files)
    if metric == 'cos':
        similarity = test_poses.dot(train_poses.T)
        best = np.argmax(similarity, axis=1)
    elif metric == 'l2':
        dists = np.linalg.norm(test_poses[:, None, :] - train_poses[None, :, :], axis=2)
        best = np.argmin(dists, axis=1)
    return [idcs[int(b)] for b in best]
| StarcoderdataPython |
3265122 |
#
# Copyright (C) 2007 <NAME> (fire at downgra dot de)
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# vim:syntax=python:sw=4:ts=4:expandtab
import os
def info():
    """Return a one-line summary of the 1/5/15 minute system load averages.

    Fixed: the label previously read "Disk load", but os.getloadavg()
    reports the system (CPU run-queue) load average, so the message was
    misleading. The two sequential ifs were folded into one elif chain.
    """
    lavg = os.getloadavg()
    peak = max(lavg)
    if peak > 4.0:
        msg = 'extremely high!'
    elif peak > 1.5:
        msg = 'quite high!'
    else:
        msg = 'looks like it\'s normal'
    return 'System load is: %.2f %.2f %.2f' % lavg + ' , ' + msg
| StarcoderdataPython |
120510 | from PyQt5.QtGui import QPixmap
from PyQt5.QtWidgets import QLabel
from Constants import *
class Bullet(QLabel):
    """A projectile sprite, parked off-screen at (SCREEN_WIDTH, SCREEN_HEIGHT)
    until fired, then stepped by BULLET_SPEED each game tick."""

    def __init__(self, offset_x, offset_y, parent, enemy=False):
        QLabel.__init__(self, parent)
        if enemy:
            self.setPixmap(QPixmap("images/bullet/enemy_bullet.png"))
        else:
            self.setPixmap(QPixmap("images/bullet/bullet.png"))
        self.offset_x = offset_x
        self.offset_y = offset_y
        self.active = False
        # Park the bullet just off-screen until it is fired.
        self.setGeometry(SCREEN_WIDTH, SCREEN_HEIGHT, self.pixmap().width(), self.pixmap().height())
        # self.setStyleSheet("border: 1px solid white;")
        self.show()

    def player_game_update(self) -> bool:
        """Move the bullet up one step; return True once it leaves the screen."""
        self.setGeometry(self.x(), self.y() - BULLET_SPEED, self.pixmap().width(), self.pixmap().height())
        if self.y() + self.pixmap().height() <= 0:
            self.active = False
            self.close()
            return True
        return False

    def enemy_game_update(self, enemy) -> bool:
        """Spawn the bullet beneath *enemy* on first call, then move it down.

        Returns True when the bullet is finished (reached the bottom or the
        enemy widget vanished mid-update).
        """
        try:
            if not self.active:
                # Fixed: width()/2 yields a float in Python 3 and
                # QWidget.setGeometry rejects non-int arguments with a
                # TypeError, so the centered x must be truncated to int.
                x = int(enemy.x() + enemy.width() / 2 - self.width() / 2)
                y = enemy.y() + enemy.height()
                self.setGeometry(x, y, self.pixmap().width(), self.pixmap().height())
                self.active = True
            else:
                self.setGeometry(self.x(), self.y() + BULLET_SPEED, self.pixmap().width(), self.pixmap().height())
                if self.y() >= SCREEN_HEIGHT:
                    self.hit()
                    return True
            return False
        except AttributeError:
            # The enemy widget was deleted mid-update; treat the bullet as done.
            return True

    def hit(self) -> None:
        """Deactivate the bullet and park it off-screen again."""
        self.active = False
        self.setGeometry(SCREEN_WIDTH, SCREEN_HEIGHT, self.pixmap().width(), self.pixmap().height())
| StarcoderdataPython |
1735417 | ''' all-table virtual column related code '''
import pandas as pd
from bsbetl.alltable_calcs import at_columns
from bsbetl.alltable_calcs.at_virtual_cols import Virtual_Column
class at_virtual_MovingAverage(Virtual_Column):
    """Plugin that adds moving-average price columns (MA-5 / MA-20 / MA-200)
    to an all-table dataframe."""

    def __init__(self):
        super().__init__()
        self.kind = 'All-table virtual column'
        self.name = 'MovingAveragesPrice'
        self.assoc_columns = ['price']
        self.added_columns = ['MA-5', 'MA-20', 'MA-200']
        self.added_columns_affinities = {
            'MA-5': at_columns.PRICE_AFFINITY,
            'MA-20': at_columns.PRICE_AFFINITY,
            'MA-200': at_columns.PRICE_AFFINITY
        }
        self.description = 'Moving Average Prices'

    def create_virtual_columns(self, df_arg: pd.DataFrame, parameters: list) -> pd.DataFrame:
        """Add one rolling-mean price column for each requested name in
        *parameters*; unknown names are silently ignored."""
        windows = dict(zip(self.added_columns, (5, 20, 200)))
        for column_name in parameters:
            window = windows.get(column_name)
            if window is not None:
                df_arg[column_name] = df_arg['price'].rolling(window=window).mean()
        return df_arg
def contribute_dash_dropdown_options(stage: int) -> list:
    """Build the Dash dropdown entries for the moving-average price columns.

    *stage* is accepted for interface compatibility but not used here.
    """
    entries = [
        ('MA-5', 'insert a temporary 5 period moving-average price column'),
        ('MA-20', 'insert a temporary 20 period moving-average price column'),
        ('MA-200', 'insert a temporary 200 period moving-average price column'),
    ]
    return [{'label': name, 'value': name, 'title': title}
            for name, title in entries]
| StarcoderdataPython |
1649285 | from .cbl_type import CBLType, CBLTypeInstance, CBLTypeMeta
from .containers import Temporary
from .function_type import InstanceFunctionType
import cmd_ir.instructions as i
class StructTypeInstance(CBLTypeInstance):
    # Runtime instance of a struct type. Its variable members are backed
    # either by one shared NBT variable (`this` is set) or by a separate
    # variable per member (`this` is None); see StructuredType.allocate.
    def __init__(self, compiler, this, var_members, func_members, func_properties):
        super().__init__(func_members, func_properties)
        self.__this = this  # backing NBT variable, or None when unwrapped
        self.__var_members = []
        for name, var_type in var_members.items():
            self.__var_members.append(self.construct_var(compiler, name,
                                                   var_type))
    def construct_var(self, compiler, name, type):
        # Allocate fresh storage for one member. Overridden by
        # StructTypeInstanceShadow to alias an existing instance's storage.
        value = type.allocate(compiler, name)
        return self.construct_member(name, type, value)
    def as_variables(self):
        # Flatten to the underlying IR variables: the single NBT wrapper
        # when `this` is set, otherwise all member variables concatenated.
        if self.__this is None:
            vars = []
            for m in self.__var_members:
                vars.extend(m.type.as_variables(m.value))
            return vars
        return (self.__this,)
    def as_variable(self, typename):
        assert self.__this is not None, "Cannot convert %s to variable" % typename
        return self.__this
class StructTypeInstanceShadow(StructTypeInstance):
    """A StructTypeInstance whose variable members alias those of an
    existing instance instead of allocating fresh storage."""

    def __init__(self, shadow_instance, *args):
        # Must be set before super().__init__, which invokes construct_var.
        self.__source = shadow_instance
        super().__init__(*args)

    def construct_var(self, compiler, name, type):
        """Reuse the shadowed instance's member value rather than allocating."""
        existing = self.__source.get_member(compiler, name).value
        return self.construct_member(name, type, existing)
class StructuredType(CBLType):
    """A CBL type composed of named variable members plus function members.

    Once the member set is finalized (__complete_vars), small structs
    (effective size <= 3 variables) keep one variable per member, while
    larger ones are wrapped in a single NBT compound variable
    (self._is_nbt) -- see allocate().
    """
    def __init__(self):
        super().__init__()
        self.__var_members = {}
        self.__vars_allowed = True
        self._is_nbt = False
    @property
    def meta_type_type(self):
        return StructTypeMeta
    def extend_from(self, parent):
        super().extend_from(parent)
        if isinstance(parent, StructuredType):
            # Inherit the parent's variable members (layout is append-only).
            self.__var_members.update(parent.get_var_members())
    @property
    def ir_type(self):
        # Only NBT-wrapped structs map to a single IR type.
        if self._is_nbt:
            return i.VarType.nbt
        raise TypeError('%s does not have an IR type' % self)
    def ir_types(self):
        # Concatenation of member IR types, or the single NBT type.
        if self._is_nbt:
            return (self.ir_type,)
        types = []
        for m_type in self.__var_members.values():
            types.extend(m_type.ir_types())
        return types
    def as_variable(self, instance):
        return instance.as_variable(self.typename)
    def as_variables(self, instance):
        return instance.as_variables()
    def instance_member(self, name):
        # Function members (via super) take precedence over variable members.
        m = super().instance_member(name)
        if m is None:
            m = self.__var_members.get(name)
        return m
    def get_var_members(self):
        # Defensive copy: callers must not mutate the member layout.
        return dict(self.__var_members)
    def effective_var_size(self):
        # Size in "variables": 1 when NBT-wrapped, else sum of member sizes.
        if self._is_nbt:
            return 1
        return sum(t.effective_var_size() for t in self.__var_members.values())
    def add_variable_member(self, name, type):
        # NOTE(review): name-mangles to _StructuredType__can_extend; if the
        # extension gate lives on CBLType this assignment may not reach it --
        # verify against cbl_type.py.
        self.__can_extend = False
        if self.instance_member(name):
            raise KeyError('%s is already defined in type %s' % (name,
                                                                 self.name))
        if not self.__vars_allowed:
            raise RuntimeError('Cannot add more variables. Tried adding %s' % \
                               name)
        self.__var_members[name] = type
    def allocate(self, compiler, namehint):
        """Allocate storage and return a StructTypeInstance for this type."""
        assert not self.incomplete, "Incomplete type %s" % self.typename
        if self._is_nbt:
            # One NBT compound variable; members become sub-paths of it.
            this = compiler.create_var(namehint, i.VarType.nbt)
            def create_sub_var(subname, var_type):
                path = i.VirtualString('.' + subname)
                insn = i.NBTSubPath(this, path, var_type)
                return compiler.define(namehint + '_' + subname, insn)
        else:
            # Unwrapped: one ordinary variable per member.
            this = None
            orig_create_var = compiler.create_var
            def create_sub_var(subname, var_type):
                return orig_create_var(namehint + '_' + subname, var_type)
        with compiler.set_create_var(create_sub_var):
            return StructTypeInstance(compiler, this, self.__var_members,
                                      self.get_func_members(),
                                      self.get_func_properties())
    def add_function_member(self, compiler, name, ret_type, params, inline,
                            is_async):
        # Adding any function freezes the variable layout first.
        self.__complete_vars()
        return super().add_function_member(compiler, name, ret_type, params,
                                           inline, is_async)
    def add_operator_member(self, compiler, op, ret_type, params, inline):
        self.__complete_vars()
        return super().add_operator_member(compiler, op, ret_type, params,
                                           inline)
    def add_constructor(self, compiler, params, inline):
        self.__complete_vars()
        return super().add_constructor(compiler, params, inline)
    def __complete_vars(self):
        # Finalize the variable layout exactly once and decide the storage
        # strategy; idempotent after the first call.
        if not self.__vars_allowed:
            return
        self.__vars_allowed = False
        self.__can_extend = False
        # Initially we are not NBT wrapped
        size = self.effective_var_size()
        # Become NBT wrapped if size exceeds 3 variables
        if size > 3:
            self._is_nbt = True
    def _copy_impl(self, compiler, this, other):
        # Assignment: a single SetScore for NBT-wrapped structs, otherwise
        # a member-wise '=' dispatch.
        thisobj = this.value
        if self._is_nbt:
            compiler.add_insn(i.SetScore(thisobj.as_variable(self.typename),
                                         other.value.as_variable(other.type.typename)))
        else:
            # Pair each var member
            for var in self.__var_members.keys():
                lvar = thisobj.get_member(compiler, var)
                rvar = other.value.get_member(compiler, var)
                lvar.type.dispatch_operator(compiler, '=', lvar, rvar)
        return other
    def _default_ctor(self, compiler, container, args):
        # Default constructor also default-constructs every member.
        ret = super()._default_ctor(compiler, container, args)
        self.__construct_members(compiler, container.this, {})
        return ret
    def complete_type(self, compiler):
        self.__complete_vars()
        super().complete_type(compiler)
    def do_construction(self, compiler, thisobj, member_inits):
        # An init entry keyed by the parent type's name carries the parent
        # constructor arguments; it is removed before member construction.
        if self.parent_type:
            pname = self.parent_type.typename
            pargs = ()
            # Steal parent arguments if exists from member_inits
            if pname in member_inits:
                pargs = member_inits[pname]
                del member_inits[pname]
            self._construct_parent(compiler, thisobj, pargs)
        self.__construct_members(compiler, thisobj, member_inits)
    def __construct_members(self, compiler, thisobj, member_inits):
        # Only construct members declared by this type, not ones inherited
        # from a StructuredType parent (those are built by the parent ctor).
        own_members = self.__var_members
        if isinstance(self.parent_type, StructuredType):
            p_members = self.parent_type.get_var_members().keys()
            own_members = { name: m for name, m in self.__var_members.items() \
                            if name not in p_members }
        for name in member_inits.keys():
            assert name in own_members, (self, name)
        for varname in own_members.keys():
            member = thisobj.get_member(compiler, varname)
            args = member_inits.get(varname, ())
            member.type.run_constructor(compiler, member, args)
    def coerce_to(self, compiler, container, type):
        """Upcast toward a parent type, reusing NBT storage when possible."""
        super_did_coerce = super().coerce_to(compiler, container, type)
        if super_did_coerce:
            return super_did_coerce
        if self.parent_type is not None:
            if type == self.parent_type and isinstance(type, StructuredType):
                # Can re-use the nbt wrapper. Since extend is append-only
                # we know self._is_nbt == True
                if type._is_nbt:
                    return container
                # Create a shadow copy using the subset of our members
                # found in the parent type
                val = StructTypeInstanceShadow(container.value, compiler, None,
                                               type.get_var_members(),
                                               type.get_func_members(),
                                               type.get_func_properties())
                return Temporary(type, val)
            # Walk the hierarchy to see if we can coerce from a parent type
            return self.parent_type.coerce_to(compiler, container, type)
        return None
class StructTypeMeta(CBLTypeMeta, StructuredType):
    # Meta companion of StructuredType: exposes the struct's variable
    # members as named symbols on the meta instance.
    def __init__(self, the_type):
        # Both bases are initialized explicitly; order preserved from the
        # original -- presumably CBLTypeMeta relies on StructuredType state
        # being set up first. TODO confirm before reordering.
        StructuredType.__init__(self)
        CBLTypeMeta.__init__(self, the_type)
    def create_meta(self, compiler, namehint):
        super().create_meta(compiler, namehint)
        # Declare one scope symbol per struct variable member.
        for name, type in self.get_var_members().items():
            sym = compiler.scope.declare_symbol(name, type)
            self._meta_instance[name] = sym
| StarcoderdataPython |
3215582 | <gh_stars>0
# Copyright (c) The Libra Core Contributors
# SPDX-License-Identifier: Apache-2.0
from jwcrypto.common import base64url_encode
from cryptography.exceptions import InvalidSignature
from jwcrypto import jwk, jws
import json
class OffChainInvalidSignature(Exception):
    # Raised when a signature fails verification (see
    # ComplianceKey.verify_message and ComplianceKey.verify_ref_id).
    pass
class IncorrectInputException(Exception):
    # Raised for malformed inputs, e.g. a Libra address that is not 16 bytes
    # (see encode_ref_id_data).
    pass
class ComplianceKey:
    # Wrapper around a jwcrypto Ed25519 JWK, used for off-chain EdDSA (JWS)
    # message signing/verification and for on-chain attestation signatures.
    def __init__(self, key):
        ''' Creates a compliance key from a JWK Ed25519 key. '''
        self._key = key
    def get_public(self):
        # Underlying public key object (the JWK 'verify' operation key).
        return self._key.get_op_key('verify')
    def get_private(self):
        # Underlying private key object (the JWK 'sign' operation key).
        return self._key.get_op_key('sign')
    @staticmethod
    def generate():
        ''' Generate an Ed25519 key pair for EdDSA '''
        key = jwk.JWK.generate(kty='OKP', crv='Ed25519')
        return ComplianceKey(key)
    @staticmethod
    def from_str(data):
        ''' Generate a compliance key from a JWK JSON string. '''
        key = jwk.JWK(**json.loads(data))
        return ComplianceKey(key)
    @staticmethod
    def from_pub_bytes(pub_key_data):
        ''' Generate a compliance public key (for verification) from
        32 bytes of Ed25519 key. '''
        key = jwk.JWK(
            kty='OKP',
            crv='Ed25519',
            x=base64url_encode(pub_key_data)
        )
        return ComplianceKey(key)
    @staticmethod
    def from_pem(filename, password=None):
        # PEM import is not supported yet; implementation sketch kept below.
        raise NotImplementedError
        #with open(filename, 'rb') as pemfile:
        #    return jwk.JWK.from_pem(pemfile.read(), password=password)
    def to_pem(self, filename, private_key=False, password=None):
        # Export the key (optionally including the private part) as PEM bytes.
        data = self._key.export_to_pem(
            private_key=private_key, password=password
        )
        with open(filename, 'wb') as pemfile:
            pemfile.write(data)
    def export_pub(self):
        # JSON string containing only the public JWK fields.
        return self._key.export_public()
    def export_full(self):
        # JSON string containing the full (private) JWK.
        return self._key.export_private()
    def sign_message(self, payload):
        # Sign a UTF-8 string payload; returns a compact-serialized JWS (EdDSA).
        signer = jws.JWS(payload.encode('utf-8'))
        signer.add_signature(self._key, alg='EdDSA')
        sig = signer.serialize(compact=True)
        return sig
    def verify_message(self, signature):
        # Verify a compact JWS and return its payload as a string;
        # raises OffChainInvalidSignature when verification fails.
        try:
            verifier = jws.JWS()
            verifier.deserialize(signature)
            verifier.verify(self._key, alg='EdDSA')
            return verifier.payload.decode("utf-8")
        except jws.InvalidJWSSignature:
            raise OffChainInvalidSignature(signature)
    def thumbprint(self):
        # Stable digest of the JWK; used as the key's identity in __eq__.
        return self._key.thumbprint()
    def __eq__(self, other):
        # Keys are equal when both have (or lack) a private part and share a
        # thumbprint. NOTE(review): defining __eq__ without __hash__ makes
        # instances unhashable -- confirm ComplianceKey never lands in a
        # dict/set key position.
        if not isinstance(other, ComplianceKey):
            return False
        return self._key.has_private == other._key.has_private \
               and self._key.thumbprint() == other._key.thumbprint()
    def sign_ref_id(self, reference_id_bytes, libra_address_bytes, value_u64):
        """ Sign the reference_id and associated data required for the recipient
        signature using the compliance key.
        Params:
            reference_id_bytes (bytes): the bytes of the reference_id.
            libra_address_bytes (bytes): the 16 bytes of the Libra Blockchain address
            value_u64 (int): a unsigned integer of the value.
        Returns the hex encoded string ed25519 signature (64 x 2 char).
        """
        msg_b = encode_ref_id_data(reference_id_bytes, libra_address_bytes, value_u64)
        priv = self._key._get_private_key()
        return priv.sign(msg_b).hex()
    def verify_ref_id(self, reference_id_bytes, libra_address_bytes, value_u64, signature):
        """ Verify the reference_id and associated data signature from a recipient.
        Parameters are the same as for sign_ref_id, with the addition of the
        signature in hex format as returned by sign_ref_id.
        Raises OffChainInvalidSignature when the signature does not verify.
        """
        msg_b = encode_ref_id_data(reference_id_bytes, libra_address_bytes, value_u64)
        pub = self._key._get_public_key()
        try:
            pub.verify(bytes.fromhex(signature), msg_b)
        except InvalidSignature:
            raise OffChainInvalidSignature(reference_id_bytes, libra_address_bytes, value_u64, signature)
def encode_ref_id_data(reference_id_bytes, libra_address_bytes, value_u64):
    """Serialize the attestation payload: reference id || 16-byte address ||
    little-endian u64 value || domain separator.

    Raises IncorrectInputException when the address is not exactly 16 bytes.
    """
    if len(libra_address_bytes) != 16:
        raise IncorrectInputException('Libra Address raw format is 16 bytes.')
    parts = (
        reference_id_bytes,
        libra_address_bytes,
        value_u64.to_bytes(8, byteorder='little'),
        b'@@$$LIBRA_ATTEST$$@@',
    )
    return b''.join(parts)
| StarcoderdataPython |
4823393 | import os
# f = open("E:\\My Codes\\Python Codes\\April 2021\\30-04-2021\\file2.txt", "a")
# Counts the number of letters in the string
# count = f.write("<NAME>\n")
# print(count)
# To read and write both at the same time
print(os.getcwd())
# Read-then-append demo. Fixed: the handle is now closed deterministically
# via `with` (the original never called f.close()).
with open("E:\\My Codes\\Python Codes\\April 2021\\30-04-2021\\file2.txt", "r+") as f:
    print(f.read())
    # After read() the position sits at EOF, so this write appends.
    f.write("Python")
| StarcoderdataPython |
3222208 |
from typing import Counter
class Node:
    """A single linked-list node holding *value* and a pointer to the next node."""

    def __init__(self, value=""):
        self.value = value
        self.next = None

    def __add__(self, other):
        """Combine two nodes into a new one whose value is the sum (or
        concatenation) of both values."""
        return Node(self.value + other.value)

    def __str__(self):
        return str(self.value)
class LinkedList():
    '''
    Singly linked list built from Node objects; insert() prepends,
    append() adds at the tail.
    '''
    def __init__(self):
        self.head = None

    def insert(self, value):
        """Prepend *value* to the list."""
        node = Node(value)
        if self.head:
            node.next = self.head
        self.head = node

    def includes(self, vlaue):
        """Return True when any node's value equals *vlaue*."""
        current = self.head
        while current:
            if vlaue == current.value:
                return True
            current = current.next
        return False

    def append(self, value):
        """Add *value* at the tail.

        Fixed: the previous version dereferenced self.head unconditionally
        and raised AttributeError when the list was empty.
        """
        node = Node(value)
        if self.head is None:
            self.head = node
            return
        last = self.head
        while last.next is not None:
            last = last.next
        last.next = node

    def insert_before(self, flage, new_value):
        """Insert *new_value* immediately before the first node whose value
        equals *flage*.

        NOTE(review): walks off the end (AttributeError) when *flage* is
        absent -- behaviour intentionally kept from the original.
        """
        head = self.head
        nextv = self.head
        node = Node(new_value)
        # Head comparison is by string form, matching the original.
        if str(flage) == str(self.head):
            self.insert(new_value)
            return
        while flage != nextv.value:
            head = nextv
            nextv = nextv.next
        head.next = node
        node.next = nextv

    def insert_after(self, old_value, value):
        """Insert *value* right after the first inner node valued *old_value*.

        When *old_value* is not found (or sits in the last node, which the
        scan never inspects), *value* is appended at the tail -- behaviour
        kept from the original.
        """
        node = Node(value)
        current = self.head
        temp = self.head
        while current.next != None:
            if current.value == old_value:
                temp = temp.next
                current.next = node
                current = current.next
                current.next = temp
                return
            current = current.next
            temp = temp.next
        self.append(value)

    def __len__(self):
        count = 0
        current = self.head
        while current:
            count += 1
            current = current.next
        return count

    def __str__(self):
        parts = []
        current = self.head
        while current:
            parts.append(str(current.value))
            current = current.next
        return " -> ".join(parts + ["None"])

    def __iter__(self):
        current = self.head
        while current:
            yield current.value
            current = current.next

    def __repr__(self):
        return "LinkedList()"

    def kthFromEnd(self, num):
        """Return the value *num* nodes from the tail (0 == last node).

        Returns the string "out of range" when *num* exceeds length - 1,
        matching the original behaviour.
        """
        last_index = len(self) - 1
        if last_index < num:
            return ("out of range")
        steps = last_index - num
        current = self.head
        while steps > 0:
            current = current.next
            steps -= 1
        return current.value
# def zipLists(list1, list2):
# head_L1=list1.head
# next_l1=head_L1.next
# head_L2=list2.head
# next_l2=head_L2.next
# conter=3
# for i in range(10):
# head_L1.next=head_L2
# head_L2.next=next_l1
# next_l1.next=next_l2
# # next_l2.next=
# print(list1)
def zipLists(list1, list2):
    """Interleave the values of two linked lists into a 'v-> v-> ...None' string.

    When either list is empty, the non-empty list's own __str__ format is
    returned; when both are empty, a fixed message is returned.
    """
    node_a = list1.head
    node_b = list2.head
    if node_a == None or node_b == None:
        if node_a:
            return list1.__str__()
        if node_b:
            return list2.__str__()
        return "Linked lists are both Empty "
    interleaved = []
    while node_a or node_b:
        if node_a:
            interleaved.append(node_a.value)
            node_a = node_a.next
        if node_b:
            interleaved.append(node_b.value)
            node_b = node_b.next
    return ''.join(f'{item}-> ' for item in interleaved) + 'None'
def swap_head(link_list):
    # NOTE(review): the intent appears to be swapping adjacent node pairs,
    # but each iteration only rewires around the current head and re-links
    # the old head behind it; `link_list.head.next.next` is re-assigned
    # before it is read (curent.next = next1.next picks up the new value),
    # so for lists longer than 2 nodes this creates a cycle and the loop
    # never terminates. Needs a rewrite -- check with callers first (the
    # only call site in __main__ is commented out).
    while link_list.head.next.next:
        curent=link_list.head
        next1=link_list.head.next.next
        link_list.head=next1
        link_list.head.next=curent.next
        curent.next=next1.next
        curent.next.next=curent
    return link_list
# def reverse(linklist,pre=None):
# def _re
# if linklist.head:
# if linklist.head.next:
# global next1=linklist.head.next
# linklist.head.next=pre
# # print(pre)
# pre=linklist.head
# # print(pre)
# linklist.head=next1
# reverse (linklist,pre)
# linklist.head=pre
# return linklist
if __name__ == "__main__":
    # Demo: build two lists and print one before/after the (missing) reverse.
    ll = LinkedList()
    test_node = Node(5)
    ss = LinkedList()
    ll.insert(5)
    ll.insert(4)
    ll.insert(3)
    ll.insert(2)
    ll.insert(1)
    ss.insert(555)
    ss.insert(1)
    ss.insert(2)
    ss.insert(3)
    print(ll)
    # Fixed: the original called reverse(ll) here, but reverse() is only a
    # commented-out sketch above, so the script always crashed with
    # NameError. The call stays disabled until reverse() is implemented.
    # reverse(ll)
    print(ll)
    # print(swap_head(ss))
| StarcoderdataPython |
3224371 | from asyncio import gather
from datetime import datetime, timezone
from sanic import Blueprint
from sanic.request import Request
from sanic.response import HTTPResponse, json
from vxwhatsapp import config
from vxwhatsapp.auth import validate_hmac
from vxwhatsapp.claims import store_conversation_claim
from vxwhatsapp.models import Event, Message
from vxwhatsapp.schema import validate_schema, whatsapp_webhook_schema
bp = Blueprint("whatsapp", version=1)
async def publish_message(request, message):
    """Publish *message* to the queue while persisting the sender's Turn
    conversation claim; both operations run concurrently."""
    publish = request.app.publisher.publish_message(message)
    claim = store_conversation_claim(
        request.app.redis,
        request.headers.get("X-Turn-Claim"),
        message.from_addr,
    )
    return await gather(publish, claim)
async def dedupe_and_publish_message(request, message):
    """Publish *message* at most once per message_id within the dedup window.

    Falls back to always-publish when redis is unavailable.
    """
    if not request.app.redis:
        return await publish_message(request, message)
    lock_key = f"msglock:{message.message_id}"
    seen_key = f"msgseen:{message.message_id}"
    # The lock serializes concurrent deliveries of the same message id so
    # only one coroutine can check-and-publish at a time.
    lock = request.app.redis.lock(lock_key, timeout=1.0, blocking_timeout=2.0)
    async with lock:
        if await request.app.redis.get(seen_key) is not None:
            # Already handled inside the deduplication window.
            return
        await publish_message(request, message)
        # Mark as seen only after a successful publish, expiring with the window.
        await request.app.redis.setex(seen_key, config.DEDUPLICATION_WINDOW, "")
@bp.route("/webhook", methods=["POST"])
@validate_hmac("X-Turn-Hook-Signature", lambda: config.HMAC_SECRET)
@validate_schema(whatsapp_webhook_schema)
async def whatsapp_webhook(request: Request) -> HTTPResponse:
    """Receive a Turn/WhatsApp webhook: turn inbound messages into Message
    objects and status updates into Event objects, then publish them all
    concurrently. Returns an empty JSON body.

    Consumed fields are pop()ed off each msg dict so the residual dict can
    ride along in transport_metadata without duplication.
    """
    tasks = []
    for msg in request.json.get("messages", []):
        if msg["type"] == "system":
            # Ignore system messages
            continue
        timestamp = datetime.fromtimestamp(float(msg.pop("timestamp")), tz=timezone.utc)
        # Extract the user-visible text; where this message type carries
        # none (unknown/contacts), content stays None.
        content = None
        if msg["type"] == "text":
            content = msg.pop("text")["body"]
        elif msg["type"] == "location":
            content = msg["location"].pop("name", None)
        elif msg["type"] == "button":
            content = msg["button"].pop("text")
        elif msg["type"] == "interactive":
            if msg["interactive"]["type"] == "list_reply":
                content = msg["interactive"]["list_reply"].pop("title")
            else:
                content = msg["interactive"]["button_reply"].pop("title")
        elif msg["type"] in ("unknown", "contacts"):
            content = None
        else:
            # Media types: use the caption when present.
            content = msg[msg["type"]].pop("caption", None)
        message = Message(
            to_addr=config.WHATSAPP_NUMBER,
            from_addr=msg.pop("from"),
            content=content,
            in_reply_to=msg.get("context", {}).pop("id", None),
            transport_name=config.TRANSPORT_NAME,
            transport_type=Message.TRANSPORT_TYPE.HTTP_API,
            timestamp=timestamp,
            message_id=msg.pop("id"),
            to_addr_type=Message.ADDRESS_TYPE.MSISDN,
            from_addr_type=Message.ADDRESS_TYPE.MSISDN,
            transport_metadata={
                "contacts": request.json.get("contacts"),
                "message": msg,
                "claim": request.headers.get("X-Turn-Claim"),
            },
        )
        tasks.append(dedupe_and_publish_message(request, message))
    for ev in request.json.get("statuses", []):
        message_id = ev.pop("id")
        status = ev["status"]
        # Map the WhatsApp status string to an internal (event_type,
        # delivery_status) pair; unknown statuses raise KeyError.
        event_type, delivery_status = {
            "read": (
                Event.EVENT_TYPE.DELIVERY_REPORT,
                Event.DELIVERY_STATUS.DELIVERED,
            ),
            "delivered": (
                Event.EVENT_TYPE.DELIVERY_REPORT,
                Event.DELIVERY_STATUS.DELIVERED,
            ),
            "ack": (Event.EVENT_TYPE.ACK, None),
            "failed": (
                Event.EVENT_TYPE.DELIVERY_REPORT,
                Event.DELIVERY_STATUS.FAILED,
            ),
            "deleted": (
                Event.EVENT_TYPE.DELIVERY_REPORT,
                Event.DELIVERY_STATUS.DELIVERED,
            ),
        }[status]
        timestamp = datetime.fromtimestamp(float(ev.pop("timestamp")), tz=timezone.utc)
        event = Event(
            user_message_id=message_id,
            event_type=event_type,
            timestamp=timestamp,
            sent_message_id=message_id,
            delivery_status=delivery_status,
            helper_metadata=ev,
        )
        tasks.append(request.app.publisher.publish_event(event))
    await gather(*tasks)
    return json({})
| StarcoderdataPython |
1713296 | <filename>test/programytest/oob/test_default.py
import unittest
from programy.oob.default import DefaultOutOfBandProcessor
import xml.etree.ElementTree as ET
from programy.context import ClientContext
from programytest.aiml_tests.client import TestClient
class DefaultOutOfBandProcessorTests(unittest.TestCase):
    """The default OOB processor should yield an empty string for any command."""

    def setUp(self):
        test_client = TestClient()
        self._client_context = test_client.create_client_context("testid")

    def test_processor(self):
        processor = DefaultOutOfBandProcessor()
        self.assertIsNotNone(processor)
        self.assertEqual("", processor.execute_oob_command(self._client_context))
        payload = ET.fromstring("<something>process</something>")
        self.assertEqual("", processor.process_out_of_bounds(self._client_context, payload))
3329857 | # Credit to GPFlow.
import tensorflow as tf
import numpy as np
class Gaussian(object):
    """Gaussian likelihood with a trainable, log-parameterised noise variance."""

    def __init__(self, variance=1.0, **kwargs):
        # The variance is optimised in log space so it always stays positive.
        log_var = tf.Variable(np.log(variance), dtype=tf.float64, name='lik_log_variance')
        self.variance = tf.exp(log_var)

    def logdensity(self, x, mu, var):
        """Element-wise log N(x | mu, var)."""
        sq_err = tf.square(mu - x)
        return -0.5 * (np.log(2 * np.pi) + tf.log(var) + sq_err / var)

    def logp(self, F, Y):
        """Log-likelihood of observations Y given latent values F."""
        return self.logdensity(Y, F, self.variance)

    def conditional_mean(self, F):
        """E[Y | F] = F for a Gaussian likelihood."""
        return tf.identity(F)

    def conditional_variance(self, F):
        """Var[Y | F]: the constant noise variance, broadcast to F's shape."""
        return tf.fill(tf.shape(F), tf.squeeze(self.variance))

    def predict_mean_and_var(self, Fmu, Fvar):
        """Predictive mean and variance: noise variance adds to the latent variance."""
        return tf.identity(Fmu), Fvar + self.variance

    def predict_density(self, Fmu, Fvar, Y):
        """Log predictive density of Y under the latent Gaussian plus noise."""
        return self.logdensity(Y, Fmu, Fvar + self.variance)

    def variational_expectations(self, Fmu, Fvar, Y):
        """E_q[log p(Y | F)] under q(F) = N(Fmu, Fvar), in closed form."""
        expected_sq_err = tf.square(Y - Fmu) + Fvar
        return -0.5 * np.log(2 * np.pi) - 0.5 * tf.log(self.variance) \
            - 0.5 * expected_sq_err / self.variance
class MultiClassInvLink(object):
    # From https://github.com/gpflow/gpflow
    """
    Robust-max inverse link for multi-class classification: the class whose
    latent function is largest receives probability 1 - epsilon, and every
    other class shares epsilon equally.
    """
    def __init__(self, num_classes):
        # probability mass given away by the winning class
        self.epsilon = 1e-3
        self.num_classes = num_classes
        # share of epsilon assigned to each of the (K - 1) losing classes
        self.epsilon_k1 = self.epsilon / (self.num_classes - 1.0)
    def __call__(self, Fmu):
        # NOTE(review): Fmu.argmax(axis=1) is NumPy-style; a TF1 tensor has no
        # .argmax method, so this assumes Fmu behaves like an ndarray here --
        # confirm the caller's argument type.
        return tf.one_hot(Fmu.argmax(axis=1), self.num_classes, 1 - self.epsilon, self.epsilon)
    def prob_is_largest(self, Y, mu, var, gh_x, gh_w):
        """
        Probability that the latent function indicated by label Y is the
        largest of the K latents, integrated via Gauss-Hermite quadrature.

        Y: integer class labels, one per row (reshaped to a vector below)
        mu, var: per-class latent means and variances (rows align with Y)
        gh_x, gh_w: Gauss-Hermite quadrature points and weights
        """
        float_type = mu.dtype
        Y = tf.cast(Y, tf.int64)
        # work out what the mean and variance is of the indicated latent function.
        oh_on = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 1., 0.), float_type)
        mu_selected = tf.reduce_sum(oh_on * mu, 1)
        var_selected = tf.reduce_sum(oh_on * var, 1)
        # generate Gauss Hermite grid
        # (variance is clipped below at 1e-10 to keep the sqrt finite)
        X = tf.reshape(mu_selected, (-1, 1)) + gh_x * tf.reshape(
            tf.sqrt(tf.clip_by_value(2. * var_selected, 1e-10, np.inf)), (-1, 1))
        # compute the CDF of the Gaussian between the latent functions and the grid (including the selected function)
        dist = (tf.expand_dims(X, 1) - tf.expand_dims(mu, 2)) / tf.expand_dims(
            tf.sqrt(tf.clip_by_value(var, 1e-10, np.inf)), 2)
        cdfs = 0.5 * (1.0 + tf.erf(dist / np.sqrt(2.0)))
        # squash CDF values slightly away from 0 and 1 for numerical stability
        cdfs = cdfs * (1 - 2e-4) + 1e-4
        # blank out all the distances on the selected latent function
        oh_off = tf.cast(tf.one_hot(tf.reshape(Y, (-1,)), self.num_classes, 0., 1.), float_type)
        cdfs = cdfs * tf.expand_dims(oh_off, 2) + tf.expand_dims(oh_on, 2)
        # take the product over the latent functions, and the sum over the GH grid.
        return tf.matmul(tf.reduce_prod(cdfs, reduction_indices=[1]), tf.reshape(gh_w / np.sqrt(np.pi), (-1, 1)))
class MultiClass(object):
    # Modified from https://github.com/gpflow/gpflow
    """
    Robust-max multi-class likelihood over K latent functions, using
    Gauss-Hermite quadrature for predictive densities.
    """
    def __init__(self, num_classes):
        self.num_classes = num_classes
        # number of Gauss-Hermite quadrature points used in _density
        self.gauss_points = 20
        self.invlink = MultiClassInvLink(num_classes)
    def logp(self, F, Y):
        # F: N x D
        # Y: N x 1
        # NOTE(review): F.argmax(axis=1) is NumPy-style; a TF1 tensor has no
        # .argmax method -- confirm F's type at the call site.
        y_hat = F.argmax(axis=1)
        correct = tf.equal(y_hat[:, None], Y)
        Y_shape = tf.shape(Y)
        # robust-max probabilities: 1 - eps where the prediction matches the
        # label, eps / (K - 1) everywhere else
        ones = tf.ones(Y_shape, dtype=tf.float64) - self.invlink.epsilon
        zeros = tf.zeros(Y_shape, dtype=tf.float64) + self.invlink.epsilon_k1
        p = tf.where(correct, ones, zeros)
        return tf.log(p)
    def _predict_mean(self, Fmu, Fvar):
        # class probabilities: evaluate the predictive density once for every
        # possible label and stack the results column-wise
        possible_outputs = [tf.fill(tf.stack([tf.shape(Fmu)[0], 1]), np.array(i, dtype=np.int64)) for i in
                            range(self.num_classes)]
        ps = [self._density(Fmu, Fvar, po) for po in possible_outputs]
        ps = tf.transpose(tf.stack([tf.reshape(p, (-1,)) for p in ps]))
        return ps
    def _density(self, Fmu, Fvar, Y):
        # robust-max predictive density via Gauss-Hermite quadrature
        gauss_points, gauss_weights = np.polynomial.hermite.hermgauss(self.gauss_points)
        p = self.invlink.prob_is_largest(Y, Fmu, Fvar, gauss_points, gauss_weights)
        return p * (1.0 - self.invlink.epsilon) + (1.0 - p) * self.invlink.epsilon_k1
    def predict_density(self, Fmu, Fvar, Y):
        """Log predictive density of labels Y under q(F) = N(Fmu, Fvar)."""
        return tf.log(self._density(Fmu, Fvar, Y))
    def conditional_mean(self, Fmu):
        """Robust-max class probabilities for latent values Fmu."""
        return self.invlink(Fmu)
    def predict_mean_and_var(self, Fmu, Fvar):
        # variance of a Bernoulli indicator: p - p^2
        mean = self._predict_mean(Fmu, Fvar)
        return mean, mean - tf.square(mean)
| StarcoderdataPython |
# byte_api/client.py
from .api import Api
from .types import *
class Client(object):
    """
    Byte API client: wraps :class:`Api` with typed helpers for the
    account, post and feedback endpoints.

    :param token: Authorization token
    :type token: str
    :param headers: Additional headers **except Authorization**
    :type headers: dict
    """
    def __init__(self, token: str, headers=None):
        """
        Initializes the API for the client

        :param token: Authorization token
        :type token: str
        :param headers: Additional headers **except Authorization**
        :type headers: dict
        """
        self.api = Api(token, headers)

    def _wrap(self, raw, data_cls=None):
        """
        Decode a raw API reply into a :class:`Response`.

        Factors out the data/error decoding that was previously duplicated
        across get_user / comment / loop / rebyte / get_colors, and decodes
        errors consistently with :class:`Error` for all of them.

        :param raw: raw reply as returned by :class:`Api`
        :param data_cls: type used to decode ``data`` (skipped when None)
        :rtype: :class:`Response`
        """
        response = Response.de_json(raw)
        data = None
        error = None
        if data_cls is not None and hasattr(response, 'data'):
            data = data_cls.de_json(response.data)
        if hasattr(response, 'error'):
            error = Error.de_json(response.error)
        return Response(response.success, data=data, error=error)

    def follow(self, id: str) -> Response:
        """
        Subscribes to a user

        :param id: User id
        :type id: str
        :rtype: :class:`Response`
        """
        response = self.api.put('account/id/{}/follow'.format(id))
        return Response.de_json(response)

    def unfollow(self, id: str) -> Response:
        """
        Unsubscribes to a user

        :param id: User id
        :type id: str
        :rtype: :class:`Response`
        """
        response = self.api.delete('account/id/{}/follow'.format(id))
        return Response.de_json(response)

    def get_user(self, id: str) -> Response:
        """
        Gets a user profile

        :param id: User id
        :type id: str
        :rtype: :class:`Response`, :class:`Account`
        """
        response = self.api.get('account/id/{}'.format(id))
        return self._wrap(response, Account)

    def like(self, id: str) -> Response:
        """
        Likes a byte

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response`
        """
        response = self.api.put('post/id/{}/feedback/like'.format(id))
        return Response.de_json(response)

    def dislike(self, id: str) -> Response:
        """
        Removes like from a byte

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response`
        """
        response = self.api.delete('post/id/{}/feedback/like'.format(id))
        return Response.de_json(response)

    def comment(self, id: str, text: str) -> Response:
        """
        Comments a byte

        :param id: Byte (post) id
        :type id: str
        :param text: Comment text
        :type text: str
        :rtype: :class:`Response`, :class:`Comment`
        """
        response = self.api.post('post/id/{}/feedback/comment'.format(id),
                                 json_data={
                                     'postID': id,
                                     'body': text
                                 })
        return self._wrap(response, Comment)

    def delete_comment(self, id: str) -> Response:
        """
        Deletes a comment

        :param id: Comment id patterned by **{post id}-{comment id}**
        :type id: str
        :rtype: :class:`Response`
        """
        # NOTE(review): kept as POST to preserve the original behavior even
        # though the method name suggests an HTTP DELETE -- confirm against
        # the remote API.
        response = self.api.post('feedback/comment/id/{}'.format(id),
                                 json_data={
                                     'commentID': id
                                 })
        # Fix: the original decoded the reply twice, passing an
        # already-decoded Response object into the second de_json call.
        return Response.de_json(response)

    def loop(self, id: str) -> Response:
        """
        Increments loop counter

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response`, :class:`LoopCounter`
        """
        response = self.api.post('post/id/{}/loop'.format(id))
        return self._wrap(response, LoopCounter)

    def rebyte(self, id: str) -> Response:
        """
        Rebytes (reposts) a byte

        :param id: Byte (post) id
        :type id: str
        :rtype: :class:`Response`, :class:`Rebyte`
        """
        response = self.api.post('rebyte',
                                 json_data={
                                     'postID': id
                                 })
        return self._wrap(response, Rebyte)

    def get_colors(self) -> Response:
        """
        Gets available color schemes

        :rtype: :class:`Response`, :class:`Colors`
        """
        response = self.api.get('account/me/colors')
        return self._wrap(response, Colors)

    def set_info(self, bio: str = None, display_name: str = None,
                 username: str = None, color_scheme: int = None) -> Response:
        """
        Sets profile info; only the fields that are supplied are sent.

        :param bio: New bio
        :type bio: str
        :param display_name: New name to display
        :type display_name: str
        :param username: New username
        :type username: str
        :param color_scheme: Id of new color scheme
        :type color_scheme: int
        :rtype: :class:`Response`
        """
        data = {}
        if bio:
            data['bio'] = bio
        if display_name:
            data['displayName'] = display_name
        if username:
            data['username'] = username
        if color_scheme:
            data['colorScheme'] = color_scheme
        # NOTE(review): this endpoint sends form data (`data=`) while the
        # other mutating endpoints use `json_data=` -- preserved from the
        # original; confirm which encoding the API expects.
        response = self.api.put('account/me',
                                data=data)
        return Response.de_json(response)

    def set_username(self, username: str) -> Response:
        """
        Sets username

        :param username: New username
        :type username: str
        :rtype: :class:`Response`
        """
        return self.set_info(username=username)

    def set_bio(self, bio: str) -> Response:
        """
        Sets bio

        :param bio: New bio
        :type bio: str
        :rtype: :class:`Response`
        """
        return self.set_info(bio=bio)

    def set_display_name(self, display_name: str) -> Response:
        """
        Sets name to display

        :param display_name: New name to display
        :type display_name: str
        :rtype: :class:`Response`
        """
        return self.set_info(display_name=display_name)

    def set_color_scheme(self, color_scheme: int) -> Response:
        """
        Sets color scheme

        :param color_scheme: Id of new color scheme
        :type color_scheme: int
        :rtype: :class:`Response`
        """
        return self.set_info(color_scheme=color_scheme)
| StarcoderdataPython |
#!/usr/bin/env python3
import argparse
import tempfile
import logging
import difflib
import glob
import json
import sys
import os
import re
from pprint import pformat
from .counter import PapersForCount, SenateCounter
from .aecdata import CandidateList, SenateATL, SenateBTL, FormalPreferences
from .common import logger
from .results import JSONResults
class SenateCountPost2015:
    """
    Input handler for AEC Senate data in the post-2015 (optional
    preferential) format: reads the candidate lists and the
    formal-preferences file, and expands each ballot into a candidate
    preference flow for the count.

    Keyword options:
      s282_candidates, s282_method -- restrict ballots to a candidate subset
          for a section 282 recount
      remove_candidates, remove_method -- exclude named candidates
      max_ballots -- stop after reading this many ballots (testing aid)
    """
    # post-2015 counting rules: bulk exclusions are not applied
    disable_bulk_exclusions = True

    def __init__(self, state_name, get_input_file, **kwargs):
        """
        state_name: name of the state being counted
        get_input_file: callable mapping a logical input name to a file path
        """
        self.candidates = CandidateList(state_name,
                                        get_input_file('all-candidates'),
                                        get_input_file('senate-candidates'))
        self.tickets_for_count = PapersForCount()
        # optional s282 recount configuration
        self.s282_candidates = kwargs.get('s282_candidates')
        self.s282_method = kwargs.get('s282_method')
        self.max_ballots = kwargs['max_ballots'] if 'max_ballots' in kwargs else None
        self.remove_candidates = None
        self.remove_method = kwargs.get('remove_method')
        remove = kwargs.get('remove_candidates')
        if remove:
            # resolve each configured tuple into a candidate id
            self.remove_candidates = [self.candidates.get_candidate_id(*t) for t in remove]

        def atl_flow(form):
            # expand an above-the-line form into a candidate preference flow;
            # preferences stop at the first gap or duplicated preference number
            by_pref = {}
            for pref, group in zip(form, self.candidates.groups):
                if pref is None:
                    continue
                if pref not in by_pref:
                    by_pref[pref] = []
                by_pref[pref].append(group)
            prefs = []
            for i in range(1, len(form) + 1):
                at_pref = by_pref.get(i)
                if not at_pref or len(at_pref) != 1:
                    break
                the_pref = at_pref[0]
                # an ATL preference for a group flows to each of its candidates in order
                for candidate in the_pref.candidates:
                    candidate_id = candidate.candidate_id
                    prefs.append(candidate_id)
            if not prefs:
                return None
            return prefs

        def btl_flow(form):
            # expand a below-the-line form; preferences stop at the first gap
            # or duplicated preference number
            by_pref = {}
            for pref, candidate in zip(form, self.candidates.candidates):
                if pref is None:
                    continue
                if pref not in by_pref:
                    by_pref[pref] = []
                by_pref[pref].append(candidate.candidate_id)
            prefs = []
            for i in range(1, len(form) + 1):
                at_pref = by_pref.get(i)
                if not at_pref or len(at_pref) != 1:
                    break
                candidate_id = at_pref[0]
                prefs.append(candidate_id)
            # must have unique prefs for 1..6, or informal
            if len(prefs) < 6:
                return None
            return prefs

        def resolve_non_s282(atl, btl):
            "resolve the formal form from ATL and BTL forms. BTL takes precedence, if formal"
            return btl_flow(btl) or atl_flow(atl)

        def resolve_s282_restrict_form(atl, btl):
            "resolve the formal form as for resolve_non_s282, but restrict to s282 candidates"
            expanded = btl_flow(btl) or atl_flow(atl)
            restricted = [candidate_id for candidate_id in expanded if candidate_id in self.s282_candidates]
            if len(restricted) == 0:
                return None
            return restricted

        def resolve_remove_candidates(atl, btl, min_candidates):
            "resolve the formal form, removing the listed candidates from eligibiity"
            restricted = None
            btl_expanded = btl_flow(btl)
            if btl_expanded:
                restricted = [candidate_id for candidate_id in btl_expanded if candidate_id not in self.remove_candidates]
                # in 'strict' mode a BTL form must retain min_candidates preferences
                if min_candidates is not None and len(restricted) < min_candidates:
                    restricted = None
            if restricted is None:
                atl_expanded = atl_flow(atl)
                if atl_expanded:
                    restricted = [candidate_id for candidate_id in atl_expanded if candidate_id not in self.remove_candidates]
                    if len(restricted) == 0:
                        restricted = None
            return restricted

        def resolve_s282_restrict_form_with_savings(atl, btl):
            "resolve the formal form as for resolve_non_s282, but restrict to s282 candidates"
            restricted = None
            # if we were formal BTL in a non-s282 count, restrict the form. if at least one
            # preference, we're formal
            btl_expanded = btl_flow(btl)
            if btl_expanded:
                restricted = [candidate_id for candidate_id in btl_expanded if candidate_id in self.s282_candidates]
                if len(restricted) == 0:
                    restricted = None
            # if, before or after restriction, we are not formal BTL, try restricting the ATL form
            if restricted is None:
                atl_expanded = atl_flow(atl)
                if atl_expanded:
                    restricted = [candidate_id for candidate_id in atl_expanded if candidate_id in self.s282_candidates]
                    if len(restricted) == 0:
                        restricted = None
            return restricted

        atl_n = len(self.candidates.groups)
        btl_n = len(self.candidates.candidates)
        assert(atl_n > 0 and btl_n > 0)
        informal_n = 0
        n_ballots = 0
        # select the form-resolution strategy for this count
        resolution_fn = resolve_non_s282
        if self.s282_candidates:
            if self.s282_method == 'restrict_form':
                resolution_fn = resolve_s282_restrict_form
            elif self.s282_method == 'restrict_form_with_savings':
                resolution_fn = resolve_s282_restrict_form_with_savings
            else:
                raise Exception("unknown s282 method: `%s'" % (self.s282_method))
        if self.remove_candidates:
            if self.remove_method == 'relaxed':
                resolution_fn = lambda atl, btl: resolve_remove_candidates(atl, btl, None)
            elif self.remove_method == 'strict':
                resolution_fn = lambda atl, btl: resolve_remove_candidates(atl, btl, 6)
        # the (extremely) busy loop reading preferences and expanding them into
        # forms to be entered into the count
        for raw_form, count in FormalPreferences(get_input_file('formal-preferences')):
            if self.max_ballots and n_ballots >= self.max_ballots:
                break
            # each raw form is the ATL squares followed by the BTL squares
            atl = raw_form[:atl_n]
            btl = raw_form[atl_n:]
            form = resolution_fn(atl, btl)
            if form is not None:
                self.tickets_for_count.add_ticket(tuple(form), count)
            else:
                informal_n += count
            n_ballots += count
        # slightly paranoid check, but outside the busy loop
        assert(len(raw_form) == atl_n + btl_n)
        if informal_n > 0:
            logger.info("%d ballots are informal and were excluded from the count" % (informal_n))

    def get_papers_for_count(self):
        """All expanded ballot forms, ready for the counter."""
        return self.tickets_for_count

    def get_candidate_ids(self):
        """Candidate ids eligible in this count (after s282/remove filtering)."""
        candidate_ids = [c.candidate_id for c in self.candidates.candidates]
        if self.s282_candidates:
            candidate_ids = [t for t in candidate_ids if t in self.s282_candidates]
        if self.remove_candidates:
            candidate_ids = [t for t in candidate_ids if t not in self.remove_candidates]
        return candidate_ids

    def get_parties(self):
        """Mapping of party abbreviation -> full party name."""
        return dict((c.party_abbreviation, c.party_name)
                    for c in self.candidates.candidates)

    def get_candidate_title(self, candidate_id):
        """Display title "Surname, Given name" for a candidate."""
        c = self.candidates.candidate_by_id[candidate_id]
        return "{}, {}".format(c.surname, c.given_name)

    def get_candidate_order(self, candidate_id):
        """Ballot-paper ordering index of a candidate."""
        return self.candidates.candidate_by_id[candidate_id].candidate_order

    def get_candidate_party(self, candidate_id):
        """Party abbreviation for a candidate."""
        return self.candidates.candidate_by_id[candidate_id].party_abbreviation
class SenateCountPre2015:
    """
    Input handler for AEC Senate data in the pre-2015 (group voting ticket)
    format: above-the-line votes expand through the registered group tickets,
    below-the-line votes are read directly.
    """
    # pre-2015 counting rules apply bulk exclusions
    disable_bulk_exclusions = False

    def __init__(self, state_name, get_input_file, **kwargs):
        """
        state_name: name of the state being counted
        get_input_file: callable mapping a logical input name to a file path
        """
        if 's282_recount' in kwargs:
            raise Exception('s282 recount not implemented for pre2015 data')
        self.candidates = CandidateList(state_name,
                                        get_input_file('all-candidates'),
                                        get_input_file('senate-candidates'))
        self.atl = SenateATL(
            state_name,
            get_input_file('group-voting-tickets'),
            get_input_file('first-preferences'))
        self.btl = SenateBTL(get_input_file('btl-preferences'))

        def load_tickets(ticket_obj):
            # pour every (form, count) ticket from a source into the shared pool
            if ticket_obj is None:
                return
            for form, n in ticket_obj.get_tickets():
                self.tickets_for_count.add_ticket(form, n)
        self.tickets_for_count = PapersForCount()
        load_tickets(self.atl)
        load_tickets(self.btl)

    def get_papers_for_count(self):
        """All ballot forms, ready for the counter."""
        return self.tickets_for_count

    def get_candidate_ids(self):
        """All candidate ids (no filtering in the pre-2015 format)."""
        return [c.candidate_id for c in self.candidates.candidates]

    def get_parties(self):
        """Mapping of party abbreviation -> full party name."""
        return dict((c.party_abbreviation, c.party_name)
                    for c in self.candidates.candidates)

    def get_candidate_title(self, candidate_id):
        """Display title "Surname, Given name" for a candidate."""
        c = self.candidates.candidate_by_id[candidate_id]
        return "{}, {}".format(c.surname, c.given_name)

    def get_candidate_order(self, candidate_id):
        """Ballot-paper ordering index of a candidate."""
        return self.candidates.candidate_by_id[candidate_id].candidate_order

    def get_candidate_party(self, candidate_id):
        """Party abbreviation for a candidate."""
        return self.candidates.candidate_by_id[candidate_id].party_abbreviation
def verify_test_logs(verified_dir, test_log_dir):
    """Compare per-round JSON count logs against the verified reference logs.

    For every round_N.json in verified_dir, the corresponding file in
    test_log_dir must match exactly (a missing file compares as {}).  On full
    success the scratch test_log_dir is deleted.  Returns True if all rounds
    matched.
    """
    test_re = re.compile(r'^round_(\d+)\.json')
    rounds = []
    for log_name in os.listdir(verified_dir):
        m = test_re.match(log_name)
        if m:
            rounds.append(int(m.groups()[0]))

    # Fix: this helper was originally named `fname`, which was also used as
    # the loop variable in both directory scans; the later loop silently
    # rebound the function name.  Renamed to avoid the shadowing.
    def _round_path(d, r):
        # path of the log for round `r` under directory `d`
        return os.path.join(d, 'round_%d.json' % r)

    def getlog(d, r):
        # load the round log; a missing file is treated as an empty log
        try:
            with open(_round_path(d, r)) as fd:
                return json.load(fd)
        except FileNotFoundError:
            return {}
    ok = True
    for idx in sorted(rounds):
        v = getlog(verified_dir, idx)
        t = getlog(test_log_dir, idx)
        if v != t:
            logger.error("Round %d: FAIL" % (idx))
            logger.error("Log should be:")
            logger.error(pformat(v))
            logger.error("Log is:")
            logger.error(pformat(t))
            logger.error("Diff:")
            logger.error(
                '\n'.join(
                    difflib.unified_diff(
                        pformat(v).split('\n'),
                        pformat(t).split('\n'))))
            ok = False
        else:
            logger.debug("Round %d: OK" % (idx))
    # only clean up the scratch directory when every round verified
    if ok and len(rounds) > 0:
        for log_name in os.listdir(test_log_dir):
            if test_re.match(log_name):
                os.unlink(os.path.join(test_log_dir, log_name))
        os.rmdir(test_log_dir)
    return ok
def read_config(config_file):
    """Load and return the JSON count configuration from `config_file`."""
    with open(config_file) as fd:
        raw = fd.read()
    return json.loads(raw)
def cleanup_json(out_dir):
    """Delete previously generated .json result files from `out_dir`."""
    stale_files = glob.glob(out_dir + '/*.json')
    for stale in stale_files:
        logger.debug("cleanup: removing `%s'" % (stale))
        os.unlink(stale)
def write_angular_json(config, out_dir):
    """Write the top-level count.json index consumed by the angular frontend."""
    summary = {
        'title': config['title'],
        'counts': [
            {
                'name': entry['name'],
                'state': entry['state'],
                'description': entry['description'],
                'path': entry['shortname'],
            }
            for entry in config['count']
        ],
    }
    target = os.path.join(out_dir, 'count.json')
    with open(target, 'w') as fd:
        json.dump(summary, fd, sort_keys=True, indent=4, separators=(',', ': '))
def get_data(input_cls, base_dir, count, **kwargs):
    """Instantiate the AEC data loader class for one configured count."""
    file_map = count['aec-data']

    def resolve(name):
        # map a logical input name to a path relative to the config file
        return os.path.join(base_dir, file_map[name])

    return input_cls(count['state'], resolve, **kwargs)
class AutomationException(Exception):
    """Raised when the archived automation data cannot answer a posed question."""
    pass
class Automation:
    """
    Replays archived tie-break decisions during a count.

    SenateCounter occasionally needs an external decision (election order,
    exclusion ties, election ties).  An Automation instance holds the
    archived list of (question, answer) pairs for one decision type and
    answers each question in order as the counter re-asks it.
    """

    def __init__(self, name, automation_data, count_data):
        """
        name: name of this automation instance, for logging
        automation_data: list of questions and responses [ [ question, response ], ... ]
        count_data: count input object; used to render candidate titles in log messages
        """
        self._name = name
        self._data = automation_data
        self._count_data = count_data
        # cursor into self._data: index of the next archived answer to replay
        self._upto = 0

    def _qstr(self, question):
        "we need to cope with a list, or a list of lists"
        parts = []
        for entry in question:
            if type(entry) is list:
                # nested question: render recursively
                parts.append(self._qstr(entry))
            else:
                parts.append('"%s"<%d>' % (self._count_data.get_candidate_title(entry), entry))
        return ', '.join(parts)

    def create_callback(self):
        """
        create a callback, suitable to be passed to SenateCounter
        """
        def __callback(question_posed):
            logger.debug("%s: asked to choose between: %s" % (self._name, self._qstr(question_posed)))
            # Fix: the original compared the integer cursor to the data list
            # itself (`self._upto == self._data`), which is never true, so
            # exhausting the archive crashed with IndexError instead of
            # raising AutomationException.
            if self._upto == len(self._data):
                logger.error("%s: out of automation data, requested to pick between %s" % (self._name, self._qstr(question_posed)))
                raise AutomationException("out of automation data")
            question_archived, answer = self._data[self._upto]
            if question_archived != question_posed:
                # mismatch is logged but not fatal; the archived answer is still applied
                logger.error("%s: automation data mismatch, expected question `%s', got question `%s'" % (self._name, self._qstr(question_archived), self._qstr(question_posed)))
            resp = question_posed.index(answer)
            self._upto += 1
            return resp
        return __callback

    def check_complete(self):
        """True if every archived answer was consumed during the count."""
        if self._upto != len(self._data):
            logger.error("%s: not all automation data was consumed (upto %d/%d)" % (self._name, self._upto, len(self._data)))
            return False
        return True
def json_count_path(out_dir, shortname):
    """Path of the per-count JSON results file inside `out_dir`."""
    return os.path.join(out_dir, '%s.json' % shortname)
def get_outcome(count, count_data, base_dir, out_dir):
    """Run one configured count and write its JSON results.

    count: one entry from the config's 'count' list
    count_data: loaded AEC input data (SenateCountPre2015/Post2015 instance)
    base_dir: directory of the config file (for resolving 'verified' logs)
    out_dir: output directory for the results JSON

    Returns (results-file path, summary dict).  Exits the process if the
    archived automation data is not fully consumed or verification fails.
    """
    test_logs_okay = True
    test_log_dir = None
    if 'verified' in count:
        # counts with a 'verified' reference write per-round logs to a
        # scratch directory for comparison afterwards
        test_log_dir = tempfile.mkdtemp(prefix='dividebatur_tmp')
        logger.debug("test logs are written to: %s" % (test_log_dir))
    outf = json_count_path(out_dir, count['shortname'])
    logger.info("counting `%s'. output written to `%s'" % (count['name'], outf))
    result_writer = JSONResults(
        outf,
        test_log_dir,
        count_data.get_candidate_ids(),
        count_data.get_parties(),
        count_data.get_candidate_order,
        count_data.get_candidate_title,
        count_data.get_candidate_party,
        name=count.get('name'),
        description=count.get('description'),
        house=count['house'],
        state=count['state'])
    # config may override the data format's default bulk-exclusion behavior
    disable_bulk_exclusions = count.get('disable_bulk_exclusions', count_data.disable_bulk_exclusions)
    logger.debug("disable bulk exclusions: %s" % (disable_bulk_exclusions))
    # tie-break decisions are replayed from the archived automation data
    election_order_auto = Automation('election order', count['election_order_ties'], count_data)
    exclusion_tie_auto = Automation('exclusion tie', count['exclusion_ties'], count_data)
    election_tie_auto = Automation('election tie', count['election_ties'], count_data)
    counter = SenateCounter(
        result_writer,
        count['vacancies'],
        count_data.get_papers_for_count(),
        election_order_auto.create_callback(),
        exclusion_tie_auto.create_callback(),
        election_tie_auto.create_callback(),
        count_data.get_candidate_ids(),
        count_data.get_candidate_order,
        disable_bulk_exclusions)
    counter.run()
    # every archived tie-break answer must have been used, or the archive is stale
    if any(not t.check_complete() for t in (election_order_auto, exclusion_tie_auto, election_tie_auto)):
        logger.error("** Not all automation data consumed. Failed. **")
        sys.exit(1)
    if test_log_dir is not None:
        if not verify_test_logs(os.path.join(base_dir, count['verified']), test_log_dir):
            test_logs_okay = False
    if not test_logs_okay:
        logger.error("** TESTS FAILED **")
        sys.exit(1)
    return (outf, result_writer.summary())
def get_input_method(format):
    """Map an AEC data format name to its loader class (None when unknown)."""
    loaders = {
        'AusSenatePre2015': SenateCountPre2015,
        'AusSenatePost2015': SenateCountPost2015,
    }
    return loaders.get(format)
def check_counting_method_valid(method_cls, data_format):
    """Raise if no loader class exists for the requested AEC data format."""
    if method_cls is not None:
        return
    raise Exception("unsupported AEC data format '%s' requested" % (data_format))
def s282_options(out_dir, count, written):
    """Build the s282 recount options for one count config entry.

    When the entry names a `recount_from` count, the elected candidates are
    read back from that count's results file (which must already have been
    produced during this run).
    """
    cfg = count.get('s282')
    if not cfg:
        return {}
    opts = {'s282_method': cfg['method']}
    source_count = cfg['recount_from']
    if not source_count:
        return opts
    result_file = json_count_path(out_dir, source_count)
    if result_file not in written:
        logger.error("error: `%s' needed for s282 recount has not been calculated during this dividebatur run." % (result_file))
        sys.exit(1)
    with open(result_file) as fd:
        prior = json.load(fd)
    # the recount is restricted to the candidates elected in the prior count
    opts['s282_candidates'] = [entry['id'] for entry in prior['summary']['elected']]
    return opts
def remove_candidates_options(count):
    """Build the candidate-removal options for one count config entry."""
    cfg = count.get('remove')
    if cfg is None:
        return {}
    return {
        'remove_candidates': cfg['candidates'],
        'remove_method': cfg['method'],
    }
def check_config(config):
    "basic checks that the configuration file is valid"
    names = [entry['shortname'] for entry in config['count']]
    if len(set(names)) != len(names):
        logger.error("error: duplicate `shortname' in count configuration.")
        return False
    return True
def parse_args():
    """Parse the command-line arguments for the dividebatur CLI."""
    p = argparse.ArgumentParser()
    p.add_argument('-q', '--quiet', action='store_true',
                   help="Disable informational output")
    p.add_argument('-v', '--verbose', action='store_true',
                   help="Enable debug output")
    p.add_argument('--max-ballots', type=int,
                   help="Maximum number of ballots to read")
    p.add_argument('--only', type=str,
                   help="Only run the count with this shortname")
    p.add_argument('--only-verified', action='store_true',
                   help="Only run verified counts")
    p.add_argument('config_file', type=str,
                   help='JSON config file for counts')
    p.add_argument('out_dir', type=str,
                   help='Output directory')
    return p.parse_args()
def execute_counts(out_dir, config_file, only, only_verified, max_ballots=None):
    """Run every configured count and write results into `out_dir`.

    only: if set, run just the count with this shortname
    only_verified: if True, skip counts without a 'verified' reference
    max_ballots: optional cap on ballots read per count (testing aid)
    """
    # inputs in the config are resolved relative to the config file itself
    base_dir = os.path.dirname(os.path.abspath(config_file))
    config = read_config(config_file)
    if not check_config(config):
        return
    # global config for the angular frontend
    cleanup_json(out_dir)
    write_angular_json(config, out_dir)
    # results files produced so far; s282 recounts may depend on them
    written = set()
    for count in config['count']:
        if only is not None and count['shortname'] != only:
            continue
        if only_verified and 'verified' not in count:
            continue
        aec_data_config = count['aec-data']
        data_format = aec_data_config['format']
        input_cls = get_input_method(data_format)
        check_counting_method_valid(input_cls, data_format)
        count_options = {}
        count_options.update(s282_options(out_dir, count, written))
        count_options.update(remove_candidates_options(count))
        if max_ballots is not None:
            count_options.update({'max_ballots': max_ballots})
        logger.debug("reading data for count: `%s'" % (count['name']))
        data = get_data(input_cls, base_dir, count, **count_options)
        logger.debug("determining outcome for count: `%s'" % (count['name']))
        outf, _ = get_outcome(count, data, base_dir, out_dir)
        written.add(outf)
def main():
    """CLI entry point: set logging verbosity, then run all configured counts."""
    cli = parse_args()
    if cli.quiet:
        logger.setLevel(logging.ERROR)
    elif cli.verbose:
        logger.setLevel(logging.DEBUG)
    execute_counts(cli.out_dir, cli.config_file, cli.only, cli.only_verified,
                   max_ballots=cli.max_ballots)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
68650 | <gh_stars>0
# Define a Product class. Objects should have 3 variables for price, code, and quantity
class Product:
    """A stock item with a unit price, product code and quantity on hand."""

    def __init__(self, price=0.00, code='aaaa', quantity=0):
        self.price = price
        self.code = code
        self.quantity = quantity

    def __repr__(self):
        return 'Product({!r}, {!r}, {!r})'.format(self.price, self.code, self.quantity)

    def __str__(self):
        return 'The product code is: {}'.format(self.code)
# Define an inventory class and a function for calculating the total value of the inventory.
class Inventory:
    """A collection of products with a total-value calculation."""

    def __init__(self):
        self.products_list = []

    def add_product(self, product):
        """Append `product` and return the (shared) internal product list."""
        self.products_list.append(product)
        return self.products_list

    def total_value(self):
        """Sum of price * quantity over all stored products."""
        total = 0
        for item in self.products_list:
            total += item.price * item.quantity
        return total
| StarcoderdataPython |
#
# Copyright 2016 The BigDL Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import os
from unittest import TestCase
import numpy as np
import pytest
import logging
import time
import torch
import torch.nn as nn
from bigdl.orca import OrcaContext
from bigdl.orca.data.pandas import read_csv
from bigdl.orca.learn.metrics import Accuracy
from bigdl.dllib.nncontext import init_nncontext
from bigdl.orca.learn.pytorch import Estimator
from bigdl.orca.data import SparkXShards
from bigdl.orca.data.image.utils import chunks
np.random.seed(1337) # for reproducibility
resource_path = os.path.join(
os.path.realpath(os.path.dirname(__file__)), "../../../resources")
class LinearDataset(torch.utils.data.Dataset):
    """Synthetic binary-classification set: two 50-d Gaussian blobs.

    The first half of the samples (label 0) is drawn from N(0, 1); the
    second half (label 1) from N(1.5, 1).
    """

    def __init__(self, size=1000):
        half = size // 2
        cluster_neg = torch.randn(half, 50)
        cluster_pos = torch.randn(half, 50) + 1.5
        self.x = torch.cat([cluster_neg, cluster_pos], dim=0)
        labels_neg = torch.zeros(half, 1)
        labels_pos = torch.ones(half, 1)
        self.y = torch.cat([labels_neg, labels_pos], dim=0)

    def __getitem__(self, index):
        # each sample carries an extra leading axis: x is (1, 50), y is (1, 1)
        return self.x[index, None], self.y[index, None]

    def __len__(self):
        return len(self.x)
class Net(nn.Module):
    """MLP binary classifier: 50 -> 50 -> 100 -> 1 with a sigmoid output."""

    def __init__(self):
        super().__init__()
        # NOTE: layer creation order is significant -- get_model seeds torch
        # before construction so all workers build identical initial weights.
        self.fc1 = nn.Linear(50, 50)
        self.relu1 = nn.ReLU()
        self.dout = nn.Dropout(0.2)
        self.fc2 = nn.Linear(50, 100)
        self.prelu = nn.PReLU(1)
        self.out = nn.Linear(100, 1)
        self.out_act = nn.Sigmoid()

    def forward(self, input_):
        hidden = self.dout(self.relu1(self.fc1(input_)))
        hidden = self.prelu(self.fc2(hidden))
        return self.out_act(self.out(hidden))
class IdentityNet(nn.Module):
    """Pass-through network; fc1 exists only to give the optimizer parameters."""

    def __init__(self):
        super().__init__()
        # need this line to avoid optimizer raise empty variable list
        self.fc1 = nn.Linear(50, 50)

    def forward(self, input_):
        return input_
class LinearModel(nn.Module):
    """A single 1x1 linear layer with its weight fixed at 1.0 (identity map)."""

    def __init__(self):
        super().__init__()
        # need this line to avoid optimizer raise empty variable list
        self.fc1 = nn.Linear(1, 1, bias=False)
        self.fc1.weight.data.fill_(1.0)

    def forward(self, input_):
        return self.fc1(input_)
class MultiInputNet(nn.Module):
    """Binary classifier over two inputs whose features concatenate to 50."""

    def __init__(self):
        super().__init__()
        self.fc1 = nn.Linear(50, 50)
        self.out = nn.Linear(50, 1)
        self.out_act = nn.Sigmoid()

    def forward(self, input1, input2):
        # concatenate along the feature axis before the shared trunk
        joined = torch.cat((input1, input2), 1)
        return self.out_act(self.out(self.fc1(joined)))
class SimpleModel(nn.Module):
    """Tiny classifier over a pair of scalar feature columns."""

    def __init__(self):
        super().__init__()
        self.fc = nn.Linear(2, 1)
        self.out_act = nn.Sigmoid()

    def forward(self, input1, input2):
        # stack the two scalar columns into (N, 2), then squeeze back to (N,)
        stacked = torch.stack((input1, input2), dim=1)
        logits = self.fc(stacked)
        return self.out_act(logits).flatten()
def train_data_loader(config, batch_size):
    """Data-creator: DataLoader over a fresh synthetic training set.

    The dataset size comes from config["data_size"] (default 1000).
    """
    size = config.get("data_size", 1000)
    return torch.utils.data.DataLoader(LinearDataset(size=size),
                                       batch_size=batch_size)
def val_data_loader(config, batch_size):
    """Data-creator: DataLoader over a fresh synthetic validation set.

    The dataset size comes from config["val_size"] (default 400).
    """
    size = config.get("val_size", 400)
    return torch.utils.data.DataLoader(LinearDataset(size=size),
                                       batch_size=batch_size)
def get_model(config):
    """Model creator: seed torch first so every worker builds identical weights."""
    torch.manual_seed(0)
    return Net()
def get_optimizer(model, config):
    """Optimizer creator: plain SGD with lr from config (default 1e-2)."""
    lr = config.get("lr", 1e-2)
    return torch.optim.SGD(model.parameters(), lr=lr)
def get_zero_optimizer(model, config):
    """SGD with lr=0.0: training steps leave the weights untouched."""
    return torch.optim.SGD(model.parameters(), lr=0.0)
def get_estimator(workers_per_node=1, model_fn=get_model, sync_stats=False,
                  log_level=logging.INFO, loss=nn.BCELoss(), optimizer=get_optimizer):
    """Build a distributed PyTorch Estimator for the synthetic classifier.

    Uses the torch_distributed backend with an Accuracy metric and a fixed
    lr=1e-2 worker config; model/optimizer/loss creators are injectable.
    """
    return Estimator.from_torch(model=model_fn,
                                optimizer=optimizer,
                                loss=loss,
                                metrics=Accuracy(),
                                config={"lr": 1e-2},
                                workers_per_node=workers_per_node,
                                backend="torch_distributed",
                                sync_stats=sync_stats,
                                log_level=log_level)
class TestPyTorchEstimator(TestCase):
    """Integration tests for Orca's PyTorch Estimator (``torch_distributed`` backend).

    FIX: ``np.float`` (deprecated in NumPy 1.20, removed in 1.24 -- raises
    ``AttributeError`` on modern NumPy) is replaced with ``np.float64``,
    which is exactly the type the old alias referred to.
    """

    def test_data_creator(self):
        estimator = get_estimator(workers_per_node=2)
        start_val_stats = estimator.evaluate(val_data_loader, batch_size=64)
        print(start_val_stats)
        train_stats = estimator.fit(train_data_loader, epochs=4, batch_size=128)
        print(train_stats)
        end_val_stats = estimator.evaluate(val_data_loader, batch_size=64)
        print(end_val_stats)
        assert 0 < end_val_stats["Accuracy"] < 1
        assert estimator.get_model()
        # sanity check that training worked
        dloss = end_val_stats["val_loss"] - start_val_stats["val_loss"]
        dacc = (end_val_stats["Accuracy"] -
                start_val_stats["Accuracy"])
        print(f"dLoss: {dloss}, dAcc: {dacc}")
        assert dloss < 0 < dacc, "training sanity check failed. loss increased!"
        # Verify syncing weights, i.e. the two workers have the same weights after training
        import ray
        remote_workers = estimator.remote_workers
        state_dicts = ray.get([worker.get_state_dict.remote() for worker in remote_workers])
        weights = [state["models"] for state in state_dicts]
        worker1_weights = weights[0][0]
        worker2_weights = weights[1][0]
        for layer in list(worker1_weights.keys()):
            assert np.allclose(worker1_weights[layer].numpy(),
                               worker2_weights[layer].numpy())
        estimator.shutdown()

    def test_spark_xshards(self):
        from bigdl.dllib.nncontext import init_nncontext
        from bigdl.orca.data import SparkXShards
        estimator = get_estimator(workers_per_node=1)
        sc = init_nncontext()
        x_rdd = sc.parallelize(np.random.rand(4000, 1, 50).astype(np.float32))
        # torch 1.7.1+ requires target size same as output size, which is (batch, 1)
        y_rdd = sc.parallelize(np.random.randint(0, 2, size=(4000, 1, 1)).astype(np.float32))
        rdd = x_rdd.zip(y_rdd).map(lambda x_y: {'x': x_y[0], 'y': x_y[1]})
        train_rdd, val_rdd = rdd.randomSplit([0.9, 0.1])
        train_xshards = SparkXShards(train_rdd)
        val_xshards = SparkXShards(val_rdd)
        train_stats = estimator.fit(train_xshards, batch_size=256, epochs=2)
        print(train_stats)
        val_stats = estimator.evaluate(val_xshards, batch_size=128)
        print(val_stats)
        estimator.shutdown()

    def test_dataframe_train_eval(self):
        sc = init_nncontext()
        rdd = sc.range(0, 100)
        # np.float64 replaces the removed np.float alias (identical dtype).
        df = rdd.map(lambda x: (np.random.randn(50).astype(np.float64).tolist(),
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        estimator = get_estimator(workers_per_node=2)
        estimator.fit(df, batch_size=4, epochs=2,
                      feature_cols=["feature"],
                      label_cols=["label"])
        estimator.evaluate(df, batch_size=4,
                           feature_cols=["feature"],
                           label_cols=["label"])

    def test_dataframe_shard_size_train_eval(self):
        from bigdl.orca import OrcaContext
        OrcaContext._shard_size = 30
        sc = init_nncontext()
        rdd = sc.range(0, 100)
        df = rdd.map(lambda x: (np.random.randn(50).astype(np.float64).tolist(),
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        estimator = get_estimator(workers_per_node=2)
        estimator.fit(df, batch_size=4, epochs=2,
                      feature_cols=["feature"],
                      label_cols=["label"])
        estimator.evaluate(df, batch_size=4,
                           feature_cols=["feature"],
                           label_cols=["label"])

    def test_partition_num_less_than_workers(self):
        sc = init_nncontext()
        rdd = sc.range(200, numSlices=1)
        df = rdd.map(lambda x: (np.random.randn(50).astype(np.float64).tolist(),
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        estimator = get_estimator(workers_per_node=2)
        assert df.rdd.getNumPartitions() < estimator.num_workers
        estimator.fit(df, batch_size=4, epochs=2,
                      feature_cols=["feature"],
                      label_cols=["label"])
        estimator.evaluate(df, batch_size=4,
                           feature_cols=["feature"],
                           label_cols=["label"])
        estimator.predict(df, feature_cols=["feature"]).collect()

    def test_dataframe_predict(self):
        sc = init_nncontext()
        rdd = sc.parallelize(range(20))
        df = rdd.map(lambda x: ([float(x)] * 5,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["feature", "label"])
        estimator = get_estimator(workers_per_node=2,
                                  model_fn=lambda config: IdentityNet())
        result = estimator.predict(df, batch_size=4,
                                   feature_cols=["feature"])
        expr = "sum(cast(feature <> to_array(prediction) as int)) as error"
        assert result.selectExpr(expr).first()["error"] == 0

    def test_xshards_predict(self):
        sc = init_nncontext()
        rdd = sc.range(0, 110).map(lambda x: np.array([x]*50))
        shards = rdd.mapPartitions(lambda iter: chunks(iter, 5)).map(lambda x: {"x": np.stack(x)})
        shards = SparkXShards(shards)
        estimator = get_estimator(workers_per_node=2,
                                  model_fn=lambda config: IdentityNet())
        result_shards = estimator.predict(shards, batch_size=4)
        result = np.concatenate([shard["prediction"] for shard in result_shards.collect()])
        expected_result = np.concatenate([shard["x"] for shard in result_shards.collect()])
        assert np.array_equal(result, expected_result)

    def test_pandas_dataframe(self):
        OrcaContext.pandas_read_backend = "pandas"
        file_path = os.path.join(resource_path, "orca/learn/ncf.csv")
        data_shard = read_csv(file_path, usecols=[0, 1, 2], dtype={0: np.float32, 1: np.float32,
                                                                  2: np.float32})
        estimator = get_estimator(model_fn=lambda config: SimpleModel())
        estimator.fit(data_shard, batch_size=2, epochs=2,
                      feature_cols=["user", "item"],
                      label_cols=["label"])
        estimator.evaluate(data_shard, batch_size=2, feature_cols=["user", "item"],
                           label_cols=["label"])
        result = estimator.predict(data_shard, batch_size=2, feature_cols=["user", "item"])
        result.collect()

    def test_multiple_inputs_model(self):
        sc = init_nncontext()
        rdd = sc.parallelize(range(100))
        from pyspark.sql import SparkSession
        # Instantiating the session makes it the active one that toDF() uses.
        spark = SparkSession(sc)
        df = rdd.map(lambda x: ([float(x)] * 25, [float(x)] * 25,
                                [int(np.random.randint(0, 2, size=()))])
                     ).toDF(["f1", "f2", "label"])
        estimator = get_estimator(workers_per_node=2,
                                  model_fn=lambda config: MultiInputNet())
        estimator.fit(df, batch_size=4, epochs=2,
                      feature_cols=["f1", "f2"],
                      label_cols=["label"])
        estimator.evaluate(df, batch_size=4,
                           feature_cols=["f1", "f2"],
                           label_cols=["label"])
        result = estimator.predict(df, batch_size=4,
                                   feature_cols=["f1", "f2"])
        result.collect()

    def test_sync_stats(self):
        sc = init_nncontext()
        rdd = sc.range(0, 100).repartition(2)
        # the data and model are constructed that loss on worker 0 is always 0.0
        # and loss on worker 1 is always 1.0
        df = rdd.mapPartitionsWithIndex(lambda idx, iter: [([float(idx)], [0.0]) for _ in iter]
                                        ).toDF(["feature", "label"])
        estimator = get_estimator(workers_per_node=2,
                                  model_fn=lambda config: LinearModel(),
                                  loss=nn.MSELoss(),
                                  optimizer=get_zero_optimizer,
                                  sync_stats=True)
        stats = estimator.fit(df, batch_size=4, epochs=2,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              reduce_results=False)
        worker_0_stat0, worker_1_stats = stats[0]
        for k in worker_0_stat0:
            if k in {"num_samples"}:
                continue
            v0 = worker_0_stat0[k]
            v1 = worker_1_stats[k]
            error_msg = f"stats from all workers should be the same, " \
                        f"but got worker_0_stat0: {worker_0_stat0}, " \
                        f"worker_1_stats: {worker_1_stats}"
            assert abs(v1 - v0) < 1e-6, error_msg

    def test_not_sync_stats(self):
        sc = init_nncontext()
        rdd = sc.range(0, 100).repartition(2)
        # the data and model are constructed that loss on worker 0 is always 0.0
        # and loss on worker 1 is always 1.0
        df = rdd.mapPartitionsWithIndex(lambda idx, iter: [([float(idx)], [0.0]) for _ in iter]
                                        ).toDF(["feature", "label"])
        estimator = get_estimator(workers_per_node=2,
                                  model_fn=lambda config: LinearModel(),
                                  loss=nn.MSELoss(),
                                  optimizer=get_zero_optimizer,
                                  sync_stats=False)
        stats = estimator.fit(df, batch_size=4, epochs=2,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              reduce_results=False)
        worker_0_stats, worker_1_stats = stats[0]
        train_loss_0 = worker_0_stats["train_loss"]
        train_loss_1 = worker_1_stats["train_loss"]
        error_msg = f"stats from all workers should not be the same, " \
                    f"but got worker_0_stats: {worker_0_stats}, worker_1_stats: {worker_1_stats}"
        assert abs(train_loss_0 - train_loss_1) > 0.9, error_msg

    def test_data_parallel_sgd_correctness(self):
        sc = init_nncontext()
        rdd = sc.range(0, 100).repartition(2)
        # partition 0: [(0, 0), (0, 0)]
        # partition 1: [(1, 0), (1, 0)]
        # model: y = w * x
        # loss = (wx)^2
        # dloss/dw = 2x^2*w
        # end of first iteration:
        #    partition 0 loss: 0.0
        #    partition 1 loss: 1.0
        #    avg_grad = avg([0, 0, 2, 2]) = 1
        #    weight = 1.0 - 0.5 * avg_grad = 0.5
        # end of second iteration:
        #    partition 0 loss: 0.0
        #    partition 1 loss: 0.25
        #    avg_grad = avg([0, 0, 1, 1]) = 0.5
        #    weight = 0.5 - 0.5 * avg_grad = 0.25
        df = rdd.mapPartitionsWithIndex(lambda idx, iter: [([float(idx)], [0.0]) for _ in iter][:2]
                                        ).toDF(["feature", "label"])

        def get_optimizer(model, config):
            return torch.optim.SGD(model.parameters(), lr=0.5)

        estimator = Estimator.from_torch(model=lambda config: LinearModel(),
                                         optimizer=get_optimizer,
                                         loss=torch.nn.MSELoss(),
                                         metrics=Accuracy(),
                                         config={},
                                         workers_per_node=2,
                                         backend="torch_distributed",
                                         sync_stats=False)
        stats = estimator.fit(df, batch_size=4, epochs=2,
                              feature_cols=["feature"],
                              label_cols=["label"],
                              reduce_results=False)
        state = estimator.get_state_dict()
        assert state['models'][0]['fc1.weight'].item() == 0.25

    # not work right now
    # ray logs cannot be captured
    # not sure why
    # def test_logging_train_stats(self):
    #     sc = init_nncontext()
    #     rdd = sc.range(0, 100)
    #     df = rdd.map(lambda x: (np.random.randn(50).astype(np.float).tolist(),
    #                             [int(np.random.randint(0, 2, size=()))])
    #                  ).toDF(["feature", "label"])
    #     estimator = get_estimator(workers_per_node=2, sync_stats=False, log_level=logging.DEBUG)
    #     captured_before = self._capsys.readouterr().out
    #     stats = estimator.fit(df, batch_size=4, epochs=2,
    #                           feature_cols=["feature"],
    #                           label_cols=["label"])
    #     captured_after = self._capsys.readouterr().out
    #     message = captured_after[len(captured_before):]
    #     assert "Finished training epoch 1, stats: {" in message
    #     assert "Finished training epoch 2, stats: {" in message

    # @pytest.fixture(autouse=True)
    # def inject_fixtures(self, capsys):
    #     self._capsys = capsys
# Run this test module under pytest when executed directly.
if __name__ == "__main__":
    pytest.main([__file__])
| StarcoderdataPython |
1755316 | from os import path
import glob
from updater import get_updater
from filter import get_filter
from cache import get_cache
from merge import (FileHunk, MemoryHunk, apply_filters, merge,
make_url, merge_filters)
__all__ = ('Bundle', 'BundleError',)
class BundleError(Exception):
    """Base class for all bundle-related errors."""
    pass
class BuildError(BundleError):
    """Raised when a bundle cannot be built (e.g. empty contents or no output)."""
    pass
class Bundle(object):
    """A bundle is the unit django-assets uses to organize groups of media
    files, which filters to apply and where to store them.
    Bundles can be nested.

    NOTE(review): this is Python 2 code (``basestring``/``unicode`` are used
    below) -- it will not run unmodified on Python 3.
    """
    def __init__(self, *contents, **options):
        # The environment is attached later; see _get_env().
        self.env = None
        self.contents = contents
        self.output = options.get('output')
        # Assigning goes through the ``filters`` property setter below, which
        # normalizes names/instances into actual filter instances.
        self.filters = options.get('filters')
        self.debug = options.get('debug')
        self.extra_data = {}

    def __repr__(self):
        return "<Bundle output=%s, filters=%s, contents=%s>" % (
            self.output,
            self.filters,
            self.contents,
        )

    def _get_filters(self):
        return self._filters
    def _set_filters(self, value):
        """Filters may be specified in a variety of different ways,
        including by giving their name; we need to make sure we resolve
        everything to an actual filter instance.
        """
        if value is None:
            self._filters = ()
            return
        if isinstance(value, basestring):
            # Comma-separated filter names, e.g. "cssmin,less".
            filters = map(unicode.strip, unicode(value).split(','))
        elif isinstance(value, (list, tuple)):
            filters = value
        else:
            filters = [value]
        self._filters = [get_filter(f) for f in filters]
    filters = property(_get_filters, _set_filters)

    def _get_contents(self):
        return self._contents
    def _set_contents(self, value):
        self._contents = value
        # Invalidate the cached glob expansion whenever contents change.
        self._resolved_contents = None
    contents = property(_get_contents, _set_contents)

    def resolve_contents(self, env):
        """Returns contents, with globbed patterns resolved to actual
        filenames.
        """
        # TODO: We cache the values, which in theory is problematic, since
        # due to changes in the env object, the result of the globbing may
        # change. Not to mention that a different env object may be passed
        # in. We should find a fix for this.
        if not getattr(self, '_resolved_contents', None):
            l = []
            for item in self.contents:
                if isinstance(item, basestring):
                    # We only go through glob() if this actually is a
                    # pattern; this means that invalid filenames will
                    # remain in the content set, and only raise an error
                    # at a later point in time.
                    # TODO: This is possible a good place to check for
                    # a file's existance though; currently, when in debug
                    # mode, no error would be raised at all, and simply a
                    # broken url sent to the browser.
                    if glob.has_magic(item):
                        # NOTE(review): this local rebinds ``path``,
                        # shadowing the module-level ``os.path`` import
                        # inside this branch only.
                        path = env.abspath(item)
                        for f in glob.glob(path):
                            l.append(f[len(path)-len(item):])
                    else:
                        l.append(item)
                else:
                    l.append(item)
            self._resolved_contents = l
        return self._resolved_contents

    def determine_action(self, env):
        """Decide what needs to be done when this bundle needs to be
        resolved.
        Specifically, whether to apply filters and whether to merge. This
        depends on both the global settings, as well as the ``debug``
        attribute of this bundle.
        Returns a 2-tuple of (should_merge, should_filter). The latter
        always implies the former.
        """
        if not env.debug:
            return True, True
        debug = self.debug if self.debug is not None else env.debug
        if debug == 'merge':
            return True, False
        elif debug is True:
            return False, False
        elif debug is False:
            return True, True
        else:
            raise BundleError('Invalid debug value: %s' % debug)

    def get_files(self, env=None):
        """Return a flattened list of all source files of this bundle,
        and all the nested bundles.
        """
        env = self._get_env(env)
        files = []
        for c in self.resolve_contents(env):
            if isinstance(c, Bundle):
                files.extend(c.get_files(env))
            else:
                files.append(env.abspath(c))
        return files

    @property
    def is_container(self):
        """Return true if this is a container bundle, that is, a bundle
        that acts only as a container for a number of sub-bundles.
        It must not contain any files of it's own, and must have an
        empty ``output`` attribute.
        """
        has_files = any([c for c in self.contents if not isinstance(c, Bundle)])
        return not has_files and not self.output

    def _get_env(self, env):
        # Note how bool(env) can be False, due to __len__.
        env = env if env is not None else self.env
        if env is None:
            raise BundleError('Bundle is not connected to an environment')
        return env

    def _build(self, env, output_path, force, no_filters, parent_filters=[]):
        """Internal recursive build method.

        NOTE(review): the mutable default ``parent_filters=[]`` is safe here
        only because it is never mutated -- merge_filters() builds a new list.
        """
        # TODO: We could support a nested bundle downgrading it's debug
        # setting from "filters" to "merge only", i.e. enabling
        # ``no_filters``. We cannot support downgrading to
        # "full debug/no merge" (debug=True), of course.
        #
        # Right now we simply use the debug setting of the root bundle
        # we build, und it overrides all the nested bundles. If we
        # allow nested bundles to overwrite the debug value of parent
        # bundles, as described above, then we should also deal with
        # a child bundle enabling debug=True during a merge, i.e.
        # raising an error rather than ignoring it as we do now.
        resolved_contents = self.resolve_contents(env)
        if not resolved_contents:
            raise BuildError('empty bundle cannot be built')

        # Ensure that the filters are ready
        for filter in self.filters:
            filter.set_environment(env)

        # Apply input filters to all the contents. Note that we use
        # both this bundle's filters as well as those given to us by
        # the parent. We ONLY do those this for the input filters,
        # because we need them to be applied before the apply our own
        # output filters.
        # TODO: Note that merge_filters() removes duplicates. Is this
        # really the right thing to do, or does it just confuse things
        # due to there now being different kinds of behavior...
        combined_filters = merge_filters(self.filters, parent_filters)
        cache = get_cache(env)
        hunks = []
        for c in resolved_contents:
            if isinstance(c, Bundle):
                hunk = c._build(env, output_path, force, no_filters,
                                combined_filters)
                hunks.append(hunk)
            else:
                hunk = FileHunk(env.abspath(c))
                if no_filters:
                    hunks.append(hunk)
                else:
                    hunks.append(apply_filters(
                        hunk, combined_filters, 'input', cache,
                        output_path=output_path))

        # Return all source hunks as one, with output filters applied
        final = merge(hunks)
        if no_filters:
            return final
        else:
            return apply_filters(final, self.filters, 'output', cache)

    def build(self, env=None, force=False, no_filters=False):
        """Build this bundle, meaning create the file given by the
        ``output`` attribute, applying the configured filters etc.
        A ``FileHunk`` will be returned.
        TODO: Support locking. When called from inside a template tag,
        this should lock, so that multiple requests don't all start
        to build. When called from the command line, there is no need
        to lock.
        """
        if not self.output:
            raise BuildError('No output target found for %s' % self)

        env = self._get_env(env)

        # Determine if we really need to build, or if the output file
        # already exists and nothing has changed.
        if force:
            update_needed = True
        elif not path.exists(env.abspath(self.output)):
            if not env.updater:
                raise BuildError(('\'%s\' needs to be created, but '
                                  'automatic building is disabled  ('
                                  'configure an updater)') % self)
            else:
                update_needed = True
        else:
            source_paths = [p for p in self.get_files(env)]
            update_needed = get_updater(env.updater)(
                env.abspath(self.output), source_paths)

        if not update_needed:
            # We can simply return the existing output file
            return FileHunk(env.abspath(self.output))

        hunk = self._build(env, self.output, force, no_filters)
        hunk.save(env.abspath(self.output))
        return hunk

    def iterbuild(self, env=None):
        """Iterate over the bundles which actually need to be built.
        This will often only entail ``self``, though for container
        bundles (and container bundle hierarchies), a list of all the
        non-container leafs will be yielded.
        Essentally, what this does is "skip" bundles which do not need
        to be built on their own (container bundles), and gives the
        caller the child bundles instead.
        """
        env = self._get_env(env)
        if self.is_container:
            for bundle in self.resolve_contents(env):
                if bundle.is_container:
                    for t in bundle.iterbuild(env):
                        yield t
                else:
                    yield bundle
        else:
            yield self

    def _urls(self, env, *args, **kwargs):
        env = self._get_env(env)
        supposed_to_merge, do_filter = self.determine_action(env)

        if supposed_to_merge and (self.filters or self.output):
            # We need to build this bundle, unless a) the configuration
            # tells us not to ("determine_action"), or b) this bundle
            # isn't actually configured to be built, that is, has no
            # filters and no output target.
            # NOTE(review): the returned hunk is unused -- build() is
            # called for its side effect of writing the output file.
            hunk = self.build(env, no_filters=not do_filter, *args, **kwargs)
            return [make_url(env, self.output)]
        else:
            # We either have no files (nothing to build), or we are
            # in debug mode: Instead of building the bundle, we
            # source all contents instead.
            urls = []
            for c in self.resolve_contents(env):
                if isinstance(c, Bundle):
                    urls.extend(c.urls(env, *args, **kwargs))
                else:
                    urls.append(make_url(env, c, expire=False))
            return urls

    def urls(self, env=None, *args, **kwargs):
        """Return a list of urls for this bundle.
        Depending on the environment and given options, this may be a
        single url (likely the case in production mode), or many urls
        (when we source the original media files in DEBUG mode).
        Insofar necessary, this will automatically create or update
        the files behind these urls.
        """
        urls = []
        for bundle in self.iterbuild(env):
            urls.extend(bundle._urls(env, *args, **kwargs))
        return urls
| StarcoderdataPython |
3302603 | import random
# MOVIES = ['Titanic',
# 'Docker: Unleashed',
# 'Julia: Elegant and Hip',
# 'Python : The Slow Snake',
# 'C: Kids these days dont know how to compile code',
# '<NAME>']
class RecommenderClass:
    """Groups the movie-recommendation helpers behind a single object."""

    def __init__(self, list_of_movies):
        # The candidate pool recommendations are drawn from.
        self.list_of_movies = list_of_movies

    def recommend_movie(self):
        """Randomly recommend one movie from a list."""
        chosen = random.choice(self.list_of_movies)
        return chosen

    def nmf(self):
        """Coming soon in version 2.0!"""
        pass

    def cosim(self):
        """Coming soon in version 3.0!"""
        pass
# if __name__ == '__main__':
# rec = RecommenderClass(MOVIES)
# result = rec.recommend_movie()
# print(result)
| StarcoderdataPython |
1751025 | """
Please set the following environment variables for this script to work properly:
export DAPP_INSTANCES=<some integer> default value=1
export WEB3J_ETHCLIENT_HOST=<ip address> default value=None
export WEB3J_ETHCLIENT_PROTOCOL=<> default value=http
export WEB3J_ETHCLIENT_PORT=<> default value=8545
export SHARE_CONTRACT=<boolean> default value=false
"""
import json
import os
import random
import subprocess
import threading
import time
import urllib3
from solcx import compile_files, install_solc
from web3 import Web3
from web3.auto import w3 as w3help
from web3.middleware import geth_poa_middleware
urllib3.disable_warnings()
# Module-level handles populated at runtime: the web3 connection
# (deploy_contract) and the compiled SecurityToken ABI/bytecode
# (compile_security_token).
w3 = None
abi = None
bytecode = None
# compiling securityToken source file
def compile_security_token():
    """Compile SecurityToken.sol with solc 0.6.0 and cache the ABI and
    bytecode in the module-level globals used by deploy_contract()."""
    install_solc("0.6.0")
    global abi, bytecode
    source_path = "../../hardhat/contracts/SecurityToken.sol"
    compiled = compile_files([source_path], solc_version='0.6.0', optimize=True)
    print("compiled sources ")
    contract_key = source_path + ":SecurityToken"
    bytecode = compiled[contract_key]['bin']
    abi = compiled[contract_key]['abi']
# function to validate tx hash and poll for receipt
def tx_receipt_poll(construct_txn, acc_priv_key):
    """Sign a built transaction, submit it, and block until a successful
    receipt arrives; asserts the submitted hash matches the signed one."""
    signed_txn = w3.eth.account.sign_transaction(construct_txn, acc_priv_key)
    # Validating transaction hash
    expected_hash = signed_txn.hash
    tx_hash = w3.eth.send_raw_transaction(signed_txn.rawTransaction)
    assert expected_hash == tx_hash, "tx hash mismatch"
    tx_receipt = w3.eth.wait_for_transaction_receipt(tx_hash)
    print("Transaction receipt: '{}'".format(tx_receipt))
    assert tx_receipt.status == 1, "transaction failed"
    return tx_receipt
# deploy the SecurityToken contract from the given account and return its address
def deploy_contract(contract_deploy_account, contract_deploy_account_key, host):
    """Connect to the ethereum endpoint at ``host``, compile and deploy the
    SecurityToken ("ERC20") contract from ``contract_deploy_account``, and
    return the new contract address.

    Side effects: rebinds the module-level ``w3`` connection and (via
    compile_security_token) the ``abi``/``bytecode`` globals.
    """
    # connecting to end point
    url = "http://" + host + ":8545"
    compile_security_token()
    global w3
    w3 = Web3(Web3.HTTPProvider(url,
                                request_kwargs={"verify": False}))
    # PoA chains emit non-standard extraData; this middleware handles it.
    w3.middleware_onion.inject(geth_poa_middleware, layer=0)
    contract = w3.eth.contract(abi=abi, bytecode=bytecode)
    # deploying contract with name/symbol "ERC20" and a fixed total supply
    construct_txn = contract.constructor("ERC20", "ERC20", 1000000000000).buildTransaction(
        {
            "from": contract_deploy_account,
            "gas": 2000000,
            "gasPrice": 0,
            "nonce": w3.eth.get_transaction_count(contract_deploy_account),
            "chainId": 5000,
        }
    )
    tx_receipt = tx_receipt_poll(construct_txn, contract_deploy_account_key)
    print("smart contract deploy success, contract address: '{}'".format(
        tx_receipt.contractAddress))
    contract_address = tx_receipt.contractAddress
    dapp_contract = w3.eth.contract(address=contract_address, abi=abi)
    # Sanity-print the deployer's starting balance.
    acc_balance = dapp_contract.functions.balanceOf(
        contract_deploy_account).call()
    print("Account {} has balance of {} tokens \n".format(
        contract_deploy_account, acc_balance))
    return contract_address
# distributing token to senders
def distribute_tokens(accts, priv_keys, contract_address):
    """Transfer 10**19 token units from the deployer (accts[0]) to every
    other account, printing each recipient's resulting balance."""
    dapp_contract = w3.eth.contract(address=contract_address, abi=abi)
    sender = accts[0]
    sender_key = priv_keys[0]
    for recipient in accts[1:]:
        txn = dapp_contract.functions.transfer(recipient, 10000000000000000000).buildTransaction({
            'from': sender,
            'gas': 2000000,
            'gasPrice': 0,
            'nonce': w3.eth.get_transaction_count(sender),
            'chainId': 5000
        })
        tx_receipt_poll(txn, sender_key)
        acc_balance = dapp_contract.functions.balanceOf(recipient).call()
        print("Account {} has balance of {} tokens \n".format(
            recipient, acc_balance))
# expose port on photon/linux machines
def expose_port(port):
    """Open *port* for inbound TCP on a Linux host by appending an iptables
    ACCEPT rule (requires passwordless sudo).

    Uses an argv list with the default ``shell=False`` instead of the previous
    shell-interpolated command string, so the arguments are passed to iptables
    verbatim and no shell is involved.
    """
    cmd = ['sudo', 'iptables', '-A', 'INPUT', '-p', 'tcp',
           '--dport', str(port), '-j', 'ACCEPT']
    subprocess.call(cmd, stdout=subprocess.PIPE, stdin=subprocess.PIPE)
# function to run new ERC20 dapp instance
def run_dapp(priv_key, contract_address, port):
    """Launch one ERC20 benchmark dapp instance (a java jar) on *port*,
    blocking until it exits; returns True on success.

    If *contract_address* is set, it is exported so the instance reuses the
    shared contract instead of deploying its own.
    """
    print("start run on port " + str(port))
    if os.getenv('EXPOSE_UI_PORT_EXTERNALLY', 'False') in ('true', 'True', 'TRUE'):
        expose_port(port)
    if contract_address:
        os.environ["TOKEN_CONTRACT_ADDRESS"] = contract_address
    jar = "target/erc20-benchmark-1.0-SNAPSHOT.jar"
    cmd = "cd ..; java -jar -Dserver.port=" + str(port) + " -Dtoken.private-key=" + priv_key + " " + jar
    # check=True raises CalledProcessError on a non-zero exit code.
    p = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, check=True)
    succeeded = p.returncode == 0
    if succeeded:
        print("Dapp with port {} completed with status code {}".format(
            port, p.returncode))
    return succeeded
# extract kv from list and adds them to dictionary
def list_to_kv(inp_list, inp_dict):
    """Parse 'key=value' strings from *inp_list* and accumulate the integer
    values into *inp_dict* (missing keys start at 0)."""
    for entry in inp_list:
        parts = entry.split("=")
        key = parts[0]
        inp_dict[key] = inp_dict.get(key, 0) + int(parts[1])
# Read reports of all runs and write to aggregate-report.json
def aggregate_report(instance):
    """Merge the per-run JSON reports (ports 8001 .. 8000+instance) into
    ../output/result/aggregate-report.json.

    Missing report files are skipped with a message.  NOTE(review): the
    average latency is still divided by the *requested* instance count, so
    missing runs drag it down -- confirm this is intended.
    """
    port = 8000
    filename = "../output/result/report-"
    aggregate_throughput = 0
    aggregate_latency = 0
    aggregate_tx = 0
    aggregate_loadfactor = 0
    aggregate_tx_status = {}
    aggregate_tx_errors = {}
    aggregate_receipt_status = {}
    aggregate_receipt_errors = {}
    for i in range(1, instance + 1):
        try:
            with open(filename + str(port + i) + '.json', 'r') as f:
                data = json.load(f)
                # Scalar totals are summed across runs.
                aggregate_tx += data['txTotal']
                aggregate_throughput += data['averageThroughput']
                aggregate_latency += data['averageLatency']
                aggregate_loadfactor += data['loadFactor']
                # Status/error fields are "k=v" lists with different
                # separators; fold them into the aggregate dicts.
                if data["txStatus"]:
                    tx_status_list = data["txStatus"].split(",")
                    list_to_kv(tx_status_list, aggregate_tx_status)
                if data["txErrors"]:
                    tx_errors_list = data["txErrors"].split("<br>")
                    list_to_kv(tx_errors_list, aggregate_tx_errors)
                if data["receiptStatus"]:
                    receipt_status_list = data["receiptStatus"].split(",")
                    list_to_kv(receipt_status_list, aggregate_receipt_status)
                if data["receiptErrors"]:
                    receipt_errors_list = data["receiptErrors"].split("<br>")
                    list_to_kv(receipt_errors_list, aggregate_receipt_errors)
        except IOError:
            print("result file not available for run {}".format(port + i))
    json_obj = {'aggregate_tx': aggregate_tx, 'aggregate_throughput': aggregate_throughput,
                'aggregate_latency': int(aggregate_latency / instance), 'aggregate_loadfactor': aggregate_loadfactor,
                'aggregate_txStatus': aggregate_tx_status, 'aggregate_txErrors': aggregate_tx_errors,
                'aggregate_receiptStatus': aggregate_receipt_status, 'aggregate_receiptErrors': aggregate_receipt_errors
                }
    filename = "../output/result/aggregate-report.json"
    with open(filename, 'w') as f:
        json.dump(json_obj, f, indent=4)
# function to start wavefront proxy
def start_wavefront_proxy():
    """Start a dockerized Wavefront metrics proxy on port 2878, authenticating
    with the WAVEFRONT_TOKEN environment variable (must be set)."""
    wavefront_token = os.environ['WAVEFRONT_TOKEN']
    cmd = ('docker run -d -p 2878:2878 -e WAVEFRONT_URL=https://vmware.wavefront.com/api -e '
           'WAVEFRONT_TOKEN=' + wavefront_token +
           ' -e JAVA_HEAP_USAGE=4G -e JVM_USE_CONTAINER_OPTS=false --name '
           'wavefront-proxy athena-docker-local.artifactory.eng.vmware.com'
           '/wavefront-proxy:9.7 ')
    subprocess.call(cmd, shell=True, stdout=subprocess.PIPE)
# set environment variables inside .env
def set_env_var():
    """Load KEY=VALUE pairs from ./.env into this process's environment.

    BUG FIX: the previous implementation ran ``source ./.env`` through
    ``subprocess.call(..., shell=True)``.  A child shell can never modify the
    parent Python process's environment, and ``source`` is a bash builtin that
    POSIX ``sh`` (what subprocess uses) does not provide -- so the call was a
    no-op.  The file is now parsed directly into ``os.environ``.

    Lines that are blank, comments, or not KEY=VALUE are skipped; an optional
    leading ``export `` and surrounding quotes on the value are stripped.
    A missing .env file is ignored (best-effort, like before).
    """
    try:
        with open('./.env') as env_file:
            for raw_line in env_file:
                line = raw_line.strip()
                if not line or line.startswith('#') or '=' not in line:
                    continue
                if line.startswith('export '):
                    line = line[len('export '):]
                key, _, value = line.partition('=')
                os.environ[key.strip()] = value.strip().strip('\'"')
    except IOError:
        pass
def main():
    """Drive the benchmark: read configuration from the environment, create
    one account per dapp instance (plus a deployer), optionally deploy and
    fund a single shared contract, launch the instances in parallel threads,
    and aggregate their JSON reports."""
    set_env_var()
    host = os.environ['WEB3J_ETHCLIENT_HOST']
    client_port = os.getenv('WEB3J_ETHCLIENT_PORT', 8545)
    protocol = os.getenv('WEB3J_ETHCLIENT_PROTOCOL', "http")
    dapp_count = int(os.getenv('DAPP_INSTANCES', 1))
    share_contract = os.getenv('SHARE_CONTRACT', 'False') in ('true', 'True', 'TRUE')
    ethrpc_url = "{0}://{1}:{2}/".format(protocol, host, client_port)
    wavefront_enabled = os.getenv('MANAGEMENT_METRICS_EXPORT_WAVEFRONT_ENABLED', 'False') in ('true', 'True', 'TRUE')
    max_sleep_time = int(os.getenv('MAX_SLEEP_TIME', 5))
    print("No of dapp Instances ", dapp_count)
    print("Ethereum Endpoint ", ethrpc_url)
    if wavefront_enabled:
        start_wavefront_proxy()
    accts = []
    priv_keys = []
    # dapp_count + 1 accounts: index 0 deploys/distributes, 1..N run dapps.
    for i in range(dapp_count + 1):
        acct = w3help.eth.account.create('KEYSMASH F<KEY>')
        # Strip the '0x' prefix from both the address and the private key.
        accts.append(Web3.toChecksumAddress(acct.address[2:].lower()))
        priv_keys.append(acct.privateKey.hex()[2:].lower())
    print("Account address list = ", accts)
    contract_address = None
    if share_contract:
        assert dapp_count > 1, "At least 2 instances should run to share contract."
        contract_address = deploy_contract(accts[0], priv_keys[0], host)
        print("Contract Address -", contract_address)
        distribute_tokens(accts, priv_keys, contract_address)
        print("tokens distributed among senders")
    threads = []
    port = 8000
    for i in range(1, len(accts)):
        # Stagger start-up to avoid hammering the endpoint all at once.
        time.sleep(random.randint(1, max_sleep_time))
        t = threading.Thread(target=run_dapp, args=(
            priv_keys[i], contract_address, port + i))
        threads.append(t)
        t.start()
    for t in threads:
        t.join()
    aggregate_report(dapp_count)
# Script entry point: only run the benchmark when executed directly.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1789762 | import re
# Python 2 script: keep only lines of poi_names.txt that contain a
# single-character parenthesized marker such as "(y)" or "(n)".
data = [line.strip() for line in open('final_project/poi_names.txt')]
valid_data = []
for l in data:
    if re.search('\(.\)', l):
        valid_data.append(l)
print valid_data
# NOTE(review): poi_names counts the "(y)" lines but is never used -- the
# final print reports len(valid_data) instead; confirm which was intended.
poi_names = 0
for l in valid_data:
    if re.search('\(y\)',l):
        poi_names += 1
print "# of POI names :", len(valid_data)
23584 | from pandas.core.algorithms import mode
import torch
import torch.nn as nn
from albumentations import Compose,Resize,Normalize
from albumentations.pytorch import ToTensorV2
import wandb
import time
import torchvision
import torch.nn.functional as F
import torch.optim as optim
from torch.cuda.amp import autocast,GradScaler
import os
import numpy as np
from tqdm import tqdm
from callbacks import EarlyStopping
import pandas as pd
from torch.utils.data import Dataset, DataLoader
import cv2
import torch.nn.functional as F
import random
from build_model import Deformed_Darknet53
# Fix RNG seeds across torch / numpy / random for reproducibility.
torch.manual_seed(2021)
np.random.seed(2021)
random.seed(2021)
# NOTE(review): benchmark=True lets cuDNN autotune kernels, which is at odds
# with deterministic=True -- confirm which guarantee is wanted.
torch.backends.cudnn.benchmark = True
torch.backends.cudnn.deterministic = True
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
TOTAL_EPOCHS = 100
# Globals shared with train_loop/val_loop below.
scaler = GradScaler()
early_stop = EarlyStopping()
wandb.init(project='deformed-darknet',entity='tensorthug',name='new-darknet-256x256_32')
print("***** Loading the Model in {} *****".format(DEVICE))
Model = Deformed_Darknet53().to(DEVICE)
print("Model Shipped to {}".format(DEVICE))
# Expected columns: Paths, Labels, folds (see the dog_cat dataset).
data = pd.read_csv("data.csv")
train_loss_fn = nn.BCEWithLogitsLoss()
val_loss_fn = nn.BCEWithLogitsLoss()
optim = torch.optim.Adam(Model.parameters())
wandb.watch(Model)
class dog_cat(Dataset):
    """Dog-vs-cat image dataset backed by a dataframe with columns
    'Paths' (image file path), 'Labels' (target) and 'folds' (CV fold).

    mode='train' keeps rows whose fold differs from *folds*; any other mode
    keeps only the rows of that fold (the validation split).
    """

    def __init__(self, df, mode="train", folds=0, transforms=None):
        super(dog_cat, self).__init__()
        self.df = df
        self.mode = mode
        self.folds = folds
        self.transforms = transforms
        if self.mode == "train":
            self.data = self.df[self.df.folds != self.folds].reset_index(drop=True)
        else:
            self.data = self.df[self.df.folds == self.folds].reset_index(drop=True)

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        img = cv2.imread(self.data.loc[idx, "Paths"])
        label = self.data.loc[idx, 'Labels']
        if self.transforms is not None:
            image = self.transforms(image=img)['image']
        else:
            # BUG FIX: previously `image` was left undefined when no
            # transforms were given, raising NameError on access; fall
            # back to the raw decoded array.
            image = img
        return image, label
def train_loop(epoch, dataloader, model, loss_fn, optim, device=DEVICE):
    """Run one mixed-precision (AMP) training epoch.

    Returns (mean loss, mean accuracy) over the epoch's batches and logs
    both to wandb.  Relies on the module-level ``scaler`` (GradScaler).

    BUG FIX: tensors were previously moved with the global ``DEVICE``,
    silently ignoring the ``device`` parameter; the parameter is now used.
    """
    model.train()
    epoch_loss = 0
    epoch_acc = 0
    pbar = tqdm(enumerate(dataloader), total=len(dataloader))
    for i, (img, label) in pbar:
        optim.zero_grad()
        img = img.to(device).float()
        label = label.to(device).float()
        with autocast():
            yhat = model(img)
            # Loss on the flattened logits against the float labels.
            train_loss = loss_fn(input=yhat.flatten(), target=label)
        # Threshold the sigmoid output at 0.5 for batch accuracy.
        out = (yhat.flatten().sigmoid() > 0.5).float()
        correct = (label == out).float().sum()
        scaler.scale(train_loss).backward()
        scaler.step(optim)
        scaler.update()
        epoch_loss += train_loss.item()
        epoch_acc += correct.item() / out.shape[0]
    train_epoch_loss = epoch_loss / len(dataloader)
    train_epoch_acc = epoch_acc / len(dataloader)
    wandb.log({"Training_Loss": train_epoch_loss})
    wandb.log({"Training_Acc": train_epoch_acc})
    return train_epoch_loss, train_epoch_acc
def val_loop(epoch, dataloader, model, loss_fn, device=DEVICE):
    """Run one evaluation epoch with gradients disabled.

    Returns (mean loss, mean accuracy) over the epoch's batches and logs
    both to wandb keyed by epoch.

    BUG FIX: the logged "Val_Acc" was previously ``val_accd /
    len(dataloader)`` even though ``val_accd`` is already the per-batch
    mean -- the accuracy was divided by the batch count twice.
    """
    model.eval()
    val_epoch_loss = 0
    val_epoch_acc = 0
    pbar = tqdm(enumerate(dataloader), total=len(dataloader))
    with torch.no_grad():
        for i, (img, label) in pbar:
            img = img.to(device).float()
            label = label.to(device).float()
            yhat = model(img)
            val_loss = loss_fn(input=yhat.flatten(), target=label)
            out = (yhat.flatten().sigmoid() > 0.5).float()
            correct = (label == out).float().sum()
            val_epoch_loss += val_loss.item()
            val_epoch_acc += correct.item() / out.shape[0]
    val_lossd = val_epoch_loss / len(dataloader)
    val_accd = val_epoch_acc / len(dataloader)
    wandb.log({"Val_Loss": val_lossd, "Epoch": epoch})
    wandb.log({"Val_Acc": val_accd, "Epoch": epoch})
    return val_lossd, val_accd
if __name__ == "__main__":
    # Per-epoch histories (collected for inspection; not read below).
    train_per_epoch_loss, train_per_epoch_acc = [], []
    val_per_epoch_loss, val_per_epoch_acc = [], []
    # Same deterministic resize/normalize pipeline for both splits.
    train = dog_cat(data, transforms=Compose([Resize(256,256), Normalize(), ToTensorV2()]))
    val = dog_cat(data, mode='val', transforms=Compose([Resize(256,256), Normalize(), ToTensorV2()]))
    train_load = DataLoader(train, batch_size=32, shuffle=True, num_workers=4)
    val_load = DataLoader(val, batch_size=32, num_workers=4)
    for e in range(TOTAL_EPOCHS):
        train_loss, train_acc = train_loop(e, train_load, Model, train_loss_fn, optim)
        val_loss, val_acc = val_loop(e, val_load, Model, val_loss_fn)
        train_per_epoch_loss.append(train_loss)
        train_per_epoch_acc.append(train_acc)
        val_per_epoch_loss.append(val_loss)
        val_per_epoch_acc.append(val_acc)
        print(f"TrainLoss:{train_loss:.4f} TrainAcc:{train_acc:.4f}")
        print(f"ValLoss:{val_loss:.4f} ValAcc:{val_acc:.4f}")
        # Early stopping monitors validation loss; breaks out when triggered.
        early_stop(Model, val_loss)
        if early_stop.early_stop:
            break
| StarcoderdataPython |
3341308 | <filename>dashboard/src/commands/data.py<gh_stars>1-10
import click
import pandas as pd
from flask.cli import AppGroup
from src.database import db
def get_states(csv_path):
    '''
    Read the relevant state columns from specified CSV file
    '''
    column_map = {'SG_UF': 'state_code', 'NO_UF': 'state'}
    df = pd.read_csv(csv_path, delimiter=';', encoding='ISO-8859-1',
                     usecols=list(column_map))
    return df.rename(columns=column_map)
def get_products(csv_path):
    '''
    Load product metadata (NCM code and Portuguese name) from the given
    semicolon-delimited, Latin-1 encoded CSV file.
    '''
    columns = {'CO_NCM': 'product_code', 'NO_NCM_POR': 'product'}
    raw = pd.read_csv(csv_path, delimiter=';', encoding='ISO-8859-1',
                      usecols=list(columns))
    return raw.rename(columns=columns)
# Flask CLI command group: exposes `flask data <command>` entry points used
# to populate the dashboard's database tables.
data_cli = AppGroup('data',
                    short_help='Commands to populate the database tables')
@data_cli.command()
@click.argument('csv_path', type=click.Path(exists=True), nargs=1)
@click.argument('states_path', type=click.Path(exists=True), nargs=1)
@click.argument('products_path', type=click.Path(exists=True), nargs=1)
@click.option(
    '--kind',
    type=click.Choice(['import', 'export']),
    help='The kind of trade being processed',
    required=True
)
@click.option(
    '--year',
    type=int,
    required=True,
    help='The year whose data should be aggregated')
@click.option(
    '--n',
    default=3,
    show_default=True,
    help='How many of the top products to return for each state')
def aggregate_by_state_and_add(csv_path, states_path, products_path,
                               kind, year, n=3):
    '''
    Process the specified CSV files to generate a table with the top n
    products with highest total traded value in the specified year, by
    state, for the kind of trade specified.

    CSV_PATH is the raw trade data; STATES_PATH and PRODUCTS_PATH are the
    metadata files read by get_states/get_products. Results are appended
    to the ``top_by_state_and_year`` table.
    '''
    click.echo(f'Processing {csv_path}...')

    df = pd.read_csv(csv_path, delimiter=';',
                     usecols=['CO_ANO', 'CO_NCM', 'SG_UF_NCM', 'VL_FOB'])
    df.columns = ['year', 'product_code', 'state_code', 'total']
    # The source file may span several years; keep only the requested one.
    df = df[df['year'] == year]

    # Compute the totals for each combination of state and product
    totals = df.groupby(['state_code', 'product_code'])[['total']].sum()

    # Rank the products of each state by their total values
    # (cumcount over the value-sorted frame yields rank 1 = highest total)
    ranked = totals.assign(
        rank=totals.sort_values(['total'], ascending=False)
                   .groupby(['state_code'])
                   .cumcount() + 1
    )

    # Keep only the wanted number of products for each state
    top = (ranked.query(f'rank <= {n}')
           .sort_values(['state_code', 'rank'])
           .drop('rank', axis=1)
           ).reset_index()

    # Add columns for the year and kind of trade being processed
    top = top.assign(year=year, kind=kind)

    # JOIN metadata
    states = get_states(states_path)
    products = get_products(products_path)
    merged_top = top.merge(states, on='state_code')\
                    .merge(products, on='product_code')

    merged_top.to_sql('top_by_state_and_year', db.engine, index=False,
                      if_exists='append')

    click.echo(f'Finished ranking of products {kind}ed in {year} by state.')
@data_cli.command()
@click.argument('csv_path', type=click.Path(exists=True), nargs=1)
@click.argument('states_path', type=click.Path(exists=True), nargs=1)
@click.argument('products_path', type=click.Path(exists=True), nargs=1)
@click.option(
    '--kind',
    type=click.Choice(['import', 'export']),
    help='The kind of trade being processed',
    required=True
)
@click.option(
    '--year',
    type=int,
    required=True,
    help='The year whose data should be aggregated')
@click.option(
    '--n',
    default=3,
    show_default=True,
    help='How many of the top products to return for each state and month')
def aggregate_by_month_and_state_and_add(csv_path, states_path,
                                         products_path, kind, year, n=3):
    '''
    Process the specified CSV files to generate a table with the top n
    products with highest total traded value in the specified year, by
    month and state, for the kind of trade specified.

    Same pipeline as aggregate_by_state_and_add, but grouped at (month,
    state) granularity; results are appended to ``top_by_state_and_month``.
    '''
    click.echo(f'Processing {csv_path}...')

    df = pd.read_csv(csv_path, delimiter=';',
                     usecols=['CO_ANO', 'CO_MES', 'CO_NCM', 'SG_UF_NCM',
                              'VL_FOB'])
    df.columns = ['year', 'month', 'product_code', 'state_code', 'total']
    # The source file may span several years; keep only the requested one.
    df = df[df['year'] == year]

    # Compute the totals for each combination of state, month and product
    totals = df.groupby(
        ['month', 'state_code', 'product_code'])[['total']].sum()

    # Rank the products of each state and month by their total values
    # (cumcount over the value-sorted frame yields rank 1 = highest total)
    ranked = totals.assign(
        rank=totals.sort_values(['total'], ascending=False)
                   .groupby(['month', 'state_code'])
                   .cumcount() + 1
    )

    # Keep only the wanted number of products for each state and month
    top = (ranked.query(f'rank <= {n}')
           .sort_values(['state_code', 'month', 'rank'])
           .drop('rank', axis=1)
           ).reset_index()

    # Add columns for the year and kind of trade being processed
    top = top.assign(year=year, kind=kind)

    # JOIN metadata
    states = get_states(states_path)
    products = get_products(products_path)
    merged_top = top.merge(states, on='state_code')\
                    .merge(products, on='product_code')

    merged_top.to_sql('top_by_state_and_month', db.engine, index=False,
                      if_exists='append')

    click.echo(f'Finished ranking of products {kind}ed in {year} ' +
               'by month and state.')
@data_cli.command()
@click.argument('csv_path', type=click.Path(exists=True), nargs=1)
@click.argument('states_path', type=click.Path(exists=True), nargs=1)
@click.option(
    '--kind',
    type=click.Choice(['import', 'export']),
    help='The kind of trade being processed',
    required=True
)
@click.option(
    '--year',
    type=int,
    required=True,
    help='The year whose data should be aggregated')
def aggregate_state_contributions_and_add(csv_path, states_path,
                                          kind, year):
    '''
    Process the CSV file specified to generate a table with the percentage
    of contribution of each state to the country's total transactions in
    the given year, for the kind of trade specified.
    '''
    click.echo(f'Processing {csv_path}...')

    trades = pd.read_csv(csv_path, delimiter=';',
                         usecols=['CO_ANO', 'SG_UF_NCM', 'VL_FOB'])
    trades.columns = ['year', 'state_code', 'total']
    trades = trades[trades['year'] == year]

    # Each state's share of the national total for the year.
    grand_total = trades['total'].sum()
    state_contribs = trades.groupby(
        ['state_code'], as_index=False)['total'].sum()
    state_contribs = state_contribs.assign(
        year=year,
        kind=kind,
        percentage=100.0 * state_contribs['total'] / grand_total,
    )

    # JOIN metadata
    merged_state_contribs = state_contribs.merge(get_states(states_path),
                                                 on='state_code')

    merged_state_contribs.to_sql('state_contributions', db.engine,
                                 index=False, if_exists='append')

    click.echo('Finished aggregation of states contributions to the ' +
               f'{kind}s in {year}.')
def init_app(app):
    """Register the ``data`` CLI command group on the given Flask app."""
    app.cli.add_command(data_cli)
| StarcoderdataPython |
3317947 | from ..flags import *
if BACKEND_FLAGS.HAS_PROTO:
from . import ChainProto
def get_protobuf_numbering_scheme(numbering_scheme):
    """
    Map an abpytools numbering-scheme flag to its ChainProto enum value.

    Args:
        numbering_scheme: one of the NUMBERING_FLAGS constants

    Returns:
        The corresponding ChainProto field value.

    Raises:
        ValueError: if the scheme has no protobuf equivalent yet.
    """
    if numbering_scheme == NUMBERING_FLAGS.KABAT:
        return ChainProto.KABAT
    if numbering_scheme == NUMBERING_FLAGS.CHOTHIA:
        return ChainProto.CHOTHIA
    # MARTIN is serialised with the extended-Chothia representation.
    if numbering_scheme in (NUMBERING_FLAGS.CHOTHIA_EXT, NUMBERING_FLAGS.MARTIN):
        return ChainProto.CHOTHIA_EXT
    raise ValueError(f"{numbering_scheme} numbering scheme not supported by protobuf "
                     f"serialisation yet!")
def get_numbering_scheme_from_protobuf(proto_numbering_scheme):
    """
    Map a ChainProto numbering-scheme value back to its abpytools flag.

    Args:
        proto_numbering_scheme: a ChainProto numbering-scheme field value

    Returns:
        The corresponding NUMBERING_FLAGS constant.

    Raises:
        ValueError: if the protobuf value is unknown to abpytools.
    """
    mapping = {
        ChainProto.KABAT: NUMBERING_FLAGS.KABAT,
        ChainProto.CHOTHIA: NUMBERING_FLAGS.CHOTHIA,
        ChainProto.CHOTHIA_EXT: NUMBERING_FLAGS.CHOTHIA_EXT,
    }
    if proto_numbering_scheme not in mapping:
        raise ValueError(f"ChainProto numbering scheme {proto_numbering_scheme} is not compatible with abpytools!")
    return mapping[proto_numbering_scheme]
| StarcoderdataPython |
1645845 | #!/usr/bin/env python3
import os
import argparse
import shutil
from collections import namedtuple
class BalanceSource():
    """Mutable record of one source directory and its disk-usage figures (bytes)."""
    def __init__(self, path, free_bytes, total_bytes, used_bytes):
        self.path = path                    # directory/mount path of this source
        self.free_bytes = free_bytes        # bytes still available on its filesystem
        self.total_bytes = total_bytes      # filesystem capacity
        self.used_bytes = used_bytes        # bytes currently in use
# (absolute path of the entry, path relative to its source root, size in bytes)
SourceFile = namedtuple('SourceFile', 'file rel size')
class Balancer():
    """Evens out disk usage across a set of source directories by moving
    their top-level entries from overloaded sources to underloaded ones."""

    def __init__(self, sources):
        self.sources = sources  # list of directory paths to balance

    def balance(self, dry_run=False):
        """Plan (and, unless dry_run, perform) moves that bring every source
        toward the mean used-bytes value.

        Note: the in-memory accounting is updated even when dry_run is True,
        so the printed plan stays internally consistent.
        """
        balance_sources = []
        for source in self.sources:
            (free_bytes, total_bytes, used_bytes) = self._get_source_usage_stats(source)
            balance_source = BalanceSource(path=source, free_bytes=free_bytes, total_bytes=total_bytes, used_bytes=used_bytes)
            balance_sources.append(balance_source)

        # Target: every source ends up holding the mean of the used bytes.
        desired_used_bytes = sum([source.used_bytes for source in balance_sources])/len(balance_sources)
        overloaded_sources = [source for source in balance_sources if source.used_bytes > desired_used_bytes]
        # NOTE(review): ascending sort actually puts the *least* overloaded
        # source first, despite the original comment — confirm intent.
        overloaded_sources.sort(key=lambda x: x.used_bytes) # most overloaded first
        underloaded_sources = [source for source in balance_sources if source not in overloaded_sources]
        # NOTE(review): reverse sort by usage visits the fullest underloaded
        # source first — confirm this matches the intended fill order.
        underloaded_sources.sort(key=lambda x: (x.used_bytes, x.path), reverse=True) # most underloaded first
        for originating_source in overloaded_sources:
            files = os.listdir(originating_source.path)
            balanced_files = []   # relative names already moved off this source
            source_files = []
            for file in files:
                orig_path = os.path.join(originating_source.path, file)
                size = self._get_path_size(orig_path)
                source_file = SourceFile(file=orig_path, rel=file, size=size)
                source_files.append(source_file)
            source_files.sort(key=lambda tup: (tup[2], tup[1]), reverse=True) # by largest size, then reverse relative path
            for destination_source in underloaded_sources:
                # filter out files that have already been balanced to other drives
                unbalanced_source_files = [f for f in source_files if f.rel not in balanced_files]
                for source_file in unbalanced_source_files:
                    # it doesn't fit on the destination
                    if destination_source.free_bytes - source_file.size < 0:
                        continue
                    # it brings too much data to the destination
                    if destination_source.used_bytes + source_file.size > desired_used_bytes:
                        continue
                    # it makes the originator store too little data
                    if originating_source.used_bytes - source_file.size < desired_used_bytes:
                        continue
                    print(f"Moving {source_file.rel} from {originating_source.path} to {destination_source.path}")
                    if not dry_run:
                        shutil.move(source_file.file, os.path.join(destination_source.path, source_file.rel))
                    balanced_files.append(source_file.rel)
                    # Keep the in-memory accounting in sync with the (planned) move.
                    originating_source.used_bytes = originating_source.used_bytes - source_file.size
                    originating_source.free_bytes = originating_source.free_bytes + source_file.size
                    destination_source.used_bytes = destination_source.used_bytes + source_file.size
                    destination_source.free_bytes = destination_source.free_bytes - source_file.size

    def _get_source_usage_stats(self, source):
        """Return (free, total, used) bytes for the filesystem holding *source*."""
        st = os.statvfs(source)
        free_bytes = st.f_bavail * st.f_frsize
        total_bytes = st.f_blocks * st.f_frsize
        used_bytes = (st.f_blocks - st.f_bfree) * st.f_frsize
        return free_bytes, total_bytes, used_bytes

    def _get_path_size(self, path):
        """Return the size of a file, or the recursive total size of a directory."""
        if os.path.isfile(path):
            return os.path.getsize(path)
        # traverse dir and sum file sizes
        total_size = 0
        for dirpath, dirnames, filenames in os.walk(path):
            for f in filenames:
                fp = os.path.join(dirpath, f)
                total_size += os.path.getsize(fp)
        return total_size
def main(sources, dry_run):
    """Balance *sources*; when dry_run is True, only print the planned moves."""
    Balancer(sources).balance(dry_run)
if __name__ == '__main__':
    # CLI: a comma-separated list of source directories, plus --dry-run.
    parser = argparse.ArgumentParser(description='The Cinch Filesystem Balancer')
    parser.add_argument('--dry-run', action='store_true', help="Don't move any files")
    parser.add_argument('sources', action="store")
    args = parser.parse_args()
    main(args.sources.split(','), args.dry_run)
| StarcoderdataPython |
88071 | import numpy as np
from scipy.interpolate import LinearNDInterpolator, interp1d
from astropy import table
from astropy.table import Table, Column
import warnings
def get_track_meta(track, key="FeH"):
    """ Return the metadata entry *key* from a track's ``meta`` mapping. """
    meta = track.meta
    assert key in meta.keys()
    return meta[key]
def find_rank_1d(arr, val, sort=False):
    """ Return the indices of the two consecutive elements of *arr* that
    bracket *val* (lower index first). Asserts that a bracket exists. """
    if sort:
        arr = np.sort(arr)
    lower = np.where((arr[:-1] < val) & (arr[1:] >= val))[0]
    assert len(lower) > 0
    return np.hstack((lower, lower + 1))
def get_track_item_given_eeps(track, eeps):
    """ Select the rows of *track* whose ``_eep`` value is one of *eeps*. """
    mask = np.zeros(len(track), dtype=bool)
    for eep in eeps:
        mask |= track["_eep"] == eep
    return track[mask]
def calc_weight(arr, val, norm=1.):
    """ Return |arr - val| rescaled (in place) so the entries sum to *norm*. """
    distances = np.abs(arr - val)
    distances *= norm / np.sum(distances)
    return np.array(distances)
def table_linear_combination(t, weight):
    """ given weight, return the linear combination of each row for each column

    Only numeric (integer/floating) columns are kept; each surviving column of
    the returned one-row Table holds sum(column * weight).
    """
    assert len(t) == len(weight)
    new_cols = []
    colnames = t.colnames
    ncols = len(colnames)
    for i in range(ncols):
        # FIX: ``np.int`` / ``np.float`` were deprecated aliases of the
        # builtins and were removed in NumPy 1.24; test the dtype family
        # instead (this also covers non-native widths such as int32/float32).
        if np.issubdtype(t.dtype[i], np.integer) \
                or np.issubdtype(t.dtype[i], np.floating):
            colname = colnames[i]
            new_cols.append(
                Column(np.array([np.sum(t[colname].data * weight)]), colname))
    return Table(new_cols)
class StarObject():
    """ Expose the single row of a one-row table as plain object attributes. """
    def __init__(self, t):
        assert len(t) == 1
        # One attribute per column, holding the scalar from the only row.
        for colname in t.colnames:
            self.__setattr__(colname, t[colname].data[0])
class TrackSet:
    """ a set of tracks

    Holds stellar evolution tracks on an (initial mass, [Fe/H]) grid and
    interpolates track quantities at arbitrary (Minit, [Fe/H], EEP) points.
    """
    # Class-level defaults; all are rebound per instance in __init__.
    data = []
    eep_bounds = (1, 808)
    default_coord = ["_lgmass", "_feh", "_lgage", "_eep"]
    bci = None

    def __init__(self, tracks, metadict=None):
        """ initialization of track set object

        Args:
            tracks: iterable of track tables; each must carry the metadata
                keys named in *metadict* and an ``_eep`` column.
            metadict: mapping from logical names to track metadata keys.
        """
        # FIX: avoid a shared mutable default argument for ``metadict``.
        if metadict is None:
            metadict = dict(minit="initial_mass", feh="FEH",
                            eep="EEPS", mbol="Mbol")
        self.metadict = metadict
        self.data = np.array(tracks)
        # Per-track grids of initial mass and metallicity.
        self.grid_minit = np.array(
            [get_track_meta(track, metadict["minit"]) for track in tracks])
        self.grid_feh = np.array(
            [get_track_meta(track, metadict["feh"]) for track in tracks])
        # First/last EEP actually present in each track.
        self.grid_EEP0 = np.array([np.min(_["_eep"]) for _ in self.data])
        self.grid_EEP1 = np.array([np.max(_["_eep"]) for _ in self.data])

        self.u_minit = np.unique(self.grid_minit)
        self.u_feh = np.unique(self.grid_feh)

        # Grid bounds used for range checks in the interpolators below.
        self.min_minit = np.min(self.u_minit)
        self.max_minit = np.max(self.u_minit)
        self.min_feh = np.min(self.u_feh)
        self.max_feh = np.max(self.u_feh)
        self.min_eep = np.min(self.grid_EEP0)
        self.max_eep = np.max(self.grid_EEP1)

    def get_track4(self, mass_feh=(1.01, 0.01)):
        """ return the 4 neighboring stellar tracks (structured grid),
        or None when (Minit, [Fe/H]) falls outside the grid bounds. """
        # FIX: np.float was removed in NumPy 1.24; the builtin is equivalent.
        test_minit, test_feh = np.array(mass_feh, dtype=float)
        # assert Minit [Fe/H] in range
        try:
            assert self.min_minit < test_minit <= self.max_minit
            assert self.min_feh < test_feh <= self.max_feh
        except AssertionError:
            return None

        # 1. locate 4 tracks
        ind_minit = find_rank_1d(self.u_minit, test_minit)
        ind_feh = find_rank_1d(self.u_feh, test_feh)
        val_minit = self.u_minit[ind_minit]
        val_feh = self.u_feh[ind_feh]

        ind_track = np.where(np.logical_and(
            (self.grid_minit == val_minit[0]) | (
                self.grid_minit == val_minit[1]),
            (self.grid_feh == val_feh[0]) | (self.grid_feh == val_feh[1])))[0]
        track4 = self.data[ind_track]
        return track4

    def get_track4_unstructured(self, mass_feh=(1.01, 0.01)):
        """ return the 4 neighboring stellar tracks given unstructured grid:
        the nearest track in each (mass, feh) quadrant, or None if any
        quadrant is empty. """
        # FIX: np.float was removed in NumPy 1.24; the builtin is equivalent.
        test_minit, test_feh = np.array(mass_feh, dtype=float)
        # Squared distance in (log10 mass, [Fe/H]) space.
        d_minit_feh = (np.log10(self.grid_minit)-np.log10(test_minit))**2. + \
                      (self.grid_feh - test_feh) ** 2.
        mask00 = (self.grid_minit < test_minit) & (self.grid_feh < test_feh)
        mask01 = (self.grid_minit < test_minit) & (self.grid_feh >= test_feh)
        mask10 = (self.grid_minit >= test_minit) & (self.grid_feh < test_feh)
        mask11 = (self.grid_minit >= test_minit) & (self.grid_feh >= test_feh)

        if np.any(np.array([np.sum(mask00), np.sum(mask01),
                            np.sum(mask10), np.sum(mask11)]) == 0):
            return None

        ind00 = np.argmin(np.ma.MaskedArray(d_minit_feh, ~mask00))
        ind01 = np.argmin(np.ma.MaskedArray(d_minit_feh, ~mask01))
        ind10 = np.argmin(np.ma.MaskedArray(d_minit_feh, ~mask10))
        ind11 = np.argmin(np.ma.MaskedArray(d_minit_feh, ~mask11))

        return self.data[[ind00, ind01, ind10, ind11]]

    def interp_mass_feh_eep(self, interp_colname="_lgage",
                            mfe=(1.01, 0.01, 503.2),
                            lndi=True, debug=False, raise_error=False):
        """ Interpolate track column(s) at (Minit, [Fe/H], EEP) = *mfe*.

        Returns np.nan (or raises, when *raise_error*) for out-of-bounds
        inputs. With lndi=False a weighted table combination is returned;
        otherwise LinearNDInterpolator is used on the 8 bracketing points.
        """
        # FIX: np.float was removed in NumPy 1.24; the builtin is equivalent.
        test_minit, test_feh, test_eep = np.array(mfe, dtype=float)

        # 1. assert Minit [Fe/H] in range
        try:
            assert self.min_minit < test_minit <= self.max_minit
            assert self.min_feh < test_feh <= self.max_feh
        except AssertionError as ae:
            if not raise_error:
                return np.nan
            else:
                # FIX: ``ae(...)`` called the exception *instance*, which
                # raised TypeError instead of the intended error.
                raise AssertionError("The test values are not in bounds!") from ae

        # 2. locate the 4 bracketing tracks (handles unstructured grids)
        track4 = self.get_track4_unstructured((test_minit, test_feh))
        if track4 is None:
            if raise_error:
                raise ValueError("Bad test values!")
            else:
                return np.nan

        eep_maxmin = np.max([_["_eep"][0] for _ in track4])
        eep_minmax = np.min([_["_eep"][-1] for _ in track4])

        # 3. assert EEP in range
        try:
            assert eep_maxmin < test_eep <= eep_minmax
        except AssertionError as ae:
            if not raise_error:
                return np.nan
            else:
                # FIX: same instance-call bug as above.
                raise AssertionError("EEP value is not in bounds!") from ae

        # 4. locate EEP
        eep_arr = np.arange(eep_maxmin, eep_minmax + 1)
        ind_eep = find_rank_1d(eep_arr, test_eep)
        val_eep = eep_arr[ind_eep]

        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            track_box = table.vstack([
                get_track_item_given_eeps(track, val_eep) for track in track4])

        # 5. interpolate
        if not lndi:
            # Multiplicative tri-linear weights over (lg mass, feh, eep).
            w_mfe = (1 - calc_weight(track_box["_lgmass"], np.log10(test_minit), 4)) * \
                    (1 - calc_weight(track_box["_feh"], test_feh, 4)) * \
                    (1 - calc_weight(track_box["_eep"], test_eep, 4))
            if debug:
                return w_mfe
            star_result = table_linear_combination(track_box, w_mfe)
            return star_result

        elif type(interp_colname) is not list:
            points = np.array(track_box["_lgmass", "_feh", "_eep"].to_pandas())
            values = track_box[interp_colname].data
            lndi = LinearNDInterpolator(points, values)
            test_points = np.array((np.log10(test_minit), test_feh, test_eep))
            return lndi(test_points)[0]

        elif type(interp_colname) is list:
            points = np.array(track_box["_lgmass", "_feh", "_eep"].to_pandas())
            test_points = np.array((np.log10(test_minit), test_feh, test_eep))
            results = []
            for _interp_colname in interp_colname:
                if type(_interp_colname) is int:
                    # directly return the input value if int
                    results.append(test_points[_interp_colname])
                else:
                    values = track_box[_interp_colname].data
                    results.append(
                        LinearNDInterpolator(points, values)(test_points)[0])
            return np.array(results)

    def calc_dlgagedeep(self, track, deep=0.1):
        """ Append a ``dlgagedeep`` column: d(lg age)/d(EEP) via central
        differences of half-width *deep*. Returns the modified track. """
        I = interp1d(track["_eep"], track["_lgage"], kind="linear",
                     bounds_error=False, fill_value=-np.inf)
        dlgagedeep = (I(track["_eep"] + deep) - I(track["_eep"] - deep)) \
            / deep / 2.
        track.add_column(Column(dlgagedeep, "dlgagedeep"))
        return track

    def calc_dlgagedeep_for_all_tracks(self, deep=0.1):
        """ As calc_dlgagedeep, applied in place to every track in the set. """
        for track in self.data:
            I = interp1d(track["_eep"], track["_lgage"], kind="linear",
                         bounds_error=False, fill_value=-np.inf)
            dlgagedeep = (I(track["_eep"] + deep) -
                          I(track["_eep"] - deep)) / deep / 2.
            if "dlgagedeep" not in track.colnames:
                track.add_column(Column(dlgagedeep, "dlgagedeep"))
            else:
                track["dlgagedeep"] = dlgagedeep
        return

    def get_track(self, minit, feh):
        """ get the track closest to (minit, feh) """
        ind_mindist = np.argmin(
            (self.grid_minit - minit) ** 2. + (self.grid_feh - feh) ** 2.)
        return self.data[ind_mindist]

    def get_track_minit(self, minit):
        """ get the track(s) closest to minit """
        chosen_minit = self.u_minit[np.argmin((self.u_minit - minit) ** 2)]
        ind_minit = self.grid_minit == chosen_minit
        return self.data[ind_minit]

    def get_track_feh(self, feh):
        """ get the track(s) closest to feh """
        chosen_feh = self.u_feh[np.argmin((self.u_feh - feh) ** 2)]
        ind_feh = self.grid_feh == chosen_feh
        return self.data[ind_feh]

    def dtdeep(self):
        """ calculate dtdeep for each track (not implemented yet) """
        pass
def lnprior(minit, feh, age):
    """Log-prior over (Minit, [Fe/H], age); currently a flat (zero) prior."""
    # 1. determine deep = dt deep/
    return 0 | StarcoderdataPython |
1605587 | #!/usr/bin/env python3
"""
Contains a class to use an atlas to look up your location inside a brain.
Created 2/8/2021 by <NAME>.
"""
from pathlib import Path
from typing import Dict, Tuple
import templateflow.api
import pandas
import nibabel
import numpy
from functools import cached_property
from dataclasses import dataclass
@dataclass
class Atlas():
    """
    Looks up atlas coordinates for you. All coordinates are in voxel space, NOT scanner space.

    Your data MUST be aligned and resampled to the 1mm or 2mm MNI152NLin2009cAsym brain.
    When constructing an Atlas object, you can set lower_resolution=True if you'd like the atlas to use 2mm resolution.
    You may use the Atlas like a Python dictionary if you'd like. For example,
    >>> coordinate_lookup = Atlas()
    >>> print(coordinate_lookup[(100, 100, 100)])
    prints the following:
    'Right Cerebral White Matter'
    """
    lower_resolution: bool = False

    def __getitem__(self, thruple: Tuple[int, int, int]) -> str:
        """Return the region name at voxel coordinate (x, y, z)."""
        assert len(thruple) == 3, "You must pass a tuple with exactly 3 coordinates, i.e. (x, y, z)"
        x, y, z = thruple
        return self.translation_array[x, y, z]

    @cached_property
    def _image(self) -> nibabel.nifti1.Nifti1Image:
        """
        Returns raw atlas image to be translated. If self.lower_resolution = True, raw atlas image will be 2mm resolution.
        """
        nifti_path = self._MNI_dir / "tpl-MNI152NLin2009cAsym_res-01_desc-carpet_dseg.nii.gz"
        if self.lower_resolution == True:
            nifti_path = self._MNI_dir / "tpl-MNI152NLin2009cAsym_res-02_desc-carpet_dseg.nii.gz"
        return nibabel.load(nifti_path)

    @cached_property
    def _translation_dictionary(self) -> Dict[int, str]:
        """
        Returns a dict containing the code for each area of the brain recorded in
        {MNI_dir}/"tpl-MNI152NLin2009cAsym_desc-carpet_dseg.tsv"
        Each key is an index, and each value is a brain region.
        """
        tsv_lookup = self._MNI_dir / "tpl-MNI152NLin2009cAsym_desc-carpet_dseg.tsv"
        dataframe_lookup = pandas.read_csv(tsv_lookup, delimiter="\t")
        return dict(zip(dataframe_lookup["index"], dataframe_lookup["name"]))

    @cached_property
    def _MNI_dir(self) -> Path:
        """
        Uses templateflow to download MNI brain stuff. Returns directory in which it's downloaded.
        """
        return templateflow.api.get("MNI152NLin2009cAsym")[0].parent

    @cached_property
    def translation_array(self) -> numpy.array:
        """
        Returns an array. Contains an atlas region name at each voxel coordinate in the array.
        """
        untranslated_array = numpy.asarray(self._image.dataobj).astype(int)
        return self._replace_using_dict(untranslated_array, self._translation_dictionary)

    def mask_image(self, image, region: str) -> numpy.ma.masked_array:
        """
        Given a NiBabel image, returns a masked array for a region of interest.
        Image must be in the same space as the atlas.
        """
        image_array = image.get_fdata()
        number_of_dimensions = image_array.ndim
        assert number_of_dimensions == 3 or number_of_dimensions == 4, "Image must be 3-dimensional or 4-dimensional."
        if number_of_dimensions == 3:
            mask = self.get_3d_mask(region)
        else:
            fourth_dimension_length = image_array.shape[3]
            mask = self.get_4d_mask(region, fourth_dimension_length)
        masked_image = numpy.ma.masked_array(image_array, mask=mask)
        return masked_image

    def get_4d_mask(self, region: str, fourth_dimension_length: int) -> numpy.array:
        """
        Returns a 4d array where each coordinate of the specified region equals False, and all other values are True.
        Use this for atlasing EPI images or other 4d structures.
        """
        third_dimensional_array = self.get_3d_mask(region)
        fourth_dimensional_array = numpy.repeat(third_dimensional_array[..., numpy.newaxis], fourth_dimension_length, axis=-1)
        return fourth_dimensional_array

    def get_3d_mask(self, region: str) -> numpy.array:
        """
        Returns a 3d array where each coordinate of the specified region is False, and all other values are True.
        """
        # FIX: this previously read ``self.atlas_array``, an attribute that
        # does not exist anywhere on this class (AttributeError at runtime);
        # the lookup array is exposed as ``translation_array``.
        mask = self.translation_array != region
        return mask

    def _replace_using_dict(self, array: numpy.array, dictionary: Dict) -> numpy.array:
        """
        Replace all keys in target array with their specified values.
        """
        keys = numpy.array(list(dictionary.keys()))
        values = numpy.array(list(dictionary.values()))
        mapping_array = numpy.zeros(keys.max()+1, dtype=values.dtype)
        mapping_array[keys] = values
        return mapping_array[array]
| StarcoderdataPython |
3231311 | <filename>setup.py
import os
from setuptools import setup, find_packages
root_dir_path = os.path.dirname(os.path.abspath(__file__))

# Read packaging metadata from files alongside this script. FIX: the long
# description and version were read via bare ``open(...).read()``, leaking
# the file handles; context managers close them deterministically.
with open(os.path.join(root_dir_path, "README.md")) as readme_file:
    long_description = readme_file.read()
with open(os.path.join(root_dir_path, "version.txt")) as version_file:
    version = version_file.read()
requirements_path = os.path.join(root_dir_path, "requirements.txt")
with open(requirements_path) as requirements_file:
    requirements = requirements_file.readlines()
# Package definition for gelidum; metadata files were read above.
setup(
    name="gelidum",
    version=version,
    author="<NAME>",
    author_email="<EMAIL>",
    description="Freeze your python objects",
    long_description=long_description,
    long_description_content_type="text/markdown",
    classifiers=[
        "Programming Language :: Python :: Implementation :: CPython",
        "Programming Language :: Python :: Implementation :: PyPy",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Topic :: Software Development :: Libraries",
        "Development Status :: 4 - Beta",
        "License :: OSI Approved :: MIT License"
    ],
    install_requires=requirements,
    license="MIT",
    keywords="freeze python object",
    url="https://github.com/diegojromerolopez/gelidum",
    packages=find_packages(),
    data_files=["version.txt", "requirements.txt"],
    include_package_data=True,
    scripts=[]
)
| StarcoderdataPython |
1705266 | import sys, os
import streamlit as st
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from next_word_prediction import GPT2
# Cache the model across Streamlit reruns; GPT2 objects can't be hashed by
# st.cache, so hash_funcs tells it to skip hashing them entirely.
@st.cache(hash_funcs={GPT2: lambda _: None})
def load_model():
    """Load (once) and return the shared GPT-2 next-word predictor."""
    return GPT2()
def app():
    """Streamlit page: show the top-k next-word predictions for a sentence."""
    gpt2 = load_model()
    st.title("Next Word Prediction Using GPT-2")
    text = st.text_input("Input a sentence:")
    # Number of candidate words to display (1-10, default 5).
    topk = st.slider("Display Number", 1, 10, 5)
    if text:
        with st.spinner("Wait for it..."):
            prediction = gpt2.predict_next(text, topk)
        # NOTE(review): st.beta_expander is the pre-1.0 API name; newer
        # Streamlit releases expose it as st.expander — confirm pinned version.
        expander = st.beta_expander("View Results", expanded=True)
        for index, word in enumerate(prediction):
            expander.write(f"{index}. {word}")

if __name__ == "__main__":
    app()
| StarcoderdataPython |
3282453 | from whitenoise import WhiteNoise
from app import app
# WSGI entry point: wrap the Flask app so WhiteNoise serves the static assets
# directly, without hitting the application for /static/ URLs.
application = WhiteNoise(app)
application.add_files('static/', prefix='static/')
3292457 | #!/usr/bin/env python
import os
import setuptools
# PyPI Trove classifiers advertised for the py-raildriver package.
CLASSIFIERS = [
    'Development Status :: 5 - Production/Stable',
    'Intended Audience :: Developers',
    'License :: OSI Approved :: MIT License',
    'Operating System :: Microsoft :: Windows',
    'Programming Language :: Python',
    'Topic :: Software Development :: Libraries',
]
# FIX: the long description and the requirement pins were read via inline
# ``open(...).read()`` calls inside the setup() arguments, leaking three file
# handles; read them up front with context managers instead.
with open(os.path.join(os.path.dirname(__file__), 'README.rst')) as readme_file:
    _long_description = readme_file.read()
with open('requirements.txt') as requirements_file:
    _install_requires = requirements_file.read()
with open('test_requirements.txt') as test_requirements_file:
    _tests_require = test_requirements_file.read()

setuptools.setup(
    author='<NAME>',
    author_email='<EMAIL>',
    name='py-raildriver',
    version='1.1.7',
    description='Python interface to Train Simulator 2016',
    long_description=_long_description,
    url='https://github.com/centralniak/py-raildriver',
    license='MIT License',
    platforms=['Windows'],
    classifiers=CLASSIFIERS,
    install_requires=_install_requires,
    tests_require=_tests_require,
    packages=setuptools.find_packages(),
    include_package_data=False,
    zip_safe=False,
    test_suite='nose.collector',
)
| StarcoderdataPython |
3327354 | <gh_stars>1-10
import hashlib
import os
import random
import string
from datetime import date
import mechanicalsoup
import pytest
import requests
from mechanicalsoup import StatefulBrowser
from igem_wikisync.browser import check_login, iGEM_login, iGEM_upload_file, iGEM_upload_page
from igem_wikisync.files import HTMLfile, OtherFile
# A single StatefulBrowser is stashed on the ``pytest`` module object so the
# iGEM login session persists across the ordered tests below. Known hack —
# the author couldn't find a better way to share the session across tests;
# please submit a PR if you can improve.
pytest.browser = StatefulBrowser()
@pytest.fixture
def config():
    """WikiSync configuration shared by the upload/login tests below."""
    return dict(
        team='BITSPilani-Goa_India',
        src_dir='tests/data',
        build_dir='tests/build',
        year=str(date.today().year),
    )
@pytest.fixture
def credentials():
    """iGEM login credentials pulled from the environment (never hard-coded)."""
    return {
        'username': os.environ.get('IGEM_USERNAME'),
        # FIX: this line was mangled by secret-scrubbing into invalid syntax;
        # restored to read the password from the environment, mirroring the
        # username lookup above.
        'password': os.environ.get('IGEM_PASSWORD'),
        'team': 'BITSPilani-Goa_India'
    }
def md5hash_string(text):
    """Return the hex MD5 digest of *text*, UTF-8 encoded."""
    digest = hashlib.md5(text.encode('UTF-8'))
    return digest.hexdigest()
def md5hash_file(url):
    ''' Returns the md5 hash of a file from its URL. '''
    response = requests.get(url)
    digest = hashlib.md5()
    # Stream the body in 1 KiB chunks so large files aren't held in memory.
    for chunk in response.iter_content(1024):
        digest.update(chunk)
    return digest.hexdigest()
def test_check_login_before(config):
    # Sanity check: the shared browser starts out unauthenticated.
    assert not check_login(pytest.browser, config['team'], config['year'])

def test_credentials(credentials):
    # The CI environment must provide IGEM_USERNAME for the suite to run.
    assert credentials['username'] is not None

def test_iGEM_login(credentials, config, caplog):
    # Login for the first time
    assert iGEM_login(pytest.browser, credentials, config)
    assert 'Successfully logged in' in caplog.text

def test_check_login_after(credentials, config, caplog):
    # Check that once we're logged in, it doesn't login again
    assert iGEM_login(pytest.browser, credentials, config)
    assert 'Already logged in' in caplog.text
def test_iGEM_upload_page(config, caplog):
    # Read file
    with open('tests/data/Test/html/raw.html') as file:
        contents = file.read()

    # Add a random string
    # to check that the modified data is uploaded everytime
    contents += '\nRandom string for confirmation: '
    contents += ''.join(random.choices(string.ascii_uppercase + string.digits, k=10))

    # Generate URL
    html_file = HTMLfile('Test/html/raw.html', config)
    url = html_file.upload_URL
    raw_URL = html_file.raw_URL
    # NOTE(review): leftover debug print; harmless under pytest's capture.
    print(raw_URL)

    # Upload contents
    assert iGEM_upload_page(pytest.browser, contents, url)

    # Verify round-trip: the raw wiki page must hash to the uploaded contents.
    response = requests.get(raw_URL)
    assert md5hash_string(contents) == md5hash_string(response.text)

def test_iGEM_upload_file(config):
    # Upload a binary asset and verify the hosted copy's MD5 matches.
    file_object = OtherFile('assets/img/test.jpg', config)
    iGEM_upload_file(pytest.browser, file_object, config['year'])
    url = "https://2020.igem.org/wiki/images/5/57/T--BITSPilani-Goa_India--img--test.jpg"
    assert file_object.md5_hash == md5hash_file(url)

def test_iGEM_login_invalid_username(credentials, config, caplog):
    # A fresh browser is used so the shared logged-in session is untouched.
    credentials['username'] = 'helloinvalidusername'
    browser = mechanicalsoup.StatefulBrowser()
    assert not iGEM_login(browser, credentials, config)
    assert 'username is invalid' in caplog.text

def test_iGEM_login_invalid_password(credentials, config, caplog):
    # A fresh browser is used so the shared logged-in session is untouched.
    credentials['password'] = '<PASSWORD>'
    browser = mechanicalsoup.StatefulBrowser()
    assert not iGEM_login(browser, credentials, config)
    assert 'the password is not' in caplog.text
| StarcoderdataPython |
57179 | # coding: utf-8
"""
Utilities to handle mongoengine classes and connections.
"""
import contextlib
from pymatgen.util.serialization import pmg_serialize
from monty.json import MSONable
from mongoengine import connect
from mongoengine.context_managers import switch_collection
from mongoengine.connection import DEFAULT_CONNECTION_NAME
class DatabaseData(MSONable):
    """
    Basic class providing data to connect to a collection in the database and switching to that collection.
    Wraps mongoengine's connect and switch_collection functions.
    """

    def __init__(self, database, host=None, port=None, collection=None, username=None, password=None):
        """
        Args:
            database: name of the database
            host: the host name of the mongod instance to connect to
            port: the port that the mongod instance is running on
            collection: name of the collection
            username: username to authenticate with
            password: password to authenticate with
        """
        #TODO handle multiple collections?
        # note: if making collection a list (or a dict), make it safe for mutable default arguments, otherwise there
        # will probably be problems with the switch_collection
        self.database = database
        self.host = host
        self.port = port
        self.collection = collection
        self.username = username
        self.password = password

    @classmethod
    def from_dict(cls, d):
        """Rebuild an instance from an as_dict() serialization."""
        d = d.copy()
        d.pop("@module", None)
        d.pop("@class", None)
        return cls(**d)

    @pmg_serialize
    def as_dict(self):
        """Full serialization, including authentication credentials."""
        return dict(database=self.database, host=self.host, port=self.port, collection=self.collection,
                    username=self.username, password=self.password)

    @pmg_serialize
    def as_dict_no_credentials(self):
        """Serialization safe for logging/sharing: credentials are omitted."""
        return dict(database=self.database, host=self.host, port=self.port, collection=self.collection)

    def connect_mongoengine(self, alias=DEFAULT_CONNECTION_NAME):
        """
        Open the connection to the selected database
        """
        return connect(db=self.database, host=self.host, port=self.port, username=self.username,
                       password=self.password, alias=alias)

    @contextlib.contextmanager
    def switch_collection(self, cls):
        """
        Switches to the chosen collection using Mongoengine's switch_collection.
        """
        if self.collection:
            with switch_collection(cls, self.collection) as new_cls:
                yield new_cls
        else:
            yield cls
| StarcoderdataPython |
3369999 | __all__ = ["graphic", "play", "sound"]
from . import graphic
from . import play
from . import sound
| StarcoderdataPython |
1708510 | <filename>encore/events/tests/test_event_manager.py
#
# (C) Copyright 2011 Enthought, Inc., Austin, TX
# All right reserved.
#
# This file is open source software distributed according to the terms in LICENSE.txt
#
# Standard library imports.
import unittest
import mock
import weakref
import threading
# Local imports.
from encore.events.event_manager import EventManager, BaseEvent
from encore.events.api import (get_event_manager, set_event_manager,
BaseEventManager)
import encore.events.package_globals as package_globals
class TestEventManager(unittest.TestCase):
    """Exercises EventManager behaviour: registration, emission, listener
    bookkeeping, filtering, priorities, weak references to bound methods,
    non-blocking emits and re-entrancy."""
    def setUp(self):
        # A fresh manager per test so listener state never leaks between tests.
        self.evt_mgr = EventManager()
    def test_register(self):
        """ Test if event is successfully registered.
        """
        self.evt_mgr.register(BaseEvent)
        self.assertTrue(BaseEvent in self.evt_mgr.get_event())
    def test_emit(self):
        """ Test if events are succesfully emitted.
        """
        callback = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback)
        evt1 = BaseEvent()
        self.evt_mgr.emit(evt1)
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback.call_args, ((evt1, ), {}))
        callback2 = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback2)
        evt2 = BaseEvent()
        self.evt_mgr.emit(evt2)
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback.call_args, ((evt2, ), {}))
        self.assertEqual(callback2.call_count, 1)
        self.assertEqual(callback2.call_args, ((evt2, ), {}))
        # Exceptions in listeners should still propagate events.
        def callback3(evt):
            raise RuntimeError('i\'m just like this')
        callback3 = mock.Mock(wraps=callback3)
        callback4 = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback3)
        self.evt_mgr.connect(BaseEvent, callback4)
        evt3 = BaseEvent()
        self.evt_mgr.emit(evt3)
        self.assertEqual(callback.call_count, 3)
        self.assertEqual(callback2.call_count, 2)
        self.assertEqual(callback3.call_count, 1)
        self.assertEqual(callback4.call_count, 1)
    def test_connect(self):
        """ Test if adding connections works.
        """
        callback = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback)
        self.assertEqual(
            list(self.evt_mgr.get_listeners(BaseEvent)), [callback])
        callback2 = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback2)
        self.assertEqual(
            list(self.evt_mgr.get_listeners(BaseEvent)),
            [callback, callback2])
    def test_listeners(self):
        """ Test if correct listeners are returned.
        """
        self.assertEqual(list(self.evt_mgr.get_listeners(BaseEvent)), [])
        class MyEvt(BaseEvent):
            def __init__(self, name=1):
                super(MyEvt, self).__init__()
                self.name = name
            def callback_bound(self, evt):
                pass
            def callback_unbound(self):
                pass
        callback = mock.Mock()
        obj = MyEvt()
        self.evt_mgr.connect(BaseEvent, callback)
        self.evt_mgr.connect(MyEvt, MyEvt.callback_unbound)
        self.evt_mgr.connect(MyEvt, obj.callback_bound)
        self.assertEqual(
            list(self.evt_mgr.get_listeners(MyEvt)),
            [callback, MyEvt.callback_unbound, obj.callback_bound])
        callback2 = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback2, filter={'name': 0})
        # get listeners with filtering: filter matches MyEvt(0) but not MyEvt(1)
        self.assertEqual(
            list(self.evt_mgr.get_listeners(MyEvt(0))),
            [callback, MyEvt.callback_unbound, obj.callback_bound, callback2])
        self.assertEqual(
            list(self.evt_mgr.get_listeners(MyEvt(1))),
            [callback, MyEvt.callback_unbound, obj.callback_bound])
    def test_disconnect(self):
        """ Test if disconnecting listeners works.
        """
        callback = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback)
        evt1 = BaseEvent()
        self.evt_mgr.emit(evt1)
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback.call_args, ((evt1, ), {}))
        self.evt_mgr.disconnect(BaseEvent, callback)
        self.evt_mgr.emit(BaseEvent())
        # Counts and args unchanged: the listener did not see the second emit.
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback.call_args, ((evt1, ), {}))
    def test_disable(self):
        """ Test if temporarily disabling an event works.
        """
        class MyEvt(BaseEvent):
            def __init__(self):
                super(MyEvt, self).__init__()
        callback = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback)
        callback2 = mock.Mock()
        self.evt_mgr.connect(MyEvt, callback2)
        evt1 = BaseEvent()
        self.evt_mgr.emit(evt1)
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback.call_args, ((evt1, ), {}))
        # Disabling BaseEvent.
        self.evt_mgr.disable(BaseEvent)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(callback.call_count, 1)
        # Disabling BaseEvent should also disable MyEvt.
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 0)
        # Reenabling BaseEvent should fire notifications.
        self.evt_mgr.enable(BaseEvent)
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback2.call_count, 1)
        # Disabling MyEvt should not disable BaseEvent but only MyEvt.
        self.evt_mgr.disable(MyEvt)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(callback.call_count, 3)
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 3)
        self.assertEqual(callback2.call_count, 1)
        # Reenabling MyEvent should notify callback2.
        self.evt_mgr.enable(MyEvt)
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 4)
        self.assertEqual(callback2.call_count, 2)
        # Test for disable before any method is registered.
        class MyEvt2(BaseEvent):
            pass
        self.evt_mgr.disable(MyEvt2)
        callback = mock.Mock()
        self.evt_mgr.connect(MyEvt2, callback)
        self.evt_mgr.emit(MyEvt2())
        self.assertFalse(callback.called)
    def test_mark_as_handled(self):
        """ Test if mark_as_handled() works.
        """
        class MyEvent(BaseEvent):
            def __init__(self, veto=False):
                super(MyEvent, self).__init__()
                self.veto = veto
        def callback(evt):
            if evt.veto:
                evt.mark_as_handled()
        # Higher-priority listener may veto; lower-priority one then skips.
        callback = mock.Mock(wraps=callback)
        self.evt_mgr.connect(MyEvent, callback, priority=2)
        callback2 = mock.Mock()
        self.evt_mgr.connect(MyEvent, callback2, priority=1)
        evt1 = MyEvent()
        self.evt_mgr.emit(evt1)
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 1)
        evt2 = MyEvent(veto=True)
        self.evt_mgr.emit(evt2)
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback.call_args, ((evt2, ), {}))
        self.assertEqual(callback2.call_count, 1)
        self.assertEqual(callback2.call_args, ((evt1, ), {}))
    def test_filtering(self):
        """ Test if event filtering on arguments works.
        """
        depth = 5
        class A(object):
            # Builds a self-referential chain of `depth` instances ending in 0,
            # used to exercise dotted-attribute filters like 'prop1.a.a.a.a.a'.
            count = depth
            def __init__(self):
                A.count -= 1
                if A.count:
                    self.a = A()
                else:
                    self.a = 0
        class MyEvent(BaseEvent):
            def __init__(self, prop1="f0", prop2=True, prop3=None):
                super(MyEvent, self).__init__()
                self.prop1 = prop1
                self.prop2 = prop2
                self.prop3 = prop3
        callbacks = [mock.Mock() for i in range(8)]
        self.evt_mgr.connect(MyEvent, callbacks[0])
        self.evt_mgr.connect(MyEvent, callbacks[1], filter={'prop1': 'f2'})
        self.evt_mgr.connect(MyEvent, callbacks[2], filter={'prop2': False})
        self.evt_mgr.connect(
            MyEvent, callbacks[3], filter={'prop3': BaseEvent})
        self.evt_mgr.connect(
            MyEvent, callbacks[4], filter={'prop1': 'f2',
                                           'prop2': False})
        self.evt_mgr.connect(MyEvent, callbacks[5], filter={'prop1.real': 0})
        self.evt_mgr.connect(
            MyEvent, callbacks[6], filter={'prop1.a.a.a.a.a': 0})
        self.evt_mgr.connect(
            MyEvent, callbacks[7], filter={'prop1.a.a.a.a': 0})
        def check_count(evt, *counts):
            # Emits `evt` then asserts each mock's cumulative call count.
            self.evt_mgr.emit(evt)
            for callback, count in zip(callbacks, counts):
                self.assertEqual(callback.call_count, count)
        # Notify only 0,1
        check_count(MyEvent(prop1='f2'), 1, 1, 0, 0, 0, 0, 0, 0)
        # Notify only 0, 1, 2, 4
        check_count(MyEvent(prop1='f2', prop2=False), 2, 2, 1, 0, 1, 0, 0, 0)
        # Notify only 0, 3
        check_count(MyEvent(prop3=BaseEvent), 3, 2, 1, 1, 1, 0, 0, 0)
        # Notify only 0; (extended filter fail on AttributeError for 5)
        check_count(MyEvent(prop1=1), 4, 2, 1, 1, 1, 0, 0, 0)
        # Notify only 0 and 5 (extended attribute filter)
        check_count(MyEvent(prop1=1j), 5, 2, 1, 1, 1, 1, 0, 0)
        # Notify only 0 and 6 (deep dotted-attribute filter reaches the 0)
        check_count(MyEvent(prop1=A()), 6, 2, 1, 1, 1, 1, 1, 0)
    def test_exception(self):
        """ Test if exception in handler causes subsequent notifications.
        """
        class MyEvt(BaseEvent):
            def __init__(self, err=False):
                super(MyEvt, self).__init__()
                self.err = err
        def callback(evt):
            if evt.err:
                raise Exception('you did it')
        callback = mock.Mock(wraps=callback)
        self.evt_mgr.connect(MyEvt, callback)
        callback2 = mock.Mock()
        self.evt_mgr.connect(MyEvt, callback2)
        self.evt_mgr.emit(MyEvt(err=False))
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 1)
        self.evt_mgr.emit(MyEvt(err=True))
        # callback raised, but callback2 was still notified.
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback2.call_count, 2)
    def test_priority(self):
        """ Test if setting priority of handlers works.
        """
        class Callback(object):
            # Shared class-level list records the order listeners fire in.
            calls = []
            def __init__(self, name):
                self.name = name
            def __call__(self, evt):
                self.calls.append(self.name)
        callback = mock.Mock(wraps=Callback(name=1))
        self.evt_mgr.connect(BaseEvent, callback, priority=1)
        callback2 = mock.Mock(wraps=Callback(name=2))
        self.evt_mgr.connect(BaseEvent, callback2, priority=2)
        callback3 = mock.Mock(wraps=Callback(name=3))
        self.evt_mgr.connect(BaseEvent, callback3, priority=0)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 1)
        self.assertEqual(callback3.call_count, 1)
        # Higher priority value fires first.
        self.assertEqual(Callback.calls, [2, 1, 3])
    def test_subclass(self):
        """ Test if subclass event notifies superclass listeners.
        Cases to test:
            1. subclass event should notify superclass listeners
                even when the subclass event is not registered/connected
                even when the superclass event is added before/after subclass
            2. superclass event should not notify subclass listeners
        """
        class MyEvt(BaseEvent):
            pass
        class MyEvt2(MyEvt):
            pass
        callback = mock.Mock()
        callback2 = mock.Mock()
        self.evt_mgr.connect(MyEvt, callback)
        self.evt_mgr.connect(MyEvt2, callback2)
        # No callback called on BaseEvent
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(callback.call_count, 0)
        self.assertEqual(callback2.call_count, 0)
        # Only callback called on MyEvt
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(callback.call_count, 1)
        self.assertEqual(callback2.call_count, 0)
        # Both callbacks called on MyEvt2
        self.evt_mgr.emit(MyEvt2())
        self.assertEqual(callback.call_count, 2)
        self.assertEqual(callback2.call_count, 1)
        # Add a new subclass event
        class MyEvt3(MyEvt2):
            pass
        # Subclass event not registered
        # Both callbacks called on MyEvt3
        self.evt_mgr.emit(MyEvt3())
        self.assertEqual(callback.call_count, 3)
        self.assertEqual(callback2.call_count, 2)
    def test_event_hierarchy(self):
        """ Test whether the correct hierarchy of event classes is returned.
        """
        class MyEvt(BaseEvent):
            pass
        class MyEvt2(MyEvt):
            pass
        class MyEvt3(MyEvt):
            pass
        class MyEvt4(MyEvt2, MyEvt3):
            pass
        self.assertEqual(
            self.evt_mgr.get_event_hierarchy(BaseEvent), (BaseEvent, ))
        self.assertEqual(
            self.evt_mgr.get_event_hierarchy(MyEvt), (MyEvt, BaseEvent))
        self.assertEqual(
            self.evt_mgr.get_event_hierarchy(MyEvt2),
            (MyEvt2, MyEvt, BaseEvent))
        self.assertEqual(
            self.evt_mgr.get_event_hierarchy(MyEvt3),
            (MyEvt3, MyEvt, BaseEvent))
        # Diamond inheritance: MRO-style ordering expected.
        self.assertEqual(
            self.evt_mgr.get_event_hierarchy(MyEvt4),
            (MyEvt4, MyEvt2, MyEvt3, MyEvt, BaseEvent))
    def test_prepost_emit(self):
        """ Test whether pre/post methods of event are called correctly on emit.
        """
        call_seq = []
        class MyEvt(BaseEvent):
            def pre_emit(self):
                call_seq.append(0)
            def post_emit(self):
                call_seq.append(2)
        def callback(evt):
            call_seq.append(1)
        evt = MyEvt()
        self.evt_mgr.connect(BaseEvent, callback)
        self.evt_mgr.emit(evt)
        # Order must be pre_emit -> listeners -> post_emit.
        self.assertEqual(call_seq, list(range(3)))
    def test_reentrant_disconnect_emit(self):
        """ Test listener is called even if it is disconnected before notify.
        """
        data = []
        def callback(evt):
            data.append(0)
            # Disconnect siblings mid-emit; they should still fire this round.
            self.evt_mgr.disconnect(BaseEvent, callback2)
            self.evt_mgr.disconnect(BaseEvent, callback3)
            data.append(1)
        def callback2(evt):
            data.append(2)
        def callback3(evt):
            data.append(3)
        self.evt_mgr.connect(BaseEvent, callback)
        self.evt_mgr.connect(BaseEvent, callback2)
        self.evt_mgr.connect(BaseEvent, callback3)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(data, [0, 1, 2, 3])
    def test_lambda_connect(self):
        """ Test if lambda functions w/o references are not garbage collected.
        """
        data = []
        self.evt_mgr.connect(BaseEvent, lambda evt: data.append(1))
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(data, [1])
    def test_method_weakref(self):
        """ Test if methods do not prevent garbage collection of objects.
        """
        data = []
        class MyHeavyObject(object):
            def callback(self, evt):
                data.append(1)
        obj = MyHeavyObject()
        obj_wr = weakref.ref(obj)
        self.evt_mgr.connect(BaseEvent, obj.callback)
        del obj
        # Now there should be no references to obj.
        self.assertEqual(obj_wr(), None)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(data, [])
    def test_method_call(self):
        """ Test if instance methods are called.
        """
        data = []
        class MyHeavyObject(BaseEvent):
            def callback(self, evt):
                data.append(1)
            def callback_unbound(self):
                data.append(2)
        obj = MyHeavyObject()
        obj_wr = weakref.ref(obj)
        self.evt_mgr.connect(BaseEvent, obj.callback)
        self.evt_mgr.connect(BaseEvent, MyHeavyObject.callback_unbound)
        self.assertTrue(obj_wr() is not None)
        self.evt_mgr.emit(obj)
        self.assertEqual(data, [1, 2])
    def test_method_collect(self):
        """ Test if object garbage collection disconnects listener method.
        """
        data = []
        class MyHeavyObject(object):
            def callback(self, evt):
                data.append(1)
        obj = MyHeavyObject()
        obj_wr = weakref.ref(obj)
        self.evt_mgr.connect(BaseEvent, obj.callback)
        del obj
        # Now there should be no references to obj.
        self.assertEqual(obj_wr(), None)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(data, [])
        self.assertEqual(len(list(self.evt_mgr.get_listeners(BaseEvent))), 0)
    def test_method_disconnect(self):
        """ Test if method disconnect works.
        """
        data = []
        class MyHeavyObject(object):
            def callback(self, evt):
                data.append(1)
        obj = MyHeavyObject()
        obj_wr = weakref.ref(obj)
        self.evt_mgr.connect(BaseEvent, obj.callback)
        self.evt_mgr.disconnect(BaseEvent, obj.callback)
        del obj
        # Now there should be no references to obj.
        self.assertEqual(obj_wr(), None)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(data, [])
    def test_method_disconnect2(self):
        """ Test if method disconnect on unconnected method fails.
        """
        data = []
        class MyHeavyObject(object):
            def callback(self, evt):
                data.append(1)
            def callback2(self, evt):
                data.append(2)
        obj = MyHeavyObject()
        self.evt_mgr.connect(BaseEvent, obj.callback)
        with self.assertRaises(Exception):
            self.evt_mgr.disconnect(BaseEvent, obj.callback2)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(data, [1])
    def test_no_block(self):
        """ Test if non-blocking emit works.
        """
        data = []
        lock = threading.Lock()
        lock.acquire()
        def callback(evt):
            # callback will wait until lock is released.
            with lock:
                data.append('callback')
                data.append(threading.current_thread().name)
        self.evt_mgr.connect(BaseEvent, callback)
        t = self.evt_mgr.emit(BaseEvent(), block=False)
        # The next statement will be executed before callback returns.
        data.append('main')
        # Unblock the callback to proceed.
        lock.release()
        # Wait until the event handling finishes.
        t.join()
        data.append('main2')
        self.assertEqual(len(data), 4)
        self.assertEqual(data[0], 'main')
        self.assertEqual(data[1], 'callback')
        self.assertEqual(data[3], 'main2')
    def test_reentrant_emit(self):
        """ Test if reentrant emit works. """
        data = []
        class MyEvt(BaseEvent):
            pass
        class MyEvt2(BaseEvent):
            pass
        def callback(evt):
            # Records entry/exit around a nested emit for MyEvt.
            typ = type(evt)
            data.append(typ)
            if typ == MyEvt:
                self.evt_mgr.emit(MyEvt2())
            data.append(typ)
        self.evt_mgr.connect(MyEvt, callback)
        self.evt_mgr.connect(MyEvt2, callback)
        self.evt_mgr.emit(MyEvt())
        self.assertEqual(data, [MyEvt, MyEvt2, MyEvt2, MyEvt])
    def test_reconnect(self):
        """ Test reconnecting already connected listener. """
        calls = []
        def callback1(evt):
            calls.append(1)
        def callback2(evt):
            calls.append(2)
        # Test if reconnect disconnects previous.
        self.evt_mgr.connect(BaseEvent, callback1)
        self.evt_mgr.connect(BaseEvent, callback1)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(calls, [1])
        calls[:] = []
        # Test if sequence is changed.
        self.evt_mgr.connect(BaseEvent, callback2)
        self.evt_mgr.connect(BaseEvent, callback1)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(calls, [2, 1])
        calls[:] = []
    def test_global_event_manager(self):
        """ Test if getting/setting global event manager works. """
        evt_mgr = get_event_manager()
        self.assertIsInstance(evt_mgr, BaseEventManager)
        # Reset the global event_manager
        package_globals._event_manager = None
        set_event_manager(self.evt_mgr)
        self.assertEqual(self.evt_mgr, get_event_manager())
        # Setting a second global manager must be rejected.
        self.assertRaises(ValueError, lambda: set_event_manager(evt_mgr))
class TracingTests(unittest.TestCase):
    """Tests for EventManager's trace hook: observing connect/emit/listen/
    disconnect actions and vetoing them by returning True from the tracer."""
    def setUp(self):
        self.evt_mgr = EventManager()
        # Flat chronological list of (name, method, args) trace records.
        self.traces = []
        # Same records grouped by action name ('connect', 'emit', 'listen', ...).
        self.tracedict = {}
        # Optional predicate deciding which actions trace_func_veto vetoes.
        self.veto_condition = None
    def trace_func(self, name, method, args):
        """ Trace function for event manager. """
        save = (name, method, args)
        self.traces.append(save)
        self.tracedict.setdefault(name, []).append(save)
    def trace_func_veto(self, name, method, args):
        """ Trace function to veto actions. """
        self.trace_func(name, method, args)
        # NOTE: with veto_condition None, *every* action is vetoed.
        if self.veto_condition is None or self.veto_condition(name, method,
                                                              args):
            return True
    def test_set_trace(self):
        """ Test whether setting trace method works. """
        # NOTE(review): assertTrue(len(...), N) only checks truthiness; the N
        # is silently used as the failure message — assertEqual was likely meant.
        self.evt_mgr.set_trace(self.trace_func)
        self.evt_mgr.emit(BaseEvent())
        self.assertTrue(len(self.traces), 1)
        self.evt_mgr.set_trace(None)
        self.evt_mgr.emit(BaseEvent())
        self.assertTrue(len(self.traces), 1)
    def test_trace_emit(self):
        """ Test whether trace works for all actions. """
        self.evt_mgr.set_trace(self.trace_func)
        self.evt_mgr.emit(BaseEvent())
        self.assertTrue(len(self.traces), 1)
        self.assertEqual(len(self.tracedict['emit']), 1)
        callback1 = mock.Mock()
        self.evt_mgr.connect(BaseEvent, callback1)
        self.assertTrue(len(self.traces), 2)
        self.assertEqual(len(self.tracedict['connect']), 1)
        self.evt_mgr.emit(BaseEvent())
        self.assertTrue(len(self.traces), 4)
        self.assertEqual(len(self.tracedict['emit']), 2)
        self.assertEqual(len(self.tracedict['listen']), 1)
        self.assertEqual(callback1.call_count, 1)
        self.evt_mgr.disconnect(BaseEvent, callback1)
        self.assertTrue(len(self.traces), 5)
        self.assertEqual(len(self.tracedict['disconnect']), 1)
        self.assertEqual(callback1.call_count, 1)
    def test_trace_veto(self):
        """ Test whether vetoing of actions works. """
        callback1 = mock.Mock()
        callback2 = mock.Mock()
        self.evt_mgr.set_trace(self.trace_func_veto)
        # veto_condition is still None here, so connect and listen are vetoed.
        self.evt_mgr.connect(BaseEvent, callback1)
        self.evt_mgr.emit(BaseEvent())
        self.assertTrue(len(self.traces), 2)
        self.assertEqual(len(self.tracedict['connect']), 1)
        self.assertEqual(len(self.tracedict['emit']), 1)
        self.assertEqual(len(self.tracedict.get('listen', [])), 0)
        self.assertFalse(callback1.called)
        # Disable calling of callback1
        self.veto_condition = lambda name, method, args: name == 'listen' and method == callback1
        self.evt_mgr.connect(BaseEvent, callback1)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(len(self.tracedict['connect']), 2)
        self.assertEqual(len(self.tracedict['emit']), 2)
        self.assertEqual(len(self.tracedict['listen']), 1)
        self.assertFalse(callback1.called)
        # Ensure callback2 is still called.
        self.evt_mgr.connect(BaseEvent, callback2)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(len(self.tracedict['connect']), 3)
        self.assertEqual(len(self.tracedict['emit']), 3)
        self.assertEqual(len(self.tracedict['listen']), 3)
        self.assertFalse(callback1.called)
        self.assertTrue(callback2.called)
        # Disable tracing.
        self.evt_mgr.set_trace(None)
        self.evt_mgr.emit(BaseEvent())
        self.assertEqual(len(self.tracedict['connect']), 3)
        self.assertEqual(len(self.tracedict['emit']), 3)
        self.assertEqual(len(self.tracedict['listen']), 3)
        self.assertTrue(callback1.called)
        self.assertEqual(callback2.call_count, 2)
# Run the full test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3296894 | <filename>utils/confmodif.py
import os
def conf_file_modify(pop):
    """Write the NEAT feedforward configuration file with the given population.

    Args:
        pop: population size; written verbatim into the ``pop_size`` option.

    Side effects:
        Overwrites ``utils/config-feedforward.txt`` (the ``utils`` directory
        must already exist). All other option values are fixed.

    Fixes over the original: the file handle was never closed (resource leak),
    and the content was emitted through ~70 separate ``write()`` calls; a
    single template plus a ``with`` block produces byte-identical output.
    """
    config_text = f"""[NEAT]
fitness_criterion = max
fitness_threshold = 50
pop_size = {pop}
reset_on_extinction = False

[DefaultGenome]
# node activation options
activation_default = tanh
activation_mutate_rate = 0.0
activation_options = tanh

# node aggregation options
aggregation_default = sum
aggregation_mutate_rate = 0.0
aggregation_options = sum

# node bias options
bias_init_mean = 0.0
bias_init_stdev = 1.0
bias_max_value = 30.0
bias_min_value = -30.0
bias_mutate_power = 0.5
bias_mutate_rate = 0.7
bias_replace_rate = 0.1

# genome compatibility options
compatibility_disjoint_coefficient = 1.0
compatibility_weight_coefficient = 0.5

# connection add/remove rates
conn_add_prob = 0.5
conn_delete_prob = 0.5

# connection enable options
enabled_default = True
enabled_mutate_rate = 0.01

feed_forward = True
initial_connection = full

# node add/remove rates
node_add_prob = 0.2
node_delete_prob = 0.2

# network parameters
num_hidden = 0
num_inputs = 3
num_outputs = 1

# node response options
response_init_mean = 1.0
response_init_stdev = 0.0
response_max_value = 30.0
response_min_value = -30.0
response_mutate_power = 0.0
response_mutate_rate = 0.0
response_replace_rate = 0.0

# connection weight options
weight_init_mean = 0.0
weight_init_stdev = 1.0
weight_max_value = 30
weight_min_value = -30
weight_mutate_power = 0.5
weight_mutate_rate = 0.8
weight_replace_rate = 0.1

[DefaultSpeciesSet]
compatibility_threshold = 3.0

[DefaultStagnation]
species_fitness_func = max
max_stagnation = 20
species_elitism = 2

[DefaultReproduction]
elitism = 2
survival_threshold = 0.2
"""
    # Context manager guarantees the handle is flushed and closed.
    with open(os.path.join("utils", "config-feedforward.txt"), "w") as config_file:
        config_file.write(config_text)
4832477 | <reponame>sm2774us/amazon_interview_prep_2021<filename>solutions/python3/738.py<gh_stars>10-100
class Solution:
    def monotoneIncreasingDigits(self, N):
        """Return the largest number <= N whose digits are non-decreasing.

        :type N: int
        :rtype: int
        """
        digits = list(str(N))
        # Sweep right-to-left: whenever a digit exceeds its successor,
        # decrement it and remember that everything after it must become 9.
        nine_from = len(digits)
        for i in range(len(digits) - 1, 0, -1):
            if digits[i - 1] > digits[i]:
                digits[i - 1] = str(int(digits[i - 1]) - 1)
                nine_from = i
        for i in range(nine_from, len(digits)):
            digits[i] = '9'
        return int(''.join(digits))
1721554 | <filename>wagtail/contrib/simple_translation/tests/test_forms.py
from django.forms import CheckboxInput, HiddenInput
from django.test import TestCase, override_settings
from wagtail.contrib.simple_translation.forms import SubmitTranslationForm
from wagtail.core.models import Locale, Page
from wagtail.tests.i18n.models import TestPage
from wagtail.tests.utils import WagtailTestUtils
@override_settings(
    LANGUAGES=[
        ("en", "English"),
        ("fr", "French"),
        ("de", "German"),
    ],
    WAGTAIL_CONTENT_LANGUAGES=[
        ("en", "English"),
        ("fr", "French"),
        ("de", "German"),
    ],
)
class TestSubmitPageTranslation(WagtailTestUtils, TestCase):
    """Tests for SubmitTranslationForm's dynamic fields.

    Builds a small tree (homepage translated into all three locales, a blog
    index and a blog post existing only in English) and checks how the form
    configures its ``include_subtree``, ``locales`` and ``select_all`` fields
    for each page.
    """
    def setUp(self):
        # Homepage exists in en/fr/de; blog index and post only in en.
        self.en_locale = Locale.objects.first()
        self.fr_locale = Locale.objects.create(language_code="fr")
        self.de_locale = Locale.objects.create(language_code="de")
        self.en_homepage = Page.objects.get(depth=2)
        self.fr_homepage = self.en_homepage.copy_for_translation(self.fr_locale)
        self.de_homepage = self.en_homepage.copy_for_translation(self.de_locale)
        self.en_blog_index = TestPage(title="Blog", slug="blog")
        self.en_homepage.add_child(instance=self.en_blog_index)
        self.en_blog_post = TestPage(title="Blog post", slug="blog-post")
        self.en_blog_index.add_child(instance=self.en_blog_post)
    def test_include_subtree(self):
        """include_subtree is hidden for leaf pages, shown with a child count otherwise."""
        form = SubmitTranslationForm(instance=self.en_blog_post)
        self.assertIsInstance(form.fields["include_subtree"].widget, HiddenInput)
        form = SubmitTranslationForm(instance=self.en_blog_index)
        self.assertIsInstance(form.fields["include_subtree"].widget, CheckboxInput)
        self.assertEqual(
            form.fields["include_subtree"].label, "Include subtree (1 page)"
        )
        form = SubmitTranslationForm(instance=self.en_homepage)
        self.assertEqual(
            form.fields["include_subtree"].label, "Include subtree (2 pages)"
        )
    def test_locales_queryset(self):
        """locales offers only the locales the page is not yet translated into."""
        # Homepage is translated to all locales.
        form = SubmitTranslationForm(instance=self.en_homepage)
        self.assertEqual(
            list(
                form.fields["locales"].queryset.values_list("language_code", flat=True)
            ),
            [],
        )
        # Blog index can be translated to `de` and `fr`.
        form = SubmitTranslationForm(instance=self.en_blog_index)
        self.assertEqual(
            list(
                form.fields["locales"].queryset.values_list("language_code", flat=True)
            ),
            ["de", "fr"],
        )
        # Blog post can be translated to `de` and `fr`.
        form = SubmitTranslationForm(instance=self.en_blog_post)
        self.assertEqual(
            list(
                form.fields["locales"].queryset.values_list("language_code", flat=True)
            ),
            ["de", "fr"],
        )
    def test_select_all(self):
        """select_all is hidden when there are no target locales left."""
        form = SubmitTranslationForm(instance=self.en_homepage)
        # Homepage is translated to all locales.
        self.assertIsInstance(form.fields["select_all"].widget, HiddenInput)
        form = SubmitTranslationForm(instance=self.en_blog_index)
        # Blog post can be translated to `de` and `fr`.
        self.assertIsInstance(form.fields["select_all"].widget, CheckboxInput)
| StarcoderdataPython |
108256 | import numpy as np
import random
import time
from sudoku.node import Node
class Sudoku():
def __init__(self, size=9, custom=None, verbose=False, debug=False):
# assume size is perfect square (TODO: assert square)
# size is defined as the length of one side
"""
Custom should be a list of lists containing each row of the sudoku.
Empty spots should be represented by a 0.
"""
self.verbose = verbose
self.debug = debug
self.size = size
self._tilesize = int(np.sqrt(size))
initstart = time.time()
self.nodes, self._rows, self._cols, self._tiles = self.initnodes()
self.connect_nodes()
after_init = time.time() - initstart
self.print(f'Node initialisation took {after_init}s')
if custom is not None:
startcustom = time.time()
self.fillgrid(custom)
self.print(f'Loading custom input took {time.time() - startcustom}s')
def get_all_rows(self):
return self._rows
def get_row(self, row):
return self._rows[row]
def get_col(self, col):
return self._cols[col]
def get_tile(self, tile):
return self._tiles[tile]
def initnodes(self):
nodes, rows, cols, tiles = [], [[] for _ in range(self.size)], [[] for _ in range(self.size)], [[] for _ in range(self.size)]
for row in range(self.size):
for col in range(self.size):
node = Node(row, col)
nodes.append(node)
rows[row].append(node)
cols[col].append(node)
# Tiles are for example the 3*3 squares in default sudoku
tilenr = self.calculate_tile(row, col)
tiles[tilenr].append(node)
return nodes, rows, cols, tiles
def calculate_tile(self, row, col):
tilerow = row // self._tilesize
tilecol = col // self._tilesize
return tilerow * self._tilesize + tilecol
def connect_nodes(self):
for node in self.nodes:
for connected_node in self.get_row(node.row) + self.get_col(node.col) + self.get_tile(self.calculate_tile(node.row, node.col)):
node.connected_nodes.add(connected_node)
node.connected_nodes -= set([node])
def fillgrid(self, custom):
try:
for i, row in enumerate(self._rows):
for j, node in enumerate(row):
if custom[i][j] != 0:
node.original = True
node.value = custom[i][j]
except IndexError:
raise IndexError("Custom sudoku layout was not of the right format!")
except Exception as e: # Other error, just raise
raise e
self.print("Custom input submitted and processed:")
self.print(self)
@property
def empty(self):
empty = 0
for node in self.nodes:
if node.value == 0:
empty += 1
self.print(f'{empty} empty values')
return empty
@property
def is_valid(self):
for node in self.nodes:
if not node.is_valid:
return False
return True
def print(self, msg):
if self.verbose:
print(msg)
def equals(self, other):
try:
for i, row in enumerate(self._rows):
for j, node in enumerate(row):
if not node.equals(other.get_row(i)[j]):
return False
except Exception:
return False
return True
def __eq__(self, other):
if not isinstance(other, Sudoku):
return False
return self.equals(other)
def __ne__(self, other):
if not isinstance(other, Sudoku):
return False
return not self.equals(other)
def copy(self):
"""
Returns new sudoku instance with new nodes containing the same values.
"""
custom_input = [[node.value for node in row] for row in self._rows]
self.print('Copying data into new Sudoku.')
newSudoku = Sudoku(size=self.size, custom=custom_input, verbose=self.verbose)
self.print('Verifying data of new Sudoku.')
# Check for original
for node in self.nodes:
for newnode in newSudoku.nodes:
if node.equals(newnode):
newnode.original = node.original
self.print('Data verified.\n')
return newSudoku
def get_options(self, node):
return list(set([i for i in range(1, self.size + 1)]) - node.get_neighbor_values())
def __str__(self):
result = ""
for row in self._rows:
result += str([node.value for node in row]) + '\n'
return result
def solve_smart(self, returnBranching=False, test_unique=False):
to_solve = self.copy()
# This needs to be an object to be easily modified in executeFill
unique = {'solved_once': False} # Used in testing uniqueness
def gather_best_node(sudoku):
"""
Searches nodes with least amount of options, selects one randomly
"""
best_nodes = []
current_min_options = sudoku.size
# Gather a list of nodes with the least
for node in sudoku.nodes:
if not node.value == 0:
continue
options = sudoku.get_options(node)
if len(options) < current_min_options:
# New best node found
best_nodes = [node]
current_min_options = len(options)
elif len(options) == current_min_options:
best_nodes.append(node)
return random.choice(best_nodes) if len(best_nodes) != 0 else None
def executeFill(depth=0):
if self.debug and depth % 50 == 0 and depth != 0:
to_solve.print(f'On rec depth {depth}')
to_solve.print(to_solve)
node = gather_best_node(to_solve)
if node is None:
return {'result': True, 'branchfactor': 1}
options = to_solve.get_options(node)
random.shuffle(options)
branch = 1 # for detetermining branch factor (difficulty)
for option in options:
node.value = option
results = executeFill(depth=depth + 1)
if results['result']:
if test_unique and unique['solved_once']:
# not unique, return as a valid response
return {'result': True}
elif test_unique and not unique['solved_once']:
# first solution found, keep searching
# while keeping track of solution found
unique['solved_once'] = True
continue
else:
if returnBranching:
branch = (branch - 1)**2
branch += results['branchfactor'] # keeping summation going
return {'result': True, 'branchfactor': branch}
branch += 1
# base case
node.value = 0
return {'result': False}
queue = [node for node in to_solve.nodes if not node.original]
if len(queue) == 0:
# The sudoku was already completely full, check if valid or not
if not to_solve.is_valid:
to_solve.print("Given solution is not valid!")
to_solve.print(to_solve)
return False
else:
to_solve.print("Success! Given solution was valid!")
to_solve.print(to_solve)
return True
to_solve.print('Trying to fill board...')
starttime = time.time()
executionResults = executeFill()
interval = time.time() - starttime
to_solve.calculation_time = interval * 1000 # Calc_time in ms
if (not executionResults['result']) or (not to_solve.is_valid):
if test_unique and unique['solved_once']:
return True
to_solve.print("Unable to fill board!")
raise Exception("Unable to fill board!")
else: # Successfully filled the board!
if test_unique:
return not unique['solved_once']
branchingFactor = executionResults.get('branchfactor', None)
to_solve.print("Filled board!")
to_solve.print(f"\nSolution:\n{to_solve}")
to_solve.print(f"Solution found in {interval}s")
if returnBranching:
return to_solve, branchingFactor
return to_solve
    @property
    def is_unique(self):
        """True iff the sudoku has exactly one solution.

        Delegates to solve_smart(test_unique=True), which keeps searching
        after the first solution and reports whether a second one exists.
        """
        return self.solve_smart(test_unique=True)
    def _reset_random_node(self):
        """Clear (set to 0) one randomly chosen node.

        Used by make_puzzle to punch holes into a filled grid.  Note that the
        choice is over *all* nodes, so an already-empty node may be picked.
        """
        random.choice(self.nodes).value = 0
        return True
    def make_puzzle(self, diff=500, retry=5):
        """Derive a puzzle (grid with holes) from this fully solved grid.

        Repeatedly clears random cells while the puzzle stays uniquely
        solvable, until the estimated difficulty reaches ``diff``.

        Args:
            diff: target difficulty score to reach.
            retry: how many consecutive non-unique removals to tolerate
                before giving up and returning the last good state.

        Returns:
            (puzzle, difficulty) tuple.
        """
        if not self.is_valid:
            # Self is assumed to be a filled grid
            raise ValueError('Sudoku should be a filled grid in order to make a puzzle.')
        puzzle = self.copy()
        cur_diff = 0
        tries = 0
        while diff > cur_diff:
            # Keep the previous state so a bad removal can be rolled back.
            prev_diff = cur_diff
            prev_puzzle = puzzle.copy()
            puzzle._reset_random_node()
            if not puzzle.is_unique:
                # Puzzle was not unique anymore: if too many retries, return previous iteration
                tries += 1
                if tries > retry:
                    puzzle.print('Retried too much!')
                    return prev_puzzle, prev_diff
                else:
                    # Roll back the removal and try a different node.
                    puzzle, cur_diff = prev_puzzle, prev_diff
            else:
                tries = 0
                cur_diff = puzzle.estimate_difficulty(iterations=50)
                # Sometimes difficulty lowers, only take max diff
                if (cur_diff < prev_diff):
                    puzzle, cur_diff = prev_puzzle, prev_diff
        return puzzle, cur_diff
def _diff_from_branching(self, branching):
return branching * 100 + self.empty
def estimate_difficulty(self, iterations=20):
total = 0
for i in range(iterations):
total += self._diff_from_branching(self.solve_smart(returnBranching=True)[1])
return int(total / iterations)
| StarcoderdataPython |
3289813 | from pyokofen.utils import (
OkofenDefinition,
OkofenDefinitionHelperMixin,
temperature_format,
)
class Sk(OkofenDefinitionHelperMixin, OkofenDefinition):
    def __init__(self, data):
        """Solar circuit ("sk") section of the Okofen payload.

        Temperature fields are normalised through temperature_format
        (raw scaling happens there); all other fields are stored verbatim.
        """
        super().__init__("sk")
        # (key, converter) pairs, kept in the original insertion order.
        field_spec = (
            ("L_koll_temp", temperature_format),  # e.g. 405
            ("L_spu", temperature_format),        # e.g. 352
            ("L_pump", None),                     # e.g. 0
            ("L_state", None),                    # e.g. 65536
            ("L_statetext", None),                # e.g. "Tmin PanSol pas atteinte"
            ("mode", None),                       # e.g. 1
            ("cooling", None),                    # e.g. 0
            ("spu_max", temperature_format),      # e.g. 800
            ("name", None),
        )
        for key, convert in field_spec:
            raw = data[key]
            self.set(key, convert(raw) if convert else raw)
| StarcoderdataPython |
3331129 | <filename>scale/queue/migrations/0004_remove_queue_is_job_type_paused.py
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Drop the unused ``is_job_type_paused`` flag from the queue model."""
    dependencies = [
        ('queue', '0003_auto_20151023_1104'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='queue',
            name='is_job_type_paused',
        ),
    ]
| StarcoderdataPython |
3314157 | """/**
* @author [<NAME>]
* @email [<EMAIL>]
* @create date 2020-08-13 13:31:28
* @modify date 2020-08-13 13:31:36
* @desc [
Contains:
- logger
- log decorator
- log_all function
Logger level is accessed through Lambda environment variable: log_level
Logger levels described below:
50 Critical
40 Error
30 Warning
20 Info
10 Debug
0 Notset
]
*/
"""
##########
# Imports
##########
from functools import wraps, partial
from logging import getLogger
import os
##########
# Logger
##########
# Module logger; the level comes from the Lambda environment variable
# ``log_level`` (numeric levels are listed in the module docstring).
logger = getLogger(__name__)
try:
    log_level = int(os.environ['log_level'])  # Set in Lambda environment variable
except (KeyError, ValueError, TypeError):
    # Missing or non-numeric value: default to DEBUG (10).  The previous
    # bare ``except:`` silently swallowed *every* exception.
    log_level = 10
logger.setLevel(log_level)
##########
# Decorator
##########
def log_func_name(func, *args, **kwargs):
    """Decorator: DEBUG-log the wrapped function's name on every call,
    then delegate to it unchanged."""
    @wraps(func)
    def _named_call(*args, **kwargs):
        logger.debug(f"FUNC: {func.__name__}")
        return func(*args, **kwargs)
    return _named_call
##########
# Log_all func
##########
@log_func_name
def log_all(*args, log_level: int = 10) -> None:
    """Log every argument at the requested ``log_level``.

    Args:
        args: objects to log, one record each.
        log_level: numeric logging level (10/20/30/40/50).

    Unknown levels fall back to debug instead of raising.
    """
    log_level_dict = {
        10: logger.debug,
        20: logger.info,
        30: logger.warning,
        40: logger.error,
        50: logger.critical,  # was missing: log_level=50 raised KeyError
    }
    # .get with a default so an unrecognised level cannot blow up logging.
    log_type = log_level_dict.get(log_level, logger.debug)
    for arg in args:
        log_type(arg)
    return
| StarcoderdataPython |
3287547 | <reponame>DrewLazzeriKitware/trame
import venv
from trame import update_state, change
from trame.html import vuetify, paraview
from trame.layouts import SinglePage
from paraview import simple
# -----------------------------------------------------------------------------
# ParaView code
# -----------------------------------------------------------------------------
# Initial cone tessellation (facet count) shown by the slider.
DEFAULT_RESOLUTION = 6
cone = simple.Cone()
representation = simple.Show(cone)  # attach the cone to the active view
view = simple.Render()
@change("resolution")
def update_cone(resolution, **kwargs):
    """Re-tessellate the cone whenever the shared "resolution" state changes."""
    cone.Resolution = resolution
    html_view.update()  # push the new rendering to connected clients
def update_reset_resolution():
    """Reset the shared "resolution" state back to its default value."""
    update_state("resolution", DEFAULT_RESOLUTION)
# -----------------------------------------------------------------------------
# GUI
# -----------------------------------------------------------------------------
# Remote-rendered view widget; ref="view" lets JS reach it via $refs.view.
html_view = paraview.VtkRemoteView(view, ref="view")
layout = SinglePage("ParaView cone", on_ready=update_cone)
# Clicking the logo resets the camera client-side.
layout.logo.click = "$refs.view.resetCamera()"
layout.title.set_text("Cone Application")
with layout.toolbar:
    vuetify.VSpacer()
    # Slider bound to the shared "resolution" state (drives update_cone).
    vuetify.VSlider(
        v_model=("resolution", DEFAULT_RESOLUTION),
        min=3,
        max=60,
        step=1,
        hide_details=True,
        dense=True,
        style="max-width: 300px",
    )
    vuetify.VDivider(vertical=True, classes="mx-2")
    # Undo button restores the default resolution.
    with vuetify.VBtn(icon=True, click=update_reset_resolution):
        vuetify.VIcon("mdi-undo-variant")
with layout.content:
    vuetify.VContainer(
        fluid=True,
        classes="pa-0 fill-height",
        children=[html_view],
    )
# -----------------------------------------------------------------------------
# Main
# -----------------------------------------------------------------------------
if __name__ == "__main__":
    # Start the trame web server for this application.
    layout.start()
| StarcoderdataPython |
3248618 | import doctest
import functools
import numpy
import tensorflow as tf
def static_shape(tensor):
    """Get a static shape of a Tensor.

    Args:
        tensor: Tensor object (or anything convertible to one).

    Return:
        List of int (None entries for unknown dimensions).
    """
    as_tensor = tf.convert_to_tensor(tensor)
    return as_tensor.get_shape().as_list()
def static_shapes(*tensors):
    """Get static shapes of several Tensors.

    Args:
        tensors: Tensor objects.

    Returns:
        List of list of int, one shape per input.
    """
    return list(map(static_shape, tensors))
def static_rank(tensor):
    """Get a static rank of a Tensor.

    Args:
        tensor: Tensor object (or anything convertible to one).

    Returns:
        int.
    """
    # static_shape() already coerces its argument through
    # tf.convert_to_tensor, so the extra conversion the original
    # performed here was redundant.
    return len(static_shape(tensor))
def dtypes(*tensors):
    """Collect the dtype attribute of each given Tensor.

    Args:
        tensors: Tensor objects.

    Returns:
        List of DTypes, one per input, in order.
    """
    return [t.dtype for t in tensors]
def func_scope(name=None, initializer=None):
    """Decorator factory that runs the wrapped call inside a variable scope.

    Args:
        name: Scope name; falls back to the wrapped function's __name__.
        initializer: Initializer for the variable scope.

    Returns:
        A decorator.
    """
    def decorator(func):
        @functools.wraps(func)
        def scoped(*args, **kwargs):
            with tf.variable_scope(name or func.__name__, initializer=initializer):
                return func(*args, **kwargs)
        return scoped
    return decorator
def on_device(device_name):
    """Decorator factory pinning everything in a function to one device.

    Args:
        device_name: Device on which all ops/variables created by the
            wrapped function are placed.

    Returns:
        A decorator.
    """
    def decorator(func):
        @functools.wraps(func)
        def placed(*args, **kwargs):
            with tf.device(device_name):
                return func(*args, **kwargs)
        return placed
    return decorator
def dimension_indices(tensor, start=0):
    """Get dimension indices of a Tensor object.

    An example below is hopefully comprehensive.
    >>> dimension_indices(tf.constant([[1, 2], [3, 4]]))
    [0, 1]

    Args:
        tensor: Tensor object.
        start: The first index of output indices.

    Returns:
        List of dimension indices.  For a rank-5 Tensor ``x``,
        ``dimension_indices(x, 2) == [2, 3, 4]``.
    """
    return list(range(static_rank(tensor)))[start:]
@func_scope()
def dtype_min(dtype):
    """Get a minimum for a TensorFlow data type.

    Args:
        dtype: TensorFlow data type.

    Returns:
        A scalar minimum of the data type.
    """
    # NOTE(review): numpy.finfo only handles floating dtypes; integer dtypes
    # would need numpy.iinfo — confirm callers only pass float dtypes.
    return tf.constant(numpy.finfo(dtype.as_numpy_dtype).min)
@func_scope()
def dtype_epsilon(dtype):
    """Get a machine epsilon for a TensorFlow data type.

    Args:
        dtype: TensorFlow data type (floating point).

    Returns:
        A scalar machine epsilon of the data type.
    """
    # Delegates the actual lookup to numpy via _numpy_epsilon.
    return tf.constant(_numpy_epsilon(dtype.as_numpy_dtype))
def _numpy_epsilon(dtype):
return numpy.finfo(dtype).eps
def flatten(tensor):
    """Flatten a multi-dimensional Tensor object.

    Args:
        tensor: Tensor object.

    Returns:
        Flattened Tensor object of a vector.
    """
    # -1 lets TensorFlow infer the single dimension's size.
    return tf.reshape(tensor, [-1])
def rename(tensor, name):
    """Rename a Tensor.

    Args:
        tensor: Tensor object.
        name: New name of the Tensor object.

    Returns:
        Renamed Tensor object.
    """
    # tf.identity creates an alias op carrying the new name.
    return tf.identity(tensor, name)
| StarcoderdataPython |
3344817 | import os
from setuptools import find_packages, setup
# The README doubles as the PyPI long description.
with open('README.rst') as fh:
    readme = fh.read()
description = 'Girder Worker tasks for Large Image.'
long_description = readme
def prerelease_local_scheme(version):
    """
    Return local scheme version unless building on master in CircleCI.

    This function returns the local scheme version number
    (e.g. 0.0.0.dev<N>+g<HASH>) unless building on CircleCI for a
    pre-release, in which case it drops the hash to produce a PEP440
    compliant pre-release version number (e.g. 0.0.0.dev<N>).
    """
    # Imported lazily: setuptools_scm is only needed at build time.
    from setuptools_scm.version import get_local_node_and_date
    branch = os.getenv('CIRCLE_BRANCH')
    if branch in ('master', ):
        return ''
    return get_local_node_and_date(version)
# Package metadata; version is derived from git via setuptools-scm, with the
# repository root two directories up from this setup.py.
setup(
    name='large-image-tasks',
    use_scm_version={'root': '../..', 'local_scheme': prerelease_local_scheme},
    setup_requires=['setuptools-scm'],
    description=description,
    long_description=long_description,
    license='Apache Software License 2.0',
    author='Kitware Inc',
    author_email='<EMAIL>',
    classifiers=[
        'Development Status :: 5 - Production/Stable',
        'License :: OSI Approved :: Apache Software License',
        'Topic :: Scientific/Engineering',
        'Intended Audience :: Science/Research',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: 3.9',
        'Programming Language :: Python :: 3.10',
    ],
    install_requires=[
        # Packages required by both producer and consumer side installations
        'girder-worker-utils>=0.8.5',
        'importlib-metadata ; python_version < "3.8"',
    ],
    # Split extras: "girder" is installed server-side, "worker" on the
    # Girder Worker nodes that actually run the conversions.
    extras_require={
        'girder': [
            # Dependencies required on the producer (Girder) side.
            'large-image-converter',
            'girder-worker[girder]>=0.6.0',
        ],
        'worker': [
            # Dependencies required on the consumer (Girder Worker) side.
            'large-image-converter[sources]',
            'girder-worker[worker]>=0.6.0',
        ],
    },
    python_requires='>=3.6',
    entry_points={
        'girder_worker_plugins': [
            'large_image_tasks = large_image_tasks:LargeImageTasks',
        ]
    },
    packages=find_packages(),
)
| StarcoderdataPython |
3264039 | <reponame>lari/VWsFriend
import logging
from sqlalchemy import and_
from sqlalchemy.exc import IntegrityError
from vwsfriend.model.climatization import Climatization
from weconnect.addressable import AddressableLeaf
LOG = logging.getLogger("VWsFriend")
class ClimatizationAgent():
    """Persists WeConnect climatisation status changes into the database.

    On construction it loads the most recent stored Climatization row for the
    vehicle and subscribes to carCapturedTimestamp updates so new states are
    appended as they arrive.
    """
    def __init__(self, session, vehicle):
        # session: SQLAlchemy session; vehicle: local vehicle model with an
        # attached weConnectVehicle (may be None when offline).
        self.session = session
        self.vehicle = vehicle
        # Latest known climatisation record (if any) for change detection.
        self.climate = session.query(Climatization).filter(and_(Climatization.vehicle == vehicle,
                                                                Climatization.carCapturedTimestamp.isnot(None))) \
            .order_by(Climatization.carCapturedTimestamp.desc()).first()
        # register for updates:
        if self.vehicle.weConnectVehicle is not None:
            if self.vehicle.weConnectVehicle.statusExists('climatisation', 'climatisationStatus') \
                    and self.vehicle.weConnectVehicle.domains['climatisation']['climatisationStatus'].enabled:
                self.vehicle.weConnectVehicle.domains['climatisation']['climatisationStatus'].carCapturedTimestamp.addObserver(
                    self.__onCarCapturedTimestampChange,
                    AddressableLeaf.ObserverEvent.VALUE_CHANGED,
                    onUpdateComplete=True)
                # NOTE(review): called with (None, None) here, which the
                # handler ignores (element is None) — confirm whether the
                # initial state was meant to be persisted immediately.
                self.__onCarCapturedTimestampChange(None, None)
    def __onCarCapturedTimestampChange(self, element, flags):
        """Observer callback: store a new Climatization row when the captured
        state actually differs from the last persisted one."""
        if element is not None and element.value is not None:
            climateStatus = self.vehicle.weConnectVehicle.domains['climatisation']['climatisationStatus']
            current_remainingClimatisationTime_min = None
            current_climatisationState = None
            if climateStatus.remainingClimatisationTime_min.enabled:
                current_remainingClimatisationTime_min = climateStatus.remainingClimatisationTime_min.value
            if climateStatus.climatisationState.enabled:
                current_climatisationState = climateStatus.climatisationState.value
            # Only persist when the timestamp moved AND a tracked field changed.
            if self.climate is None or (self.climate.carCapturedTimestamp != climateStatus.carCapturedTimestamp.value and (
                    self.climate.remainingClimatisationTime_min != current_remainingClimatisationTime_min
                    or self.climate.climatisationState != current_climatisationState)):
                self.climate = Climatization(self.vehicle, climateStatus.carCapturedTimestamp.value, current_remainingClimatisationTime_min,
                                             current_climatisationState)
                try:
                    with self.session.begin_nested():
                        self.session.add(self.climate)
                    self.session.commit()
                except IntegrityError as err:
                    LOG.warning('Could not add climatization entry to the database, this is usually due to an error in the WeConnect API (%s)', err)
    def commit(self):
        # Intentionally a no-op: rows are committed eagerly in the observer.
        pass
| StarcoderdataPython |
1642450 | from drf_spectacular.types import OpenApiTypes
from urllib.parse import unquote
import requests
from requests.models import HTTPBasicAuth
from rest_framework.views import APIView
from rest_framework.generics import ListAPIView, ListCreateAPIView, RetrieveDestroyAPIView, get_object_or_404
from rest_framework.response import Response
from rest_framework import exceptions, status, permissions
from rest_framework.decorators import action, api_view, permission_classes
from drf_spectacular.utils import OpenApiExample, extend_schema
from django.forms.models import model_to_dict
from django.db.models.query_utils import Q
from posts.models import Post, Like
from posts.serializers import LikeSerializer, PostSerializer
from nodes.models import connector_service, Node
from posts.utils import *
from posts.utils import try_get
from .serializers import AuthorSerializer, FollowSerializer, InboxObjectSerializer
from .pagination import *
from .models import Author, Follow, Follow, InboxObject
# https://www.django-rest-framework.org/tutorial/3-class-based-views/
@api_view(['GET'])
@permission_classes([permissions.AllowAny])
def proxy(request, object_url):
    """
    [INTERNAL]
    get any json from that url (use node auth) and return to frontend
    """
    print("proxying object: url: ", object_url)
    res = try_get(unquote(object_url))
    try:
        # Both json and simplejson signal bad payloads with a ValueError
        # subclass; the previous bare ``except:`` also swallowed unrelated
        # errors (including KeyboardInterrupt).
        data = res.json()
    except ValueError:
        raise exceptions.ParseError("remote server response is not valid json")
    return Response(data)
class AuthorList(ListAPIView):
    """GET: list every *internal* (locally hosted) author on this server."""
    serializer_class = AuthorSerializer
    pagination_class = AuthorsPagination
    # used by the ListAPIView super class
    def get_queryset(self):
        # NOTE(review): is_internal is evaluated in Python, so all author rows
        # are fetched before filtering; revisit if the table grows large.
        all_authors = Author.objects.all()
        return [author for author in all_authors if author.is_internal]
    @extend_schema(
        # specify response format for list: https://drf-spectacular.readthedocs.io/en/latest/faq.html?highlight=list#i-m-using-action-detail-false-but-the-response-schema-is-not-a-list
        responses=AuthorSerializer(many=True)
    )
    def get(self, request, *args, **kwargs):
        """
        ## Description:
        List all authors in this server.
        ## Responses:
        **200**: for successful GET request
        """
        return super().list(request, *args, **kwargs)
class AuthorDetail(APIView):
    """GET/POST a single author's profile."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    def get_serializer_class(self):
        # used for schema generation for all methods
        # https://drf-spectacular.readthedocs.io/en/latest/customization.html#step-1-queryset-and-serializer-class
        return AuthorSerializer
    def get(self, request, author_id):
        """
        ## Description:
        Get author profile
        ## Responses:
        **200**: for successful GET request <br>
        **404**: if the author id does not exist
        """
        # NOTE(review): get() looks up by pk only while post() accepts pk OR
        # url — confirm whether the asymmetry is intentional.
        try:
            author = Author.objects.get(pk=author_id)
        except Author.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        serializer = AuthorSerializer(author, many=False)
        return Response(serializer.data)
    def post(self, request, author_id):
        """
        ## Description:
        Update author profile
        ## Responses:
        **200**: for successful POST request <br>
        **400**: if the payload failed the serializer check <br>
        **404**: if the author id does not exist
        """
        try:
            author = Author.objects.get(Q(pk=author_id) | Q(url=author_id))
        except Author.DoesNotExist:
            return Response(status=status.HTTP_404_NOT_FOUND)
        # Partial update: only the provided fields are changed.
        serializer = AuthorSerializer(author, data=request.data, partial=True)
        if serializer.is_valid():
            author = serializer.save()
            # modify url to be server path
            author.update_fields_with_request(request)
            return Response(serializer.data)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class InboxSerializerMixin:
    """Dispatches inbox content (Follow / Post / Like) to its serializer.

    Inbox items are generic relations that wrap one of three models; both
    helpers below select the matching serializer for the concrete type.
    """
    def serialize_inbox_item(self, item, context={}):
        """Serialize the content object wrapped by a stored InboxObject."""
        model_class = item.content_type.model_class()
        if model_class is Follow:
            serializer = FollowSerializer
        elif model_class is Post:
            serializer = PostSerializer
        elif model_class is Like:
            serializer = LikeSerializer
        else:
            # Previously an unknown model fell through and raised
            # UnboundLocalError (HTTP 500); fail with an explicit 400 instead.
            raise exceptions.ParseError(
                f"unsupported inbox item type: {model_class}")
        return serializer(item.content_object, context=context).data
    def deserialize_inbox_data(self, data, context={}):
        """Build the serializer matching an incoming inbox JSON payload's
        ``type`` field; raises ParseError for missing/unknown types."""
        if not data.get('type'):
            raise exceptions.ParseError
        type = data.get('type')
        if type == Follow.get_api_type():
            serializer = FollowSerializer
        elif type == Post.get_api_type():
            serializer = PostSerializer
        elif type == Like.get_api_type():
            serializer = LikeSerializer
        else:
            # Unknown "type" values used to raise UnboundLocalError (500).
            raise exceptions.ParseError(f"unsupported inbox payload type: {type}")
        return serializer(data=data, context=context)
class InboxListView(ListCreateAPIView, InboxSerializerMixin):
    """GET an author's inbox (owner only, JWT) / POST an object into it
    (remote servers, basic auth)."""
    # permission_classes = [permissions.IsAuthenticated]
    pagination_class = InboxObjectsPagination
    serializer_class = InboxObjectSerializer
    def get(self, request, author_id):
        """
        ## Description:
        Get all objects for the current user. user jwt auth required
        ## Responses:
        **200**: for successful GET request <br>
        **401**: if the authenticated user is not the post's poster <br>
        **403**: if the request user is not the same as the author <br>
        **404**: if the author id does not exist
        """
        # NOTE(review): bare except — any lookup failure becomes a 404.
        try:
            author = Author.objects.get(id=author_id)
        except:
            raise exceptions.NotFound
        # has to be the current user
        # and author without a user is a foreign author
        if not author.user or request.user != author.user:
            raise exceptions.AuthenticationFailed
        inbox_objects = author.inbox_objects.all()
        paginated_inbox_objects = self.paginate_queryset(inbox_objects)
        return self.get_paginated_response([self.serialize_inbox_item(obj) for obj in paginated_inbox_objects])
    # TODO put somewhere else
    @extend_schema(
        examples=[
            OpenApiExample('A post object', value={
                "type": "post",
                "id": "http://127.0.0.1:8000/author/51914b9c-98c6-4a5c-91bf-fb55a53a92fe/posts/d8fb48fe-a014-49d9-ac4c-bfbdf94b097f/",
                "title": "Post1",
                "source": "",
                "origin": "",
                "description": "description for post1",
                "contentType": "text/markdown",
                "author": {
                    "type": "author",
                    "id": "http://127.0.0.1:8000/author/51914b9c-98c6-4a5c-91bf-fb55a53a92fe/",
                    "host": "http://127.0.0.1:8000/",
                    "displayName": "Updated!!!",
                    "url": "http://127.0.0.1:8000/author/51914b9c-98c6-4a5c-91bf-fb55a53a92fe/",
                    "github": None
                },
                "content": "# Hello",
                "count": 0,
                "published": "2021-10-22T20:58:18.072618Z",
                "visibility": "PUBLIC",
                "unlisted": False
            }),
            OpenApiExample('A like object', value={
                "type": "Like",
                "summary": "string",
                "author": {
                    "type": "author",
                    "id": "string",
                    "host": "string",
                    "displayName": "string",
                    "url": "string",
                    "github": "string"
                },
                "object": "string"
            }),
            OpenApiExample('A friend request object', value={
                "type": "Follow",
                "summary": "Greg wants to follow Lara",
                "actor": {
                    "type": "author",
                    "id": "http://127.0.0.1:5454/author/1d698d25ff008f7538453c120f581471",
                    "url": "http://127.0.0.1:5454/author/1d698d25ff008f7538453c120f581471",
                    "host": "http://127.0.0.1:5454/",
                    "displayName": "<NAME>",
                    "github": "http://github.com/gjohnson"
                },
                "object": {
                    "type": "author",
                    "id": "http://127.0.0.1:5454/author/9de17f29c12e8f97bcbbd34cc908f1baba40658e",
                    "host": "http://127.0.0.1:5454/",
                    "displayName": "<NAME>",
                    "url": "http://127.0.0.1:5454/author/9de17f29c12e8f97bcbbd34cc908f1baba40658e",
                    "github": "http://github.com/laracroft"
                }
            }),
        ],
        request={
            'application/json': OpenApiTypes.OBJECT
        },
    )
    def post(self, request, author_id):
        """
        ## Description:
        A foreign server sends some json object to the inbox. server basic auth required
        ## Responses:
        **200**: for successful POST request <br>
        **400**: if the payload failed the serializer check <br>
        **404**: if the author id does not exist
        """
        try:
            author = Author.objects.get(id=author_id)
        except:
            raise exceptions.NotFound
        serializer = self.deserialize_inbox_data(
            self.request.data, context={'author': author})
        if serializer.is_valid():
            # save the item to database, could be post or like or FR
            item = serializer.save()
            # Normalise URLs/hosts on models that support it.
            if hasattr(item, 'update_fields_with_request'):
                item.update_fields_with_request(request)
            # wrap the item in an inboxObject, links with author
            item_as_inbox = InboxObject(content_object=item, author=author)
            item_as_inbox.save()
            return Response({'req': self.request.data, 'saved': model_to_dict(item_as_inbox)})
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
class InboxDetailView(RetrieveDestroyAPIView, InboxSerializerMixin):
    """GET or DELETE a single inbox item; owner only (JWT)."""
    permission_classes = [permissions.IsAuthenticated]
    def get(self, request, author_id, inbox_id):
        """
        ## Description:
        Get an inbox item by id
        ## Responses:
        **200**: for successful GET request <br>
        **404**: if the author id or the inbox id does not exist
        """
        try:
            author = Author.objects.get(id=author_id)
        except:
            raise exceptions.NotFound('author not found')
        try:
            inbox_item = author.inbox_objects.get(id=inbox_id)
        except:
            raise exceptions.NotFound('inbox object not found')
        # has to be the current user
        # and author without a user is a foreign author
        if not author.user or request.user != author.user:
            raise exceptions.AuthenticationFailed
        # can only see your own inbox items!
        # (defensive: the lookup above is already scoped to author.inbox_objects)
        if inbox_item.author != author:
            raise exceptions.NotFound('inbox object not found')
        return Response(self.serialize_inbox_item(inbox_item))
    def delete(self, request, author_id, inbox_id):
        """
        ## Description:
        Delete an inbox item by id
        ## Responses:
        **204**: for successful DELETE request <br>
        **404**: if the author id or the inbox id does not exist
        """
        try:
            author = Author.objects.get(id=author_id)
        except:
            raise exceptions.NotFound('author not found')
        try:
            inbox_item = author.inbox_objects.get(id=inbox_id)
        except:
            raise exceptions.NotFound('inbox object not found')
        # has to be the current user
        # and author without a user is a foreign author
        if not author.user or request.user != author.user:
            raise exceptions.AuthenticationFailed
        # can only delete your own inbox items!
        if inbox_item.author != author:
            raise exceptions.NotFound('inbox object not found')
        inbox_item.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
class FollowerList(ListAPIView):
    """GET: list the authors who follow the given author (ACCEPTED only)."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    serializer_class = AuthorSerializer
    pagination_class = FollowersPagination
    def get_queryset(self):
        author_id = self.kwargs.get('author_id')
        if author_id is None:
            raise exceptions.NotFound
        try:
            author = Author.objects.get(id=author_id)
        except:
            raise exceptions.NotFound
        # find all author following this author
        return Author.objects.filter(followings__object=author, followings__status=Follow.FollowStatus.ACCEPTED)
    def get(self, request, *args, **kwargs):
        """
        ## Description:
        Get a list of author who are their followers
        ## Responses:
        **200**: for successful GET request <br>
        **404**: if the author id does not exist
        """
        return super().list(request, *args, **kwargs)
class FollowerDetail(APIView):
    """GET/PUT/DELETE a single follower of a local author, addressed by the
    follower's (possibly foreign) URL."""
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    @extend_schema(
        responses=AuthorSerializer(),
    )
    def get(self, request, author_id, foreign_author_url):
        """
        ## Description:
        check if user at the given foreign url is a follower of the local author
        ## Responses:
        **200**: for successful GET request, return <Author object of the follower> <br>
        **404**: if the author id does not exist
        """
        try:
            author = Author.objects.get(id=author_id)
        except:
            raise exceptions.NotFound
        return Response(AuthorSerializer(get_object_or_404(
            Author,
            followings__object=author,  # the author being followed
            followings__status=Follow.FollowStatus.ACCEPTED,
            url=foreign_author_url  # the foreign author following the author
        )).data)
    def delete(self, request, author_id, foreign_author_url):
        """
        ## Description:
        delete a follower by url
        ## Responses:
        **200**: for successful DELETE request <br>
        **404**: if the author id does not exist
        """
        try:
            author = Author.objects.get(id=author_id)
        except:
            raise exceptions.NotFound("local author is not found")
        try:
            # the Follow object representing this relationship
            follower_following = author.followers.get(
                actor__url=foreign_author_url)
        except:
            raise exceptions.NotFound(
                f"foreign author at {foreign_author_url} is not a follower of the local author")
        follower_following.delete()
        return Response(status=status.HTTP_204_NO_CONTENT)
    @extend_schema(
        examples=[
            OpenApiExample('A Foreign Author Paylod (Optional)', value={
                "type": "author",
                "id": "http://127.0.0.1:8000/author/change-me-123123/",
                "host": "http://127.0.0.1:8000/",
                "displayName": "Change Me",
                "url": "http://127.0.0.1:8000/author/change-me-123123/",
                "github": "https://github.com/123123123as<PASSWORD>/"
            })
        ],
        request={
            'application/json': OpenApiTypes.OBJECT
        },
    )
    def put(self, request, author_id, foreign_author_url):
        """
        ## Description:
        Add a follower (must be authenticated)
        ## Responses:
        **200**: for successful PUT request <br>
        **400**: if the payload failed the serializer check <br>
        **401**: if the authenticated user is not the post's poster <br>
        **404**: if the author id does not exist
        """
        try:
            author = Author.objects.get(id=author_id)
            if not author.user or request.user != author.user:
                raise exceptions.AuthenticationFailed
        except Author.DoesNotExist:
            raise exceptions.NotFound("author does not exist")
        # decode first if it's uri-encoded url
        foreign_author_url = unquote(foreign_author_url)
        existing_follower_set = Author.objects.filter(url=foreign_author_url)
        # sanity check: multiple cached foreign authors with the same url exist. break.
        # NOTE(review): exceptions.server_error is a handler returning a
        # Response, not an exception class — raising it yields a TypeError
        # (still a 500, but not the intended one); confirm and fix separately.
        if len(existing_follower_set) > 1:
            raise exceptions.server_error(request)
        # check if the follower is a local author
        if existing_follower_set and existing_follower_set.get().is_internal:
            # internal author: do nothing
            follower = existing_follower_set.get()
        else:
            # external author: upcreate it first
            follower_serializer = self.get_follower_serializer_from_request(
                request, foreign_author_url)
            print("foreign author url: ", foreign_author_url)
            print("follow serializer: ", follower_serializer)
            if follower_serializer.is_valid():
                if foreign_author_url != follower_serializer.validated_data['url']:
                    return Response("payload author's url does not match that in request url", status=status.HTTP_400_BAD_REQUEST)
                follower = follower_serializer.save()
            else:
                return Response(follower_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        # accept the follow request (activate the relationship), or create it if not exist already
        try:
            pending_follow = Follow.objects.get(object=author, actor=follower, status=Follow.FollowStatus.PENDING)
            pending_follow.status = Follow.FollowStatus.ACCEPTED
            pending_follow.save()
        except Follow.DoesNotExist:
            _ = Follow.objects.create(
                object=author, actor=follower, status=Follow.FollowStatus.ACCEPTED)
        except Follow.MultipleObjectsReturned:
            raise exceptions.ParseError("There exists multiple Follow objects. Please report how you reached this error")
        return Response()
    def get_follower_serializer_from_request(self, request, foreign_author_url):
        """Build an AuthorSerializer from the request body, or — when the body
        is empty — from a fetch of the foreign author's profile."""
        if request.data:
            follower_serializer = AuthorSerializer(data=request.data)
        else:
            # try fetch the foreign user first, upcreate it locally and do it again.
            # TODO server2server basic auth, refactor into server2server connection pool/service
            res = try_get(foreign_author_url)
            follower_serializer = AuthorSerializer(data=res.json())
        return follower_serializer
class FollowingList(ListAPIView):
    """[INTERNAL] GET: list this author's followings, synchronising each
    remote relationship's status with the foreign server on the way.

    NOTE(review): get_queryset performs one or two network round-trips per
    foreign following (and mutates/deletes rows) — a slow-request and
    consistency risk worth refactoring out of the request path.
    """
    permission_classes = [permissions.IsAuthenticatedOrReadOnly]
    serializer_class = FollowSerializer
    pagination_class = FollowingsPagination
    def get_queryset(self):
        try:
            author = Author.objects.get(id=self.kwargs.get('author_id'))
        except Author.DoesNotExist:
            raise exceptions.NotFound
        followings = author.followings.all()
        followings_to_delete = []
        for following in followings:
            # Local followings need no remote confirmation.
            if following.object.is_internal:
                continue
            foreign_author_url = following.object.url
            if foreign_author_url.endswith("/"):
                request_url = foreign_author_url + "followers/" + author.url
            else:
                request_url = foreign_author_url + "/followers/" + author.url
            print("following: request_url: ", request_url)
            response = try_get(request_url)
            print("following: response: ", response)
            if response.status_code > 204:
                # try again but with author.id instead of author.url
                if foreign_author_url.endswith("/"):
                    request_url = foreign_author_url + "followers/" + author.id
                else:
                    request_url = foreign_author_url + "/followers/" + author.id
                print("following: request_url: ", request_url)
                response = try_get(request_url)
                if response.status_code == 200:
                    try:
                        # Some servers answer 200 with {"result": false};
                        # treat that as "not a follower".
                        possible_body = response.json()
                        if not possible_body['result']:
                            response.status_code = 404
                    except Exception as e:
                        print("following list get: weird things happen when response was 200: ", e)
                print("following: response: ", response)
            # any status code < 400 indicate success
            if response.status_code < 400 and following.status == Follow.FollowStatus.PENDING:
                # foreign author accepted the follow request
                following.status = Follow.FollowStatus.ACCEPTED
                following.save()
            elif response.status_code >= 400 and following.status == Follow.FollowStatus.ACCEPTED:
                # foreign author removed the author as a follower
                followings_to_delete.append(following.id)
        # https://stackoverflow.com/a/34890230
        followings.filter(id__in=followings_to_delete).delete()
        return followings.exclude(id__in=followings_to_delete)
    @extend_schema(
        responses=FollowSerializer(many=True)
    )
    def get(self, request, *args, **kwargs):
        """
        **[INTERNAL]**
        ## Description:
        List all the authors that this author is currently following
        ## Responses:
        **200**: for successful GET request
        """
        return super().list(request, *args, **kwargs)
class FollowingDetail(APIView):
permission_classes = [permissions.IsAuthenticated]
@extend_schema(
responses=FollowSerializer()
)
def post(self, request, author_id, foreign_author_url):
"""
**[INTERNAL]** <br>
## Description:
the /author/<author_id>/friend_request/<foreign_author_url>/ endpoint
- author_id: anything other than slash, but we hope it's a uuid
- foreign_author_url: anything, but we hope it's a valid url.
used only by local users, jwt authentication required. <br>
Its job is to fire a POST to the foreign author's inbox with a FriendRequest json object.
## Responses:
**200**: for successful POST request <br>
**403**: if the follow request already exist <br>
**404**: if the author_id does not exist
"""
try:
author = Author.objects.get(id=author_id)
except:
return Response(status=status.HTTP_404_NOT_FOUND)
# get that foreign author's json object first
print("following: foreign author url: ", foreign_author_url)
# try without the auth
# can either be a local author being followed or foreign server does not require auth
response = requests.get(foreign_author_url)
if response.status_code != 200:
nodes = [x for x in Node.objects.all() if x.host_url in foreign_author_url]
if len(nodes) != 1:
raise exceptions.NotFound("cannot find the node from foreign author url")
node = nodes[0]
response = requests.get(foreign_author_url, auth=node.get_basic_auth_tuple())
foreign_author_json = response.json()
print("following: foreign author: ", foreign_author_json)
# check for foreign author validity
foreign_author_ser = AuthorSerializer(data=foreign_author_json)
if foreign_author_ser.is_valid():
foreign_author = foreign_author_ser.save()
if Follow.objects.filter(actor=author, object=foreign_author):
raise exceptions.PermissionDenied("duplicate follow object exists for the authors")
follow = Follow(
summary=f"{author.display_name} wants to follow {foreign_author.display_name}",
actor=author,
object=foreign_author
)
follow.save()
connector_service.notify_follow(follow, request=request)
return Response(FollowSerializer(follow).data)
return Response({'parsing foreign author': foreign_author_ser.errors}, status=status.HTTP_500_INTERNAL_SERVER_ERROR)
def delete(self, request, author_id, foreign_author_url):
"""
**[INTERNAL]** <br>
## Description:
Should be called when author_id's author no longer wants to follow foreign_author_url's author <br>
This can happen when the author is already following (status ACCEPTED) <br>
Or author wants to remove its friend/follow request (status PENDING)
## Responses:
**204**: For successful DELETE request <br>
**400**: When the author is not following the other author <br>
**404**: When the follower or followee does not exist
"""
try:
author = Author.objects.get(id=author_id)
foreign_author = Author.objects.get(url=foreign_author_url)
follow_object = Follow.objects.get(actor=author, object=foreign_author)
except Author.DoesNotExist as e:
return Response(e.message, status=status.HTTP_404_NOT_FOUND)
except Follow.DoesNotExist:
error_msg = "the follow relationship does not exist between the two authors"
return Response(error_msg, status=status.HTTP_400_BAD_REQUEST)
follow_object.delete()
if author.is_internal and foreign_author.is_internal:
return Response(status=status.HTTP_204_NO_CONTENT)
# send a request to the foreign server telling them to delete the follower
if (foreign_author_url.endswith("/")):
request_url = foreign_author_url + "followers/" + author.url
else:
request_url = foreign_author_url + "/followers/" + author.url
request_url = request_url + '/' if not request_url.endswith('/') else request_url
# try without the auth
response = requests.delete(request_url)
if response.status_code > 204:
try:
res = try_delete(request_url)
print("following:delete: response: ", res, " status: ", res.status_code, " text: ", res.text)
if (foreign_author_url.endswith("/")):
request_url = foreign_author_url + "followers/" + author.id
else:
request_url = foreign_author_url + "/followers/" + author.id
request_url = request_url + '/' if not request_url.endswith('/') else request_url
res = try_delete(request_url)
print("following:delete: response tried with id: ", res, " status: ", res.status_code, " text: ", res.text)
except Node.DoesNotExist:
print("failed to notify remote server of the unfollowing")
print("Reason: Remote Server not connected")
except requests.exceptions.RequestException as e:
print("failed to notify remote server of the unfollowing")
print("Reason: Remote Request Failed: " + e)
return Response(status=status.HTTP_204_NO_CONTENT)
class ForeignAuthorList(ListAPIView):
    """Proxy view that lists the authors hosted on a connected foreign node."""
    serializer_class = AuthorSerializer
    pagination_class = AuthorsPagination

    def get(self, request, node_id):
        """
        **[INTERNAL]** <br>
        ## Description:
        Get all authors from a foreign server node by calling their /authors/ endpoint
        ## Responses:
        Whatever the foreign server /authors/ endpoint returned to us <br>
        Or **404** if the node_id does not exist
        """
        try:
            node = Node.objects.get(pk=node_id)
        except Node.DoesNotExist:
            error_msg = "Cannot find the node with specific id"
            raise exceptions.NotFound(error_msg)
        request_url = node.host_url
        if request_url[-1] != "/":
            request_url += "/"
        # Forward pagination parameters, defaulting to the first 100 authors.
        query_params = request.query_params.dict()
        page = query_params.get("page", 1)
        size = query_params.get("size", 100)
        request_url += "authors/?page=" + str(page) + "&size=" + str(size)
        try:
            response = requests.get(request_url, auth=node.get_basic_auth_tuple())
        except requests.exceptions.RequestException as err:
            # str(err): a raw exception instance is not JSON-serializable by DRF,
            # so returning it directly would itself error out.
            return Response(str(err), status=status.HTTP_500_INTERNAL_SERVER_ERROR)
        return Response(response.json(), status=response.status_code)
| StarcoderdataPython |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
__all__ = [
'GetDedicatedVmHostResult',
'AwaitableGetDedicatedVmHostResult',
'get_dedicated_vm_host',
]
@pulumi.output_type
class GetDedicatedVmHostResult:
    """
    A collection of values returned by getDedicatedVmHost.
    """
    # NOTE: this class is generated by the Pulumi Terraform Bridge (tfgen);
    # the constructor validates each field's runtime type before storing it
    # on the instance via pulumi.set, and each @property reads it back with
    # pulumi.get.
    def __init__(__self__, availability_domain=None, compartment_id=None, dedicated_vm_host_id=None, dedicated_vm_host_shape=None, defined_tags=None, display_name=None, fault_domain=None, freeform_tags=None, id=None, remaining_memory_in_gbs=None, remaining_ocpus=None, state=None, time_created=None, total_memory_in_gbs=None, total_ocpus=None):
        if availability_domain and not isinstance(availability_domain, str):
            raise TypeError("Expected argument 'availability_domain' to be a str")
        pulumi.set(__self__, "availability_domain", availability_domain)
        if compartment_id and not isinstance(compartment_id, str):
            raise TypeError("Expected argument 'compartment_id' to be a str")
        pulumi.set(__self__, "compartment_id", compartment_id)
        if dedicated_vm_host_id and not isinstance(dedicated_vm_host_id, str):
            raise TypeError("Expected argument 'dedicated_vm_host_id' to be a str")
        pulumi.set(__self__, "dedicated_vm_host_id", dedicated_vm_host_id)
        if dedicated_vm_host_shape and not isinstance(dedicated_vm_host_shape, str):
            raise TypeError("Expected argument 'dedicated_vm_host_shape' to be a str")
        pulumi.set(__self__, "dedicated_vm_host_shape", dedicated_vm_host_shape)
        if defined_tags and not isinstance(defined_tags, dict):
            raise TypeError("Expected argument 'defined_tags' to be a dict")
        pulumi.set(__self__, "defined_tags", defined_tags)
        if display_name and not isinstance(display_name, str):
            raise TypeError("Expected argument 'display_name' to be a str")
        pulumi.set(__self__, "display_name", display_name)
        if fault_domain and not isinstance(fault_domain, str):
            raise TypeError("Expected argument 'fault_domain' to be a str")
        pulumi.set(__self__, "fault_domain", fault_domain)
        if freeform_tags and not isinstance(freeform_tags, dict):
            raise TypeError("Expected argument 'freeform_tags' to be a dict")
        pulumi.set(__self__, "freeform_tags", freeform_tags)
        if id and not isinstance(id, str):
            raise TypeError("Expected argument 'id' to be a str")
        pulumi.set(__self__, "id", id)
        if remaining_memory_in_gbs and not isinstance(remaining_memory_in_gbs, float):
            raise TypeError("Expected argument 'remaining_memory_in_gbs' to be a float")
        pulumi.set(__self__, "remaining_memory_in_gbs", remaining_memory_in_gbs)
        if remaining_ocpus and not isinstance(remaining_ocpus, float):
            raise TypeError("Expected argument 'remaining_ocpus' to be a float")
        pulumi.set(__self__, "remaining_ocpus", remaining_ocpus)
        if state and not isinstance(state, str):
            raise TypeError("Expected argument 'state' to be a str")
        pulumi.set(__self__, "state", state)
        if time_created and not isinstance(time_created, str):
            raise TypeError("Expected argument 'time_created' to be a str")
        pulumi.set(__self__, "time_created", time_created)
        if total_memory_in_gbs and not isinstance(total_memory_in_gbs, float):
            raise TypeError("Expected argument 'total_memory_in_gbs' to be a float")
        pulumi.set(__self__, "total_memory_in_gbs", total_memory_in_gbs)
        if total_ocpus and not isinstance(total_ocpus, float):
            raise TypeError("Expected argument 'total_ocpus' to be a float")
        pulumi.set(__self__, "total_ocpus", total_ocpus)
    @property
    @pulumi.getter(name="availabilityDomain")
    def availability_domain(self) -> str:
        """
        The availability domain the dedicated virtual machine host is running in. Example: `Uocm:PHX-AD-1`
        """
        return pulumi.get(self, "availability_domain")
    @property
    @pulumi.getter(name="compartmentId")
    def compartment_id(self) -> str:
        """
        The OCID of the compartment that contains the dedicated virtual machine host.
        """
        return pulumi.get(self, "compartment_id")
    @property
    @pulumi.getter(name="dedicatedVmHostId")
    def dedicated_vm_host_id(self) -> str:
        """
        The OCID of the dedicated VM host, as supplied in the lookup request.
        """
        return pulumi.get(self, "dedicated_vm_host_id")
    @property
    @pulumi.getter(name="dedicatedVmHostShape")
    def dedicated_vm_host_shape(self) -> str:
        """
        The dedicated virtual machine host shape. The shape determines the number of CPUs and other resources available for VMs.
        """
        return pulumi.get(self, "dedicated_vm_host_shape")
    @property
    @pulumi.getter(name="definedTags")
    def defined_tags(self) -> Mapping[str, Any]:
        """
        Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Operations.CostCenter": "42"}`
        """
        return pulumi.get(self, "defined_tags")
    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> str:
        """
        A user-friendly name. Does not have to be unique, and it's changeable. Avoid entering confidential information. Example: `My Dedicated Vm Host`
        """
        return pulumi.get(self, "display_name")
    @property
    @pulumi.getter(name="faultDomain")
    def fault_domain(self) -> str:
        """
        The fault domain for the dedicated virtual machine host's assigned instances. For more information, see [Fault Domains](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/regions.htm#fault).
        """
        return pulumi.get(self, "fault_domain")
    @property
    @pulumi.getter(name="freeformTags")
    def freeform_tags(self) -> Mapping[str, Any]:
        """
        Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/resourcetags.htm). Example: `{"Department": "Finance"}`
        """
        return pulumi.get(self, "freeform_tags")
    @property
    @pulumi.getter
    def id(self) -> str:
        """
        The [OCID](https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm) of the dedicated VM host.
        """
        return pulumi.get(self, "id")
    @property
    @pulumi.getter(name="remainingMemoryInGbs")
    def remaining_memory_in_gbs(self) -> float:
        """
        The current available memory of the dedicated VM host, in GBs.
        """
        return pulumi.get(self, "remaining_memory_in_gbs")
    @property
    @pulumi.getter(name="remainingOcpus")
    def remaining_ocpus(self) -> float:
        """
        The current available OCPUs of the dedicated VM host.
        """
        return pulumi.get(self, "remaining_ocpus")
    @property
    @pulumi.getter
    def state(self) -> str:
        """
        The current state of the dedicated VM host.
        """
        return pulumi.get(self, "state")
    @property
    @pulumi.getter(name="timeCreated")
    def time_created(self) -> str:
        """
        The date and time the dedicated VM host was created, in the format defined by [RFC3339](https://tools.ietf.org/html/rfc3339). Example: `2016-08-25T21:10:29.600Z`
        """
        return pulumi.get(self, "time_created")
    @property
    @pulumi.getter(name="totalMemoryInGbs")
    def total_memory_in_gbs(self) -> float:
        """
        The current total memory of the dedicated VM host, in GBs.
        """
        return pulumi.get(self, "total_memory_in_gbs")
    @property
    @pulumi.getter(name="totalOcpus")
    def total_ocpus(self) -> float:
        """
        The current total OCPUs of the dedicated VM host.
        """
        return pulumi.get(self, "total_ocpus")
class AwaitableGetDedicatedVmHostResult(GetDedicatedVmHostResult):
    # pylint: disable=using-constant-test
    def __await__(self):
        # The unreachable `yield` turns __await__ into a generator, which the
        # await protocol requires; awaiting resolves immediately with a plain
        # GetDedicatedVmHostResult copy of this object's fields.
        if False:
            yield self
        return GetDedicatedVmHostResult(
            availability_domain=self.availability_domain,
            compartment_id=self.compartment_id,
            dedicated_vm_host_id=self.dedicated_vm_host_id,
            dedicated_vm_host_shape=self.dedicated_vm_host_shape,
            defined_tags=self.defined_tags,
            display_name=self.display_name,
            fault_domain=self.fault_domain,
            freeform_tags=self.freeform_tags,
            id=self.id,
            remaining_memory_in_gbs=self.remaining_memory_in_gbs,
            remaining_ocpus=self.remaining_ocpus,
            state=self.state,
            time_created=self.time_created,
            total_memory_in_gbs=self.total_memory_in_gbs,
            total_ocpus=self.total_ocpus)
def get_dedicated_vm_host(dedicated_vm_host_id: Optional[str] = None,
                          opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetDedicatedVmHostResult:
    """
    This data source provides details about a specific Dedicated Vm Host resource in Oracle Cloud Infrastructure Core service.
    Gets information about the specified dedicated virtual machine host.
    ## Example Usage
    ```python
    import pulumi
    import pulumi_oci as oci
    test_dedicated_vm_host = oci.core.get_dedicated_vm_host(dedicated_vm_host_id=oci_core_dedicated_vm_host["test_dedicated_vm_host"]["id"])
    ```
    :param str dedicated_vm_host_id: The OCID of the dedicated VM host.
    """
    # Assemble the provider-facing argument dict (camelCase keys per schema).
    __args__ = dict()
    __args__['dedicatedVmHostId'] = dedicated_vm_host_id
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        # Default to this SDK's own version so the engine selects a matching plugin.
        opts.version = _utilities.get_version()
    __ret__ = pulumi.runtime.invoke('oci:core/getDedicatedVmHost:getDedicatedVmHost', __args__, opts=opts, typ=GetDedicatedVmHostResult).value
    # Re-wrap the invoke result into the awaitable variant for async callers.
    return AwaitableGetDedicatedVmHostResult(
        availability_domain=__ret__.availability_domain,
        compartment_id=__ret__.compartment_id,
        dedicated_vm_host_id=__ret__.dedicated_vm_host_id,
        dedicated_vm_host_shape=__ret__.dedicated_vm_host_shape,
        defined_tags=__ret__.defined_tags,
        display_name=__ret__.display_name,
        fault_domain=__ret__.fault_domain,
        freeform_tags=__ret__.freeform_tags,
        id=__ret__.id,
        remaining_memory_in_gbs=__ret__.remaining_memory_in_gbs,
        remaining_ocpus=__ret__.remaining_ocpus,
        state=__ret__.state,
        time_created=__ret__.time_created,
        total_memory_in_gbs=__ret__.total_memory_in_gbs,
        total_ocpus=__ret__.total_ocpus)
| StarcoderdataPython |
# repository: awilkins/CSC18
# Import arcpy module so we can use ArcGIS geoprocessing tools
import arcpy
import sys, os
#---------------------------------------------------------------------------------------------
# 1. Get parameters from the toolbox using 'GetParametersAsText' method
# --> check ArcGIS help for info how to use methods
# Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//018v00000047000000
#---------------------------------------------------------------------------------------------
# Enable Arcpy to overwrite existing files
#arcpy.env.overwriteOutput = True

# 1. Read the five tool parameters supplied through the ArcGIS toolbox UI.
input_species_shp = arcpy.GetParameterAsText(0)
output_folder = arcpy.GetParameterAsText(1)
species_attribute = arcpy.GetParameterAsText(2)
attribute_name = arcpy.GetParameterAsText(3)
presence_value = arcpy.GetParameterAsText(4)

#---------------------------------------------------------------------------------------------
# 2. Add a new field into the table using 'AddField_management' method
# Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//001700000047000000
#---------------------------------------------------------------------------------------------
arcpy.AddField_management(in_table=input_species_shp, field_name=attribute_name, field_type="SHORT")

#-----------------------------------------------------------------------------------------------------
# 3. Update the presence value for our newly created attribute with 'CalculateField_management' method
# Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//00170000004m000000
#-----------------------------------------------------------------------------------------------------
arcpy.CalculateField_management(in_table=input_species_shp, field=attribute_name, expression=presence_value)
#-----------------------------------------------------------------------------------------------------------------------------------
# 4. Get a list of unique species in the table using 'SearchCursor' method
# Method info: http://resources.arcgis.com/en/help/main/10.1/index.html#//018v00000050000000
# More elegant version of the function in ArcPy Cafe: https://arcpy.wordpress.com/2012/02/01/create-a-list-of-unique-field-values/
# ----------------------------------------------------------------------------------------------------------------------------------
# 4.1 CREATE a function that returns unique values of a 'field' within the 'table'
def unique_values(table, field):
    """Return a sorted list of the distinct values of *field* in *table*.

    Collecting into a set gives O(1) membership instead of the original
    O(n)-per-row list scan (quadratic overall), and the `with` statement
    releases the cursor's lock on the data source deterministically.
    """
    with arcpy.da.SearchCursor(table, [field]) as cursor:
        return sorted({row[0] for row in cursor})
# 4.2 USE the function to get a list of unique values
unique_species = unique_values(table=input_species_shp, field=species_attribute)

#--------------------------------------------------------------------------------------------------------------------------------
# 5. Create a feature layer from the shapefile with 'MakeFeatureLayer_management' method that enables us to select specific rows
# Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//00170000006p000000
#--------------------------------------------------------------------------------------------------------------------------------
species_lyr = arcpy.MakeFeatureLayer_management(in_features=input_species_shp, out_layer="species_lyr")

#---------------------------------------------------
# 6. Iterate over unique_species list and:
# 6.1) export individual species as Shapefiles and
# 6.2) convert those shapefiles into Raster Datasets
#---------------------------------------------------
for individual in unique_species:
    # 6.1):
    # Build the selection expression; single quotes inside the species name
    # are doubled so the SQL string literal stays valid (e.g. "O'Shea").
    expression = "%s = '%s'" % (species_attribute, individual.replace("'", "''"))
    # Select rows based on individual breed using 'SelectLayerByAttribute_management' method
    # Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//001700000071000000
    selection = arcpy.SelectLayerByAttribute_management(species_lyr, "NEW_SELECTION", where_clause=expression)
    # Create an output path for Shapefile
    shape_name = individual + ".shp"
    individual_shp = os.path.join(output_folder, shape_name)
    # Export the selection as a Shapefile into the output folder using 'CopyFeatures_management' method
    # Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//001700000035000000
    arcpy.CopyFeatures_management(in_features=selection, out_feature_class=individual_shp)
    # 6.2):
    # Create an output path for the Raster Dataset (*.tif)
    tif_name = individual + ".tif"
    individual_tif = os.path.join(output_folder, tif_name)
    # Convert the newly created Shapefile into a Raster Dataset using 'PolygonToRaster_conversion' method
    # Method info: http://resources.arcgis.com/en/help/main/10.2/index.html#//001200000030000000
    arcpy.PolygonToRaster_conversion(in_features=individual_shp, value_field=attribute_name, out_rasterdataset=individual_tif)
    # Print progress info for the user
    info = "Processed: " + individual
    arcpy.AddMessage(info)

# 7. Print that the process was finished successfully
info = "Process was a great success! Wuhuu!"
arcpy.AddMessage(info)
| StarcoderdataPython |
import logging
from typing import Any, Dict, Callable

_log = logging.getLogger(__name__)

__all__ = ("EventMixin",)


class EventMixin:
    """Mixin that dispatches named events to registered handler callables."""

    # Maps an event name (without the "on_" prefix) to its handler.
    events: Dict[str, Callable] = {}

    def dispatch(self, event_name: str, *args: Any, **kwargs: Any) -> Any:
        """Invoke the handler registered for *event_name*.

        Returns the handler's return value, or None when no handler is
        registered for that name. Positional and keyword arguments are
        forwarded to the handler unchanged.
        """
        event = self.events.get(event_name)
        if not event:
            return None
        # Lazy %-formatting: the message is only built if DEBUG is enabled.
        _log.debug("Dispatching Event: on_%s", event_name)
        return event(*args, **kwargs)
| StarcoderdataPython |
# filename: crear_base.py
from sqlalchemy import create_engine
# Create the link to the database engine;
# for this example the SQLite backend is
# used, stored in the local file
# demobase.db.
engine = create_engine('sqlite:///demobase.db')
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
from sqlalchemy import Column, Integer, String
class Docente(Base):
    """ORM model for a teacher ("docente") row in the 'docentes' table."""
    __tablename__ = 'docentes'
    id = Column(Integer, primary_key=True)
    nombre = Column(String)
    apellido = Column(String)
    ciudad = Column(String, nullable=False) # this attribute may not be null
    def __repr__(self):
        # Human-readable summary used when printing query results.
        return "Docente: nombre=%s apellido=%s ciudad:%s" % (
            self.nombre,
            self.apellido,
            self.ciudad)
# Emit CREATE TABLE statements for every declared model that is missing.
Base.metadata.create_all(engine)
| StarcoderdataPython |
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import time
import cv2 as cv
from Model import Model
import argparse
def get_opt(argv=None):
    """Parse command-line options for the virtual try-on demo.

    Args:
        argv: Optional list of argument strings. When None (the default,
            matching the original no-argument call), argparse falls back
            to sys.argv[1:]. Passing an explicit list makes the function
            usable (and testable) without touching process arguments.

    Returns:
        argparse.Namespace with checkpoint paths (jpp, gmm, tom) and the
        input person/cloth image paths (image, cloth).
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--jpp', type=str, default='checkpoints/jpp.pb', help='model checkpoint for JPPNet')
    parser.add_argument('--gmm', type=str, default='checkpoints/gmm.pth', help='model checkpoint for GMM')
    parser.add_argument('--tom', type=str, default='checkpoints/tom.pth', help='model checkpoint for TOM')
    parser.add_argument('--image', type=str, default='image.jpeg', help='input image')
    parser.add_argument('--cloth', type=str, default='cloth.jpeg', help='cloth image')
    return parser.parse_args(argv)
# Parse checkpoint and image paths from the command line.
opt = get_opt()
# use_cuda=False forces CPU inference.
model = Model(opt.jpp, opt.gmm, opt.tom, use_cuda=False)
cloth = np.array(Image.open(opt.cloth))
# Preview the cloth image (plt.show() blocks until the window is closed).
plt.imshow(cloth)
plt.show()
image = np.array(Image.open(opt.image))
# Preview the person image.
plt.imshow(image)
plt.show()
start = time.time()
# need_pre/check_dirty semantics are defined by Model.predict -- TODO confirm.
result,trusts = model.predict(image, cloth, need_pre=False, check_dirty=True)
if result is not None:
    end = time.time()
    print("time:"+str(end-start))
    print("Confidence"+str(trusts))
    plt.imshow(result)
    plt.show()
    # NOTE(review): cv.imwrite expects BGR channel order while `result` was
    # displayed via matplotlib as RGB -- the saved file's channels may be
    # swapped; confirm against Model.predict's output format.
    cv.imwrite('result.jpeg', result)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.