id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
6690974 | <gh_stars>1-10
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
from math import log
import operator
import pickle
# Based on the ID3 algorithm (suitable for nominal, i.e. discrete-valued, data)
'''
ID3 uses the rate of decrease in information entropy as the criterion for
choosing the test attribute: at every node it picks the not-yet-used attribute
with the highest information gain as the split, then repeats the process until
the resulting decision tree classifies the training examples perfectly.
'''
# Based on information gain - the change in entropy before and after splitting the data set.
def createDataSet():
    """Return the toy training set and its feature names.

    Each row is [no surfacing, flippers, class]; the last column is the
    class label ('yes' = is a fish).
    """
    samples = [
        [1, 1, 'yes'],
        [1, 1, 'yes'],
        [1, 0, 'no'],
        [0, 1, 'no'],
        [0, 1, 'no'],
    ]
    featureNames = ['no surfacing', 'flippers']
    return samples, featureNames
def majorityCnt(classList):
    """Majority vote: return the label that occurs most often in classList.

    Ties are broken in favour of the label that was seen first, matching
    the stable descending sort this replaces.
    """
    tally = {}
    for label in classList:
        tally[label] = tally.get(label, 0) + 1
    # max() returns the first key (in insertion order) with the maximal count.
    return max(tally, key=tally.get)
# 计算信息熵;
def calcShannonEnt(dataSet):
numEntries = len(dataSet)
labelCounts = {}
for featVec in dataSet:
currentLabel = featVec[-1]
if currentLabel not in labelCounts.keys():
labelCounts[currentLabel] = 0
labelCounts[currentLabel] += 1
shannonEnt = 0.0
for key in labelCounts:
prob = float(labelCounts[key]) / numEntries
shannonEnt -= prob * log(prob, 2)
return shannonEnt
# 基于给定的特征键值来选择数据集;
def splitDataSet(dataSet, axis, value):
retDataSet = []
for featVec in dataSet:
if featVec[axis] == value:
reducedFeatVec = featVec[:axis]
reducedFeatVec.extend(featVec[axis + 1:])
retDataSet.append(reducedFeatVec)
return retDataSet
def chooseBestFeatureToSplit(dataSet):
    """Pick the feature index with the highest information gain (ID3).

    Returns -1 when no feature yields a strictly positive gain.
    """
    featureCount = len(dataSet[0]) - 1
    # Entropy of the unsplit data set is the baseline.
    baseEntropy = calcShannonEnt(dataSet)
    bestGain, bestIndex = 0.0, -1
    for idx in range(featureCount):
        # All distinct values this feature takes across the data set.
        values = {row[idx] for row in dataSet}
        # Entropy after the split, weighted by each subset's size.
        splitEntropy = 0.0
        for v in values:
            subset = splitDataSet(dataSet, idx, v)
            weight = len(subset) / float(len(dataSet))
            splitEntropy += weight * calcShannonEnt(subset)
        # The smaller the new entropy, the larger the gain.
        gain = baseEntropy - splitEntropy
        if gain > bestGain:
            bestGain, bestIndex = gain, idx
    return bestIndex
def createTree(dataSet, labels):
    """Recursively build an ID3 decision tree as nested dicts.

    The tree has the shape {feature label: {feature value: subtree or
    class label}}.  `labels` is never mutated; a working copy is used.
    """
    workLabels = labels[:]
    classes = [row[-1] for row in dataSet]
    # All samples share one class: this branch is a leaf.
    if classes.count(classes[0]) == len(classes):
        return classes[0]
    # No features left to split on: fall back to a majority vote.
    if len(dataSet[0]) == 1:
        return majorityCnt(classes)
    # Split on the feature with the highest information gain.
    best = chooseBestFeatureToSplit(dataSet)
    bestLabel = workLabels[best]
    tree = {bestLabel: {}}
    del workLabels[best]
    for v in {row[best] for row in dataSet}:
        # Each child recursion gets its own copy of the remaining labels.
        tree[bestLabel][v] = createTree(splitDataSet(dataSet, best, v),
                                        workLabels[:])
    return tree
def classify(inputTree, featLabels, testVec):
    """Classify a feature vector with a decision tree built by createTree.

    Parameters
    ----------
    inputTree : dict
        Nested {feature label: {feature value: subtree-or-class-label}}.
    featLabels : list
        Feature names, used to locate each feature's position in testVec.
    testVec : list
        Feature values of the sample to classify.

    Returns the predicted class label, or None when the sample carries a
    feature value the tree has never seen.
    """
    # Bug fix: dict.keys() is not subscriptable in Python 3 (this file is
    # python3 per its shebang); take the first key via the iterator instead.
    firstStr = next(iter(inputTree))
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)
    classLabel = None  # stays None for unseen feature values (was UnboundLocalError)
    for key in secondDict:
        if testVec[featIndex] == key:
            if isinstance(secondDict[key], dict):
                # Internal node: keep descending.
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                # Leaf node: the stored class label.
                classLabel = secondDict[key]
    return classLabel
if __name__ == '__main__':
    # Demo: build a tree from the toy data set, then classify one sample.
    dataSet, labels = createDataSet()
    # print(splitDataSet(dataSet, 0, 1))
    # print(chooseBestFeatureToSplit(dataSet))
    # shannonEnt = calcShannonEnt(dataSet)
    # print(shannonEnt)
    myTree = createTree(dataSet, labels)
    # Expected: 'yes' (both features set -> classified as a fish).
    print(classify(myTree, labels, [1, 1]))
| StarcoderdataPython |
6587807 | <reponame>oserikov/dream
import logging
from df_engine.core import Context, Actor
from common.dff.integration import condition as int_cnd
logger = logging.getLogger(__name__)
# ....
def example_lets_talk_about():
    """Build a DFF condition handler that reports whether the human
    proactively asked to chat about some topic."""

    def handler(ctx: Context, actor: Actor, *args, **kwargs) -> str:
        # Delegate straight to the shared integration condition.
        return int_cnd.is_lets_chat_about_topic_human_initiative(ctx, actor)

    return handler
| StarcoderdataPython |
3279330 | # coding=utf-8
"""Sopel Spelling correction module
This module will fix spelling errors if someone corrects them
using the sed notation (s///) commonly found in vi/vim.
"""
# Copyright 2011, <NAME>, yanovich.net
# Copyright 2013, <NAME>, embolalia.com
# Licensed under the Eiffel Forum License 2.
# Contributions from: <NAME> and <NAME>
from __future__ import unicode_literals, absolute_import, print_function, division
import re
from sopel.tools import Identifier, SopelMemory
from sopel.module import rule, priority
from sopel.formatting import bold
def setup(bot):
    # Per-channel, per-nick log of recent messages; consumed by the
    # s/find/replace/ handler below.
    bot.memory['find_lines'] = SopelMemory()
@rule('.*')
@priority('low')
def collectlines(bot, trigger):
    """Create a temporary log of what people say"""
    # Don't log things in PM
    if trigger.is_privmsg:
        return
    # Add a log for the channel and nick, if there isn't already one
    if trigger.sender not in bot.memory['find_lines']:
        bot.memory['find_lines'][trigger.sender] = SopelMemory()
    if Identifier(trigger.nick) not in bot.memory['find_lines'][trigger.sender]:
        bot.memory['find_lines'][trigger.sender][Identifier(trigger.nick)] = list()
    # Create a temporary list of the user's lines in a channel
    templist = bot.memory['find_lines'][trigger.sender][Identifier(trigger.nick)]
    line = trigger.group()
    if line.startswith("s/"):  # Don't remember substitutions
        return
    elif line.startswith("\x01ACTION"):  # For /me messages
        # Drop the trailing \x01 that closes the CTCP ACTION frame; the
        # leading "\x01ACTION" prefix is kept so findandreplace can detect it.
        line = line[:-1]
        templist.append(line)
    else:
        templist.append(line)
    del templist[:-10]  # Keep the log to 10 lines per person
    bot.memory['find_lines'][trigger.sender][Identifier(trigger.nick)] = templist
# Match "nick, s/find/replace/flags".  The nick and the flags are optional;
# the nick may be followed by a comma or a colon, anything after the first
# space following the third slash is ignored, and slashes inside find/replace
# can be escaped with backslashes.  One known limitation: a literal backslash
# immediately followed by a literal slash cannot be expressed, because the
# escape syntax consumes that sequence.
@rule(r"""(?:
           (\S+)            # Catch a nick in group 1
          [:,]\s+)?         # Followed by colon/comma and whitespace, if given
          s/                # The literal s/
          (                 # Group 2 is the thing to find
           (?:\\/ | [^/])+  # One or more non-slashes or escaped slashes
          )/(               # Group 3 is what to replace with
           (?:\\/ | [^/])*  # One or more non-slashes or escaped slashes
          )
          (?:/(\S+))?       # Optional slash, followed by group 4 (flags)
          """)
@priority('high')
def findandreplace(bot, trigger):
    """Apply an sed-style s/// correction to a recently logged message."""
    # Don't bother in PM
    if trigger.is_privmsg:
        return
    # Correcting other person vs self: group 1 is the optional target nick.
    rnick = Identifier(trigger.group(1) or trigger.nick)
    search_dict = bot.memory['find_lines']
    # only do something if there is conversation to work with
    if trigger.sender not in search_dict:
        return
    if Identifier(rnick) not in search_dict[trigger.sender]:
        return
    # TODO: rest[0] is find, rest[1] is replace.  These should be made
    # variables of their own at some point.
    rest = [trigger.group(2), trigger.group(3)]
    rest[0] = rest[0].replace(r'\/', '/')
    rest[1] = rest[1].replace(r'\/', '/')
    me = False  # /me command
    flags = (trigger.group(4) or '')
    # If g flag is given, replace all. Otherwise, replace once.
    if 'g' in flags:
        count = -1
    else:
        count = 1
    # repl performs the substitution.  The i flag turns off case
    # sensitivity; re.U turns on unicode matching.
    if 'i' in flags:
        regex = re.compile(re.escape(rest[0]), re.U | re.I)
        # NOTE: count == 1 is passed as re.sub's count argument - it is
        # True (1, replace once) without the g flag, False (0, replace all)
        # with it.
        repl = lambda s: re.sub(regex, rest[1], s, count == 1)
    else:
        repl = lambda s: s.replace(rest[0], rest[1], count)
    # Look back through the user's lines in the channel until you find a line
    # where the replacement works
    new_phrase = None
    for line in reversed(search_dict[trigger.sender][rnick]):
        if line.startswith("\x01ACTION"):
            me = True  # /me command
            line = line[8:]
        else:
            me = False
        new_phrase = repl(line)
        if new_phrase != line:  # we are done
            break
    if not new_phrase or new_phrase == line:
        return  # Didn't find anything
    # Save the new "edited" message.
    action = (me and '\x01ACTION ') or ''  # If /me message, prepend \x01ACTION
    templist = search_dict[trigger.sender][rnick]
    templist.append(action + new_phrase)
    search_dict[trigger.sender][rnick] = templist
    bot.memory['find_lines'] = search_dict
    # output
    if not me:
        new_phrase = '%s to say: %s' % (bold('meant'), new_phrase)
    if trigger.group(1):
        phrase = '%s thinks %s %s' % (trigger.nick, rnick, new_phrase)
    else:
        phrase = '%s %s' % (trigger.nick, new_phrase)
    bot.say(phrase)
| StarcoderdataPython |
87814 | # Importamos smtplib
import smtplib
# Importamos los modulos necesarios
from email.mime.text import MIMEText
def send_mail(user, pwd, to_who, subjet, message):
    """Send a plain-text e-mail through Gmail's SMTP server.

    user/pwd are the Gmail credentials, to_who the recipient address,
    subjet the subject line and message the body text.
    """
    # Build the message and its headers.
    mail = MIMEText(message)
    mail['Subject'] = subjet
    mail['From'] = user
    mail['To'] = to_who
    # Open a TLS-secured session with Gmail and authenticate.
    server = smtplib.SMTP('smtp.gmail.com', 587)
    server.ehlo()
    server.starttls()
    server.ehlo()
    server.login(user, pwd)
    # Deliver the message and close the session.
    server.sendmail(user, to_who, mail.as_string())
    server.close()
| StarcoderdataPython |
# Read a number from 0 to 9999 and print each of its decimal digits.
num = int(input('Insira aqui um número (0~9999):'))
u = num % 10          # units digit
d = num // 10 % 10    # tens digit
c = num // 100 % 10   # hundreds digit
m = num // 1000 % 10  # thousands digit
print('A unidade é igual a: {}'.format(u))
print('A dezena é igual a: {}'.format(d))
# Bug fix: the original format strings used '()' instead of '{}' for the
# hundreds (and a commented-out thousands line), printing literal parentheses
# instead of the digit.
print('A centena é igual a: {}'.format(c))
print('A milhar é igual a: {}'.format(m))
| StarcoderdataPython |
1910106 | import sys
import time
import json
import os
import threading
import Queue
import logging
from sys import stdin, stdout
from datetime import datetime
import pymongo
from nyamuk.nyamuk import *
from nyamuk.event import *
class mqtt_rx_thread(threading.Thread):
    """Thread that subscribes to an MQTT broker and enqueues every received
    JSON payload onto the shared work queue for the DB thread.

    `stat` is a shared dict of counters; access to it and to `workQueue`
    is serialized with `queueLock`.
    """
    def __init__(self, threadID, queueLock, workQueue, stat):
        threading.Thread.__init__(self)
        self.queueLock = queueLock
        self.workQueue = workQueue
        self.threadID = threadID
        self.stat = stat
        # Connect to the local MQTT broker at construction time; abort the
        # whole process if the CONNACK handshake fails.
        self.client = Nyamuk("exabgp_mongo_client", server="127.0.0.1", log_level=logging.WARNING)
        #ret = client.connect(version=4)
        ret = self.client.connect()
        ret = self.nloop()  # ret should be EventConnack object
        if not isinstance(ret, EventConnack) or ret.ret_code != 0:
            logging.error("Cannot connect to mqtt server"); sys.exit(1)
    def nloop(self):
        # One network iteration: flush outgoing packets, read incoming ones,
        # and return the first queued event (or None).
        self.client.packet_write()  # flush write buffer (messages sent to MQTT server)
        self.client.loop()  # fill read buffer (enqueue received messages)
        return self.client.pop_event()  # return 1st received message (dequeued)
    def run(self):
        global stop
        # Subscribe to every topic ('#' wildcard) with QoS 1.
        self.client.subscribe('#', qos=1)
        ret = self.nloop()
        if not isinstance(ret, EventSuback):
            logging.error('SUBACK not received')
            sys.exit(2)
        logging.debug('granted qos is %s', ret.granted_qos[0])
        while True:
            evt = self.nloop()
            if isinstance(evt, EventPublish):
                logging.debug('we received a message: {0} (topic= {1})'.format(evt.msg.payload, evt.msg.topic))
                # received message is either qos 0 or 1
                # in case of qos 1, we must send back PUBACK message with same packet-id
                if evt.msg.qos == 1:
                    self.client.puback(evt.msg.mid)
                try:
                    json_msg = json.loads(str(evt.msg.payload))
                except:
                    # NOTE(review): bare except swallows everything, not just
                    # decode errors - consider `except ValueError`.
                    logging.error("JSON decode error: %s", evt.msg.payload)
                    continue
                # Hand the decoded message to the DB thread under the lock.
                self.queueLock.acquire()
                self.stat['mq_rx'] = self.stat['mq_rx'] + 1
                self.workQueue.put(json_msg)
                self.queueLock.release()
class mongodb_thread(threading.Thread):
    """Thread that drains the shared queue of exabgp JSON messages and
    mirrors the BGP RIB into MongoDB (one collection per neighbor IP).

    Message handling: 'notification' and 'open' drop the neighbor's
    collection (session reset); 'update' applies withdraws/announces.
    """
    def __init__(self, threadID, queueLock, workQueue, stat):
        threading.Thread.__init__(self)
        self.queueLock = queueLock
        self.workQueue = workQueue
        self.threadID = threadID
        self.mongo_client = pymongo.MongoClient("mongodb://127.0.0.1:27017")
        self.db = self.mongo_client.exabgp
        self.stat = stat
    def run(self):
        # NOTE(review): this loop busy-spins re-acquiring the lock when the
        # queue is empty, and holds the lock across MongoDB operations -
        # a Queue.get() with blocking would be gentler; confirm intent.
        while True:
            self.queueLock.acquire()
            while not self.workQueue.empty():
                json_msg = self.workQueue.get()
                msg_type = json_msg.get('type')
                if msg_type == "notification":
                    # Session torn down: wipe the neighbor's RIB mirror.
                    neighbor = json_msg.get('neighbor')
                    if neighbor:
                        neighbor_ip = neighbor.get('ip')
                        logging.warning("Receive BGP Notification from %s, flushing DB", neighbor_ip)
                        self.db[neighbor_ip].drop()
                        self.db[neighbor_ip].create_index([("prefix", pymongo.ASCENDING)])
                        self.stat['db_drop'] = self.stat['db_drop'] + 1
                if msg_type == "open":
                    # New session: start from an empty RIB mirror.
                    neighbor_ip = json_msg.get('neighbor').get('ip')
                    logging.warning("Receive BGP open from %s, flushing DB", neighbor_ip)
                    self.db[neighbor_ip].drop()
                    self.db[neighbor_ip].create_index([("prefix", pymongo.ASCENDING)])
                    self.stat['db_drop'] = self.stat['db_drop'] + 1
                if msg_type == "update":
                    neighbor_ip = json_msg.get('neighbor').get('ip')
                    logging.debug("Receive BGP Update from %s", neighbor_ip)
                    r = self.db[neighbor_ip]
                    # Withdraws: delete every (prefix, family) pair listed.
                    withdraw = json_msg.get('neighbor').get('message').get('update').get('withdraw')
                    if withdraw:
                        attr = json_msg.get('neighbor').get('message').get('update').get('attribute')
                        for family in json_msg.get('neighbor').get('message').get('update').get('withdraw'):
                            for prefix in json_msg.get('neighbor').get('message').get('update').get('withdraw').get(family):
                                result = r.delete_many({'prefix': prefix, 'family': family})
                                logging.debug("del family %s, prefix %s, #records %s", family, prefix, result.deleted_count)
                                self.stat['db_delete'] = self.stat['db_delete'] + 1
                    # Announces: upsert one document per (family, next-hop, prefix).
                    announce = json_msg.get('neighbor').get('message').get('update').get('announce')
                    if announce:
                        attr = json_msg.get('neighbor').get('message').get('update').get('attribute')
                        for family in json_msg.get('neighbor').get('message').get('update').get('announce'):
                            for nh in json_msg.get('neighbor').get('message').get('update').get('announce').get(family):
                                for prefix in json_msg.get('neighbor').get('message').get('update').get('announce').get(family).get(nh):
                                    result = r.replace_one({'prefix': prefix}, dict(prefix=prefix, nh=nh, family=family, attribute=attr), upsert=True)
                                    if result.upserted_id:
                                        logging.debug("add famiy %s i, prefix %s, nh %s, insert id %s", family, prefix, nh, result.upserted_id)
                                    else :
                                        logging.debug("add famiy %s i, prefix %s, nh %s, #updated records %s", family, prefix, nh, result.modified_count)
                                    self.stat['db_replace'] = self.stat['db_replace'] + 1
            self.queueLock.release()
# Main thread: start the MQTT receiver and MongoDB writer threads, then
# loop forever printing queue-length and counter statistics every 10 s.
def main():
    logging.basicConfig(level=logging.INFO)
    queueLock = threading.Lock()
    workQueue = Queue.Queue(100000)
    # Shared counters, updated by both worker threads under queueLock.
    stat = dict(mq_rx=0, db_replace=0, db_delete=0, db_drop=0)
    stop = False
    #Start message_parser_thread
    db_thread = mongodb_thread("db_threqd", queueLock, workQueue, stat)
    db_thread.daemon=True
    db_thread.start()
    mq_thread = mqtt_rx_thread("mq_thread", queueLock, workQueue, stat)
    mq_thread.daemon=True
    mq_thread.start()
    try:
        # Periodic status report; daemon threads die with the process.
        while True:
            queueLock.acquire()
            logging.info("Queue Length: %s ", workQueue.qsize())
            logging.info("Stat: %s ", stat)
            queueLock.release()
            time.sleep(10)
    except (KeyboardInterrupt, SystemExit):
        logging.error('\n! Received keyboard interrupt, quitting threads.\n')
main() | StarcoderdataPython |
11379824 | from erddapClient import ERDDAP_Tabledap
from collections import OrderedDict
from ..waterframe import WaterFrame
def from_erddap(server, dataset_id, variables=None, constraints=None, rcsvkwargs={}, auth=None):
    """
    Get a WaterFrame from an ERDDAP server tabledap dataset.
    Parameters
    ----------
    server : The ERDDAP server URL
    dataset_id : The dataset id to query
    variables : List of variables to get from ERDDAP server, it can be comma separated string or a list.
    constraints : Query constraints to appy to the ERDDAP query, this can be list or dictionary.
    read_csv_kwargs : Dictionary with the parameters to pass to the read_csv function that converts the ERDDAP response to pandas DataFrame
    auth : Tupple with username and password to authenticate to a protected ERDDAP server.

    Returns
    -------
    WaterFrame indexed by (DEPTH, TIME), with a <var>_QC column for every
    variable and the ERDDAP metadata/vocabulary attached.
    """
    # NOTE(review): `rcsvkwargs={}` is a mutable default argument; it is only
    # read here, but a None default would be safer.
    remote = ERDDAP_Tabledap(server, dataset_id, auth=auth)
    # Build the ERDDAP query
    if variables is None:
        variables = list(remote.variables.keys())
    remote.setResultVariables(variables)
    if constraints:
        remote.addConstraints(constraints)
    # parameters for the pandas read_csv method, which its used in the getDataFrame method
    # (caller-supplied rcsvkwargs override the defaults on key collision)
    _rcsvkwargs = { **{ 'header' : 0,
                        'names' : variables } , **rcsvkwargs }
    if 'time' in variables:
        _rcsvkwargs['parse_dates'] = ['time']
    # actual erddap data request, returning a DataFrame
    _df = remote.getDataFrame(**_rcsvkwargs)
    # vocabulary subset from erddap
    _vocabulary = OrderedDict([ (key,val) for key, val in remote.variables.items() if key in variables ])
    # Handle index columns names: the first matching vertical coordinate
    # becomes the DEPTH index; a constant 0 is used when none is present.
    for posible_depth_name in ['depth', 'altitude']: # Special variable names in ERDDAP
        if posible_depth_name in variables:
            _df.rename(columns={posible_depth_name: 'DEPTH'}, inplace=True)
            break
    if not 'DEPTH' in _df.keys():
        _df['DEPTH'] = 0
    # time is a special variable in ERDDAP, it's always called time
    _df.rename(columns={'time': 'TIME'}, inplace=True)
    # Add QC columns: every data column gets a companion <name>_QC column
    # initialized to 0 unless one already exists.
    keys = _df.keys()
    for key in keys:
        if key.endswith('_QC'):
            continue
        if f'{key}_QC' in keys:
            continue
        else:
            _df[f'{key}_QC'] = 0
    # Set index
    _df.set_index(['DEPTH', 'TIME'], drop=True, inplace=True)
    # TODO variable names should be standarized? Ej. time -> TIME , temperature -> TEMP , etc?
    # TODO Should this method include arguments to request ERDDAP server side operations ? Ej. orderBy, orderByClosest, orderByMean, etc?
    return WaterFrame(df=_df,
                      metadata=remote.info,
                      vocabulary=_vocabulary)
| StarcoderdataPython |
1985258 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from flowai.constants.url import URL
from flowai.constants.model import Model
import requests
import json
from os.path import join
class AppBase(object):
    """Base class for FlowAI application models (currently a stub)."""
    def __init__(self, model_name):
        # Placeholder: model_name is accepted but not yet stored or used.
        pass
    def predict_by_url(self):
        # Placeholder for URL-based prediction; not implemented yet.
        pass
def is_valid_api_key(api_key: str = None) -> bool:
    """Validate an API key against the FlowAI server.

    Parameters
    ----------
    api_key : the key to validate, sent as JSON to the check endpoint.

    Returns True when the server reports success, False otherwise
    (including when the response body lacks a "success" field).
    """
    # Make a validation check on server.
    # NOTE(review): os.path.join is platform-dependent and not meant for
    # URLs - urllib.parse.urljoin would be the robust choice.
    call_url = join(URL.BASE_URL, URL.CHECK_API_KEY)
    response = requests.post(call_url, json={"api_key": api_key})
    # Parse the JSON body; .get avoids a KeyError on a malformed response.
    response_dict = json.loads(response.text)
    success = response_dict.get("success", False)
    return bool(success)
3319777 | import os
import matplotlib.pyplot as plt
import numpy as np
from pyplanscoring.core.calculation import DVHCalculation, PyStructure
from pyplanscoring.core.dicom_reader import PyDicomParser
from pyplanscoring.core.types import Dose3D, DoseUnit
def plot_dvh(dvh, title):
    """Plot one cumulative DVH curve (dose on x, volume on y) in a new figure."""
    plt.figure()
    # Bin index times the bin width gives the dose axis.
    doses = np.arange(len(dvh['data'])) * float(dvh['scaling'])
    plt.plot(doses, dvh['data'])
    plt.xlabel('Dose [Gy]')
    plt.ylabel('Volume [cc]')
    plt.title(title)
def plot_dvh_comp(dvh_calc, dvh, title):
    """Overlay a PyPlanScoring DVH and the Eclipse TPS DVH in a new figure,
    each normalised to its first (total-volume) bin."""
    plt.figure()
    calc_doses = np.arange(len(dvh_calc['data'])) * float(dvh_calc['scaling'])
    tps_doses = np.arange(len(dvh['data'])) * float(dvh['scaling'])
    plt.plot(calc_doses, dvh_calc['data'] / dvh_calc['data'][0], label='PyPlanScoring')
    plt.plot(tps_doses, dvh['data'] / dvh['data'][0], label='Eclipse')
    plt.xlabel('Dose [Gy]')
    plt.ylabel('Volume [cc]')
    plt.legend()
    plt.title(title)
def test_calculation_with_end_cap(dose_3d, optic_chiasm, body, ptv70, lens):
    """Smoke test: DVH calculation with a 0.2 end-cap on large and small
    structures, no grid upsampling.  Results are not asserted - the test
    passes if no exception is raised."""
    # dvh calculation with no upsampling - body no end-cap
    bodyi = PyStructure(body, end_cap=0.2)
    dvh_calc = DVHCalculation(bodyi, dose_3d)
    dvh = dvh_calc.calculate()
    # dvh calculation with no upsampling - brain no end-cap
    braini = PyStructure(ptv70, end_cap=0.2)
    dvh_calci = DVHCalculation(braini, dose_3d)
    dvhb = dvh_calci.calculate()
    # SMALL VOLUMES STRUCTURES
    bodyi = PyStructure(lens, end_cap=0.2)
    dvh_calc = DVHCalculation(bodyi, dose_3d)
    dvh = dvh_calc.calculate()
    # dvh calculation with no upsampling - brain no end-cap
    braini = PyStructure(optic_chiasm, end_cap=0.2)
    dvh_calci = DVHCalculation(braini, dose_3d)
    dvhb = dvh_calci.calculate()
def test_calculation_with_up_sampling_end_cap(dose_3d, optic_chiasm, lens):
    """Smoke test: end-capped small structures calculated on an upsampled
    0.2 mm calculation grid.  Passes if no exception is raised."""
    # SMALL VOLUMES STRUCTURES
    bodyi = PyStructure(lens, end_cap=0.2)
    dvh_calc = DVHCalculation(bodyi, dose_3d, calc_grid=(0.2, 0.2, 0.2))
    dvh = dvh_calc.calculate()
    # dvh calculation with no upsampling - brain no end-cap
    braini = PyStructure(optic_chiasm, end_cap=0.2)
    dvh_calci = DVHCalculation(braini, dose_3d, calc_grid=(0.2, 0.2, 0.2))
    dvhb = dvh_calci.calculate()
def test_calculate(structures, optic_chiasm, body, ptv70, lens, plot_flag, rd_dcm, dose_3d):
    """Exercise DVH calculation on large and small structures, with and
    without grid upsampling, and compare against the TPS DVHs stored in
    the RT-Dose file.  Plots are gated on `plot_flag` for the first set;
    the final comparison loop plots unconditionally."""
    # dvh calculation with no upsampling - body no end-cap
    bodyi = PyStructure(body)
    dvh_calc = DVHCalculation(bodyi, dose_3d)
    dvh = dvh_calc.calculate()
    # dvh calculation with no upsampling - brain no end-cap
    braini = PyStructure(ptv70)
    dvh_calci = DVHCalculation(braini, dose_3d)
    dvhb = dvh_calci.calculate()
    # small volume
    braini = PyStructure(lens)
    dvh_calci = DVHCalculation(braini, dose_3d)
    dvh_l = dvh_calci.calculate()
    # Small volume no end cap and upsampling
    braini = PyStructure(lens)
    dvh_calc_cpu = DVHCalculation(braini, dose_3d, calc_grid=(0.2, 0.2, 0.2))
    dvh_lu = dvh_calc_cpu.calculate()
    # Small volume no end cap and upsampling
    braini = PyStructure(optic_chiasm)
    dvh_calc_cpu = DVHCalculation(braini, dose_3d, calc_grid=(0.2, 0.2, 0.2))
    dvh_lu = dvh_calc_cpu.calculate()
    # Small volume no end cap and upsampling and GPU
    # braini = PyStructure(lens)
    # dvh_calc_gpu = DVHCalculation(braini, dose_3d, calc_grid=(0.05, 0.05, 0.05))
    # dvh_lu_gpu = dvh_calc_gpu.calculate_gpu()
    if plot_flag:
        plot_dvh(dvh, "BODY")
        plot_dvh(dvhb, "PTV 70")
        plot_dvh(dvh_l, "LENS LT")
        plot_dvh(dvh_lu, "LENS LT - voxel size [mm3]: (0.1, 0.1, 0.1)")
        # plot_dvh(dvh_lu_gpu, "GPU LENS LT - voxel size [mm3]: (0.1, 0.1, 0.1)")
    # compare with TPS DVH
    dvhs = rd_dcm.GetDVHs()
    dvh_calculated = {}
    for roi_number in dvhs.keys():
        struc_i = PyStructure(structures[roi_number])
        # Small structures (< 100 cc) get a finer 0.5 mm calculation grid.
        if struc_i.volume < 100:
            dvh_calc = DVHCalculation(struc_i, dose_3d, calc_grid=(.5, .5, .5))
        else:
            dvh_calc = DVHCalculation(struc_i, dose_3d)
        dvh = dvh_calc.calculate(verbose=True)
        dvh_calculated[roi_number] = dvh
    for roi_number in dvhs.keys():
        plot_dvh_comp(dvh_calculated[roi_number], dvhs[roi_number], structures[roi_number]['name'])
    plt.show()
# TODO REFACTOR
# def test_calc_structure_rings(dicom_folder):
# """
# Test case to lung SBRT structures.
# roi_number: 34,
# name: D2CM PRIMARY,
# roi_number: 35,
# name: D2CM LN
# """
# # given
# rs_dvh = os.path.join(dicom_folder, 'RS.dcm')
# rd = os.path.join(dicom_folder,'RD.dcm')
#
# # 3D dose matrix
# dose_dcm = PyDicomParser(filename=rd)
# dose_values = dose_dcm.get_dose_matrix()
# grid = dose_dcm.get_grid_3d()
# dose_3d = Dose3D(dose_values, grid, DoseUnit.Gy)
#
# # structures
# structures = PyDicomParser(filename=rs_dvh).GetStructures()
# d2cm_prim = PyStructure(structures[34])
#
# dvh_calc = DVHCalculation(d2cm_prim, dose_3d)
# d2cm_prim_dvh = dvh_calc.calculate()
# pass
| StarcoderdataPython |
322516 | # Copyright 2016 NTT DATA
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from unittest import mock
from oslo_utils import timeutils
from masakari.api import utils as api_utils
from masakari import exception
from masakari.objects import fields
from masakari.objects import segment
from masakari.tests.unit.objects import test_objects
from masakari.tests import uuidsentinel
# Fixed timestamp (microseconds stripped) so timestamp comparisons in the
# tests below are deterministic.
NOW = timeutils.utcnow().replace(microsecond=0)
# Canonical DB-layer representation of a failover segment, returned by the
# mocked masakari.db calls throughout this test class.
fake_segment = {
    'created_at': NOW,
    'updated_at': None,
    'deleted_at': None,
    'deleted': False,
    'id': 123,
    'uuid': uuidsentinel.fake_segment,
    'name': 'foo-segment',
    'service_type': 'COMPUTE',
    'description': 'fake-description',
    'recovery_method': 'auto',
    'enabled': True
}
class TestFailoverSegmentObject(test_objects._LocalTest):
@mock.patch('masakari.db.failover_segment_get_by_name')
def test_get_by_name(self, mock_api_get):
mock_api_get.return_value = fake_segment
segment_obj = segment.FailoverSegment.get_by_name(self.context,
'foo-segment')
self.compare_obj(segment_obj, fake_segment)
mock_api_get.assert_called_once_with(self.context, 'foo-segment')
@mock.patch('masakari.db.failover_segment_get_by_uuid')
def test_get_by_uuid(self, mock_api_get):
mock_api_get.return_value = fake_segment
segment_obj = (segment.FailoverSegment.
get_by_uuid(self.context, uuidsentinel.fake_segment))
self.compare_obj(segment_obj, fake_segment)
mock_api_get.assert_called_once_with(self.context,
uuidsentinel.fake_segment)
@mock.patch('masakari.db.failover_segment_get_by_id')
def test_get_by_id(self, mock_api_get):
mock_api_get.return_value = fake_segment
fake_id = 123
segment_obj = segment.FailoverSegment.get_by_id(self.context, fake_id)
self.compare_obj(segment_obj, fake_segment)
mock_api_get.assert_called_once_with(self.context, fake_id)
def _segment_create_attribute(self):
segment_obj = segment.FailoverSegment(context=self.context)
segment_obj.name = 'foo-segment'
segment_obj.description = 'keydata'
segment_obj.service_type = 'fake-user'
segment_obj.recovery_method = 'auto'
segment_obj.uuid = uuidsentinel.fake_segment
return segment_obj
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_create')
def test_create(self, mock_segment_create, mock_notify_about_segment_api):
mock_segment_create.return_value = fake_segment
segment_obj = self._segment_create_attribute()
segment_obj.create()
self.compare_obj(segment_obj, fake_segment)
mock_segment_create.assert_called_once_with(self.context, {
'uuid': uuidsentinel.fake_segment, 'name': 'foo-segment',
'description': 'keydata', 'service_type': 'fake-user',
'recovery_method': 'auto'})
action = fields.EventNotificationAction.SEGMENT_CREATE
phase_start = fields.EventNotificationPhase.START
phase_end = fields.EventNotificationPhase.END
notify_calls = [
mock.call(self.context, segment_obj, action=action,
phase=phase_start),
mock.call(self.context, segment_obj, action=action,
phase=phase_end)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_create')
def test_recreate_fails(self, mock_segment_create,
mock_notify_about_segment_api):
mock_segment_create.return_value = fake_segment
segment_obj = self._segment_create_attribute()
segment_obj.create()
self.assertRaises(exception.ObjectActionError, segment_obj.create)
mock_segment_create.assert_called_once_with(self.context, {
'uuid': uuidsentinel.fake_segment, 'name': 'foo-segment',
'description': 'keydata', 'service_type': 'fake-user',
'recovery_method': 'auto'})
action = fields.EventNotificationAction.SEGMENT_CREATE
phase_start = fields.EventNotificationPhase.START
phase_end = fields.EventNotificationPhase.END
notify_calls = [
mock.call(self.context, segment_obj, action=action,
phase=phase_start),
mock.call(self.context, segment_obj, action=action,
phase=phase_end)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_delete')
def test_destroy(self, mock_segment_destroy,
mock_notify_about_segment_api):
segment_obj = self._segment_create_attribute()
segment_obj.id = 123
segment_obj.destroy()
mock_segment_destroy.assert_called_once_with(
self.context, uuidsentinel.fake_segment)
action = fields.EventNotificationAction.SEGMENT_DELETE
phase_start = fields.EventNotificationPhase.START
phase_end = fields.EventNotificationPhase.END
notify_calls = [
mock.call(self.context, segment_obj, action=action,
phase=phase_start),
mock.call(self.context, segment_obj, action=action,
phase=phase_end)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_delete')
def test_destroy_failover_segment_found(self, mock_segment_destroy,
mock_notify_about_segment_api):
mock_segment_destroy.side_effect = exception.FailoverSegmentNotFound(
id=123)
segment_obj = self._segment_create_attribute()
segment_obj.id = 123
self.assertRaises(exception.FailoverSegmentNotFound,
segment_obj.destroy)
action = fields.EventNotificationAction.SEGMENT_DELETE
phase_start = fields.EventNotificationPhase.START
notify_calls = [
mock.call(self.context, segment_obj, action=action,
phase=phase_start)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch('masakari.db.failover_segment_get_all_by_filters')
def test_get_segment_by_recovery_method(self, mock_api_get):
fake_segment2 = copy.deepcopy(fake_segment)
fake_segment2['name'] = 'fake_segment2'
mock_api_get.return_value = [fake_segment2, fake_segment]
segment_result = (segment.FailoverSegmentList.
get_all(self.context,
filters={'recovery_method': 'auto'}))
self.assertEqual(2, len(segment_result))
self.compare_obj(segment_result[0], fake_segment2)
self.compare_obj(segment_result[1], fake_segment)
mock_api_get.assert_called_once_with(self.context, filters={
'recovery_method': 'auto'
}, limit=None, marker=None, sort_dirs=None, sort_keys=None)
@mock.patch('masakari.db.failover_segment_get_all_by_filters')
def test_get_segment_by_service_type(self, mock_api_get):
fake_segment2 = copy.deepcopy(fake_segment)
fake_segment2['name'] = 'fake_segment'
mock_api_get.return_value = [fake_segment2, fake_segment]
segment_result = (segment.FailoverSegmentList.
get_all(self.context,
filters={'service_type': 'COMPUTE'}))
self.assertEqual(2, len(segment_result))
self.compare_obj(segment_result[0], fake_segment2)
self.compare_obj(segment_result[1], fake_segment)
mock_api_get.assert_called_once_with(self.context, filters={
'service_type': 'COMPUTE'
}, limit=None, marker=None, sort_dirs=None, sort_keys=None)
@mock.patch('masakari.db.failover_segment_get_all_by_filters')
def test_get_limit_and_marker_invalid_marker(self, mock_api_get):
segment_name = 'unknown_segment'
mock_api_get.side_effect = exception.MarkerNotFound(marker=segment_name
)
self.assertRaises(exception.MarkerNotFound,
segment.FailoverSegmentList.get_all,
self.context, limit=5, marker=segment_name)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_update')
def test_save(self, mock_segment_update, mock_notify_about_segment_api):
mock_segment_update.return_value = fake_segment
segment_object = segment.FailoverSegment(context=self.context)
segment_object.name = "foo-segment"
segment_object.id = 123
segment_object.uuid = uuidsentinel.fake_segment
segment_object.save()
self.compare_obj(segment_object, fake_segment)
self.assertTrue(mock_segment_update.called)
action = fields.EventNotificationAction.SEGMENT_UPDATE
phase_start = fields.EventNotificationPhase.START
phase_end = fields.EventNotificationPhase.END
notify_calls = [
mock.call(self.context, segment_object, action=action,
phase=phase_start),
mock.call(self.context, segment_object, action=action,
phase=phase_end)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_update')
def test_save_failover_segment_not_found(self, mock_segment_update,
mock_notify_about_segment_api):
mock_segment_update.side_effect = (
exception.FailoverSegmentNotFound(id=uuidsentinel.fake_segment))
segment_object = segment.FailoverSegment(context=self.context)
segment_object.name = "foo-segment"
segment_object.id = 123
segment_object.uuid = uuidsentinel.fake_segment
self.assertRaises(exception.FailoverSegmentNotFound,
segment_object.save)
action = fields.EventNotificationAction.SEGMENT_UPDATE
phase_start = fields.EventNotificationPhase.START
notify_calls = [
mock.call(self.context, segment_object, action=action,
phase=phase_start)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
@mock.patch.object(api_utils, 'notify_about_segment_api')
@mock.patch('masakari.db.failover_segment_update')
def test_save_failover_segment_already_exists(self, mock_segment_update,
mock_notify_about_segment_api):
mock_segment_update.side_effect = (
exception.FailoverSegmentExists(name="foo-segment"))
segment_object = segment.FailoverSegment(context=self.context)
segment_object.name = "foo-segment"
segment_object.id = 123
segment_object.uuid = uuidsentinel.fake_segment
self.assertRaises(exception.FailoverSegmentExists, segment_object.save)
action = fields.EventNotificationAction.SEGMENT_UPDATE
phase_start = fields.EventNotificationPhase.START
notify_calls = [
mock.call(self.context, segment_object, action=action,
phase=phase_start)]
mock_notify_about_segment_api.assert_has_calls(notify_calls)
    def test_obj_make_compatible(self):
        """obj_to_primitive keeps 'enabled' at version 1.1 and drops it
        when downgrading the primitive to version 1.0."""
        segment_obj = segment.FailoverSegment(context=self.context)
        segment_obj.name = "foo-segment"
        segment_obj.id = 123
        segment_obj.uuid = uuidsentinel.fake_segment
        segment_obj.enabled = True
        primitive = segment_obj.obj_to_primitive('1.1')
        self.assertIn('enabled', primitive['masakari_object.data'])
        primitive = segment_obj.obj_to_primitive('1.0')
        self.assertNotIn('enabled', primitive['masakari_object.data'])
| StarcoderdataPython |
97592 | <reponame>Mikuana/oops_fhir
from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet
from oops_fhir.utils import ValueSet
from oops_fhir.r4.code_system.v3_role_link_status import (
v3RoleLinkStatus as v3RoleLinkStatus_,
)
__all__ = ["v3RoleLinkStatus"]
# Parse the ValueSet JSON shipped next to this module (same basename,
# '.json' suffix) once at import time.
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))
class v3RoleLinkStatus(v3RoleLinkStatus_):
    """
    v3 Code System RoleLinkStatus
    Description: Codes representing possible states of a RoleLink, as
    defined by the RoleLink class state machine.
    Status: active - Version: 2018-08-12
    http://terminology.hl7.org/ValueSet/v3-RoleLinkStatus
    """
    class Meta:
        # Bind the ValueSet resource parsed at module import time.
        resource = _resource
| StarcoderdataPython |
8002724 | """
Copyright (c) 2014-2015, The University of Texas at Austin.
All rights reserved.
This file is part of BLASpy and is available under the 3-Clause
BSD License, which can be found in the LICENSE file at the top-level
directory or at http://opensource.org/licenses/BSD-3-Clause
"""
from blaspy import scal
from numpy import array, asmatrix
from unittest import TestCase
class TestScal(TestCase):
    """Unit tests for blaspy ``scal`` (x := alpha * x).

    ``scal`` scales in place and also returns the array, so each success
    test asserts both the return value and the mutated input."""
    def test_scalar_as_ndarray(self):
        x = array([[1.]])
        expected = [[2.]]
        self.assertListEqual(scal(2, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_alpha_equal_to_zero_with_scalar(self):
        x = array([[1.]])
        expected = [[0.]]
        self.assertListEqual(scal(0, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_negative_alpha_with_scalar(self):
        x = array([[1.]])
        expected = [[-1.]]
        self.assertListEqual(scal(-1, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_row_vector_as_ndarray(self):
        x = array([[1., 2., 3.]])
        expected = [[2., 4., 6.]]
        self.assertListEqual(scal(2, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_two_vector_as_ndarray(self):
        x = array([[1.], [2.], [3.]])
        expected = [[2.], [4.], [6.]]
        self.assertListEqual(scal(2, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_vector_with_negatives_elements(self):
        x = array([[-1., -2., 3.]])
        expected = [[-2., -4., 6.]]
        self.assertListEqual(scal(2, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_alpha_equal_to_zero_with_vectors(self):
        x = array([[-1., -2., 3.]])
        expected = [[0., 0., 0.]]
        self.assertListEqual(scal(0, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_negative_alpha_with_vectors(self):
        x = array([[-1., -2., 3.]])
        expected = [[1.5, 3., -4.5]]
        self.assertListEqual(scal(-1.5, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_vector_as_matrix(self):
        x = asmatrix(array([[1., 2., 3.]]))
        expected = [[2., 4., 6.]]
        self.assertListEqual(scal(2, x).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    # Strided access: only every inc_x-th element is scaled.
    def test_stride_less_than_length(self):
        x = array([[1., 2., 3.]])
        expected = [[2., 2., 6.]]
        self.assertListEqual(scal(2, x, inc_x=2).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_stride_greater_than_length(self):
        x = array([[1., 2., 3.]])
        expected = [[2., 2., 3.]]
        self.assertListEqual(scal(2, x, inc_x=3).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_float32_dtype(self):
        x = array([[1., 2., 3.]], dtype='float32')
        self.assertEqual(x.dtype, 'float32')
        expected = [[2., 4., 6.]]
        self.assertListEqual(scal(2, x,).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    def test_float64_dtype(self):
        x = array([[1., 2., 3.]], dtype='float64')
        self.assertEqual(x.dtype, 'float64')
        expected = [[2., 4., 6.]]
        self.assertListEqual(scal(2, x,).tolist(), expected)
        self.assertListEqual(x.tolist(), expected)
    # Invalid inputs: scal only accepts 2D float32/float64 numpy vectors.
    def test_not_numpy_with_list_raises_ValueError(self):
        x = [[1., 2., 3.]]
        self.assertRaises(ValueError, scal, 1, x)
    def test_not_numpy_with_scalar_raises_ValueError(self):
        x = 1.
        self.assertRaises(ValueError, scal, 1, x)
    def test_not_2d_numpy_with_1d__raises_ValueError(self):
        x = array([1., 2., 3.])
        self.assertRaises(ValueError, scal, 1, x)
    def test_not_2d_numpy_with_3d_raises_ValueError(self):
        x = array([[[1.], [2.], [3.]]], ndmin=3)
        self.assertRaises(ValueError, scal, 1, x)
    def test_not_vector_raises_ValueError(self):
        x = array([[1., 2.], [3., 4.]])
        self.assertRaises(ValueError, scal, 1, x)
    def test_integer_dtype_raises_ValueError(self):
        x = array([[1., 2., 3.]], dtype='int')
        self.assertEqual(x.dtype, 'int')
        self.assertRaises(ValueError, scal, 1, x)
    def test_complex_dtype_raises_ValueError(self):
        x = array([[1., 2., 3.]], dtype='complex')
        self.assertEqual(x.dtype, 'complex')
        self.assertRaises(ValueError, scal, 1, x)
290059 | <reponame>pawelkopka/kopf<filename>tests/e2e/conftest.py
import glob
import os.path
import pathlib
import subprocess
import pytest
# Repository root is three directory levels above this conftest.
root_dir = os.path.relpath(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
examples = sorted(glob.glob(os.path.join(root_dir, 'examples/*/')))
assert examples  # if empty, it is just the detection failed
# Examples that ship their own test modules are run directly by pytest,
# so they are excluded from this generic end-to-end sweep.
# (Fix: dropped the redundant extra parentheses around os.path.join.)
examples = [path for path in examples if not glob.glob(os.path.join(path, 'test*.py'))]
@pytest.fixture(params=examples, ids=[os.path.basename(path.rstrip('/')) for path in examples])
def exampledir(request):
    """Parametrized fixture yielding each example directory as a Path."""
    return pathlib.Path(request.param)
@pytest.fixture()
def with_crd():
    """Apply the example CRD so the cluster knows the KopfExample resource."""
    subprocess.run("kubectl apply -f examples/crd.yaml",
                   shell=True, check=True, timeout=10, capture_output=True)
@pytest.fixture()
def with_peering():
    """Apply the peering manifest needed for multi-operator coordination."""
    subprocess.run("kubectl apply -f peering.yaml",
                   shell=True, check=True, timeout=10, capture_output=True)
@pytest.fixture()
def no_crd():
    """Remove the KopfExample CRD from the cluster."""
    subprocess.run("kubectl delete customresourcedefinition kopfexamples.zalando.org",
                   shell=True, check=True, timeout=10, capture_output=True)
@pytest.fixture()
def no_peering():
    """Remove the KopfPeering CRD from the cluster."""
    subprocess.run("kubectl delete customresourcedefinition kopfpeerings.zalando.org",
                   shell=True, check=True, timeout=10, capture_output=True)
| StarcoderdataPython |
5134983 | <reponame>NIkolayrr/python_fundamentals_exam
# -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-24 11:20
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated (Django 1.11) migration altering Car.year.
    dependencies = [
        ('cars', '0003_auto_20180823_2305'),
    ]
    operations = [
        migrations.AlterField(
            model_name='car',
            name='year',
            # NOTE(review): max_length is not a valid IntegerField option —
            # it is silently ignored here and rejected by newer Django
            # versions. Left unchanged because editing applied migrations
            # breaks migration history; confirm before cleaning up.
            field=models.IntegerField(max_length=4),
        ),
    ]
| StarcoderdataPython |
5099464 | # -*- coding: iso-8859-1 -*-
"""
MoinMoin - PHP session cookie authentication
Currently supported systems:
* eGroupware 1.2 ("egw")
* You need to configure eGroupware in the "header setup" to use
"php sessions plus restore"
@copyright: 2005 MoinMoin:AlexanderSchremmer (Thanks to Spreadshirt)
@license: GNU GPL, see COPYING for details.
"""
import urllib
from MoinMoin import user
from MoinMoin.auth import _PHPsessionParser, BaseAuth
class PHPSessionAuth(BaseAuth):
    """ PHP session cookie authentication

    Reads a PHP session referenced by a request cookie and, for supported
    applications (currently eGroupware), logs the matching wiki user in.
    """
    name = 'php_session'

    def __init__(self, apps=None, s_path="/tmp", s_prefix="sess_", autocreate=False):
        """ @param apps: A list of the enabled applications (defaults to
                         ['egw']). See above for possible keys.
            @param s_path: The path where the PHP sessions are stored.
            @param s_prefix: The prefix of the session files.
            @param autocreate: Create/update the wiki account from session data.
        """
        BaseAuth.__init__(self)
        # Fix: avoid a mutable default argument (previously apps=['egw']).
        self.apps = ['egw'] if apps is None else apps
        self.s_path = s_path
        self.s_prefix = s_prefix
        self.autocreate = autocreate

    def request(self, request, user_obj, **kw):
        """Try to authenticate from a PHP session cookie.

        Returns (user, True); the True lets the next auth method run too.
        """
        def handle_egroupware(session):
            """ Extracts name, fullname and email from the session. """
            username = session['egw_session']['session_lid'].split("@", 1)[0]
            known_accounts = session['egw_info_cache']['accounts']['cache']['account_data']
            # if the next line breaks, then the cache was not filled with the current
            # user information
            user_info = [value for key, value in known_accounts.items()
                         if value['account_lid'] == username][0]
            name = user_info.get('fullname', '')
            email = user_info.get('email', '')
            dec = lambda x: x and x.decode("iso-8859-1")
            return dec(username), dec(email), dec(name)

        cookie = kw.get('cookie')
        if cookie is not None:  # fix: "is not None" instead of "not ... is None"
            for cookiename in cookie:
                cookievalue = urllib.unquote(cookie[cookiename].value).decode('iso-8859-1')
                session = _PHPsessionParser.loadSession(cookievalue, path=self.s_path, prefix=self.s_prefix)
                if session:
                    if "egw" in self.apps and session.get('egw_session', None):
                        username, email, name = handle_egroupware(session)
                        break
            else:
                # No supported session was found in any cookie.
                return user_obj, True
            u = user.User(request, name=username, auth_username=username,
                          auth_method=self.name)
            changed = False
            if name != u.aliasname:
                u.aliasname = name
                changed = True
            if email != u.email:
                u.email = email
                changed = True
            if u and self.autocreate:
                u.create_or_update(changed)
            if u and u.valid:
                return u, True # True to get other methods called, too
        return user_obj, True # continue with next method in auth list
| StarcoderdataPython |
9691635 | <gh_stars>1-10
#!/usr/bin/python
# -*- coding: utf-8 -*-
# ------------------------------------------------------------------------------
# MODULE INFORMATIONS ----------------------------------------------------------
DOCUMENTATION = '''
---
module: make
short_description: Perform make
author:
- "<NAME>"
'''
EXAMPLES = '''
TODO
'''
# ------------------------------------------------------------------------------
# COMMONS (copy&paste) ---------------------------------------------------------
class BaseObject(object):
    import syslog, os
    '''Base class for all classes that use AnsibleModule.
    Dependencies:
    - `chrooted` function.
    '''
    def __init__(self, module, params=None):
        # NOTE(review): the bare `syslog`/`os` names used in method bodies
        # resolve at module level (presumably supplied via
        # `from ansible.module_utils.basic import *`); the class-level
        # import above only creates class attributes — confirm.
        syslog.openlog('ansible-{module}-{name}'.format(
            module=os.path.basename(__file__), name=self.__class__.__name__))
        self.work_dir = None
        self.chroot = None
        self._module = module
        self._command_prefix = None
        if params:
            self._parse_params(params)

    @property
    def command_prefix(self):
        return self._command_prefix

    @command_prefix.setter
    def command_prefix(self, value):
        self._command_prefix = value

    def run_command(self, command=None, **kwargs):
        """Run `command` (with prefix, work_dir and chroot applied) and
        return a dict with rc, raw out/err and their non-empty lines."""
        if 'check_rc' not in kwargs:  # idiom fix: "x not in" over "not x in"
            kwargs['check_rc'] = True
        if command is None and self.command_prefix is None:
            self.fail('Invalid command')
        if self.command_prefix:
            command = '{prefix} {command}'.format(
                prefix=self.command_prefix, command=command or '')
        if self.work_dir and not self.chroot:
            command = 'cd {work_dir}; {command}'.format(
                work_dir=self.work_dir, command=command)
        if self.chroot:
            command = chrooted(command, self.chroot, work_dir=self.work_dir)
        self.log('Performing command `{}`'.format(command))
        rc, out, err = self._module.run_command(command, **kwargs)
        if rc != 0:
            self.log('Command `{}` returned invalid status code: `{}`'.format(
                command, rc), level=syslog.LOG_WARNING)
        return {'rc': rc,
                'out': out,
                'out_lines': [line for line in out.split('\n') if line],
                'err': err,
                # Bug fix: err_lines previously split `out` instead of `err`.
                'err_lines': [line for line in err.split('\n') if line]}

    def log(self, msg, level=syslog.LOG_DEBUG):
        '''Log to the system logging facility of the target system.'''
        if os.name == 'posix': # syslog is unsupported on Windows.
            syslog.syslog(level, str(msg))

    def fail(self, msg):
        self._module.fail_json(msg=msg)

    def exit(self, changed=True, msg='', result=None):
        self._module.exit_json(changed=changed, msg=msg, result=result)

    def _parse_params(self, params):
        """Copy the listed module params onto attributes; normalize the
        strings 'None'/'none' to None for str-typed params."""
        for param in params:
            if param in self._module.params:
                value = self._module.params[param]
                t = self._module.argument_spec[param].get('type')
                if t == 'str' and value in ['None', 'none']:
                    value = None
                setattr(self, param, value)
            else:
                setattr(self, param, None)
def chrooted(command, path, profile='/etc/profile', work_dir=None):
    """Wrap *command* so it runs inside the chroot at *path*.

    The shell profile is sourced first; if *work_dir* is given, the
    command is executed from that directory (inside the chroot).
    Returns the complete shell command string.
    """
    parts = ["chroot {path} bash -c 'source {profile}; ".format(
        path=path, profile=profile)]
    if work_dir:
        parts.append('cd {work_dir}; '.format(work_dir=work_dir))
    parts.append(command)
    parts.append("'")
    return ''.join(parts)
# ------------------------------------------------------------------------------
# EXECUTOR ---------------------------------------------------------------------
class MakeExecutor(BaseObject):
    '''Execute `make`.
    '''
    def __init__(self, module):
        # Exposes task/opts/work_dir/chroot module params as attributes.
        super(MakeExecutor, self).__init__(module,
            params=['task', 'opts', 'work_dir', 'chroot'])
        self.command_prefix = 'make'
    def run(self):
        """Build and run: `make [task] [NAME=value ...]`."""
        command = ''
        if self.task:
            command += self.task
        if self.opts:
            for name, value in self.opts.items():
                command += ' {name}={value}'.format(name=name, value=value)
        self.run_command(command)
# ------------------------------------------------------------------------------
# MAIN FUNCTION ----------------------------------------------------------------
def main():
    """Module entry point: declare the argument spec and run make."""
    module = AnsibleModule(argument_spec=dict(
        task=dict(type='str', required=False, default=None),
        opts=dict(type='dict', required=False, default={}),
        work_dir=dict(type='str', required=False, default=None),
        chroot=dict(type='str', required=False, default=None)))
    make = MakeExecutor(module)
    make.run()
    module.exit_json(changed=True, msg='Make command successfully executed')
# ------------------------------------------------------------------------------
# ENTRY POINT ------------------------------------------------------------------
from ansible.module_utils.basic import *
if __name__ == '__main__':
main()
# ------------------------------------------------------------------------------
# vim: set filetype=python :
| StarcoderdataPython |
8112571 | <gh_stars>1-10
# This file is part of Indico.
# Copyright (C) 2002 - 2020 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
from __future__ import unicode_literals
from flask import has_request_context, render_template, session
from indico.core import signals
from indico.modules.events.layout.util import MenuEntryData
from indico.modules.users import User
from indico.modules.vc.forms import VCPluginSettingsFormBase
from indico.modules.vc.models.vc_rooms import VCRoom, VCRoomEventAssociation
from indico.modules.vc.plugins import VCPluginMixin
from indico.modules.vc.util import get_managed_vc_plugins, get_vc_plugins
from indico.util.i18n import _
from indico.web.flask.templating import get_overridable_template_name, template_hook
from indico.web.flask.util import url_for
from indico.web.menu import SideMenuItem, TopMenuItem
__all__ = ('VCPluginMixin', 'VCPluginSettingsFormBase', 'VCRoomEventAssociation')
@template_hook('event-header')
def _inject_event_header(event, **kwargs):
    """Render VC room info in the event header when linked rooms exist."""
    res = VCRoomEventAssociation.find_for_event(event, only_linked_to_event=True)
    # Skip associations whose plugin is no longer installed.
    event_vc_rooms = [event_vc_room for event_vc_room in res.all() if event_vc_room.vc_room.plugin is not None]
    if event_vc_rooms:
        return render_template('vc/event_header.html', event=event, event_vc_rooms=event_vc_rooms)
@template_hook('vc-actions')
def _inject_vc_room_action_buttons(event, item, **kwargs):
    """Render the VC action buttons for a timetable item with a linked room."""
    event_vc_room = VCRoomEventAssociation.get_linked_for_event(event).get(item)
    if event_vc_room and event_vc_room.vc_room.plugin:
        plugin = event_vc_room.vc_room.plugin
        # Allow the plugin to override the default buttons template.
        name = get_overridable_template_name('vc_room_timetable_buttons.html', plugin, core_prefix='vc/')
        return render_template(name, event=event, event_vc_room=event_vc_room, **kwargs)
@signals.menu.items.connect_via('event-management-sidemenu')
def _extend_event_management_menu(sender, event, **kwargs):
    """Add 'Videoconference' to the event management side menu (managers only)."""
    if not get_vc_plugins():
        return
    if not event.can_manage(session.user):
        return
    return SideMenuItem('videoconference', _('Videoconference'), url_for('vc.manage_vc_rooms', event),
                        section='services')
@signals.event.sidemenu.connect
def _extend_event_menu(sender, **kwargs):
    """Add a 'Videoconference Rooms' page to the event display menu."""
    def _visible(event):
        # Only show when a VC plugin is enabled and the event has rooms.
        return bool(get_vc_plugins()) and VCRoomEventAssociation.find_for_event(event).has_rows()
    return MenuEntryData(_('Videoconference Rooms'), 'videoconference_rooms', 'vc.event_videoconference',
                         position=14, visible=_visible)
@signals.event.contribution_deleted.connect
@signals.event.session_block_deleted.connect
def _link_object_deleted(obj, **kwargs):
    """Re-link VC rooms of a deleted contribution/session block to its event."""
    for event_vc_room in obj.vc_room_associations:
        event_vc_room.link_object = obj.event
@signals.event.session_deleted.connect
def _session_deleted(sess, **kwargs):
    """Re-link VC rooms of every block of a deleted session to the event."""
    for block in sess.blocks:
        _link_object_deleted(block)
@signals.event.deleted.connect
def _event_deleted(event, **kwargs):
    """Delete all VC room associations (incl. hidden/deleted) of the event."""
    # Fall back to the system user outside a request context (e.g. tasks).
    user = session.user if has_request_context() and session.user else User.get_system_user()
    for event_vc_room in VCRoomEventAssociation.find_for_event(event, include_hidden=True, include_deleted=True):
        event_vc_room.delete(user)
@signals.menu.items.connect_via('top-menu')
def _topmenu_items(sender, **kwargs):
    """Show the 'Videoconference' top-menu item to VC plugin managers."""
    if not session.user or not get_managed_vc_plugins(session.user):
        return
    return TopMenuItem('services-vc', _('Videoconference'), url_for('vc.vc_room_list'), section='services')
@signals.event_management.get_cloners.connect
def _get_vc_cloner(sender, **kwargs):
    """Register the VC room cloner used when cloning events."""
    # Imported lazily to avoid a circular import at module load time.
    from indico.modules.vc.clone import VCCloner
    return VCCloner
@signals.users.merged.connect
def _merge_users(target, source, **kwargs):
    """Transfer VC room ownership from the merged-away user to the target."""
    VCRoom.find(created_by_id=source.id).update({VCRoom.created_by_id: target.id})
| StarcoderdataPython |
5001536 | """Server start parameters."""
from ipaddress import IPv4Address, IPv6Address, ip_address
from typing import NamedTuple, Iterator, Optional
from dzdsu.constants import CONFIG_FILE
__all__ = ['ServerParams']
class ServerParams(NamedTuple):
    """Start parameters accepted by the server executable."""

    config: str = CONFIG_FILE
    do_logs: bool = True
    admin_log: bool = True
    net_log: bool = True
    src_allow_file_write: bool = True
    no_file_patching: bool = True
    freeze_check: bool = True
    ip: Optional[IPv4Address | IPv6Address] = None
    port: Optional[int] = None
    profiles: Optional[str] = None
    cpu_count: Optional[int] = None

    @classmethod
    def from_json(cls, json: dict):
        """Build a ServerParams from a JSON-ish dict, applying defaults."""
        raw_ip = json.get('ip')
        return cls(
            config=json.get('config', CONFIG_FILE),
            do_logs=json.get('doLogs', True),
            admin_log=json.get('adminLog', True),
            net_log=json.get('netLog', True),
            src_allow_file_write=json.get('srcAllowFileWrite', True),
            no_file_patching=json.get('noFilePatching', True),
            freeze_check=json.get('freezeCheck', True),
            ip=ip_address(raw_ip) if raw_ip is not None else None,
            port=json.get('port'),
            profiles=json.get('profiles'),
            cpu_count=json.get('cpuCount'),
        )

    @property
    def executable_args(self) -> Iterator[str]:
        """Yield the command line arguments for the server executable."""
        yield f'-config={self.config}'
        # Boolean switches, emitted when enabled (order is significant).
        switches = (
            ('-doLogs', self.do_logs),
            ('-adminLog', self.admin_log),
            ('-netLog', self.net_log),
            ('-srcAllowFileWrite', self.src_allow_file_write),
            ('-noFilePatching', self.no_file_patching),
            ('-freezeCheck', self.freeze_check),
        )
        for switch, enabled in switches:
            if enabled:
                yield switch
        # Key/value options, skipped entirely when unset.
        options = (
            ('ip', self.ip),
            ('port', self.port),
            ('profiles', self.profiles),
            ('cpuCount', self.cpu_count),
        )
        for key, value in options:
            if value is not None:
                yield f'-{key}={value}'
| StarcoderdataPython |
11348340 | # coding=utf-8
'''
Created on 2015-9-24
@author: Devuser
'''
from doraemon.home.pagefactory.pageworker import DevicePageWorker
from doraemon.home.viewmodels.home_left_nav_bar import HomeTaskLeftNavBar
from doraemon.home.viewmodels.home_sub_nav_bar import HomeTaskSubNavBar
from doraemon.home.pagefactory.home_template_path import HomeTaskPath
from doraemon.project.pagefactory.project_task_pageworker import ProjectTaskPageWorker
from business.project.task_service import TaskService
from business.auth_user.user_service import UserService
from doraemon.project.viewmodels.vm_project_member import VM_ProjectMember
class HomeTaskPageWorker(DevicePageWorker):
    '''
    Project page generator (home "tasks" view).
    '''
    def __init__(self, request):
        '''
        Constructor
        '''
        DevicePageWorker.__init__(self, request)
        self.side_bar_model = HomeTaskLeftNavBar
        self.sub_side_bar_model = HomeTaskSubNavBar
    def get_full_page(self, request, start_index, sub_nav_action):
        """Render the full task page: nav bars plus a paged task list."""
        owner = 0
        # "Created by me" / "assigned to me" views filter by the current user.
        if sub_nav_action.upper() == "CREATEBYME" or sub_nav_action.upper() == "ASGINME":
            owner = request.user.id
        task_list = TaskService.all_my_tasks(request, sub_nav_action, owner)
        # Page window of 8 tasks starting at start_index.
        page_index = [start_index, start_index + 8]
        sub_leftnav = self.get_task_sub_navbar(request, task_list, sub_nav_action)
        left_nav_bar = self.get_task_left_bar(request, sub_nav_action)
        task_page_worker = ProjectTaskPageWorker(request)
        task_list_webpart = task_page_worker.get_task_list_webpart(task_list, page_index, True, False, True)
        page_fileds = {'left_nav_bar': left_nav_bar, 'sub_leftnav': sub_leftnav, 'task_list': task_list_webpart}
        return self.get_page(page_fileds, HomeTaskPath.task_index_path, request)
    def get_task_left_bar(self, request, sub_nav_action):
        """Render the main left navigation bar."""
        return self.get_left_nav_bar(request, self.side_bar_model, HomeTaskPath.left_nav_template_path,
                                     sub_nav_action=sub_nav_action)
    def get_task_sub_navbar(self, request, task_list, sub_nav_action):
        """Render the sub navigation bar listing the distinct task owners."""
        task_list = TaskService.all_my_tasks(request, sub_nav_action, 0)
        owner_id_list = list()
        vm_members = list()
        for task in task_list:
            # NOTE(review): eval() on a stored field is unsafe if Owner can be
            # user-controlled — consider ast.literal_eval/json.loads instead.
            temp_list = eval(task.Owner)
            for owner in temp_list:
                if owner != "" and owner not in owner_id_list:
                    owner_id_list.append(owner)
        for owner_id in owner_id_list:
            member = UserService.get_user(int(owner_id))
            tmp_member = VM_ProjectMember(0, member, 0, None)
            vm_members.append(tmp_member)
        return self.get_left_nav_bar(request, self.sub_side_bar_model, HomeTaskPath.sub_nav_template_path,
                                     sub_nav_action=sub_nav_action, tasks=task_list, members=vm_members)
    def get_more_tasks(self, request, filters, owner, start_index):
        """Return the next page (8 tasks) of the owner's tasks as a list control."""
        task_list = TaskService.project_tasks_byowner(request, 0, owner, filters)[start_index:start_index + 8]
        task_page_worker = ProjectTaskPageWorker(request)
        return task_page_worker.get_task_listcontrol(task_list, True, False, True)
    def get_owner_tasks(self, request, project_id, filters, owner):
        """Return the first page (8 tasks) of the owner's tasks for a project."""
        task_list = TaskService.project_tasks_byowner(request, project_id, owner, filters)[0:8]
        task_page_worker = ProjectTaskPageWorker(request)
        return task_page_worker.get_task_listcontrol(task_list, True, False, True)
| StarcoderdataPython |
6517544 | <reponame>deloragaskins/deepchem
# flake8: noqa
# MAML support has optional heavy dependencies; skip the re-export silently
# when they are missing so the rest of the package still imports.
try:
  from deepchem.metalearning.maml import MAML, MetaLearner
except ModuleNotFoundError:
  pass
| StarcoderdataPython |
3313220 | <reponame>bds-ailab/logflow
import unittest
import torch.multiprocessing
from unittest.mock import mock_open, patch
from logflow.relationsdiscover.Saver import Saver
from logflow.relationsdiscover.Model import LSTMLayer
from logflow.relationsdiscover.Result import Result
from logflow.relationsdiscover.Cardinality import Cardinality
import pickle
class UtilTest(unittest.TestCase):
    """Tests for logflow Saver.save/load with a mocked filesystem."""
    def setUp(self):
        # Spawn-context lock, as used by the real multiprocessing workers.
        self.lock = torch.multiprocessing.get_context('spawn').Lock()
        self.model = LSTMLayer(num_classes=5)
        cardinality = Cardinality(3, "", "")
        cardinality.list_classes = [1,1,1,2,2,3,4,5,6]
        cardinality.counter= {1:10, 2:100, 3:100, 4:100, 6:1000, 5:1000}
        cardinality.compute_position()
        self.result = Result(cardinality)
    @patch('os.path.isfile')
    def test_saver_no_file(self, mock_isfile):
        """save() works when no previous model file exists."""
        mock_isfile.return_value = False
        self.saver = Saver("test", "./", 3, self.lock)
        read_data = ""
        mockOpen = mock_open(read_data=read_data)
        with patch('builtins.open', mockOpen):
            self.saver.save(self.model, condition="Test", result= self.result)
    @patch('os.path.isfile')
    def test_saver_file(self, mock_isfile):
        """save() merges into an existing pickled model file."""
        mock_isfile.return_value = True
        self.saver = Saver("test", "./", 3, self.lock)
        read_data = pickle.dumps({"LSTM": {3:self.model.state_dict()}})
        mockOpen = mock_open(read_data=read_data)
        with patch('builtins.open', mockOpen):
            self.saver.save(self.model, condition="Test", result= self.result)
    @patch('os.path.isfile')
    def test_saver_file_empty(self, mock_isfile):
        """save() tolerates an existing but empty pickle payload."""
        mock_isfile.return_value = True
        self.saver = Saver("test", "./", 3, self.lock)
        read_data = pickle.dumps({})
        mockOpen = mock_open(read_data=read_data)
        with patch('builtins.open', mockOpen):
            self.saver.save(self.model, condition="Test", result=self.result)
    @patch('os.path.isfile')
    def test_load_file(self, mock_isfile):
        """load() returns an LSTMLayer restored from the pickle file."""
        mock_isfile.return_value = True
        self.saver = Saver("test", "./", 3, self.lock)
        read_data = pickle.dumps({"LSTM": {3:self.model.state_dict()}})
        mockOpen = mock_open(read_data=read_data)
        with patch('builtins.open', mockOpen):
            model = self.saver.load(self.model)
        self.assertIsInstance(model, LSTMLayer)
    @patch('os.path.isfile')
    def test_load_no_file(self, mock_isfile):
        """load() raises when the model file is missing."""
        mock_isfile.return_value = False
        self.saver = Saver("test", "./", 3, self.lock)
        read_data = pickle.dumps({"LSTM": {3:self.model.state_dict()}})
        mockOpen = mock_open(read_data=read_data)
        with self.assertRaises(Exception):
            with patch('builtins.open', mockOpen):
                self.saver.load(self.model)
6614506 | <gh_stars>0
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import argparse
import numpy as np
from sklearn.datasets import fetch_california_housing
from sklearn.model_selection import train_test_split
import pandas as pd
import matplotlib.pyplot as plt
from normal_equation import linear_regression_normal_equation
import matplotlib
matplotlib.style.use('seaborn')
def plot_data():
    """ plot chart from prepared data """
    # check results file
    if not os.path.isfile(FLAGS.results_file):
        raise IOError("No such file '{}'".format(FLAGS.results_file))
    # read DataFrame from results file
    results = pd.read_csv(FLAGS.results_file, index_col='num_objects')
    # All rows share one lambda; pull it for the title, drop the column.
    lambda_value = results['lambda'].unique()[0]
    results = results.drop('lambda', axis=1)
    # plot results (log-scaled x: number of training/test objects)
    ax = results.plot(alpha=1)
    ax.set_title("""California housing with Linear Regression
                 L2 regularization lambda = {}""".format(lambda_value))
    ax.set_xlabel('number of objects')
    ax.set_ylabel('MSLE')
    ax.set_xscale('log')
    ax.legend()
    plt.show()
def main():
    """Compute train/test scores for varying dataset sizes, save CSV, plot."""
    # create results file if it does not exist
    # NOTE(review): --force only influences directory creation; the results
    # are recomputed and overwritten on every run regardless — confirm
    # whether an early-return on an existing file was intended.
    if FLAGS.force or not os.path.isfile(FLAGS.results_file):
        os.makedirs(os.path.dirname(FLAGS.results_file), exist_ok=True)
    # get data
    housing = fetch_california_housing()
    # create list of number of object
    num_objects_list = [50, 100, 500, 1000, 5000, 10000]
    lambda_value = FLAGS.lambda_value
    # collect data with different count of objects
    train_score_list, test_score_list, lambda_list = [], [], []
    for i in num_objects_list:
        # split data (both splits sized i; fixed seed for reproducibility)
        trainx, testx, trainy, testy = train_test_split(
            housing.data, housing.target, test_size=i, train_size=i,
            random_state=100)
        # get score
        train_score, test_score = linear_regression_normal_equation(
            trainx, testx, trainy, testy, lambda_value)
        train_score_list.append(train_score[0])
        test_score_list.append(test_score[0])
        lambda_list.append(lambda_value)
    # create DataFrame object
    data = pd.DataFrame({'lambda': lambda_list,
                         'train_score': train_score_list,
                         'test_score': test_score_list},
                        index=num_objects_list)
    # set num_objects as index
    data.index.name = 'num_objects'
    # save data to csv file
    data.to_csv(FLAGS.results_file, header=True)
    plot_data()
if __name__ == '__main__':
    # eval filename without extension
    filename, _ = os.path.splitext(os.path.basename(__file__))
    parser = argparse.ArgumentParser()
    parser.add_argument('--force', action='store_true')
    # NOTE(review): --test_size is accepted but never read by main().
    parser.add_argument('--test_size', type=float, default=0.5)
    parser.add_argument('--lambda_value', type=float, default=0.35)
    parser.add_argument(
        '--results_file',
        type=str,
        default=os.path.join(tempfile.gettempdir(),
                             'fun-with-machine-learning',
                             filename + '.csv'), # output data has the same name
        help='File with results')
    FLAGS, unparsed = parser.parse_known_args()
    main()
| StarcoderdataPython |
4999528 | <filename>authors/apps/authentication/tests/test_auth.py
from rest_framework import status
from authors.apps.authentication.tests.base_test import BaseTest
class TestGetUser(BaseTest):
    """Tests for retrieving the current user via token authentication."""
    def test_get_users(self):
        # A valid "token <jwt>" header returns the authenticated user.
        token = self.authenticate_user(self.auth_user_data).data["token"]
        response = self.client.get(self.user_url,
                                   HTTP_AUTHORIZATION=f'token {token}'
                                   )
        self.assertEqual(response.status_code, status.HTTP_200_OK)
        self.assertEqual(response.data["email"], "<EMAIL>")
    def test_wrong_token_header(self):
        # Missing space between the scheme and the token -> forbidden.
        token = self.authenticate_user(self.auth_user_data).data["token"]
        response = self.client.get(self.user_url,
                                   HTTP_AUTHORIZATION=f'token{token}'
                                   )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_too_many_arguments_in_header(self):
        # Three space-separated parts in the header -> forbidden.
        token = self.authenticate_user(self.auth_user_data).data["token"]
        response = self.client.get(self.user_url,
                                   HTTP_AUTHORIZATION=f'token dd {token}'
                                   )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
    def test_invalid_token(self):
        # A malformed token value -> forbidden.
        response = self.client.get(self.user_url,
                                   HTTP_AUTHORIZATION=f'token hjdgjfg ddd'
                                   )
        self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)
| StarcoderdataPython |
2449 | <gh_stars>0
# Public names re-exported by this package.
__all__ = ['EnemyBucketWithStar',
           'Nut',
           'Beam',
           'Enemy',
           'Friend',
           'Hero',
           'Launcher',
           'Rotor',
           'SpikeyBuddy',
           'Star',
           'Wizard',
           'EnemyEquipedRotor',
           'CyclingEnemyObject',
           'Joints',
           'Bomb',
           'Contacts']
| StarcoderdataPython |
9617493 | ####################################
# Train LBMNet using kitti dataset #
####################################
import os
import cv2
import time
import random
import numpy as np
import torch
from torch import nn
import torch.optim as optim
import torch.nn.utils as torch_utils
from torch.utils.data import DataLoader
from torchsummary import summary
import visdom
# Visdom dashboard for live loss plotting.
vis = visdom.Visdom()
vis.close(env="main")
torch.cuda.empty_cache()
from loader import Train_DataSet, Test_DataSet
from LBM import LBMNet50_Improv as LBMNet50
from utils import acc_check, value_tracker
batch_size = 4
device = 'cuda' if torch.cuda.is_available() else 'cpu'
# Seed everything for reproducible runs (cudnn deterministic mode too).
torch.manual_seed(1377)
if device == 'cuda':
    torch.cuda.manual_seed_all(1377)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
np.random.seed(1377)
random.seed(1377)
def main():
    """Train LBMNet50 on the KITTI road dataset with cosine-restart LR."""
    dataset = Train_DataSet("../path/to/kitti/data_road")
    dataset_loader = DataLoader(dataset, batch_size=batch_size, shuffle=True, drop_last=True, num_workers=0)
    dataset_test = Test_DataSet("../path/to/kitti/data_road")
    dataset_loader_test = DataLoader(dataset_test, batch_size=1, shuffle=False, drop_last=True, num_workers=0)
    model = LBMNet50().to(device)
    # checkpoint_filename = './Result/model54/model_epoch_1400.pth'
    # if os.path.exists(checkpoint_filename):
    #     model.load_state_dict(torch.load(checkpoint_filename))
    optimizer = torch.optim.AdamW(model.parameters(), lr=1e-3, betas=(0.9, 0.999), eps=1e-8, weight_decay=4e-6)
    loss_plt = vis.line(Y=torch.Tensor(1).zero_(), opts=dict(title='ResNeSt Test', legend=['loss Semantic Segmentation'], showlegend=True))
    scheduler = torch.optim.lr_scheduler.CosineAnnealingWarmRestarts(optimizer, T_0=100, T_mult=2)
    iters = len(dataset_loader)
    # If training had to be stopped midway and is being resumed, this loop
    # fast-forwards the scheduler to the learning rate of the epoch where
    # training was interrupted.
    # for epoch in range(1400):
    #     for i, data in enumerate(dataset_loader, 0):
    #         scheduler.step(epoch + i / iters)
    epochs = 3100
    start = time.time()
    for epoch in range(epochs):
        running_loss = 0.0
        for i, data in enumerate(dataset_loader, 0):
            images, labels, name = data
            images = images.to(device)
            labels = labels.to(device)
            # zero the parameter gradients
            optimizer.zero_grad()
            # forward + backward + optimize (model returns output and loss)
            output, loss = model(images, labels)
            loss.backward()
            optimizer.step()
            # Fractional-epoch step so the cosine schedule is smooth.
            scheduler.step(epoch + i / iters)
            # print statistics every 20 mini-batches
            running_loss += loss.item()
            if i % 20 == 19:
                value_tracker(vis, torch.Tensor([i + epoch * len(dataset_loader)]), torch.Tensor([running_loss / 10]), loss_plt)
                print("[%d, %5d] loss: %.3f lr: %f, time: %.3f" % (epoch + 1, i + 1, running_loss / 10, optimizer.param_groups[0]['lr'], time.time() - start))
                start = time.time()
                running_loss = 0.0
            del loss, output
        # Check Accuracy and checkpoint every 100 epochs (and after epoch 0).
        if epoch == 0 or epoch % 100 == 99:
            save_test_path = "./Result/output/path"
            acc_check(model, device, dataset_test, dataset_loader_test, epoch, save_test_path)
            torch.save(model.state_dict(), "./Result/model/output/path/model_epoch_{}.pth".format(epoch + 1))
            start = time.time()
    print("Finished Training...!")
# Script entry point.
if __name__=='__main__':
    main()
| StarcoderdataPython |
1730568 | import torch
import torch.nn as nn
class Residual(nn.Module):
    """Wraps a module and adds an identity skip connection around it."""

    def __init__(self, fn):
        super().__init__()
        # The wrapped transformation applied on the residual branch.
        self.fn = fn

    def forward(self, x):
        # y = x + f(x): identity shortcut around the wrapped module.
        return x + self.fn(x)
class ConvMixer(nn.Module):
    """ConvMixer (arXiv:2201.09792): a patch-embedding stem followed by
    `depth` mixer blocks (residual depthwise conv + pointwise conv) and a
    global-average-pool classification head.

    Args:
        dim: channel width used throughout the network.
        depth: number of mixer blocks.
        kernel_size: depthwise convolution kernel size.
        patch_size: stem patch size (also the stem stride).
        num_classes: output dimension of the final linear layer.
        activation: activation module class (instantiated per use).
    """

    def __init__(self, dim, depth,
                 kernel_size = 9, patch_size = 7, num_classes = 1000, activation = nn.GELU):
        super().__init__()
        # "same" padding for the (odd) depthwise kernel.
        pad = (kernel_size - 1) // 2

        # Stem: non-overlapping patch embedding of the RGB input.
        self.stem = nn.Sequential(
            nn.Conv2d(3, dim, kernel_size=patch_size, stride=patch_size),
            activation(),
            nn.BatchNorm2d(dim),
        )

        def make_block():
            # One mixer block: residual depthwise conv, then pointwise conv.
            return nn.Sequential(
                Residual(nn.Sequential(
                    # Depthwise convolution (one filter per channel).
                    nn.Conv2d(dim, dim, kernel_size, groups=dim, padding=pad),
                    activation(),
                    nn.BatchNorm2d(dim),
                )),
                # Pointwise (1x1) convolution mixes channels.
                nn.Conv2d(dim, dim, kernel_size=1),
                activation(),
                nn.BatchNorm2d(dim),
            )

        self.blocks = nn.Sequential(*(make_block() for _ in range(depth)))

        # Head: global average pool -> flatten -> linear classifier.
        self.head = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            nn.Flatten(),
            nn.Linear(dim, num_classes),
        )

    def forward(self, x):
        return self.head(self.blocks(self.stem(x)))
def ConvMixer1536_20(num_classes = 1000):
    """ConvMixer-1536/20: width 1536, 20 blocks, 9x9 kernels, 7x7 patches."""
    return ConvMixer(1536, 20, kernel_size=9, patch_size=7, num_classes=num_classes)
def ConvMixer1536_20_p14_k3(num_classes = 1000):
    """ConvMixer-1536/20 variant: 3x3 kernels, 14x14 patches."""
    return ConvMixer(1536, 20, kernel_size=3, patch_size=14, num_classes=num_classes)
def ConvMixer768_32(num_classes = 1000):
    """ConvMixer-768/32: width 768, 32 blocks, 7x7 kernels, 7x7 patches."""
    return ConvMixer(768, 32, kernel_size=7, patch_size=7, num_classes=num_classes)
def ConvMixer768_32_p14_k3(num_classes = 1000):
    """ConvMixer-768/32 variant: 3x3 kernels, 14x14 patches."""
    return ConvMixer(768, 32, kernel_size=3, patch_size=14, num_classes=num_classes)
if __name__ == "__main__":
    # Smoke test: report the parameter count and check the output shape of a
    # forward pass on a single ImageNet-sized input.
    # (Fixed: the final assert carried corrupted trailing tokens that made it
    # a syntax error.)
    net = ConvMixer768_32_p14_k3()
    img = torch.randn(1, 3, 224, 224)
    print(sum(p.numel() for p in net.parameters()))
    assert net(img).shape == (1, 1000)
5132871 | <reponame>bushubeke/python-compose
#pip install SQLAlchemy==1.4.3 aiosqlite
import aiosqlite
from sqlalchemy import create_engine
from sqlalchemy.dialects.sqlite import pysqlite
from sqlalchemy.ext.asyncio import create_async_engine, AsyncSession
from sqlalchemy.orm import declarative_base, sessionmaker
# Relative path to the SQLite database, served through the aiosqlite driver
# for the async engine.
DATABASE_URL = "sqlite+aiosqlite:///../fastauth.db"
# Async engine used by the application at runtime.
engine = create_async_engine(DATABASE_URL, future=True, echo=True)
# Synchronous engine pointing at the same database file; kept for the
# schema create/drop helpers commented out below.
# NOTE: the former convert_unicode=True argument was deprecated in
# SQLAlchemy 1.4 and removed in 2.0; it is a no-op on Python 3, so it is
# dropped here.
syncengine = create_engine('sqlite:///../fastauth.db')
# Session factory producing AsyncSession objects bound to the async engine.
async_session = sessionmaker(engine, expire_on_commit=False, class_=AsyncSession)
# Declarative base for the ORM models defined elsewhere.
Base = declarative_base()
#Base.metadata.create_all(bind=syncengine)
#Base.metadata.drop_all(bind=syncengine)
| StarcoderdataPython |
3512979 | <gh_stars>10-100
import numpy as np
import pandas as pd
from faerun import Faerun
import pickle
# Demo: build three scatter layers (continuous, continuous with jet colormap,
# and a dual-series categorical/continuous layer), round-trip the plot data
# through pickle, then render the plot.
faerun = Faerun(view="free", clear_color="#222222")
t = np.linspace(0, 12.0, 326)
s = np.sin(np.pi * t)
c = np.cos(np.pi * t)
sizes = np.linspace(0.1, 2.0, 326)
data = {"x": t, "y": s, "z": c, "c": t / max(t) * 100.0, "s": sizes}
data2 = {"x": t, "y": c, "z": s, "c": t / max(t), "s": sizes, "labels": sizes}
x = np.linspace(0, 12.0, 326)
c = np.random.randint(0, 6, len(x))
# Third layer: two series sharing coordinates, one categorical, one continuous.
data3 = {
    "x": x,
    "y": np.random.rand(len(x)) - 0.5,
    "z": np.random.rand(len(x)) - 0.5,
    "c": [c, x],
    "cs": np.random.rand(len(x)),
    "s": [np.random.rand(len(x)), np.random.rand(len(x))],
    "labels": c,
}
legend_labels = [(0, "A"), (1, "B"), (2, "C"), (3, "D"), (4, "E"), (5, "F")]
df = pd.DataFrame.from_dict(data)
df2 = pd.DataFrame.from_dict(data2)
faerun.add_scatter(
    "sinus",
    df,
    shader="circle",
    point_scale=5.0,
    has_legend=True,
    legend_labels=[(0.0, "Low"), (50.0, "Inbetween"), (df["c"].max(), "High")],
)
faerun.add_scatter(
    "cosinus", df2, shader="sphere", point_scale=5.0, colormap="jet", has_legend=True
)
faerun.add_scatter(
    "categorical",
    data3,
    shader="sphere",
    point_scale=5.0,
    colormap=["Set1", "viridis"],
    has_legend=True,
    categorical=[True, False],
    legend_labels=legend_labels,
    series_title=["A", "B"],
    ondblclick=["console.log(labels[0])", "console.log('B:' + labels[0])"],
)
with open("index.pickle", "wb+") as handle:
    pickle.dump(faerun.create_python_data(), handle, protocol=pickle.HIGHEST_PROTOCOL)
# Read the data back to verify the round trip.
# (Fixed: the read previously used a bare open()/close() pair; a context
# manager guarantees the handle is closed even if unpickling raises.)
with open("index.pickle", "rb") as handle:
    obj = pickle.load(handle)
faerun.plot(template="default")
| StarcoderdataPython |
4949510 | <gh_stars>0
import numpy as np
import scipy.sparse as sp
def reorder(edge_index, edge_weight=None, edge_features=None):
    """
    Sorts index in lexicographic order and reorders data accordingly.
    :param edge_index: np.array, indices to sort in lexicographic order
        (primary key: first column, then second column, ...).
    :param edge_weight: np.array or None, edge weight to sort according to the
        new order of the indices.
    :param edge_features: np.array or None, edge features to sort according to
        the new order of the indices.
    :return: a tuple containing
        - edge_index sorted in lexicographic order;
        - edge_weight in the new order, if it was given;
        - edge_features in the new order, if it was given.
    """
    # np.lexsort treats the LAST key as primary, so flip the transposed
    # index to make the first column the primary sort key.
    order = np.lexsort(np.flipud(edge_index.T))
    companions = [a for a in (edge_weight, edge_features) if a is not None]
    return tuple(arr[order] for arr in [edge_index, *companions])
def edge_index_to_matrix(edge_index, edge_weight, edge_features=None, shape=None):
    """Build a CSR adjacency matrix from an edge list.

    Edges are first sorted lexicographically via `reorder`; the sorted
    weights become the matrix entries at the sorted (row, col) positions.
    Returns the CSR matrix, or a (matrix, sorted_edge_features) pair when
    edge features were supplied.
    """
    ordered = reorder(edge_index, edge_weight, edge_features)
    matrix = sp.csr_matrix((ordered[1], ordered[0].T), shape=shape)
    return (matrix, ordered[2]) if edge_features is not None else matrix
| StarcoderdataPython |
11214854 | import os
from utils import cpp, table_reader
import glob
def main():
    """Generate the C++ construct-code headers from construct_codes.csv.

    Emits:
      * construct_codes.h  - one constexpr code per construct plus the
        serialization / parser dispatch macros,
      * all_constructs.h   - umbrella include of every typeset header,
      * typeset_<name>.h   - a skeleton Construct subclass for each entry
        not yet marked implemented in the CSV.

    Finally, if construct codes were renumbered relative to the cached CSV,
    rewrites the code bytes embedded in the test corpus.
    """
    constructs = table_reader.csv_to_list_of_tuples(
        csv_filepath="construct_codes.csv",
        tuple_name="Construct",
    )

    # --- construct_codes.h: numeric codes and dispatch macros ------------
    header_writer = cpp.HeaderWriter(
        name="construct_codes",
    )
    header_writer.write("constexpr char OPEN = 2;\n"
                        "constexpr char CLOSE = 3;\n\n")
    # Codes are assigned by CSV order, starting at 1.
    curr = 1
    for con in constructs:
        header_writer.write(f"constexpr char {con.name.upper()} = {curr};\n")
        curr += 1
    header_writer.write("\n")

    header_writer.write("#define HOPE_SERIAL_NULLARY_CASES")
    for name in [entry.name for entry in constructs if entry.arity == "0"]:
        header_writer.write(f" \\\n case {name.upper()}:")
    header_writer.write("\n")

    header_writer.write("#define HOPE_SERIAL_UNARY_CASES")
    for name in [entry.name for entry in constructs if entry.arity == "1"]:
        header_writer.write(f" \\\n case {name.upper()}:")
    header_writer.write("\n")

    header_writer.write("#define HOPE_SERIAL_BINARY_CASES")
    for name in [entry.name for entry in constructs if entry.arity == "2"]:
        header_writer.write(f" \\\n case {name.upper()}:")
    header_writer.write("\n")

    header_writer.write("#define HOPE_SERIAL_MATRIX_CASES")
    for name in [entry.name for entry in constructs if entry.arity == "nxm"]:
        header_writer.write(f" \\\n case {name.upper()}:")
    header_writer.write("\n")
    header_writer.write("\n")

    header_writer.write("#define HOPE_TYPESET_PARSER_CASES")
    for entry in constructs:
        name = entry.name
        header_writer.write(f" \\\n case {name.upper()}: TypesetSetup")
        if entry.arity == "0":
            header_writer.write(f"Nullary({name});")
        elif entry.arity == "nxm":
            header_writer.write(f"Matrix({name});")
        elif entry.arity == "2xn":
            header_writer.write(f"Construct({name}, static_cast<uint8_t>(src[index++]));")
        else:
            # NOTE(review): this emits "Construct(Name,);" with an empty
            # second macro argument for unary/binary arities - presumably
            # consumed by a variadic macro; confirm against the C++ side.
            header_writer.write(f"Construct({name},);")
    header_writer.write("\n")

    header_writer.finalize()

    # --- all_constructs.h: umbrella header --------------------------------
    header_writer = cpp.HeaderWriter(
        name="all_constructs",
        inner_namespace="Typeset",
        includes=[f"typeset_{entry.name.lower()}.h" for entry in constructs],
    )
    header_writer.finalize()

    # --- typeset_<name>.h skeletons for unimplemented constructs ----------
    for entry in constructs:
        if entry.implemented == "y":
            continue

        header_writer = cpp.HeaderWriter(
            # BUG FIX: this literal was missing its f-prefix, so every
            # skeleton was written to the same literal
            # "typeset_{entry.name.lower()}" target instead of one file
            # per construct.
            name=f"typeset_{entry.name.lower()}",
            includes=["typeset_construct.h", "typeset_subphrase.h"],
        )

        header_writer.write(f"class {entry.name} final : public Construct {{ \n")
        header_writer.write("public:\n")

        # Constructor + serialization hooks depend on the construct's arity.
        if entry.arity == "1":
            header_writer.write(f" {entry.name}(){{\n"
                                " setupUnaryArg();\n"
                                " }\n\n")
        elif entry.arity == "2":
            header_writer.write(f" {entry.name}(){{\n"
                                " setupBinaryArgs();\n"
                                " }\n\n")
        elif entry.arity == "2xn":
            header_writer.write(f" {entry.name}(uint8_t n){{\n"
                                " setupNAargs(2*n);\n"
                                " }\n\n")
            header_writer.write(" virtual void writeArgs(std::string& out, size_t& curr) "
                                "const noexcept override {\n"
                                " out[curr++] = static_cast<uint8_t>(numArgs()/2);\n"
                                " }\n\n")
            header_writer.write(" virtual size_t dims() const noexcept override { "
                                "return 1; }\n")
        elif entry.arity == "nxm":
            header_writer.write(" uint16_t rows;\n"
                                " uint16_t cols;\n\n")
            header_writer.write(f" {entry.name}(uint16_t rows, uint16_t cols)\n"
                                " : rows(rows), cols(cols) {\n"
                                " setupNAargs(rows*cols);\n"
                                " }\n\n")
            header_writer.write(" virtual void writeArgs(std::string& out, size_t& curr) "
                                "const noexcept override {\n"
                                " out[curr++] = static_cast<uint8_t>(rows);\n"
                                " out[curr++] = static_cast<uint8_t>(cols);\n"
                                " }\n\n")
            header_writer.write(" virtual size_t dims() const noexcept override { "
                                "return 2; }\n")

        header_writer.write(" virtual char constructCode() const noexcept override { return ")
        header_writer.write(entry.name.upper())
        header_writer.write("; }\n")

        if entry.script_child == "y":
            header_writer.write(" virtual bool increasesScriptDepth() const noexcept override "
                                "{ return true; }\n")

        # Optional layout/paint overrides for "big symbol" constructs.
        if entry.parent == "BigSymbol0":
            header_writer.write(
                "\n virtual void updateSizeSpecific() noexcept override {\n"
                f" width = getWidth(SEM_DEFAULT, parent->script_level, \"{entry.label}\");\n"
                " above_center = getAboveCenter(SEM_DEFAULT, parent->script_level);\n"
                " under_center = getUnderCenter(SEM_DEFAULT, parent->script_level);\n"
                " }\n"
                "\n"
                " virtual void paintSpecific(Painter& painter) const override {\n"
                f" painter.drawSymbol(x, y, \"{entry.label}\");\n"
                " }\n"
            )
        elif entry.parent == "BigSymbol1":
            header_writer.write(
                " double symbol_width;\n"
                "\n"
                " virtual void updateSizeSpecific() noexcept override {\n"
                f" symbol_width = getWidth(SEM_DEFAULT, parent->script_level, \"{entry.label}\");\n"
                " width = std::max(symbol_width, child()->width);\n"
                " above_center = getAboveCenter(SEM_DEFAULT, parent->script_level);\n"
                " under_center = getUnderCenter(SEM_DEFAULT, parent->script_level) + child()->height();\n"
                " }\n"
                "\n"
                " virtual void updateChildPositions() override {\n"
                " child()->x = x + (width - child()->width)/2;\n"
                " child()->y = y + height() - child()->height();\n"
                " }\n"
                "\n"
                " virtual void paintSpecific(Painter& painter) const override {\n"
                " double symbol_x = x + (width - symbol_width) / 2;\n"
                f" painter.drawSymbol(symbol_x, y, \"{entry.label}\");\n"
                " }\n"
            )
        elif entry.parent == "BigSymbol2":
            header_writer.write(
                " double symbol_width;\n"
                "\n"
                "virtual void updateSizeSpecific() noexcept override {\n"
                f" symbol_width = 1*getWidth(SEM_DEFAULT, parent->script_level, \"{entry.label}\");\n"
                " width = std::max(symbol_width, std::max(first()->width, second()->width));\n"
                " above_center = getAboveCenter(SEM_DEFAULT, parent->script_level) + first()->height();\n"
                " under_center = 1*getUnderCenter(SEM_DEFAULT, parent->script_level) + second()->height();\n"
                " }\n"
                "\n"
                " virtual void updateChildPositions() override {\n"
                " first()->x = x + (width - first()->width)/2;\n"
                " first()->y = y;\n"
                " second()->x = x + (width - second()->width)/2;\n"
                " second()->y = y + height() - second()->height();\n"
                " }\n"
                "\n"
                " virtual void paintSpecific(Painter& painter) const override {\n"
                " double symbol_x = x + (width - symbol_width) / 2;\n"
                f" painter.drawSymbol(symbol_x, y + second()->height(), \"{entry.label}\");\n"
                " }\n"
            )

        header_writer.write("};\n\n")
        header_writer.finalize()

    # --- renumbering: translate code bytes embedded in test files ---------
    old_constructs = table_reader.csv_to_list_of_tuples(
        csv_filepath="cache/construct_codes.csv",
        tuple_name="OldConstruct",
    )

    # Map old code byte -> new code byte for constructs whose CSV position
    # (and hence numeric code, position+1) changed.
    changes = {}
    for i in range(0, len(old_constructs)):
        oc = old_constructs[i]
        for j in range(0, len(constructs)):
            c = constructs[j]
            if oc.name == c.name:
                if i != j:
                    changes[chr(i+1)] = chr(j+1)
                break

    if len(changes) > 0:
        dirs = ["../test"]  # , "../example"
        for dir in dirs:
            files = glob.iglob(dir + '**/**', recursive=True)
            files = [f for f in files if os.path.isfile(f)]

            for f in files:
                with open(f, 'r', encoding="utf-8") as file:
                    filedata = file.read()

                # A construct code is the byte FOLLOWING a marker character,
                # so a plain str.replace is not enough - scan and translate.
                # NOTE(review): the marker literal below appears to have lost
                # a non-printable control character in transit; as written, a
                # one-character string can never equal '' so the branch is
                # dead. Restore the original marker byte before relying on
                # this translation pass.
                # BUG FIX: rewritten from a for-loop whose manual `i += 1`
                # did not advance the loop variable; had the branch fired,
                # the code byte after the marker would have been emitted
                # twice (once translated, once raw on the next iteration).
                newstr = ""
                i = 0
                while i < len(filedata):
                    newstr += filedata[i]
                    if filedata[i] == '':
                        i += 1
                        assert i < len(filedata)
                        if filedata[i] in changes.keys():
                            newstr += changes.get(filedata[i])
                        else:
                            newstr += filedata[i]
                    i += 1
                filedata = newstr

                with open(f, 'w', encoding="utf-8") as file:
                    file.write(filedata)
# Script entry point: regenerate the construct headers when run directly.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
1674287 | <gh_stars>1-10
# pylint: disable-msg=E1101,W0612
import operator
import nose # noqa
from numpy import nan
import numpy as np
import pandas as pd
from pandas import Series, DataFrame, bdate_range, Panel
from pandas.tseries.index import DatetimeIndex
import pandas.core.datetools as datetools
import pandas.util.testing as tm
from pandas.compat import lrange
from pandas import compat
import pandas.sparse.frame as spf
from pandas._sparse import BlockIndex, IntIndex
from pandas.sparse.api import SparseSeries, SparseDataFrame
from pandas.tests.frame.test_misc_api import SharedWithSparse
class TestSparseDataFrame(tm.TestCase, SharedWithSparse):
klass = SparseDataFrame
_multiprocess_can_split_ = True
    def setUp(self):
        """Build the shared fixtures: dense originals plus sparse frames with
        NaN (default), integer-kind, zero-fill, and 2-fill variants."""
        self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
                     'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
                     'C': np.arange(10),
                     'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}
        self.dates = bdate_range('1/1/2011', periods=10)
        self.orig = pd.DataFrame(self.data, index=self.dates)
        self.iorig = pd.DataFrame(self.data, index=self.dates)
        self.frame = SparseDataFrame(self.data, index=self.dates)
        self.iframe = SparseDataFrame(self.data, index=self.dates,
                                      default_kind='integer')
        # Zero-filled variant: NaNs replaced by 0, fill value 0.
        values = self.frame.values.copy()
        values[np.isnan(values)] = 0
        self.zorig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'],
                                  index=self.dates)
        self.zframe = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
                                      default_fill_value=0, index=self.dates)
        # 2-filled variant: NaNs replaced by 2, fill value 2.
        values = self.frame.values.copy()
        values[np.isnan(values)] = 2
        self.fill_orig = pd.DataFrame(values, columns=['A', 'B', 'C', 'D'],
                                      index=self.dates)
        self.fill_frame = SparseDataFrame(values, columns=['A', 'B', 'C', 'D'],
                                          default_fill_value=2,
                                          index=self.dates)
        self.empty = SparseDataFrame()
    def test_fill_value_when_combine_const(self):
        """Arithmetic with a fill_value kwarg matches fillna-then-op (GH 12723)."""
        # GH12723
        dat = np.array([0, 1, np.nan, 3, 4, 5], dtype='float')
        df = SparseDataFrame({'foo': dat}, index=range(6))
        exp = df.fillna(0).add(2)
        res = df.add(2, fill_value=0)
        tm.assert_sp_frame_equal(res, exp)
    def test_as_matrix(self):
        """as_matrix on empty / column-less / index-less frames keeps shape."""
        empty = self.empty.as_matrix()
        self.assertEqual(empty.shape, (0, 0))
        no_cols = SparseDataFrame(index=np.arange(10))
        mat = no_cols.as_matrix()
        self.assertEqual(mat.shape, (10, 0))
        no_index = SparseDataFrame(columns=np.arange(10))
        mat = no_index.as_matrix()
        self.assertEqual(mat.shape, (0, 10))
    def test_copy(self):
        """copy() yields an equal SparseDataFrame with an identical index."""
        cp = self.frame.copy()
        tm.assertIsInstance(cp, SparseDataFrame)
        tm.assert_sp_frame_equal(cp, self.frame)
        # as of v0.15.0
        # this is now identical (but not is_a )
        self.assertTrue(cp.index.identical(self.frame.index))
    def test_constructor(self):
        """Exercise SparseDataFrame construction: column types, empty data,
        nested dicts, and dict input with a narrower index."""
        for col, series in compat.iteritems(self.frame):
            tm.assertIsInstance(series, SparseSeries)
        tm.assertIsInstance(self.iframe['A'].sp_index, IntIndex)
        # constructed zframe from matrix above
        self.assertEqual(self.zframe['A'].fill_value, 0)
        tm.assert_almost_equal([0, 0, 0, 0, 1, 2, 3, 4, 5, 6],
                               self.zframe['A'].values)
        # construct no data
        sdf = SparseDataFrame(columns=np.arange(10), index=np.arange(10))
        for col, series in compat.iteritems(sdf):
            tm.assertIsInstance(series, SparseSeries)
        # construct from nested dict
        data = {}
        for c, s in compat.iteritems(self.frame):
            data[c] = s.to_dict()
        sdf = SparseDataFrame(data)
        tm.assert_sp_frame_equal(sdf, self.frame)
        # TODO: test data is copied from inputs
        # init dict with different index
        idx = self.frame.index[:5]
        cons = SparseDataFrame(
            self.frame, index=idx, columns=self.frame.columns,
            default_fill_value=self.frame.default_fill_value,
            default_kind=self.frame.default_kind, copy=True)
        reindexed = self.frame.reindex(idx)
        tm.assert_sp_frame_equal(cons, reindexed, exact_indices=False)
        # assert level parameter breaks reindex
        self.assertRaises(TypeError, self.frame.reindex, idx, level=0)
        repr(self.frame)
    def test_constructor_ndarray(self):
        """Construct from raw ndarrays; bad index/column lengths must raise."""
        # no index or columns
        sp = SparseDataFrame(self.frame.values)
        # 1d
        sp = SparseDataFrame(self.data['A'], index=self.dates, columns=['A'])
        tm.assert_sp_frame_equal(sp, self.frame.reindex(columns=['A']))
        # raise on level argument
        self.assertRaises(TypeError, self.frame.reindex, columns=['A'],
                          level=1)
        # wrong length index / columns
        with tm.assertRaisesRegexp(ValueError, "^Index length"):
            SparseDataFrame(self.frame.values, index=self.frame.index[:-1])
        with tm.assertRaisesRegexp(ValueError, "^Column length"):
            SparseDataFrame(self.frame.values, columns=self.frame.columns[:-1])
    # GH 9272
    def test_constructor_empty(self):
        """An argument-less SparseDataFrame has no rows and no columns."""
        sp = SparseDataFrame()
        self.assertEqual(len(sp.index), 0)
        self.assertEqual(len(sp.columns), 0)
    def test_constructor_dataframe(self):
        """Round trip dense -> sparse reproduces the original sparse frame."""
        dense = self.frame.to_dense()
        sp = SparseDataFrame(dense)
        tm.assert_sp_frame_equal(sp, self.frame)
    def test_constructor_convert_index_once(self):
        """All columns must share one converted index object, not copies."""
        arr = np.array([1.5, 2.5, 3.5])
        sdf = SparseDataFrame(columns=lrange(4), index=arr)
        self.assertTrue(sdf[0].index is sdf[1].index)
    def test_constructor_from_series(self):
        """Construct a SparseDataFrame from a single SparseSeries (GH 2873)."""
        # GH 2873
        x = Series(np.random.randn(10000), name='a')
        x = x.to_sparse(fill_value=0)
        tm.assertIsInstance(x, SparseSeries)
        df = SparseDataFrame(x)
        tm.assertIsInstance(df, SparseDataFrame)
        x = Series(np.random.randn(10000), name='a')
        y = Series(np.random.randn(10000), name='b')
        x2 = x.astype(float)
        x2.ix[:9998] = np.NaN
        # TODO: x_sparse is unused...fix
        x_sparse = x2.to_sparse(fill_value=np.NaN)  # noqa
        # Currently fails too with weird ufunc error
        # df1 = SparseDataFrame([x_sparse, y])
        y.ix[:9998] = 0
        # TODO: y_sparse is unsused...fix
        y_sparse = y.to_sparse(fill_value=0)  # noqa
        # without sparse value raises error
        # df2 = SparseDataFrame([x2_sparse, y])
    def test_dtypes(self):
        """get_dtype_counts on a sparsified frame reports all-float64 columns."""
        df = DataFrame(np.random.randn(10000, 4))
        df.ix[:9998] = np.nan
        sdf = df.to_sparse()
        result = sdf.get_dtype_counts()
        expected = Series({'float64': 4})
        tm.assert_series_equal(result, expected)
    def test_shape(self):
        """All fixture variants report shape (10, 4) (GH 10452)."""
        # GH 10452
        self.assertEqual(self.frame.shape, (10, 4))
        self.assertEqual(self.iframe.shape, (10, 4))
        self.assertEqual(self.zframe.shape, (10, 4))
        self.assertEqual(self.fill_frame.shape, (10, 4))
    def test_str(self):
        """str() on a large mostly-NaN sparse frame must not raise."""
        df = DataFrame(np.random.randn(10000, 4))
        df.ix[:9998] = np.nan
        sdf = df.to_sparse()
        str(sdf)
    def test_array_interface(self):
        """ufuncs applied to the sparse frame match the dense result."""
        res = np.sqrt(self.frame)
        dres = np.sqrt(self.frame.to_dense())
        tm.assert_frame_equal(res.to_dense(), dres)
    def test_pickle(self):
        """Pickle round trips preserve every fixture variant."""
        def _test_roundtrip(frame, orig):
            result = self.round_trip_pickle(frame)
            tm.assert_sp_frame_equal(frame, result)
            tm.assert_frame_equal(result.to_dense(), orig, check_dtype=False)
        _test_roundtrip(SparseDataFrame(), DataFrame())
        self._check_all(_test_roundtrip)
    def test_dense_to_sparse(self):
        """to_sparse produces the expected sparse index kind and fill value."""
        df = DataFrame({'A': [nan, nan, nan, 1, 2],
                        'B': [1, 2, nan, nan, nan]})
        sdf = df.to_sparse()
        tm.assertIsInstance(sdf, SparseDataFrame)
        self.assertTrue(np.isnan(sdf.default_fill_value))
        tm.assertIsInstance(sdf['A'].sp_index, BlockIndex)
        tm.assert_frame_equal(sdf.to_dense(), df)
        sdf = df.to_sparse(kind='integer')
        tm.assertIsInstance(sdf['A'].sp_index, IntIndex)
        df = DataFrame({'A': [0, 0, 0, 1, 2],
                        'B': [1, 2, 0, 0, 0]}, dtype=float)
        sdf = df.to_sparse(fill_value=0)
        self.assertEqual(sdf.default_fill_value, 0)
        tm.assert_frame_equal(sdf.to_dense(), df)
    def test_density(self):
        """density is the fraction of non-NaN entries, for series and frames."""
        df = SparseSeries([nan, nan, nan, 0, 1, 2, 3, 4, 5, 6])
        self.assertEqual(df.density, 0.7)
        df = SparseDataFrame({'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
                              'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
                              'C': np.arange(10),
                              'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]})
        self.assertEqual(df.density, 0.75)
    def test_sparse_to_dense(self):
        """Placeholder; to_dense is exercised indirectly throughout."""
        pass
    def test_sparse_series_ops(self):
        """Arithmetic ops on the NaN-fill fixture frame."""
        self._check_frame_ops(self.frame)
    def test_sparse_series_ops_i(self):
        """Arithmetic ops on the integer-kind fixture frame."""
        self._check_frame_ops(self.iframe)
    def test_sparse_series_ops_z(self):
        """Arithmetic ops on the zero-fill fixture frame."""
        self._check_frame_ops(self.zframe)
    def test_sparse_series_ops_fill(self):
        """Arithmetic ops on the 2-fill fixture frame."""
        self._check_frame_ops(self.fill_frame)
    def _check_frame_ops(self, frame):
        """Compare frame/series arithmetic against the dense equivalents for
        add/sub/mul/truediv/floordiv, including mixed sparse-dense operands."""
        def _compare_to_dense(a, b, da, db, op):
            # op on sparse operands must equal op on their dense twins.
            sparse_result = op(a, b)
            dense_result = op(da, db)
            fill = sparse_result.default_fill_value
            dense_result = dense_result.to_sparse(fill_value=fill)
            tm.assert_sp_frame_equal(sparse_result, dense_result,
                                     exact_indices=False)
            if isinstance(a, DataFrame) and isinstance(db, DataFrame):
                mixed_result = op(a, db)
                tm.assertIsInstance(mixed_result, SparseDataFrame)
                tm.assert_sp_frame_equal(mixed_result, sparse_result,
                                         exact_indices=False)
        opnames = ['add', 'sub', 'mul', 'truediv', 'floordiv']
        ops = [getattr(operator, name) for name in opnames]
        fidx = frame.index
        # time series operations
        series = [frame['A'], frame['B'], frame['C'], frame['D'],
                  frame['A'].reindex(fidx[:7]), frame['A'].reindex(fidx[::2]),
                  SparseSeries(
                      [], index=[])]
        for op in opnames:
            _compare_to_dense(frame, frame[::2], frame.to_dense(),
                              frame[::2].to_dense(), getattr(operator, op))
            # 2304, no auto-broadcasting
            for i, s in enumerate(series):
                f = lambda a, b: getattr(a, op)(b, axis='index')
                _compare_to_dense(frame, s, frame.to_dense(), s.to_dense(), f)
                # rops are not implemented
                # _compare_to_dense(s, frame, s.to_dense(),
                #                   frame.to_dense(), f)
        # cross-sectional operations
        series = [frame.xs(fidx[0]), frame.xs(fidx[3]), frame.xs(fidx[5]),
                  frame.xs(fidx[7]), frame.xs(fidx[5])[:2]]
        for op in ops:
            for s in series:
                _compare_to_dense(frame, s, frame.to_dense(), s, op)
                _compare_to_dense(s, frame, s, frame.to_dense(), op)
        # it works!
        result = self.frame + self.frame.ix[:, ['A', 'B']]  # noqa
    def test_op_corners(self):
        """Arithmetic with an empty frame stays empty or produces all-NaN."""
        empty = self.empty + self.empty
        self.assertTrue(empty.empty)
        foo = self.frame + self.empty
        tm.assertIsInstance(foo.index, DatetimeIndex)
        tm.assert_frame_equal(foo, self.frame * np.nan)
        foo = self.empty + self.frame
        tm.assert_frame_equal(foo, self.frame * np.nan)
    def test_scalar_ops(self):
        """Placeholder; scalar arithmetic is covered elsewhere."""
        pass
    def test_getitem(self):
        """Multi-column selection works; unknown columns raise (GH 1585)."""
        # 1585 select multiple columns
        sdf = SparseDataFrame(index=[0, 1, 2], columns=['a', 'b', 'c'])
        result = sdf[['a', 'b']]
        exp = sdf.reindex(columns=['a', 'b'])
        tm.assert_sp_frame_equal(result, exp)
        self.assertRaises(Exception, sdf.__getitem__, ['a', 'd'])
    def test_icol(self):
        """iloc column access returns a SparseSeries and keeps the index kind."""
        # 10711 deprecated
        # 2227
        result = self.frame.iloc[:, 0]
        self.assertTrue(isinstance(result, SparseSeries))
        tm.assert_sp_series_equal(result, self.frame['A'])
        # preserve sparse index type. #2251
        data = {'A': [0, 1]}
        iframe = SparseDataFrame(data, default_kind='integer')
        self.assertEqual(type(iframe['A'].sp_index),
                         type(iframe.iloc[:, 0].sp_index))
    def test_set_value(self):
        """set_value returns a new frame with the added label and column."""
        # ok as the index gets conver to object
        frame = self.frame.copy()
        res = frame.set_value('foobar', 'B', 1.5)
        self.assertEqual(res.index.dtype, 'object')
        res = self.frame
        res.index = res.index.astype(object)
        res = self.frame.set_value('foobar', 'B', 1.5)
        self.assertIsNot(res, self.frame)
        self.assertEqual(res.index[-1], 'foobar')
        self.assertEqual(res.get_value('foobar', 'B'), 1.5)
        res2 = res.set_value('foobar', 'qux', 1.5)
        self.assertIsNot(res2, res)
        self.assert_numpy_array_equal(res2.columns,
                                      list(self.frame.columns) + ['qux'])
        self.assertEqual(res2.get_value('foobar', 'qux'), 1.5)
    def test_fancy_index_misc(self):
        """ix slicing along both axes matches the equivalent reindex."""
        # axis = 0
        sliced = self.frame.ix[-2:, :]
        expected = self.frame.reindex(index=self.frame.index[-2:])
        tm.assert_sp_frame_equal(sliced, expected)
        # axis = 1
        sliced = self.frame.ix[:, -2:]
        expected = self.frame.reindex(columns=self.frame.columns[-2:])
        tm.assert_sp_frame_equal(sliced, expected)
    def test_getitem_overload(self):
        """Row slicing and boolean indexing; misaligned masks raise."""
        # slicing
        sl = self.frame[:20]
        tm.assert_sp_frame_equal(sl, self.frame.reindex(self.frame.index[:20]))
        # boolean indexing
        d = self.frame.index[5]
        indexer = self.frame.index > d
        subindex = self.frame.index[indexer]
        subframe = self.frame[indexer]
        self.assert_numpy_array_equal(subindex, subframe.index)
        self.assertRaises(Exception, self.frame.__getitem__, indexer[:-1])
    def test_setitem(self):
        """Column assignment from SparseSeries, dense Series, ndarrays, and
        scalars behaves like the dense frame (run on every fixture)."""
        def _check_frame(frame, orig):
            N = len(frame)
            # insert SparseSeries
            frame['E'] = frame['A']
            tm.assertIsInstance(frame['E'], SparseSeries)
            tm.assert_sp_series_equal(frame['E'], frame['A'],
                                      check_names=False)
            # insert SparseSeries differently-indexed
            to_insert = frame['A'][::2]
            frame['E'] = to_insert
            expected = to_insert.to_dense().reindex(frame.index)
            result = frame['E'].to_dense()
            tm.assert_series_equal(result, expected, check_names=False)
            self.assertEqual(result.name, 'E')
            # insert Series
            frame['F'] = frame['A'].to_dense()
            tm.assertIsInstance(frame['F'], SparseSeries)
            tm.assert_sp_series_equal(frame['F'], frame['A'],
                                      check_names=False)
            # insert Series differently-indexed
            to_insert = frame['A'].to_dense()[::2]
            frame['G'] = to_insert
            expected = to_insert.reindex(frame.index)
            expected.name = 'G'
            tm.assert_series_equal(frame['G'].to_dense(), expected)
            # insert ndarray
            frame['H'] = np.random.randn(N)
            tm.assertIsInstance(frame['H'], SparseSeries)
            to_sparsify = np.random.randn(N)
            to_sparsify[N // 2:] = frame.default_fill_value
            frame['I'] = to_sparsify
            self.assertEqual(len(frame['I'].sp_values), N // 2)
            # insert ndarray wrong size
            self.assertRaises(Exception, frame.__setitem__, 'foo',
                              np.random.randn(N - 1))
            # scalar value
            frame['J'] = 5
            self.assertEqual(len(frame['J'].sp_values), N)
            self.assertTrue((frame['J'].sp_values == 5).all())
            frame['K'] = frame.default_fill_value
            self.assertEqual(len(frame['K'].sp_values), 0)
        self._check_all(_check_frame)
    def test_setitem_corner(self):
        """Assigning an existing column to a new name copies its values."""
        self.frame['a'] = self.frame['B']
        tm.assert_sp_series_equal(self.frame['a'], self.frame['B'],
                                  check_names=False)
    def test_setitem_array(self):
        """Assigning a (possibly shorter) SparseSeries aligns on the index."""
        arr = self.frame['B']
        self.frame['E'] = arr
        tm.assert_sp_series_equal(self.frame['E'], self.frame['B'],
                                  check_names=False)
        self.frame['F'] = arr[:-1]
        index = self.frame.index[:-1]
        tm.assert_sp_series_equal(self.frame['E'].reindex(index),
                                  self.frame['F'].reindex(index),
                                  check_names=False)
    def test_delitem(self):
        """del removes the named column and leaves the others untouched."""
        A = self.frame['A']
        C = self.frame['C']
        del self.frame['B']
        self.assertNotIn('B', self.frame)
        tm.assert_sp_series_equal(self.frame['A'], A)
        tm.assert_sp_series_equal(self.frame['C'], C)
        del self.frame['D']
        self.assertNotIn('D', self.frame)
        del self.frame['A']
        self.assertNotIn('A', self.frame)
    def test_set_columns(self):
        """Setting columns with the wrong length must raise."""
        self.frame.columns = self.frame.columns
        self.assertRaises(Exception, setattr, self.frame, 'columns',
                          self.frame.columns[:-1])
    def test_set_index(self):
        """Setting an index with the wrong length must raise."""
        self.frame.index = self.frame.index
        self.assertRaises(Exception, setattr, self.frame, 'index',
                          self.frame.index[:-1])
    def test_append(self):
        """Appending two row-splits reconstructs the frame, including when
        the first piece has fewer columns."""
        a = self.frame[:5]
        b = self.frame[5:]
        appended = a.append(b)
        tm.assert_sp_frame_equal(appended, self.frame, exact_indices=False)
        a = self.frame.ix[:5, :3]
        b = self.frame.ix[5:]
        appended = a.append(b)
        tm.assert_sp_frame_equal(appended.ix[:, :3], self.frame.ix[:, :3],
                                 exact_indices=False)
    def test_apply(self):
        """apply with ufuncs, broadcast mode, and reductions matches dense;
        the fill value is transformed alongside the data."""
        applied = self.frame.apply(np.sqrt)
        tm.assertIsInstance(applied, SparseDataFrame)
        tm.assert_almost_equal(applied.values, np.sqrt(self.frame.values))
        applied = self.fill_frame.apply(np.sqrt)
        self.assertEqual(applied['A'].fill_value, np.sqrt(2))
        # agg / broadcast
        broadcasted = self.frame.apply(np.sum, broadcast=True)
        tm.assertIsInstance(broadcasted, SparseDataFrame)
        exp = self.frame.to_dense().apply(np.sum, broadcast=True)
        tm.assert_frame_equal(broadcasted.to_dense(), exp)
        self.assertIs(self.empty.apply(np.sqrt), self.empty)
        from pandas.core import nanops
        applied = self.frame.apply(np.sum)
        tm.assert_series_equal(applied,
                               self.frame.to_dense().apply(nanops.nansum))
    def test_apply_nonuq(self):
        """apply along axis=1 works with a non-unique row index."""
        df_orig = DataFrame(
            [[1, 2, 3], [4, 5, 6], [7, 8, 9]], index=['a', 'a', 'c'])
        df = df_orig.to_sparse()
        rs = df.apply(lambda s: s[0], axis=1)
        xp = Series([1., 4., 7.], ['a', 'a', 'c'])
        tm.assert_series_equal(rs, xp)
        # df.T breaks
        df = df_orig.T.to_sparse()
        rs = df.apply(lambda s: s[0], axis=0)  # noqa
        # TODO: no non-unique columns supported in sparse yet
        # assert_series_equal(rs, xp)
    def test_applymap(self):
        """applymap runs and keeps the result sparse."""
        # just test that it works
        result = self.frame.applymap(lambda x: x * 2)
        tm.assertIsInstance(result, SparseDataFrame)
    def test_astype(self):
        """astype to int64 is unsupported on sparse frames and must raise."""
        self.assertRaises(Exception, self.frame.astype, np.int64)
    def test_fillna(self):
        """fillna matches dense, both out-of-place and inplace, for frames
        and for a single sparse column."""
        df = self.zframe.reindex(lrange(5))
        dense = self.zorig.reindex(lrange(5))
        result = df.fillna(0)
        expected = dense.fillna(0)
        tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
                                 exact_indices=False)
        tm.assert_frame_equal(result.to_dense(), expected)
        result = df.copy()
        result.fillna(0, inplace=True)
        expected = dense.fillna(0)
        tm.assert_sp_frame_equal(result, expected.to_sparse(fill_value=0),
                                 exact_indices=False)
        tm.assert_frame_equal(result.to_dense(), expected)
        result = df.copy()
        result = df['A']
        result.fillna(0, inplace=True)
        expected = dense['A'].fillna(0)
        # this changes internal SparseArray repr
        # tm.assert_sp_series_equal(result, expected.to_sparse(fill_value=0))
        tm.assert_series_equal(result.to_dense(), expected)
    def test_fillna_fill_value(self):
        """fillna respects both NaN and non-NaN default fill values."""
        df = pd.DataFrame({'A': [1, 0, 0], 'B': [np.nan, np.nan, 4]})
        sparse = pd.SparseDataFrame(df)
        tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
                              df.fillna(-1), check_dtype=False)
        sparse = pd.SparseDataFrame(df, default_fill_value=0)
        tm.assert_frame_equal(sparse.fillna(-1).to_dense(),
                              df.fillna(-1), check_dtype=False)
    def test_rename(self):
        """rename with index and column mappers must not raise."""
        # just check this works
        renamed = self.frame.rename(index=str)  # noqa
        renamed = self.frame.rename(columns=lambda x: '%s%d' % (x, len(x)))  # noqa
    def test_corr(self):
        """corr on the sparse frame equals corr on its dense counterpart."""
        res = self.frame.corr()
        tm.assert_frame_equal(res, self.frame.to_dense().corr())
    def test_describe(self):
        """describe runs even with an all-NaN column present."""
        self.frame['foo'] = np.nan
        self.frame.get_dtype_counts()
        str(self.frame)
        desc = self.frame.describe()  # noqa
    def test_join(self):
        """Joining disjoint column splits rebuilds the frame; overlapping
        columns and unnamed Series must raise."""
        left = self.frame.ix[:, ['A', 'B']]
        right = self.frame.ix[:, ['C', 'D']]
        joined = left.join(right)
        tm.assert_sp_frame_equal(joined, self.frame, exact_indices=False)
        right = self.frame.ix[:, ['B', 'D']]
        self.assertRaises(Exception, left.join, right)
        with tm.assertRaisesRegexp(ValueError,
                                   'Other Series must have a name'):
            self.frame.join(Series(
                np.random.randn(len(self.frame)), index=self.frame.index))
    def test_reindex(self):
        """reindex over rows and columns matches dense, preserves fill
        values, and handles zero-length targets; also checks copy=False
        aliasing semantics."""
        def _check_frame(frame):
            index = frame.index
            sidx = index[::2]
            sidx2 = index[:5]  # noqa
            sparse_result = frame.reindex(sidx)
            dense_result = frame.to_dense().reindex(sidx)
            tm.assert_frame_equal(sparse_result.to_dense(), dense_result)
            tm.assert_frame_equal(frame.reindex(list(sidx)).to_dense(),
                                  dense_result)
            sparse_result2 = sparse_result.reindex(index)
            dense_result2 = dense_result.reindex(index)
            tm.assert_frame_equal(sparse_result2.to_dense(), dense_result2)
            # propagate CORRECT fill value
            tm.assert_almost_equal(sparse_result.default_fill_value,
                                   frame.default_fill_value)
            tm.assert_almost_equal(sparse_result['A'].fill_value,
                                   frame['A'].fill_value)
            # length zero
            length_zero = frame.reindex([])
            self.assertEqual(len(length_zero), 0)
            self.assertEqual(len(length_zero.columns), len(frame.columns))
            self.assertEqual(len(length_zero['A']), 0)
            # frame being reindexed has length zero
            length_n = length_zero.reindex(index)
            self.assertEqual(len(length_n), len(frame))
            self.assertEqual(len(length_n.columns), len(frame.columns))
            self.assertEqual(len(length_n['A']), len(frame))
            # reindex columns
            reindexed = frame.reindex(columns=['A', 'B', 'Z'])
            self.assertEqual(len(reindexed.columns), 3)
            tm.assert_almost_equal(reindexed['Z'].fill_value,
                                   frame.default_fill_value)
            self.assertTrue(np.isnan(reindexed['Z'].sp_values).all())
        _check_frame(self.frame)
        _check_frame(self.iframe)
        _check_frame(self.zframe)
        _check_frame(self.fill_frame)
        # with copy=False
        reindexed = self.frame.reindex(self.frame.index, copy=False)
        reindexed['F'] = reindexed['A']
        self.assertIn('F', self.frame)
        reindexed = self.frame.reindex(self.frame.index)
        reindexed['G'] = reindexed['A']
        self.assertNotIn('G', self.frame)
    def test_reindex_fill_value(self):
        """reindex with an explicit fill_value matches the dense result."""
        rng = bdate_range('20110110', periods=20)
        result = self.zframe.reindex(rng, fill_value=0)
        exp = self.zorig.reindex(rng, fill_value=0)
        exp = exp.to_sparse(self.zframe.default_fill_value)
        tm.assert_sp_frame_equal(result, exp)
def test_take(self):
    """take() along the column axis must equal an explicit column reindex."""
    taken = self.frame.take([1, 0, 2], axis=1)
    reordered = self.frame.reindex(columns=['B', 'A', 'C'])
    tm.assert_sp_frame_equal(taken, reordered)
def test_to_dense(self):
    """to_dense() must round-trip back to the original dense frame."""
    def _check(frame, orig):
        dense_dm = frame.to_dense()
        tm.assert_frame_equal(frame, dense_dm)
        tm.assert_frame_equal(dense_dm, orig, check_dtype=False)

    self._check_all(_check)
def test_stack_sparse_frame(self):
    """stack_sparse_frame must match Panel.to_frame on the same data."""
    def _check(frame):
        dense_frame = frame.to_dense()  # noqa

        wp = Panel.from_dict({'foo': frame})
        from_dense_lp = wp.to_frame()

        from_sparse_lp = spf.stack_sparse_frame(frame)

        self.assert_numpy_array_equal(from_dense_lp.values,
                                      from_sparse_lp.values)

    _check(self.frame)
    _check(self.iframe)

    # for now: frames with non-NaN fill values are expected to raise
    self.assertRaises(Exception, _check, self.zframe)
    self.assertRaises(Exception, _check, self.fill_frame)
def test_transpose(self):
    """Transposing twice must give back the original sparse frame."""
    def _check(frame, orig):
        transposed = frame.T
        untransposed = transposed.T
        tm.assert_sp_frame_equal(frame, untransposed)

    self._check_all(_check)
def test_shift(self):
    """shift() must match the dense result, including freq-based shifts."""
    def _check(frame, orig):
        shifted = frame.shift(0)
        exp = orig.shift(0)
        # int is coerced to float dtype
        tm.assert_frame_equal(shifted.to_dense(), exp, check_dtype=False)

        shifted = frame.shift(1)
        exp = orig.shift(1)
        tm.assert_frame_equal(shifted, exp)

        shifted = frame.shift(-2)
        exp = orig.shift(-2)
        tm.assert_frame_equal(shifted, exp)

        # freq-based shifts move the index rather than the data
        shifted = frame.shift(2, freq='B')
        exp = orig.shift(2, freq='B')
        exp = exp.to_sparse(frame.default_fill_value)
        tm.assert_frame_equal(shifted, exp)

        shifted = frame.shift(2, freq=datetools.bday)
        exp = orig.shift(2, freq=datetools.bday)
        exp = exp.to_sparse(frame.default_fill_value)
        tm.assert_frame_equal(shifted, exp)

    self._check_all(_check)
def test_count(self):
    """count() must agree with the dense frame along both axes."""
    result = self.frame.count()
    dense_result = self.frame.to_dense().count()
    tm.assert_series_equal(result, dense_result)

    result = self.frame.count(1)
    dense_result = self.frame.to_dense().count(1)
    # win32 don't check dtype
    tm.assert_series_equal(result, dense_result, check_dtype=False)
def _check_all(self, check_func):
check_func(self.frame, self.orig)
check_func(self.iframe, self.iorig)
check_func(self.zframe, self.zorig)
check_func(self.fill_frame, self.fill_orig)
def test_numpy_transpose(self):
    """np.transpose must round-trip and reject the unsupported axes arg."""
    sdf = SparseDataFrame([1, 2, 3], index=[1, 2, 3], columns=['a'])
    result = np.transpose(np.transpose(sdf))
    tm.assert_sp_frame_equal(result, sdf)

    msg = "the 'axes' parameter is not supported"
    tm.assertRaisesRegexp(ValueError, msg, np.transpose, sdf, axes=1)
def test_combine_first(self):
    """combine_first works sparse/sparse and sparse/dense identically."""
    df = self.frame

    result = df[::2].combine_first(df)
    result2 = df[::2].combine_first(df.to_dense())

    expected = df[::2].to_dense().combine_first(df.to_dense())
    expected = expected.to_sparse(fill_value=df.default_fill_value)

    tm.assert_sp_frame_equal(result, result2)
    tm.assert_sp_frame_equal(result, expected)
def test_combine_add(self):
    """add(fill_value=0) on sparse frames matches the dense computation."""
    df = self.frame.to_dense()
    df2 = df.copy()
    # introduce NaNs / changed values on each side
    df2['C'][:3] = np.nan
    df['A'][:3] = 5.7

    result = df.to_sparse().add(df2.to_sparse(), fill_value=0)
    expected = df.add(df2, fill_value=0).to_sparse()
    tm.assert_sp_frame_equal(result, expected)
def test_isin(self):
    """Masking with isin must match a direct equality mask."""
    sparse_df = DataFrame({'flag': [1., 0., 1.]}).to_sparse(fill_value=0.)
    expected = sparse_df[sparse_df.flag == 1.]
    result = sparse_df[sparse_df.flag.isin([1.])]
    tm.assert_frame_equal(expected, result)
def test_sparse_pow_issue(self):
    """Regression test for GH 2220: 1 ** sparse frame with NaN values."""
    # 2220
    df = SparseDataFrame({'A': [1.1, 3.3], 'B': [2.5, -3.9]})

    # note : no error without nan
    df = SparseDataFrame({'A': [nan, 0, 1]})

    # note that 2 ** df works fine, also df ** 1
    result = 1**df

    r1 = result.take([0], 1)['A']
    r2 = result['A']

    # both access paths must expose the same sparse values
    self.assertEqual(len(r2.sp_values), len(r1.sp_values))
def test_as_blocks(self):
    """blocks must group the frame's columns by dtype."""
    df = SparseDataFrame({'A': [1.1, 3.3], 'B': [nan, -3.9]},
                         dtype='float64')

    df_blocks = df.blocks
    self.assertEqual(list(df_blocks.keys()), ['float64'])
    tm.assert_frame_equal(df_blocks['float64'], df)
def test_nan_columnname(self):
    # GH 8822: a NaN column name must survive the sparse conversion.
    nan_colname = DataFrame(Series(1.0, index=[0]), columns=[nan])
    nan_colname_sparse = nan_colname.to_sparse()
    self.assertTrue(np.isnan(nan_colname_sparse.columns[0]))
class TestSparseDataFrameAnalytics(tm.TestCase):
    """Analytics (cumsum) tests for SparseDataFrame."""

    def setUp(self):
        # Four columns mixing NaN runs and fully dense data over 10 bdays.
        self.data = {'A': [nan, nan, nan, 0, 1, 2, 3, 4, 5, 6],
                     'B': [0, 1, 2, nan, nan, nan, 3, 4, 5, 6],
                     'C': np.arange(10),
                     'D': [0, 1, 2, 3, 4, 5, nan, nan, nan, nan]}

        self.dates = bdate_range('1/1/2011', periods=10)

        self.frame = SparseDataFrame(self.data, index=self.dates)

    def test_cumsum(self):
        # cumsum on the sparse frame equals cumsum on its dense form.
        result = self.frame.cumsum()
        expected = SparseDataFrame(self.frame.to_dense().cumsum())
        tm.assert_sp_frame_equal(result, expected)

    def test_numpy_cumsum(self):
        result = np.cumsum(self.frame, axis=0)
        expected = SparseDataFrame(self.frame.to_dense().cumsum())
        tm.assert_sp_frame_equal(result, expected)

        # np.cumsum's extra keyword arguments are rejected by pandas
        msg = "the 'dtype' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, msg, np.cumsum,
                              self.frame, dtype=np.int64)

        msg = "the 'out' parameter is not supported"
        tm.assertRaisesRegexp(ValueError, msg, np.cumsum,
                              self.frame, out=result)
if __name__ == '__main__':
    # Run this module's tests under nose, dropping into pdb on failure.
    import nose  # noqa
    nose.runmodule(argv=[__file__, '-vvs', '-x', '--pdb', '--pdb-failure'],
                   exit=False)
| StarcoderdataPython |
194642 | # stdlib
import subprocess
# third party
from PyInquirer import prompt
import click
# grid relative
from ..deploy import base_setup
from ..tf import *
from ..utils import Config
from ..utils import styles
from .provider import *
class GCloud:
    """Thin wrapper around the ``gcloud`` CLI used to enumerate projects,
    regions, zones, machine types and images for the deployment prompts.

    All methods shell out to ``gcloud`` and parse its whitespace-separated
    ``--format="value(...)"`` output.
    """

    def _run(self, command):
        """Run *command* in a shell and return stdout split on whitespace.

        The five public methods below previously duplicated this Popen
        boilerplate verbatim; it is factored out here.
        """
        proc = subprocess.Popen(
            command,
            shell=True,
            stdout=subprocess.PIPE,
            universal_newlines=True,
        )
        return proc.stdout.read().split()

    def projects_list(self):
        """Return the project ids visible to the active gcloud account."""
        return self._run('gcloud projects list --format="value(projectId)"')

    def regions_list(self):
        """Return all compute region names."""
        return self._run('gcloud compute regions list --format="value(NAME)"')

    def zones_list(self, region):
        """Return the zone names belonging to *region*."""
        return self._run(
            f'gcloud compute zones list --filter="REGION:( {region} )" --format="value(NAME)"'
        )

    def machines_type(self, zone):
        """Return the machine-type names available in *zone*."""
        return self._run(
            f'gcloud compute machine-types list --filter="ZONE:( {zone} )" --format="value(NAME)"'
        )

    def images_type(self):
        """Return a mapping of image name -> (project, family)."""
        # Output is a flat NAME PROJECT FAMILY token stream; regroup in triples.
        tokens = self._run(
            'gcloud compute images list --format="value(NAME,PROJECT,FAMILY)"'
        )
        return {
            tokens[i]: (tokens[i + 1], tokens[i + 2])
            for i in range(0, len(tokens), 3)
        }
class GCP(Provider):
    """Google Cloud Provider.

    Builds a terrascript Terraform configuration (firewall + static IP) on
    construction and offers network / domain deployment helpers.
    """

    def __init__(self, config):
        super().__init__(config)
        # Interactive prompts fill in project/region/zone/machine/image.
        self.config.gcp = self.get_gcp_config()
        self.tfscript += terrascript.provider.google(
            project=self.config.gcp.project_id,
            region=self.config.gcp.region,
            zone=self.config.gcp.zone,
        )
        self.update_script()

        click.echo("Initializing GCP Provider")
        TF.init()

        build = self.build()
        # NOTE(review): build() returns TF.validate(); 0 presumably means
        # success (process exit code) -- confirm against the TF helper.
        if build == 0:
            click.echo("Main Infrastructure has built Successfully!\n\n")

    def build(self) -> bool:
        """Add the shared firewall and static PyGrid IP to the TF script.

        Returns the result of ``TF.validate()`` on the generated script.
        """
        app = self.config.app.name
        # Open the web + PyGrid port ranges on the default network.
        self.firewall = terrascript.resource.google_compute_firewall(
            f"firewall-{app}",
            name=f"firewall-{app}",
            network="default",
            allow={
                "protocol": "tcp",
                "ports": ["80", "443", "5000-5999", "6000-6999", "7000-7999"],
            },
        )
        self.tfscript += self.firewall

        # Reserved static address later attached to the network instance.
        self.pygrid_ip = terrascript.resource.google_compute_address(
            f"pygrid-{app}", name=f"pygrid-{app}"
        )
        self.tfscript += self.pygrid_ip
        self.tfscript += terrascript.output(
            f"pygrid-{app}_ip", value="${" + self.pygrid_ip.address + "}"
        )

        self.update_script()
        return TF.validate()

    def deploy_network(self, name: str = "pygridnetwork", apply: bool = True):
        """Provision a compute instance running the PyGrid network app.

        NOTE(review): the ``apply`` parameter is never used -- TF.apply() is
        always called; confirm whether a dry-run path was intended.
        """
        images = self.config.gcp.images
        image_type = self.config.gcp.image_type
        image = terrascript.data.google_compute_image(
            name + image_type,
            project=images[image_type][0],
            family=images[image_type][1],
        )
        self.tfscript += image

        network = terrascript.resource.google_compute_instance(
            name,
            name=name,
            machine_type=self.config.gcp.machine_type,
            zone=self.config.gcp.zone,
            boot_disk={"initialize_params": {"image": "${" + image.self_link + "}"}},
            network_interface={
                "network": "default",
                # Attach the static IP reserved in build().
                "access_config": {"nat_ip": "${" + self.pygrid_ip.address + "}"},
            },
            # Startup script: base setup, then launch the network app.
            metadata_startup_script=f"""
            {base_setup}
            \ncd /PyGrid/apps/network
            \npoetry install
            \nnohup ./run.sh --port {self.config.app.port} --host {self.config.app.host} {'--start_local_db' if self.config.app.start_local_db else ''}
            """,
        )
        self.tfscript += network

        self.update_script()
        return TF.apply()

    def deploy_domain(self, name: str = "pygriddomain", apply: bool = True):
        """Provision a compute instance running the PyGrid domain app.

        NOTE(review): as in deploy_network, ``apply`` is unused.
        """
        images = self.config.gcp.images
        image_type = self.config.gcp.image_type
        image = terrascript.data.google_compute_image(
            name + image_type,
            project=images[image_type][0],
            family=images[image_type][1],
        )
        self.tfscript += image

        network = terrascript.resource.google_compute_instance(
            name,
            name=name,
            machine_type=self.config.gcp.machine_type,
            zone=self.config.gcp.zone,
            boot_disk={"initialize_params": {"image": "${" + image.self_link + "}"}},
            # Domain instances get an ephemeral external IP (empty access_config).
            network_interface={"network": "default", "access_config": {}},
            metadata_startup_script=f"""
            {base_setup}
            \ncd /PyGrid/apps/domain
            \npoetry install
            \nnohup ./run.sh --id {self.config.app.id} --port {self.config.app.port} --host {self.config.app.host} --network {self.config.app.network} --num_replicas {self.config.app.num_replicas} {'--start_local_db' if self.config.app.start_local_db else ''}
            """,
        )
        self.tfscript += network

        self.update_script()
        return TF.apply()

    def get_gcp_config(self) -> Config:
        """Getting the configration required for deployment on GCP.

        Returns:
            Config: Simple Config with the user inputs
        """
        gcp = GCloud()

        project_id = prompt(
            [
                {
                    "type": "list",
                    "name": "project_id",
                    "message": "Please select your project_id",
                    "choices": gcp.projects_list(),
                }
            ],
            style=styles.second,
        )["project_id"]

        region = prompt(
            [
                {
                    "type": "list",
                    "name": "region",
                    "message": "Please select your desired GCP region",
                    "default": "us-central1",
                    "choices": gcp.regions_list(),
                }
            ],
            style=styles.second,
        )["region"]

        zone = prompt(
            [
                {
                    "type": "list",
                    "name": "zone",
                    "message": "Please select your desired GCP zone",
                    "choices": gcp.zones_list(region),
                }
            ],
            style=styles.second,
        )["zone"]

        machine_type = prompt(
            [
                {
                    "type": "list",
                    "name": "machine_type",
                    "message": "Please select your desired Machine type",
                    "choices": gcp.machines_type(zone),
                }
            ],
            style=styles.second,
        )["machine_type"]

        images = gcp.images_type()
        image_type = prompt(
            [
                {
                    "type": "list",
                    "name": "image_type",
                    # NOTE(review): message says "Machine type" but this prompt
                    # selects the image type -- looks like a copy-paste slip.
                    "message": "Please select your desired Machine type",
                    "choices": images.keys(),
                }
            ],
            style=styles.second,
        )["image_type"]

        return Config(
            project_id=project_id,
            region=region,
            zone=zone,
            machine_type=machine_type,
            images=images,
            image_type=image_type,
        )
| StarcoderdataPython |
12864386 | #-*-coding: utf-8 -*-
"""
/dms/edumediaitem/views_manage.py
.. enthaelt den View fuer die Management-Ansicht des Medienpaketes
Django content Management System
<NAME>
<EMAIL>
Die Programme des dms-Systems koennen frei genutzt und den spezifischen
Beduerfnissen entsprechend angepasst werden.
0.01 11.09.2007 Beginn der Arbeit
"""
from django.utils.translation import ugettext as _
from dms.queries import get_site_url
from dms.roles import require_permission
from dms.roles import UserEditPerms
from dms.folder.views_manage import do_manage
from dms_ext.extension import * # dms-Funktionen ueberschreiben
# -----------------------------------------------------
@require_permission('perm_edit_folderish')
def edumediaitem_manage(request, item_container):
    """Management ("Pflegemodus") view for a media package."""
    user_perms = UserEditPerms(request.user.username, request.path)

    def entry(path_suffix, info):
        # One add-on menu item, resolved relative to the current container.
        return {'url': get_site_url(item_container, path_suffix), 'info': info}

    add_ons = {
        0: [
            entry('index.html/add/edufileitem/', _(u'Datei')),
            entry('index.html/add/edutextitem/', _(u'Textdokument')),
            entry('index.html/add/edulinkitem/', _(u'Verweis')),
        ],
        1: [
            entry('index.html/add/imagethumb/?' + 'max_width=120&max_height=80',
                  _(u'Minibild für Verweise etc.')),
            entry('index.html/add/image/', _(u'Bild, Foto, Grafik')),
        ],
        2: [
            entry('index.html/add/userfolder/',
                  _(u'Community-Mitglieder eintragen, löschen, Rechte ändern ...')),
        ],
        3: [],
    }

    app_name = 'edumediaitem'
    my_title = _(u'Medienpaket pflegen')
    my_title_own = _(u'Eigene Ressourcen etc. pflegen')
    dont = {'navigation_left_mode': False, }
    return do_manage(request, item_container, user_perms, add_ons, app_name,
                     my_title, my_title_own, dont)
| StarcoderdataPython |
12862112 | <gh_stars>0
def compute_pay(hours, rate):
    """Compute gross pay with time-and-a-half overtime above 40 hours.

    Bug fix: the original converted the module-level globals ``x``/``y``
    instead of its own parameters, so the arguments passed in were silently
    ignored.  The docstring also wrongly claimed the inputs were 0-1.

    Returns the pay as a float, or the string "INVALID ENTRY" when either
    value cannot be parsed as a number.
    """
    try:
        hours = float(hours)
        rate = float(rate)
        if hours <= 40:
            pay = hours * rate
        else:
            # hours beyond 40 are paid at 1.5x the normal rate
            pay = 40 * rate + (hours - 40) * 1.5 * rate
        return pay
    except ValueError:
        return "INVALID ENTRY"


if __name__ == "__main__":
    x = input("enters hours")
    y = input("enters rate")
    print(compute_pay(x, y))
| StarcoderdataPython |
4959184 | <filename>k8smtool/__init__.py
from .filter import Filter
from .table import Table
| StarcoderdataPython |
9620194 | <gh_stars>0
from django.urls import path
from . import views
# URL namespace for reversing, e.g. reverse('lists:my_list').
app_name = 'lists'

urlpatterns = [
    path('', views.my_list, name='my_list'),            # list overview
    path('remove/<int:pk>/', views.remove_item, name='remove'),
    path('update/<int:pk>/', views.update_item, name='update'),
]
3378925 | <filename>initialise_short.py
from armor import pattern
from armor import defaultParameters as dp
from armor.defaultParameters import *
from armor.misc import *
| StarcoderdataPython |
3405 | """Set the build version to be 'qa', 'rc', 'release'"""
import sys
import os
import re
import logging
# Root logger writing to stderr at DEBUG so CI logs capture everything.
log = logging.getLogger()
log.addHandler(logging.StreamHandler())
log.setLevel(logging.DEBUG)
def get_build_type(travis_tag=None):
    """Classify a Travis tag: 'release' for vX.Y.Z, 'rc' for vX.Y.ZrcN,
    otherwise (including no tag) 'qa'."""
    if not travis_tag:
        return "qa"
    log.debug("getting build type for tag: \"%s\"", travis_tag)
    tag_patterns = (
        ('rc', r'v\d+\.\d+\.\d+rc\d+$'),
        ('release', r'v\d+\.\d+\.\d+$'),
    )
    for build_type, pattern in tag_patterns:
        if re.match(pattern, travis_tag):
            return build_type
    return 'qa'
def main():
    """Write lbry/build_type.py from the Travis environment.

    Requires TRAVIS_COMMIT (raises KeyError if unset); TRAVIS_TAG is
    optional and determines the build type via get_build_type().
    """
    # repo root: two levels up from this script
    root_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
    build_type_path = os.path.join(root_dir, 'lbry', 'build_type.py')
    log.debug("configuring build type file: %s", build_type_path)
    # 6-char abbreviated commit hash
    travis_commit = os.environ['TRAVIS_COMMIT'][:6]
    build_type = get_build_type(os.environ.get('TRAVIS_TAG', None))
    log.debug("setting build type=%s, build commit=%s", build_type, travis_commit)
    with open(build_type_path, 'w') as f:
        f.write(f"BUILD = \"{build_type}\"\nBUILD_COMMIT = \"{travis_commit}\"\n")
if __name__ == '__main__':
    # Propagate main()'s return value as the exit code (None -> 0).
    sys.exit(main())
| StarcoderdataPython |
234104 | #! /usr/bin/python3
import json
import requests
class hdns():
    """Minimal client for the Hetzner DNS API (v1).

    All calls authenticate with the ``Auth-API-Token`` header and print the
    HTTP status of each response; network failures are caught and reported
    with ``None`` returned.
    """

    def __init__(self, token):
        self.baseUrl = "https://dns.hetzner.com/api/v1"
        self.token = token

    def _log_status(self, name, response):
        # Preserve the original per-call status-code trace output.
        print('[{name}] Response HTTP Status Code: {status_code}'.format(
            name=name, status_code=response.status_code))

    def _record_payload(self, zoneId, name, value, record_type, ttl):
        # JSON body shared by createRecord and updateRecord.
        return json.dumps({
            "value": value,
            "ttl": ttl,
            "type": record_type,
            "name": name,
            "zone_id": zoneId
        })

    def getAllZones(self):
        """Return all zone objects for the account, or None on failure."""
        try:
            response = requests.get(
                url="{baseUrl}/zones".format(baseUrl=self.baseUrl),
                headers={
                    "Auth-API-Token": self.token,
                },
            )
            self._log_status("getAllZones", response)
            return json.loads(response.content)["zones"]
        except requests.exceptions.RequestException:
            print('HTTP Request failed')

    def getAllRecords(self, zoneId, record_type="All"):
        """Return the records of *zoneId*, optionally filtered by type.

        Bug fix: the original tested ``if record_type:`` before
        ``elif record_type == "All":``, so the default "All" (truthy) was
        used as a literal type filter and the function always returned an
        empty list -- the "All" case must be checked first.
        """
        try:
            response = requests.get(
                url="{baseUrl}/records".format(baseUrl=self.baseUrl),
                params={
                    "zone_id": zoneId,
                },
                headers={
                    "Auth-API-Token": self.token,
                },
            )
            self._log_status("getAllRecords", response)
            records = json.loads(response.content)["records"]
            if record_type == "All":
                return records
            if record_type:
                return [record for record in records
                        if record["type"] == record_type]
        except requests.exceptions.RequestException:
            print('HTTP Request failed')

    def updateRecord(self, recordId, zoneId, name, value, record_type="A", ttl=86400):
        """Update an existing record via PUT /records/<recordId>."""
        try:
            response = requests.put(
                url="{baseUrl}/records/{recordId}".format(
                    baseUrl=self.baseUrl, recordId=recordId),
                headers={
                    "Content-Type": "application/json",
                    "Auth-API-Token": self.token,
                },
                data=self._record_payload(zoneId, name, value, record_type, ttl),
            )
            self._log_status("updateRecord", response)
        except requests.exceptions.RequestException:
            print('HTTP Request failed')

    def createRecord(self, zoneId, name, value, record_type="A", ttl=86400):
        """Create a new record via POST /records."""
        try:
            response = requests.post(
                url="{baseUrl}/records".format(baseUrl=self.baseUrl),
                headers={
                    "Content-Type": "application/json",
                    "Auth-API-Token": self.token,
                },
                data=self._record_payload(zoneId, name, value, record_type, ttl),
            )
            self._log_status("createRecord", response)
        except requests.exceptions.RequestException:
            print('HTTP Request failed')
if __name__ == "__main__":
    # Library module: running it directly does nothing.
    exit()
| StarcoderdataPython |
1946508 | import sys
# solve() recurses deeply on long inputs; raise the default recursion limit.
sys.setrecursionlimit(2**20)
def solve(correct, student, j, i, end):
    """Longest common subsequence length of correct[j:end] vs student[i:end].

    Performance fix: the original plain recursion was exponential in ``end``
    (each state recomputed many times); an internal memo table makes it
    O(end**2) states while returning identical results.  The public
    signature is unchanged.
    """
    memo = {}

    def _lcs(j, i):
        if i == end or j == end:
            return 0
        key = (j, i)
        if key in memo:
            return memo[key]
        # skip one element from either sequence ...
        best = max(_lcs(j + 1, i), _lcs(j, i + 1))
        # ... or match the current pair when equal
        if correct[j] == student[i]:
            best = max(best, _lcs(j + 1, i + 1) + 1)
        memo[key] = best
        return best

    return _lcs(j, i)
# First line: number of events; second line: the correct ordering.
events = int(input())
correct = list(map(int, input().split()))
# One student ordering per remaining line; stop at end of input.
while (True):
    try:
        student = list(map(int, input().split()))
        answer = solve(correct, student, 0, 0, events)
        print(answer)
    except EOFError as e:
        break
| StarcoderdataPython |
5012664 | from rest_framework.fields import Field
class IsCommunityReportedField(Field):
    """Read-only field: whether the requesting user reported this community."""

    def __init__(self, **kwargs):
        kwargs['source'] = '*'
        kwargs['read_only'] = True
        super(IsCommunityReportedField, self).__init__(**kwargs)

    def to_representation(self, value):
        user = self.context.get('request').user
        # Anonymous users cannot have reported anything.
        if user.is_anonymous:
            return False
        # A user never counts as having reported itself.
        if user.pk == value.pk:
            return False
        return user.has_reported_community_with_id(value.pk)
class CommunityPostsCountField(Field):
    # Read-only serializer field: number of community posts visible to the
    # requesting user, or None when the count must not be revealed.
    def __init__(self, **kwargs):
        kwargs['source'] = '*'
        kwargs['read_only'] = True
        super(CommunityPostsCountField, self).__init__(**kwargs)

    def to_representation(self, community):
        request = self.context.get('request')
        request_user = request.user

        # anonymous users never see a count
        if request_user.is_anonymous:
            return None

        # private community and the requester is not a member: hide the count
        if community.is_community_with_name_private(community_name=community.name) and \
                not request_user.is_member_of_community_with_name(community_name=community.name):
            return None

        return request_user.count_posts_for_community(community)
| StarcoderdataPython |
9772323 | <filename>python-language/fip.py
from functools import lru_cache
def fib(n):
    """Return the n-th Fibonacci number (fib(0) == 0, fib(1) == 1).

    Iterative form; the memoized recursion it replaces produced the same
    values (and n <= 1 still returns n unchanged).
    """
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(n - 1):
        prev, curr = curr, prev + curr
    return curr
# Print the first 300 Fibonacci numbers, one "index value" pair per line.
for i in range(300):
    print(i, fib(i))
1976463 | import numpy as np
from PIL import Image
import torch
from torchvision import models
import torchvision.transforms as T
import os
def get_image(path, crop=None):
    """Open an image, rotate it 90 degrees (expanding the canvas), and
    optionally crop it.

    Bug fix: the original default was the mutable ``crop=[]`` and it always
    called ``img.crop(crop)``, so calling without a crop box passed an empty
    sequence to PIL's crop.  Now *crop* is optional: pass a 4-tuple
    (left, upper, right, lower) in the rotated image's coordinates, or omit
    it to get the full rotated image.
    """
    img = Image.open(path)
    img = img.rotate(90, expand=1)
    if crop:
        img = img.crop(crop)
    return img
def crop_grid(img, box_size = [900,500], top_offset = 0):
    """Split *img* into a row-major list of box_size crops.

    NOTE(review): PIL's Image.size is (width, height), so ``H`` below is
    actually the width and ``W`` the height; the arithmetic is
    self-consistent but the names are swapped.  ``top_offset`` is never
    used -- TODO confirm whether a vertical offset was intended.
    """
    # can you split the image into small boxes of this size ?
    H,W = img.size
    # number of whole boxes that fit horizontally / vertically
    nrows = int(np.floor(H / box_size[0]))
    ncols = int(np.floor(W / box_size[1]))
    imgs = []
    # (left, up, right, low) is the current crop window in pixels
    left = 0
    up = 0
    low = up + box_size[1]
    right = left + box_size[0]
    for i in range(nrows):
        for j in range(ncols):
            I = img.crop((left, up, right, low))
            imgs.append(I)
            # advance one box to the right
            left += box_size[0]
            right = left + box_size[0]
        # next row: move down one box and reset to the left edge
        up += box_size[1]
        low = up + box_size[1]
        left = 0
        right = left + box_size[0]
    return imgs
| StarcoderdataPython |
4870171 | <reponame>lukius/ptc<filename>test/test_retransmission.py
import socket
import threading
import time
from base import ConnectedSocketTestCase, PTCTestCase
from ptc.constants import INITIAL_RTO, CLOCK_TICK,\
MAX_RETRANSMISSION_ATTEMPTS,\
BOGUS_RTT_RETRANSMISSIONS
from ptc.packet import SYNFlag, ACKFlag
class RetransmissionTestMixin(object):
    """Helpers shared by the retransmission test cases."""

    def get_retransmitted_packets(self):
        """Drain incoming packets and return every one after the original."""
        packets = list()
        while True:
            try:
                packet = self.receive(self.DEFAULT_TIMEOUT)
                packets.append(packet)
            except Exception:
                # receive() timing out means no more packets are coming
                break
        # The first packet should be the original one.
        return packets[1:]

    def wait_until_retransmission_timer_expires(self):
        # Sleep one full initial RTO (ticks converted to seconds).
        time.sleep(INITIAL_RTO * CLOCK_TICK)
# TODO: refactor tests.
class RetransmissionTest(ConnectedSocketTestCase, RetransmissionTestMixin):
    """Retransmission behavior on an already-established PTC connection:
    timer expiry, queue maintenance on ACKs, RTO back-off and estimation."""

    def assert_retransmission(self, first_packet, second_packet):
        """Assert that *second_packet* is a retransmission of the first."""
        self.assertEquals(first_packet.get_seq_number(),
                          second_packet.get_seq_number())
        self.assertEquals(first_packet.get_ack_number(),
                          second_packet.get_ack_number())
        self.assertEquals(first_packet.get_payload(),
                          second_packet.get_payload())

    def test_retransmission_after_lost_packet(self):
        # Unacknowledged data must be retransmitted when the timer expires.
        self.socket.send(self.DEFAULT_DATA)
        first_packet = self.receive(self.DEFAULT_TIMEOUT)
        self.wait_until_retransmission_timer_expires()
        second_packet = self.receive(self.DEFAULT_TIMEOUT)
        self.assert_retransmission(first_packet, second_packet)

    def test_give_up_after_enough_retransmissions(self):
        self.socket.send(self.DEFAULT_DATA)
        self.receive()
        # This will make the protocol think that it has already retransmitted
        # that number of times.
        self.socket.protocol.retransmissions = MAX_RETRANSMISSION_ATTEMPTS
        self.wait_until_retransmission_timer_expires()
        # No further retransmission; the connection is dropped instead.
        self.assertRaises(socket.timeout, self.receive, self.DEFAULT_TIMEOUT)
        self.assertFalse(self.socket.is_connected())

    def test_packet_removed_from_retransmission_queue_after_ack(self):
        # A fully ACKed packet must never be retransmitted.
        size = 10
        data = self.DEFAULT_DATA[:size]
        ack_number = self.DEFAULT_ISS + size
        ack_packet = self.packet_builder.build(flags=[ACKFlag],
                                               seq=self.DEFAULT_IRS,
                                               ack=ack_number,
                                               window=self.DEFAULT_IW)
        self.socket.send(data)
        self.receive()
        self.send(ack_packet)
        self.wait_until_retransmission_timer_expires()

        packets = self.get_retransmitted_packets()
        self.assertEquals(0, len(packets))
        self.assertTrue(self.socket.is_connected())

    def test_unaccepted_ack_ignored_when_updating_retransmission_queue(self):
        # An ACK beyond the send window must not dequeue anything.
        ack_number = self.DEFAULT_ISS + self.DEFAULT_IW + 1
        ack_packet = self.packet_builder.build(flags=[ACKFlag],
                                               seq=self.DEFAULT_IRS,
                                               ack=ack_number,
                                               window=self.DEFAULT_IW)
        self.socket.send(self.DEFAULT_DATA)
        self.send(ack_packet)
        self.wait_until_retransmission_timer_expires()

        packets = self.get_retransmitted_packets()
        self.assertEquals(1, len(packets))
        self.assertTrue(self.socket.is_connected())

    def test_retransmission_timer_off_after_acking_all_data(self):
        size = 10
        data = self.DEFAULT_DATA[:size]
        ack_number = self.DEFAULT_ISS + size
        ack_packet = self.packet_builder.build(flags=[ACKFlag],
                                               seq=self.DEFAULT_IRS,
                                               ack=ack_number,
                                               window=self.DEFAULT_IW)
        self.socket.send(data)
        self.receive()
        self.send(ack_packet)

        # Nothing outstanding -> the timer must be stopped.
        timer = self.socket.protocol.retransmission_timer
        self.assertFalse(timer.is_running())

    def test_retransmission_time_backed_off_after_retransmission(self):
        # RFC-style exponential back-off: the RTO doubles on expiry.
        rto_estimator = self.socket.protocol.rto_estimator
        first_rto = rto_estimator.get_current_rto()
        self.socket.send(self.DEFAULT_DATA)
        self.receive()
        self.wait_until_retransmission_timer_expires()
        # To ensure that the retransmission happened.
        self.receive()
        new_rto = rto_estimator.get_current_rto()

        self.assertEquals(2*first_rto, new_rto)

    def test_rtt_cleared_after_several_retransmissions(self):
        rto_estimator = self.socket.protocol.rto_estimator
        srtt = rto_estimator.srtt
        self.socket.send(self.DEFAULT_DATA)
        self.receive()
        # This will make the protocol think that it has already retransmitted
        # that number of times.
        self.socket.protocol.retransmissions = BOGUS_RTT_RETRANSMISSIONS
        self.wait_until_retransmission_timer_expires()

        # Smoothed RTT is discarded once it is deemed bogus.
        self.assertEquals(0, rto_estimator.srtt)
        self.assertEquals(srtt, rto_estimator.rttvar)

    def test_retransmission_timer_restarted_after_acking_some_data(self):
        size = 5
        data = self.DEFAULT_DATA[:size]
        ack_number = self.DEFAULT_ISS + size
        ack_packet = self.packet_builder.build(flags=[ACKFlag],
                                               seq=self.DEFAULT_IRS,
                                               ack=ack_number,
                                               window=self.DEFAULT_IW)
        rto_estimator = self.socket.protocol.rto_estimator
        first_rto = rto_estimator.get_current_rto()
        self.socket.send(data)
        self.receive()
        self.socket.send(data)
        self.receive()
        # ACK the first packet but not the second one.
        self.send(ack_packet)

        timer = self.socket.protocol.retransmission_timer
        new_rto = rto_estimator.get_current_rto()

        self.assertTrue(timer.is_running())
        # The first sampled RTO should be lesser than the initial one, fixed at
        # 1 second.
        self.assertLess(new_rto, first_rto)

    def test_retransmission_queue_empty_when_timer_expires(self):
        # Regression: the packet sender must survive a timer expiry that
        # finds an empty retransmission queue.
        fake_rto = 100
        size = 5
        data = self.DEFAULT_DATA[:size]
        ack_number = self.DEFAULT_ISS + size
        ack_packet = self.packet_builder.build(flags=[ACKFlag],
                                               seq=self.DEFAULT_IRS,
                                               ack=ack_number,
                                               window=self.DEFAULT_IW)
        rqueue = self.socket.protocol.rqueue
        rto_estimator = self.socket.protocol.rto_estimator
        timer = self.socket.protocol.retransmission_timer
        rto_estimator.rto = fake_rto
        thread_count = threading.active_count()
        # Send some data. This will enqueue the packet.
        self.socket.send(data)
        self.receive()
        # Now remove it from rqueue. This is quite ugly but it has to
        # be done this way.
        snd_una = self.socket.protocol.control_block.get_snd_una()
        snd_nxt = self.socket.protocol.control_block.get_snd_nxt()
        rqueue.remove_acknowledged_by(ack_packet, snd_una, snd_nxt)

        self.assertTrue(rqueue.empty())
        self.assertTrue(timer.is_running())

        # Wait until timer expires.
        time.sleep(2*fake_rto*CLOCK_TICK)

        # Check that we have the same number of threads (if the packet sender
        # crashed, it will be less).
        self.assertEquals(thread_count, threading.active_count())

    def test_retransmitted_packet_not_used_for_estimating_rto_1(self):
        # Scenario: a packet is transmitted and retransmitted immediately,
        # and the ACK comes after.
        size = 10
        data = self.DEFAULT_DATA[:size]
        ack_number = self.DEFAULT_ISS + size
        ack_packet = self.packet_builder.build(flags=[ACKFlag],
                                               seq=self.DEFAULT_IRS,
                                               ack=ack_number,
                                               window=self.DEFAULT_IW)
        rto_estimator = self.socket.protocol.rto_estimator
        self.socket.send(data)
        self.receive()
        self.wait_until_retransmission_timer_expires()
        self.receive()
        first_rto = rto_estimator.get_current_rto()
        self.send(ack_packet)
        new_rto = rto_estimator.get_current_rto()

        # Both RTOs should match, since the ACK arrived after the
        # retransmission of the packet.
        self.assertEquals(first_rto, new_rto)

    def test_retransmitted_packet_not_used_for_estimating_rto_2(self):
        # Scenario: two packets are transmitted. The first one is ACKed,
        # but the second one is retransmitted and ACKed after.
        size = 5
        data = self.DEFAULT_DATA[:size]
        ack_number1 = self.DEFAULT_ISS + size
        ack_number2 = self.DEFAULT_ISS + 2*size
        ack_packet1 = self.packet_builder.build(flags=[ACKFlag],
                                                seq=self.DEFAULT_IRS,
                                                ack=ack_number1,
                                                window=self.DEFAULT_IW)
        ack_packet2 = self.packet_builder.build(flags=[ACKFlag],
                                                seq=self.DEFAULT_IRS,
                                                ack=ack_number2,
                                                window=self.DEFAULT_IW)
        rto_estimator = self.socket.protocol.rto_estimator
        self.socket.send(data)
        self.receive()
        self.socket.send(data)
        self.receive()
        # ACK the first packet but not the second one.
        self.send(ack_packet1)
        self.wait_until_retransmission_timer_expires()
        self.receive()
        first_rto = rto_estimator.get_current_rto()
        self.send(ack_packet2)
        new_rto = rto_estimator.get_current_rto()

        # Both RTOs should match, since the ACK arrived after the
        # retransmission of the second packet.
        self.assertEquals(first_rto, new_rto)
class SYNRetransmissionTest(PTCTestCase, RetransmissionTestMixin):
    """Retransmission behavior during connection establishment."""

    def test_syn_packet_removed_from_retransmission_queue_after_syn_ack(self):
        # Once the SYN is acknowledged by a SYN/ACK it must not be resent.
        self.launch_client()
        syn_packet = self.receive(self.DEFAULT_TIMEOUT)
        received_seq_number = syn_packet.get_seq_number()
        seq_number = 1111
        syn_ack_packet = self.packet_builder.build(flags=[SYNFlag, ACKFlag],
                                                   seq=seq_number,
                                                   ack=received_seq_number+1)
        self.send(syn_ack_packet)
        self.wait_until_retransmission_timer_expires()

        packets = self.get_retransmitted_packets()
        self.assertEquals(0, len(packets))
5161054 | """
VQA2.0 dataset class
"""
import os
import pickle
from random import randint
from PIL import Image
import numpy as np
import torch.utils.data as data
from Utils.util import pad_sentence
def default_loader(path):
    """Open the image at *path* and return it as an RGB PIL image."""
    image = Image.open(path)
    return image.convert('RGB')
class VQADataset(data.Dataset):
    """VQA 2.0 dataset.

    Each item yields precomputed image features, a padded question, an
    answer label (soft multi-answer vector or a single class index) and the
    padded reference captions for the image.
    """

    def __init__(self, opt, data_file, loader=default_loader):
        self.img_dir = opt.img_dir
        self.loader = loader
        self.multi_answer = opt.multi_answer
        # sentence-length caps for captions and questions
        self.max_c_len = opt.c_max_sentence_len
        self.max_q_len = opt.q_max_sentence_len
        self.f = pickle.load(open(data_file, "rb"))
        self.data = self.f["data"]
        # vocabulary dictionaries (index->word, word->index)
        self.c_i2w, self.c_w2i = self.f["c_dicts"]
        self.q_i2w, self.q_w2i = self.f["q_dicts"]
        self.a_i2w, self.a_w2i = self.f["a_dicts"]
        self.c_vocab_size = len(self.c_i2w)
        self.q_vocab_size = len(self.q_i2w)
        self.a_vocab_size = len(self.a_i2w)
        self.special_symbols = self.f["special_symbols"]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        sample = self.data[index]

        # get question (truncate, record true length, pad with vocab_size)
        question = sample['question'][:self.max_q_len]
        question_len = len(question)
        question = pad_sentence(question, max_len=self.max_q_len, pad_idx=self.q_vocab_size)

        # get image: precomputed .npy features plus the raw-file path
        img_path = os.path.join(self.img_dir, "features", str(sample['image_id']) + '.npy')
        img = np.load(img_path)
        img_file = os.path.join(self.img_dir, sample['img_raw_folder'], sample['img_raw_file'])

        # get answers
        answers = sample['answers']
        if self.multi_answer:
            # soft label: per-answer confidence over the answer vocabulary
            label = np.zeros(self.a_vocab_size)
            for word, confidence in answers:
                label[word] = float(confidence)
        else:
            # NOTE(review): in this branch `label` is a plain int, so the
            # `.astype(np.float32)` in the return below would raise --
            # presumably multi_answer is always True here; TODO confirm.
            if len(answers) > 0:
                label = answers[0][0]
            else:
                label = randint(0, self.a_vocab_size-1)  # set it to random word if there are no answers (hack)

        # get reference captions (truncate, pad, keep true lengths)
        refs = []
        ref_lens = []
        for ref in sample['refs']:
            cap = ref['caption'][:self.max_c_len]
            refs.append(pad_sentence(cap, max_len=self.max_c_len, pad_idx=self.c_vocab_size))
            ref_lens.append(len(cap))

        return img, np.asarray(question), question_len, label.astype(np.float32), np.asarray(refs), np.asarray(ref_lens), img_file, sample['question_id']
6540026 | <gh_stars>1-10
import pytest
from pynormalizenumexp.expression.abstime import AbstimePattern
from pynormalizenumexp.expression.base import NumberModifier
from pynormalizenumexp.utility.dict_loader import ChineseCharacter, DictLoader
@pytest.fixture(scope="class")
def dict_loader():
    # One Japanese DictLoader instance shared across the test class.
    return DictLoader("ja")
class TestDictLoader:
    """Tests for DictLoader against the bundled Japanese dictionaries."""

    def test_load_chinese_character_dict(self, dict_loader: DictLoader):
        res = dict_loader.load_chinese_character_dict("chinese_character.json")
        expect = ChineseCharacter(character="〇", value=0, notation_type="09")

        # only check the first entry
        assert res[0] == expect

    def test_load_limited_abstime_expr_dict(self, dict_loader: DictLoader):
        # load abstime_expression.json as a representative example
        res = dict_loader.load_limited_abstime_expr_dict("abstime_expression.json")
        expect = AbstimePattern()
        expect.pattern = "世紀"
        expect.corresponding_time_position = ["seiki"]
        expect.process_type = []
        expect.ordinary = False
        expect.option = ""

        # only check the first entry
        assert res[0] == expect

    def test_load_number_modifier_dict(self, dict_loader: DictLoader):
        # load abstime_prefix.json as a representative example
        res = dict_loader.load_number_modifier_dict("abstime_prefix.json")
        expect = NumberModifier(pattern="だいたい", process_type="about")

        # only check the first entry
        assert res[0] == expect
| StarcoderdataPython |
4896782 | <gh_stars>0
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import sqlalchemy as sa
from h.models import Group, User
from h.models.group import ReadableBy
from h.util import group as group_util
class GroupService:
    def __init__(self, session, user_fetcher):
        """
        Create a new groups service.

        :param session: the SQLAlchemy session object
        :param user_fetcher: a callable for fetching users by userid
        """
        # (the old docstring also documented a ``publish`` callable that this
        # constructor does not accept)
        self.session = session
        self.user_fetcher = user_fetcher
def fetch(self, pubid_or_groupid):
"""
Fetch a group using either a groupid or a pubid.
:arg pubid_or_groupid: a string in either :mod:`~h.pubid` format
or as :attr:`h.models.Group.groupid`
:rtype: :class:`~h.models.Group` or ``None``
"""
if group_util.is_groupid(pubid_or_groupid):
return self.fetch_by_groupid(pubid_or_groupid)
return self.fetch_by_pubid(pubid_or_groupid)
def fetch_by_pubid(self, pubid):
"""Return a group with the given ``pubid`` or ``None``."""
return self.session.query(Group).filter_by(pubid=pubid).one_or_none()
def fetch_by_groupid(self, groupid):
"""
Return a group with the given ``groupid`` or ``None``.
:arg groupid: String in groupid format, e.g. ``group:<EMAIL>``.
See :class:`~h.models.Group`
:raises ValueError: if ``groupid`` is not a valid groupid.
See :func:`h.util.group.split_groupid`
:rtype: :class:`~h.models.Group` or ``None``
"""
parts = group_util.split_groupid(groupid)
authority = parts["authority"]
authority_provided_id = parts["authority_provided_id"]
return (
self.session.query(Group)
.filter_by(authority=authority)
.filter_by(authority_provided_id=authority_provided_id)
.one_or_none()
)
def filter_by_name(self, name=None):
"""
Return a Query of all Groups, optionally filtered by name.
If ``name`` is present, groups will be filtered by name. Filtering
is case-insensitive and wildcarded. Otherwise, all groups will be
retrieved.
:rtype: sqlalchemy.orm.query.Query
"""
filter_terms = []
if name:
filter_terms.append(
sa.func.lower(Group.name).like("%{}%".format(name.lower()))
)
return (
self.session.query(Group)
.filter(*filter_terms)
.order_by(Group.created.desc())
)
def groupids_readable_by(self, user):
"""
Return a list of pubids for which the user has read access.
If the passed-in user is ``None``, this returns the list of
world-readable groups.
:type user: `h.models.user.User`
"""
readable = Group.readable_by == ReadableBy.world
if user is not None:
readable_member = sa.and_(
Group.readable_by == ReadableBy.members,
Group.members.any(User.id == user.id),
)
readable = sa.or_(readable, readable_member)
return [
record.pubid for record in self.session.query(Group.pubid).filter(readable)
]
def groupids_created_by(self, user):
"""
Return a list of pubids which the user created.
If the passed-in user is ``None``, this returns an empty list.
:type user: `h.models.user.User` or None
"""
if user is None:
return []
return [
g.pubid for g in self.session.query(Group.pubid).filter_by(creator=user)
]
def groups_factory(context, request):
    """Return a GroupService instance for the passed context and request."""
    fetcher = request.find_service(name="user").fetch
    return GroupService(session=request.db, user_fetcher=fetcher)
| StarcoderdataPython |
6469979 | #!/usr/bin/env python3
import sys, requests
from requests.auth import HTTPBasicAuth
# CLI: <team> <repo> -- list Bitbucket commits (and, further down, repos).
team = sys.argv[1]
repo = sys.argv[2]

## Login credentials; leave both as None for anonymous access.
# BUGFIX: the file contained bare `<PASSWORD>` placeholder tokens, which are
# not valid Python; they are replaced with None here.
username = None
password = None

full_repo_list = []

# Start with the commits endpoint for the given repository; Bitbucket
# paginates the response via a 'next' URL in each page.
next_page_url = 'https://api.bitbucket.org/2.0/repositories/%s/%s/commits' % (team, repo)

# Keep fetching pages while there's a page to fetch
while next_page_url is not None:
    if username is not None and password is not None:
        response = requests.get(next_page_url, auth=HTTPBasicAuth(username, password))
    else:
        response = requests.get(next_page_url)
    page_json = response.json()
    print(response.text)
    # NOTE(review): this break short-circuits pagination and makes everything
    # below unreachable -- it looks like leftover debugging. Remove it to
    # actually parse repositories and follow the 'next' links.
    break

    # Parse repositories from the JSON
    for repo in page_json['values']:
        reponame = repo['slug']
        repohttp = repo['links']['clone'][0]['href']
        repogit = repo['links']['clone'][1]['href']
        print(reponame + "," + repohttp + "," + repogit)
        full_repo_list.append(repo['slug'])

    # Get the next page URL, if present
    # It will include same query parameters, so no need to append them again
    next_page_url = page_json.get('next', None)
| StarcoderdataPython |
6533943 | <reponame>rouseguy/cricket-analytics
import streamlit as st
from streamlit_folium import folium_static
import folium
import altair as alt
import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import seaborn as sns
from matplotlib.cm import hot, viridis, Blues, plasma, magma, Greens
import plotly.express as px
sns.set()
import chart_studio.plotly as py
import cufflinks as cf
from plotly.offline import download_plotlyjs, init_notebook_mode, plot, iplot
cf.go_offline()
import plotly.express as px
import plotly.graph_objs as go
# Resolve the directory containing this script so the CSVs load regardless
# of the current working directory.
absolute_path = os.path.abspath(__file__)
path = os.path.dirname(absolute_path)
matches = pd.read_csv(path+'/matches.csv')
deliveries = pd.read_csv(path+'/deliveries.csv')
# Normalise franchise renames/rebrands to one canonical team name so that
# seasons before and after a rename aggregate together.
matches.replace({'Sunrisers Hyderabad':'Hyderabad Sunriser','Deccan Chargers':'Hyderabad Sunriser',\
                'Rising Pune Supergiants':'Pune Supergiant','Delhi Daredevils':'Delhi Capitals',\
                'Pune Warriors':'Pune Warriors','Punjab Kings':'Kings XI Punjab',
                'Rising Pune Supergiant':'Pune Supergiant'}, inplace= True)
@st.cache(hash_funcs={dict: lambda _: None})
def make_fig():
    """Build and cache the first chart in a dict keyed 'f1'.

    NOTE(review): `Achart` is not defined or imported anywhere in this module,
    so calling this raises NameError -- presumably an import is missing;
    confirm against the original project.
    """
    some_fig = Achart().graph1()
    cached_dict = {'f1': some_fig}
    return cached_dict
def get_team1_name():
    """Return the sorted unique values of the ``team1`` column."""
    team1_values = matches['team1'].values
    return np.unique(team1_values)
def get_team2_name():
    """Return the sorted unique values of the ``team2`` column."""
    team2_values = matches['team2'].values
    return np.unique(team2_values)
def get_city_name():
    # NOTE(review): despite the function name, this reads the 'country'
    # column, not a city column -- confirm whether 'city' was intended.
    return np.unique(matches['country'].values)
def comparison(team1, team2):
    """Plot per-season win counts for head-to-head matches of two teams."""
    involves_t1 = (matches['team1'] == team1) | (matches['team2'] == team1)
    involves_t2 = (matches['team1'] == team2) | (matches['team2'] == team2)
    compare = matches[involves_t1 & involves_t2]
    fig = plt.figure(figsize=(10, 5))
    sns.countplot(x='season', hue='winner', data=compare)
    return st.pyplot(fig)
def winper():
    """Return a DataFrame with, per team, matches played/won/lost and
    win/loss percentages, sorted by win percentage descending.
    """
    # Work on an explicit copy: the original sliced `matches` and then added
    # a column, which triggers pandas' SettingWithCopyWarning and risks
    # writing into the parent frame.
    winloss = matches[['team1', 'team2', 'winner']].copy()
    # The loser is whichever of the two teams is not the winner.
    winloss['loser'] = winloss.apply(
        lambda x: x['team2'] if x['team1'] == x['winner'] else x['team1'], axis=1)
    # Matches played = appearances as team1 plus appearances as team2.
    played = (matches['team1'].value_counts() + matches['team2'].value_counts()).reset_index()
    played.columns = ['team', 'played']
    wins = matches['winner'].value_counts().reset_index()
    wins.columns = ['team', 'won']
    played = played.merge(wins, left_on='team', right_on='team', how='inner')
    loss = winloss['loser'].value_counts().reset_index()
    loss.columns = ['team', 'lost']
    played = played.merge(loss, left_on='team', right_on='team', how='inner')
    played['%win'] = round((played['won'] / played['played']) * 100, 2)
    played['%loss'] = round((played['lost'] / played['played']) * 100, 2)
    played = played.sort_values(by='%win', ascending=False)
    return played
def venue(choice):
    """Render a folium choropleth map of India shaded by the chosen statistic.

    :param choice: column name in winner.csv (keyed by state_code) to plot.
    """
    # State boundary polygons, joined to the data on 'state_code'.
    json1 = path+f"/states_india.geojson"
    m = folium.Map(location=[23.47,77.94], tiles='CartoDB Dark Matter', name="Light Map",
    zoom_start=5, attr="iplnani.com")
    # Per-state winner statistics used to colour the map.
    win_venue = path+f"/winner.csv"
    win_venue_data = pd.read_csv(win_venue)
    choice_selected=choice
    folium.Choropleth(
    geo_data=json1,
    name="choropleth",
    data=win_venue_data,
    columns=["state_code",choice_selected],
    key_on="feature.properties.state_code",
    fill_color="YlOrRd",
    fill_opacity=0.7,
    line_opacity=.1,
    legend_name=choice_selected
    ).add_to(m)
    # Clickable state outlines with the state name in a popup.
    folium.features.GeoJson(path+'/states_india.geojson',name="States", popup=folium.features.GeoJsonPopup(fields=["st_nm"])).add_to(m)
    folium_static(m, width=700, height=500)
def SendMail():
    """Send a fixed high-temperature alert e-mail via Gmail's SSL SMTP host.

    Credentials below are redacted placeholders; supply real values (ideally
    from environment variables, not source code) before use.
    """
    import smtplib
    sender = "<EMAIL>"
    password = "<PASSWORD>"
    receiver = "<EMAIL>"
    content = "Subject:TEMPERTURE ALERT\n\nhello mr B4T\nyour temperature is too high.check your room please."
    # Use the connection as a context manager so the session is properly
    # terminated even if login() or sendmail() raises (the original leaked
    # the connection on failure because quit() was only reached on success).
    with smtplib.SMTP_SSL("smtp.gmail.com") as mail:
        mail.login(sender, password)
        mail.sendmail(sender, receiver, content)
1638611 | <filename>app/api/auth.py
import functools
from datetime import datetime, timezone, timedelta

import bcrypt
import jwt
from flask import current_app, request

from app.db import get_db
def token_required(access=True):
    """Decorator factory guarding a view behind a JWT bearer token.

    :param access: if True, an access token is required; if False, a refresh
        token is required (used by the token-refresh endpoint).

    The wrapped view receives the authenticated user's id as its first
    positional argument. On failure a 401 API-response dict is returned.
    """
    def wrapper(view):
        @functools.wraps(view)  # preserve the view's name/docstring
        def wrapped_view(*args, **kwargs):
            header = request.headers.get('Authorization', '')
            # BUGFIX: `get` above defaults to '', so the original
            # `if header is not None` was always true and the
            # '"Authorization" header required' branch was unreachable.
            # Test truthiness instead.
            if header:
                try:
                    [auth_type, token] = header.split(' ')
                except ValueError:
                    return {
                        'status': 401,
                        'success': False,
                        'error': 'Invalid header',
                        'data': None
                    }
                check_result = check_token(token)
                # The token must be a Bearer token, valid, and of the kind
                # (access vs refresh) this endpoint requires.
                if (
                    auth_type == 'Bearer' and
                    check_result['success'] and
                    (
                        (check_result['data']['access'] and access) or
                        (not check_result['data']['access'] and not access)
                    )
                ):
                    return view(
                        check_result['data']['user_id'],
                        *args,
                        **kwargs
                    )
                else:
                    return {
                        'status': 401,
                        'success': False,
                        'error': check_result['error'] or 'Login required',
                        'data': None
                    }
            else:
                return {
                    'status': 401,
                    'success': False,
                    'error': '"Authorization" header required',
                    'data': None
                }
        return wrapped_view
    return wrapper
def check_token(jwt_token, allowExpired=False):
    """Validate a JWT: not revoked, decodable/unexpired, and for a real user.

    :param jwt_token: the encoded JWT string.
    :param allowExpired: skip the expiry check (used by logout so that
        already-expired tokens can still be revoked).
    :return: dict with 'success', 'error', and the decoded payload under
        'data' on success.
    """
    db = get_db()
    # Tokens recorded in the revocation table (by logout) are rejected first.
    tokens = db.execute(
        'SELECT * FROM revoked_token WHERE token = ?',
        (jwt_token,)
    ).fetchall()
    if len(tokens) > 0:
        return {
            'success': False,
            'error': 'Token revoked',
            'data': None
        }
    try:
        payload = jwt.decode(
            jwt_token,
            current_app.config['SECRET_KEY'],
            algorithms=['HS256'],
            options={
                'verify_exp': not allowExpired
            }
        )
        user = db.execute(
            'SELECT * FROM user WHERE id = ?',
            (payload['user_id'],)
        ).fetchone()
        if user is None:
            # A well-formed token for a user that no longer exists is
            # treated the same as an undecodable token.
            raise jwt.DecodeError
        return {
            'success': True,
            'error': None,
            'data': payload
        }
    except (UnicodeDecodeError, jwt.ExpiredSignatureError, jwt.DecodeError):
        return {
            'success': False,
            'error': 'Token invalid',
            'data': None
        }
def register(request_data):
    """Create a new user account; returns a status-dict API response."""
    username = request_data.get('username')
    password = request_data.get('password')
    db = get_db()
    error = None
    # Validate the input, then make sure the username is free.
    if username is None or len(username) == 0:
        error = 'Username is required'
    elif password is None or len(password) == 0:
        error = 'Password is required'
    else:
        existing = db.execute(
            'SELECT * FROM user WHERE username = ?',
            (username, )
        ).fetchone()
        if existing is not None:
            error = 'User is already registered'
    if error is not None:
        return {
            'status': 400,
            'success': False,
            'error': error,
            'data': None
        }
    # Store only a bcrypt hash of the password, never the plaintext.
    db.execute(
        'INSERT INTO user (username, password_hash) ' +
        'VALUES (?, ?)',
        (
            username,
            bcrypt.hashpw(password.encode('utf-8'), bcrypt.gensalt())
        )
    )
    db.commit()
    return {
        'status': 200,
        'success': True,
        'error': error,
        'data': None
    }
def login(request_data):
    """Authenticate a user and issue an access/refresh JWT pair.

    :param request_data: mapping with 'username' and 'password' keys.
    :return: API response dict; on success 'data' holds 'access_token'
        (15-minute lifetime) and 'refresh_token' (30-day lifetime).
    """
    username = request_data.get('username')
    password = request_data.get('password')
    db = get_db()
    error = None
    user = None
    if username is None or len(username) == 0:
        error = 'Username is required'
    elif password is None or len(password) == 0:
        error = 'Password is required'
    else:
        user = db.execute(
            'SELECT * FROM user WHERE username = ?',
            (username,)
        ).fetchone()
        if user is None:
            error = 'Login is incorrect'
        else:
            user = dict(user)
            # Compare the supplied password against the stored bcrypt hash.
            if not bcrypt.checkpw(
                password.encode('utf-8'),
                user['password_hash']
            ):
                error = 'Password is incorrect'
    if error is None:
        now = datetime.now(timezone.utc)
        # Short-lived access token; long-lived refresh token.
        access_expires = now + timedelta(minutes=15)
        refresh_expires = now + timedelta(days=30)
        secret = current_app.config['SECRET_KEY']
        access_payload = {
            'user_id': user['id'],
            'iat': now.timestamp(),
            'exp': access_expires.timestamp(),
            'access': True
        }
        refresh_payload = {
            'user_id': user['id'],
            'iat': now.timestamp(),
            'exp': refresh_expires.timestamp(),
            'access': False
        }
        return {
            'status': 200,
            'success': True,
            'error': error,
            'data': {
                'access_token': jwt.encode(
                    access_payload,
                    secret,
                    algorithm='HS256'
                ),
                'refresh_token': jwt.encode(
                    refresh_payload,
                    secret,
                    algorithm='HS256'
                )
            }
        }
    else:
        return {
            'status': 400,
            'success': False,
            'error': error,
            'data': None
        }
@token_required(access=False)
def refresh(user_id,):
    """Issue a fresh 15-minute access token for the authenticated user."""
    issued_at = datetime.now(timezone.utc)
    expires_at = issued_at + timedelta(minutes=15)
    claims = {
        'user_id': user_id,
        'iat': issued_at.timestamp(),
        'exp': expires_at.timestamp(),
        'access': True
    }
    token = jwt.encode(claims, current_app.config['SECRET_KEY'], algorithm='HS256')
    return {
        'status': 200,
        'success': True,
        'error': None,
        'data': {
            'token': token
        }
    }
def logout(request_data):
    """Revoke an access/refresh token pair so neither can be used again."""
    access_token = request_data.get('access_token')
    refresh_token = request_data.get('refresh_token')
    db = get_db()
    error = None
    # Both tokens must be present and structurally valid (expired is fine:
    # allowExpired lets stale tokens still be revoked).
    if access_token is None:
        error = 'Access token is required'
    elif refresh_token is None:
        error = 'Refresh token is required'
    elif not check_token(access_token, allowExpired=True)['success']:
        error = 'Access token is invalid'
    elif not check_token(refresh_token, allowExpired=True)['success']:
        error = 'Refresh token is invalid'
    if error is not None:
        return {
            'status': 400,
            'success': False,
            'error': error,
            'data': None
        }
    # Record both tokens in the revocation table consulted by check_token.
    db.execute(
        'INSERT INTO revoked_token (token) ' +
        'VALUES (?), (?)',
        (access_token, refresh_token)
    )
    db.commit()
    return {
        'status': 200,
        'success': True,
        'error': error,
        'data': None
    }
| StarcoderdataPython |
1890856 | #/usr/bin/env python
# vim: set fileencoding=utf-8
from ghost import Ghost
from config import COOKIE_FILE, LOGIN_ID, LOGIN_PW
import urllib2
import cookielib
import Cookie
class NaverCrawler:
    """Naver crawler driven by a headless Ghost (WebKit) browser session."""

    def __init__(self, id, pw, displayFlag = False):
        """Create a crawler backed by a fresh Ghost instance and log in."""
        self.ghost = Ghost(display = displayFlag, wait_timeout = 20)
        self.currentPage = None
        self.login(id, pw)

    def openPage(self, url):
        """Open the given page; no-op if we are already on that URL."""
        if self.currentPage == url:
            return
        self.ghost.open(url)
        self.ghost.wait_for_page_loaded()
        self.currentPage = url

    def login(self, id, pw):
        """Perform Naver login via the login form on the main page."""
        self.openPage('http://www.naver.com')
        # The login form lives inside an iframe, so fill it and click the
        # login button from JavaScript.
        self.ghost.evaluate("""
            (function() {
                var innerDoc = document.getElementById('loginframe').contentWindow.document;
                innerDoc.getElementById('id').value = '%s';
                innerDoc.getElementById('pw').value = '%s';
                innerDoc.getElementsByClassName('btn_login')[0].click();
            })();
        """ % (id, pw), expect_loading = True)
        # Wait for the search box, which appears once login has completed.
        self.ghost.wait_for_selector('#query')

    def cloneCookieJar(self):
        """Return a copy of the current session cookies as an LWPCookieJar."""
        cookieJar = cookielib.LWPCookieJar()
        self.ghost.save_cookies(cookieJar)
        return cookieJar

    def main_search(self, query):
        """Submit a search from the Naver main page."""
        # BUGFIX: this URL had been corrupted to 'http://www.<EMAIL>'; restore
        # the Naver main page (also keeps openPage's same-URL dedup working,
        # matching the URL used by __init__/login).
        self.openPage('http://www.naver.com')
        self.ghost.wait_for_selector('#query')
        self.ghost.fill("#sform", { "query": query })
        self.ghost.fire_on('#sform', 'submit', expect_loading = True)
if __name__ == "__main__":
    # Log in with the configured credentials and persist the session cookies
    # so other scripts can reuse the authenticated session.
    crawler = NaverCrawler(LOGIN_ID, LOGIN_PW, False)
    cj = crawler.cloneCookieJar()
    cj.save(COOKIE_FILE)
240082 | from collections import defaultdict
from common.attacks.tools.hash import CollisionGeneratorBase
class MulticollisionGenerator(CollisionGeneratorBase):
    """Generates multicollisions for an iterated (Merkle-Damgard) hash.

    Based on Joux's "Multicollisions in iterated hash functions. Application
    to cascaded constructions."
    """

    def _get_rand_message_and_state(self, state):
        # Draw one random block and run it through the compression function.
        block = self.byte_generator.value(self.block_size)
        return block, self._iterate_compress_function(block, state)

    def _find_collisions_for(self, initial_state):
        # Sample random blocks until two of them map `initial_state` to the
        # same state; return that state and the colliding pair.
        seen = defaultdict(list)
        while True:
            block, state = self._get_rand_message_and_state(initial_state)
            bucket = seen[state]
            bucket.append(block)
            if len(bucket) > 1:
                return state, bucket

    def value(self, n, collisions=None, state=None):
        # Chain n single-block collisions, yielding 2^n colliding messages.
        # `collisions` and `state` allow resuming from a previous call.
        if collisions is None:
            collisions = [str()]
        if state is None:
            state = self.resumable_hash.initial_state()
        for _ in range(n):
            state, pair = self._find_collisions_for(state)
            collisions = [prefix + block for prefix in collisions for block in pair]
        return collisions, state
12844625 | from pprint import pprint
from st2common.runners.base_action import Action
class PrintConfigAction(Action):
    """StackStorm action that pretty-prints the pack's config for debugging."""

    def run(self):
        separator = "=" * 9
        print(separator)
        pprint(self.config)
        print(separator)
6703113 | from TextSearchEngine.mergeResults import mergeResults
from TextSearchEngine.findInTextJson import findInTextJson
import re
def EXACT_WORD(word, caseSensitive = False):
    """Build a matcher that finds `word` as a whole word (word-boundary match).

    Returns a function(data, finderFunction) applying the matcher via the
    finder, yielding a merged match result or None.
    """
    def matcherFunction(text):
        flags = 0
        if not caseSensitive:
            flags = re.IGNORECASE
        # BUGFIX: escape the word so regex metacharacters in it (e.g. "C++",
        # "a.b") are matched literally instead of being treated as a pattern.
        result = re.search(r'\b' + re.escape(word) + r'\b', text, flags)
        if result is not None:
            return (result.start(), result.end())
        else:
            return None
    def returnFunction(data, finderFunction = findInTextJson):
        return finderFunction(data, matcherFunction)
    return returnFunction
def PARTIAL_WORD(word, caseSensitive = False):
    """Build a matcher that finds `word` anywhere in the text
    (substring/regex match, no word-boundary requirement)."""
    def matcherFunction(text):
        flags = re.IGNORECASE if not caseSensitive else 0
        match = re.search(word, text, flags)
        if match is None:
            return None
        return (match.start(), match.end())
    def returnFunction(data, finderFunction = findInTextJson):
        return finderFunction(data, matcherFunction)
    return returnFunction
def AND(*textMatchers):
    """Combine matchers; every matcher must match or the result is None."""
    def returnFunction(data, finderFunction = findInTextJson, mergeFunction = mergeResults):
        results = []
        for matcher in textMatchers:
            outcome = matcher(data, finderFunction)
            if outcome is None:
                # One miss fails the whole conjunction.
                return None
            results.append(outcome)
        return mergeFunction(results)
    return returnFunction
def OR(*textMatchers):
    """Combine matchers; succeeds if at least one of them matches."""
    def returnFunction(data, finderFunction = findInTextJson, mergeFunction = mergeResults):
        results = [outcome
                   for outcome in (matcher(data, finderFunction) for matcher in textMatchers)
                   if outcome is not None]
        if not results:
            return None
        return mergeFunction(results)
    return returnFunction
9669592 | <reponame>llduncan/usgs-map-gwmodels
"""
Functions for making plots that compare model input to source data
"""
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from mfsetup.units import convert_volume_units, convert_time_units
def plot_wateruse(wel_files, perioddata, add_data=None,
                  wel_flux_col='q',
                  model_volume_units='$m^3$', model_time_units='day',
                  plot_volume_units='mgal', plot_time_units='day',
                  outfile=None):
    """Plot total WEL-package pumping by stress period, optionally alongside
    comparison datasets, with twin y-axes in model and plot units.

    Parameters
    ----------
    wel_files : dict
        Maps stress period number -> WEL external file path. A header line
        with column names is assumed, for example ``#k,i,j,q,boundname``.
    perioddata : DataFrame or csv path
        Stress period table with columns 'per', 'start_datetime',
        'end_datetime'.
    add_data : dict, optional
        Maps legend label -> {'data': DataFrame or csv path}; each dataset
        must have columns 'q' and 'start_datetime'.
    wel_flux_col : str
        Column in the WEL files holding the flux values.
    model_volume_units : str
        Volume units of the model fluxes.
    model_time_units : str
        Time units of the model fluxes.
    plot_volume_units : str
        Volume units for the secondary (converted) axis.
    plot_time_units : str
        Time units for the secondary (converted) axis.
    outfile : str or Path, optional
        If given, save the figure there and close it; otherwise return the
        matplotlib Axes.

    Returns
    -------
    matplotlib.axes.Axes or None
    """
    # read the stress period information
    if not isinstance(perioddata, pd.DataFrame):
        perioddata = pd.read_csv(perioddata)
    else:
        perioddata = perioddata.copy()
    perioddata.index = perioddata['per']
    dfs = []
    for i, f in wel_files.items():
        df = pd.read_csv(f, delim_whitespace=True)
        # strip the leading '#' off the header column names
        df.columns = [c.strip('#') for c in df.columns]
        df['per'] = i
        df['start_datetime'] = perioddata.loc[i, 'start_datetime']
        df['end_datetime'] = perioddata.loc[i, 'end_datetime']
        dfs.append(df)
    df = pd.concat(dfs)
    # sum the model pumping by stress period
    period_sums = df.groupby('per').first()
    period_sums[wel_flux_col] = df.groupby('per')[wel_flux_col].sum()
    # fill nan values (from any periods without wel files) with 0s
    period_sums = period_sums.reindex(range(period_sums.index.max()))
    period_sums['start_datetime'] = perioddata['start_datetime']
    period_sums['end_datetime'] = perioddata['end_datetime']
    period_sums[wel_flux_col].fillna(0, inplace=True)
    period_sums.index = pd.to_datetime(period_sums['start_datetime'])
    period_sums['WEL package input'] = period_sums['q']
    period_sums = period_sums[['WEL package input', 'start_datetime', 'end_datetime']]
    # convert units
    model_vol_conv = convert_volume_units(model_volume_units, plot_volume_units)
    model_time_conv = convert_time_units(model_time_units, plot_time_units)
    model_conv = model_vol_conv * model_time_conv
    # plot any additional comparison data
    if add_data is not None:
        for label, items in add_data.items():
            # read the stress period information
            if not isinstance(items['data'], pd.DataFrame):
                items['data'] = pd.read_csv(items['data'])
            req_cols = {'q', 'start_datetime'}
            assert not req_cols.difference(items['data'].columns), \
                f"add_data: {label} data must have columns: {req_cols}"
            items['data']['start_datetime'] = pd.to_datetime(items['data']['start_datetime'])
            aux_period_sums = items['data'].groupby('start_datetime').first()
            aux_period_sums[label] = items['data'].groupby('start_datetime')['q'].sum()
            # fill nan values (from any periods without wel files) with 0s
            #aux_period_sums[label].fillna(0, inplace=True)
            aux_period_sums['start_datetime'] = aux_period_sums.index
            # outer join so times present in only one dataset are kept
            period_sums = period_sums.join(aux_period_sums[[label]], how='outer')
        # NOTE(review): leftover debug assignment; unused.
        j=2
    # forward fill nan WEL values values
    # (where other times may have been inserted)
    period_sums['WEL package input'] = period_sums['WEL package input'].ffill()
    #period_sums = period_sums.resample('M').mean() #.ffill()
    # make a plot
    fig, ax = plt.subplots(figsize=(11, 8.5))
    ax = period_sums.plot(ax=ax)
    units_text = f'{model_volume_units}/{model_time_units}'
    ax.set_ylabel(f'Pumpage, in {units_text}')
    ax.set_xlabel('')
    # second axis with another volume unit
    def second_axis_conversion(x):
        return x * model_conv
    def second_axis_conversion_r(x):
        return x * 1 / model_conv
    ax2 = ax.secondary_yaxis('right', functions=(second_axis_conversion,
                                                 second_axis_conversion_r))
    ax2.set_ylabel(f'Pumpage, in {plot_volume_units}/{plot_time_units}')
    #format_xtick_labels(period_sums, ax, maxlabels=30, date_format='%Y-%m-%d')
    h, l = ax.get_legend_handles_labels()
    # append each series' mean (in plot units) to its legend label
    means = (period_sums.mean(axis=0) * model_conv).to_dict()
    plot_units_text = f'{plot_volume_units}/{plot_time_units}'
    labels_with_means = []
    for label in l:
        new_label = label
        if label in means:
            new_label += f' (mean: {means[label]:g} {plot_units_text})'
        labels_with_means.append(new_label)
    ax.legend(h, labels_with_means)
    if outfile is not None:
        Path(outfile).parent.mkdir(parents=True, exist_ok=True)
        plt.savefig(outfile)
        plt.close()
    else:
        return ax
def format_xtick_labels(df, ax, maxlabels=30, date_format='%Y-%m-%d'):
    """Clean up the xtick labels on a time axis.

    Cap the number of labels to maxlabels, and format
    dates to date_format.
    """
    labels = df.index.strftime(date_format).tolist()
    # Show every `step`-th label; blank out the rest.
    step = max(int(np.floor(len(labels) / maxlabels)), 1)
    thinned = []
    for kept in labels[::step]:
        thinned.append(kept)
        thinned.extend([''] * (step - 1))
    ax.set_xticklabels(thinned[:len(labels)])
5105967 | <filename>easy_test/metas/meta_delete.py
from easy_test.metas.meta_html import HtmlMeta
from easy_test.util import contains_option
class DeleteMeta(HtmlMeta):
    """Meta for 'delete' test definitions: on top of HtmlMeta's checks it
    requires a 'url' option to be present."""
    def validate(cls, meta, module, name):
        # NOTE(review): `validate` takes `cls` as an ordinary first parameter
        # and forwards it explicitly to super().validate(cls, ...) --
        # presumably HtmlMeta.validate is declared the same (metaclass-style)
        # way; confirm against the base class.
        super().validate(cls, meta, module, name)
        # url is mandatory for delete tests
        if not contains_option(meta, 'url'):
            raise RuntimeError(
                "Test class %s.%s doesn't set the url " % (module, name)
            )
9656569 | <reponame>bpbpublications/Programming-Techniques-using-Python
from threading import Condition, Thread
from time import sleep
import random
mylist = []
def my_producer():
    """Produce five random items into `mylist` under the condition lock,
    then notify the waiting consumer."""
    with mycond_obj:  # acquire/release via context manager
        print("Items producing starts!!!!")
        for _ in range(1, 6):
            myitem = random.randint(1, 80)
            mylist.append(myitem)
            print(f"Producer producing item no. {myitem}")
            sleep(1)
        print("Notification given to consumer")
        mycond_obj.notify()
def my_consumer():
    """Wait for the producer's notification, then print the produced items."""
    with mycond_obj:  # acquire/release via context manager
        print("Waiting for update by the consumer")
        mycond_obj.wait()
        print("Notification received from producer and item is getting consumed")
        for itemnum in mylist:
            print(itemnum, end=' ')
    print()
# Shared condition variable guarding access to `mylist`.
mycond_obj = Condition()
myt1 = Thread(target=my_consumer)
myt2 = Thread(target=my_producer)
# NOTE(review): the consumer is started first so it can take the lock and
# wait() before the producer notifies; if the producer ever won the race for
# the lock, its notify() would be lost and the consumer would wait forever.
myt1.start()
myt2.start()
myt1.join()
myt2.join()
print("Main Thread")
| StarcoderdataPython |
9756031 | # -*- coding: utf-8 -*-
# Generated by Django 1.10 on 2016-08-25 23:16
from __future__ import unicode_literals
from decimal import Decimal
from django.db import migrations, models
import django.db.models.deletion
import djmoney.models.fields
class Migration(migrations.Migration):
    """Initial migration: creates seven balance-sheet line-item models, all
    sharing the same shape (name, description, USD money amount, FK to
    ventures.Venture)."""
    initial = True
    dependencies = [
        ('ventures', '0001_initial'),
    ]
    operations = [
        migrations.CreateModel(
            name='AccountsPayable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[('USD', 'US Dollar')], default=b'USD', editable=False, max_length=3)),
                ('amount', djmoney.models.fields.MoneyField(decimal_places=2, default=Decimal('0.0'), default_currency=b'USD', max_digits=20)),
                ('venture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ventures.Venture')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='AccountsReceivable',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[('USD', 'US Dollar')], default=b'USD', editable=False, max_length=3)),
                ('amount', djmoney.models.fields.MoneyField(decimal_places=2, default=Decimal('0.0'), default_currency=b'USD', max_digits=20)),
                ('venture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ventures.Venture')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='CashReserve',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[('USD', 'US Dollar')], default=b'USD', editable=False, max_length=3)),
                ('amount', djmoney.models.fields.MoneyField(decimal_places=2, default=Decimal('0.0'), default_currency=b'USD', max_digits=20)),
                ('venture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ventures.Venture')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='GenericCurrentAsset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[('USD', 'US Dollar')], default=b'USD', editable=False, max_length=3)),
                ('amount', djmoney.models.fields.MoneyField(decimal_places=2, default=Decimal('0.0'), default_currency=b'USD', max_digits=20)),
                ('venture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ventures.Venture')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='GenericCurrentLiability',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[('USD', 'US Dollar')], default=b'USD', editable=False, max_length=3)),
                ('amount', djmoney.models.fields.MoneyField(decimal_places=2, default=Decimal('0.0'), default_currency=b'USD', max_digits=20)),
                ('venture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ventures.Venture')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='GenericNonCurrentAsset',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[('USD', 'US Dollar')], default=b'USD', editable=False, max_length=3)),
                ('amount', djmoney.models.fields.MoneyField(decimal_places=2, default=Decimal('0.0'), default_currency=b'USD', max_digits=20)),
                ('venture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ventures.Venture')),
            ],
            options={
                'abstract': False,
            },
        ),
        migrations.CreateModel(
            name='GenericNonCurrentLiability',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True)),
                ('amount_currency', djmoney.models.fields.CurrencyField(choices=[('USD', 'US Dollar')], default=b'USD', editable=False, max_length=3)),
                ('amount', djmoney.models.fields.MoneyField(decimal_places=2, default=Decimal('0.0'), default_currency=b'USD', max_digits=20)),
                ('venture', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='ventures.Venture')),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
3340909 | <filename>classifaedes/hparams_lib_test.py
# Copyright 2019 Verily Life Sciences LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for hparams_lib."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from classifaedes import hparams_lib
import tensorflow.compat.v1 as tf
class HparamsLibTest(tf.test.TestCase):
    def testIndentedSerialize(self):
        """Tests that our slightly customized serialization can be parsed.
        hparams_lib._human_serialize() uses indented JSON to improve readability;
        this round-trip verifies parse_json still accepts that output.
        """
        hps1 = hparams_lib.defaults()
        serialized = hparams_lib._human_serialize(hps1)
        hps2 = hparams_lib.defaults()
        hps2.parse_json(serialized)
        # Every hyperparameter must survive the round trip unchanged.
        self.assertDictEqual(hps1.values(), hps2.values())
if __name__ == '__main__':
    # Delegate to the TensorFlow test runner.
    tf.test.main()
| StarcoderdataPython |
6603832 | <gh_stars>1000+
# Generated by Django 3.2.11 on 2022-01-31 12:12
from django.db import migrations, models
class Migration(migrations.Migration):
    """Completes a column rename: drops the legacy header/provider-data
    fields and renames their ``*_json`` replacements (populated by the
    previous data migration) back to the original names."""
    dependencies = [
        ('integrations', '0009_migrate_headers_data'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='httpexchange',
            name='request_headers',
        ),
        migrations.RemoveField(
            model_name='httpexchange',
            name='response_headers',
        ),
        migrations.RemoveField(
            model_name='integration',
            name='provider_data',
        ),
        migrations.RenameField(
            model_name='httpexchange',
            old_name='request_headers_json',
            new_name='request_headers',
        ),
        migrations.RenameField(
            model_name='httpexchange',
            old_name='response_headers_json',
            new_name='response_headers',
        ),
        migrations.RenameField(
            model_name='integration',
            old_name='provider_data_json',
            new_name='provider_data',
        ),
    ]
| StarcoderdataPython |
4806456 | from bs4 import BeautifulSoup
import re
# Parse the locally saved Unicode "Full Emoji List" page into rows of cell text.
with open('full-emoji-list.html') as f:
    html = f.read()
# Drop the <td> cells holding inline base64 emoji images to shrink the page.
# NOTE(review): re.DOTALL is not set, so this assumes each such cell sits on
# a single line -- confirm against the saved HTML.
html = re.sub(r"<td class.*?src='data:image/png;base64.*?</td>", '', html)
# Collapse the blank lines left behind by the removal.
html = re.sub(r"\n{2,}", '\n', html)
soup = BeautifulSoup(html, 'html.parser')
lines = []
rows = soup.find_all('tr')
size = len(rows)
for i, row in enumerate(rows):
    # Skip big/medium section heading rows.
    if row.find('th', {'class':['bighead', 'mediumhead']}):
        continue
    # Skip the column-header row.
    if 'CLDR Short Name' in row.text.split('\n'):
        continue
    # Keep the row's non-empty cell texts.
    lines.append([x for x in row.text.split('\n') if x not in ('', '-', '—')])
    if i % 100 == 0:
        # progress fraction of rows processed
        print(f'{i/size:.4f}')
print(lines)
print(len(lines))
3496228 | import sys
import logging
from aiohttp.web import run_app
from sqli.app import init as init_app
if __name__ == '__main__':
    # Debug-level logging for the demo application.
    logging.basicConfig(level=logging.DEBUG)
    # Command-line arguments (e.g. config path) go to the app factory.
    app = init_app(sys.argv[1:])
    # Bind host/port from the loaded configuration.
    run_app(app,
            host=app['config']['app']['host'],
            port=app['config']['app']['port'])
| StarcoderdataPython |
8084686 | import uuid
from pathlib import Path
from typing import Any
import pytest
from tests.e2e.conftest import Helper
@pytest.fixture
def secret_name() -> str:
    """Random, collision-resistant secret name for a single test run."""
    suffix = str(uuid.uuid4()).replace("-", "")[:10]
    return "secret" + suffix
@pytest.mark.e2e
def test_create_list_delete(helper: Helper, secret_name: str) -> None:
    """Secret lifecycle: absent -> add -> listed -> rm -> absent."""
    def listed() -> str:
        cap = helper.run_cli(["secret", "ls"])
        assert cap.err == ""
        return cap.out

    assert secret_name not in listed()

    cap = helper.run_cli(["secret", "add", secret_name, "value"])
    assert cap.err == ""
    assert secret_name in listed()

    cap = helper.run_cli(["secret", "rm", secret_name])
    assert cap.err == ""
    assert secret_name not in listed()
@pytest.mark.e2e
def test_create_from_file_list_delete(
    request: Any, helper: Helper, secret_name: str
) -> None:
    """End-to-end: a secret can be created from a file (via '@path' syntax)."""
    # Precondition: the fresh random name is not already present.
    cap = helper.run_cli(["secret", "ls"])
    assert cap.err == ""
    assert secret_name not in cap.out
    # Write binary (non-UTF8) content to a temp file in the home directory;
    # register cleanup so the file is removed even if the test fails.
    secret_path = Path(f"~/test-secret-file-{uuid.uuid4()}")
    request.addfinalizer(secret_path.expanduser().unlink)
    secret_path.expanduser().write_bytes(b"value\xff\x00")
    # '@<path>' tells the CLI to read the secret value from the file.
    cap = helper.run_cli(["secret", "add", secret_name, f"@{secret_path}"])
    assert cap.err == ""
    cap = helper.run_cli(["secret", "ls"])
    assert cap.err == ""
    assert secret_name in cap.out
    # Remove and verify it is gone.
    cap = helper.run_cli(["secret", "rm", secret_name])
    assert cap.err == ""
    cap = helper.run_cli(["secret", "ls"])
    assert cap.err == ""
    assert secret_name not in cap.out
| StarcoderdataPython |
3566027 | import torch
from torch import nn
class BaseTripletLoss(nn.Module):
    """Abstract base class for triplet losses.

    Subclasses implement :meth:`forward`; this base supplies the safe
    margin, a ReLU helper, and additive regularization over embeddings.
    """

    def __init__(self, margin: float = 1, regularizers: list = None):
        """Set margin size and ReLU function.

        Args:
            margin (float, optional): safe margin size. Defaults to 1.
            regularizers (list, optional): list of regularizer callables,
                each taking the embeddings dict and returning a penalty.
                Defaults to an empty list.  (None replaces the original
                mutable ``[]`` default, which would be shared by every
                instance constructed without an explicit argument.)
        """
        super().__init__()
        self.margin = margin
        self.ReLU = nn.ReLU()
        self.regularizers = [] if regularizers is None else regularizers

    def forward(
        self, embeddings_dict: dict, batch: torch.Tensor, column_names: dict
    ) -> torch.Tensor:
        """Compute the triplet loss; must be overridden by subclasses.

        Args:
            embeddings_dict (dict): dictionary of embeddings, e.g. keys
                "user_embedding", "pos_item_embedding", "neg_item_embedding".
            batch (torch.Tensor): batch tensor of size (n_batch, *).
            column_names (dict): maps names to row indices of ``batch``.

        Raises:
            NotImplementedError: always, on the base class.

        Returns:
            torch.Tensor: scalar loss (implementations typically return
            ``some_loss + self.regularize(embeddings_dict)``).
        """
        raise NotImplementedError

    def regularize(self, embeddings_dict: dict):
        """Return the sum of all regularizer penalties for the embeddings."""
        reg = 0
        for regularizer in self.regularizers:
            reg += regularizer(embeddings_dict)
        return reg
| StarcoderdataPython |
5871 | # Copyright 2015, Rackspace, US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for the swift service.
"""
import os
from django import forms
from django.http import StreamingHttpResponse
from django.utils.http import urlunquote
from django.views.decorators.csrf import csrf_exempt
from django.views import generic
import six
from horizon import exceptions
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
from openstack_dashboard.api import swift
@urls.register
class Info(generic.View):
    """REST endpoint exposing the Swift installation's capabilities."""

    url_regex = r'swift/info/$'

    @rest_utils.ajax()
    def get(self, request):
        """Return the capability dictionary reported by Swift."""
        return {'info': api.swift.swift_get_capabilities(request)}
@urls.register
class Containers(generic.View):
    """REST endpoint listing the swift containers of an account."""

    url_regex = r'swift/containers/$'

    @rest_utils.ajax()
    def get(self, request):
        """Return this account's containers as dicts.

        TODO(neillc): Add pagination
        """
        containers, has_more = api.swift.swift_get_containers(request)
        items = [container.to_dict() for container in containers]
        return {'items': items, 'has_more': has_more}
@urls.register
class Container(generic.View):
    """API for swift container level information
    """
    url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/$'
    @rest_utils.ajax()
    def get(self, request, container):
        """Get the container details
        """
        return api.swift.swift_get_container(request, container).to_dict()
    @rest_utils.ajax()
    def post(self, request, container):
        """Create the named container, optionally marking it public.

        Returns 409 Conflict if the container already exists.
        """
        metadata = {}
        if 'is_public' in request.DATA:
            metadata['is_public'] = request.DATA['is_public']
        # This will raise an exception if the container already exists
        try:
            api.swift.swift_create_container(request, container,
                                             metadata=metadata)
        except exceptions.AlreadyExists as e:
            # 409 Conflict
            return rest_utils.JSONResponse(str(e), 409)
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s' % container,
        )
    @rest_utils.ajax()
    def delete(self, request, container):
        """Delete the named container; 409 if it is not empty."""
        try:
            api.swift.swift_delete_container(request, container)
        except exceptions.Conflict as e:
            # It cannot be deleted if it's not empty.
            return rest_utils.JSONResponse(str(e), 409)
    @rest_utils.ajax(data_required=True)
    def put(self, request, container):
        """Update the container's 'is_public' metadata flag."""
        metadata = {'is_public': request.DATA['is_public']}
        api.swift.swift_update_container(request, container, metadata=metadata)
@urls.register
class Objects(generic.View):
    """API for a list of swift objects
    """
    url_regex = r'swift/containers/(?P<container>[^/]+)/objects/$'
    @rest_utils.ajax()
    def get(self, request, container):
        """Get object information.
        :param request: HTTP request; optional GET parameter 'path' is a
            URL-quoted pseudo-folder prefix to list.
        :param container: container name from the URL.
        :return: {'items': [...]} where each item describes an object or
            pseudo-folder (name, bytes, content type, subdir flags).
        """
        path = request.GET.get('path')
        if path is not None:
            path = urlunquote(path)
        objects = api.swift.swift_get_objects(
            request,
            container,
            prefix=path
        )
        # filter out the folder from the listing if we're filtering for
        # contents of a (pseudo) folder
        contents = [{
            'path': o.subdir if isinstance(o, swift.PseudoFolder) else o.name,
            'name': o.name.split('/')[-1],
            'bytes': o.bytes,
            'is_subdir': isinstance(o, swift.PseudoFolder),
            'is_object': not isinstance(o, swift.PseudoFolder),
            'content_type': getattr(o, 'content_type', None)
        } for o in objects[0] if o.name != path]
        return {'items': contents}
class UploadObjectForm(forms.Form):
    """Validates the optional file payload for object uploads."""
    file = forms.FileField(required=False)
@urls.register
class Object(generic.View):
    """API for a single swift object or pseudo-folder
    """
    url_regex = r'swift/containers/(?P<container>[^/]+)/object/' \
        '(?P<object_name>.+)$'
    # note: not an AJAX request - the body will be raw file content
    @csrf_exempt
    def post(self, request, container, object_name):
        """Create or replace an object or pseudo-folder
        :param request:
        :param container:
        :param object_name:
        If the object_name (ie. POST path) ends in a '/' then a folder is
        created, rather than an object. Any file content passed along with
        the request will be ignored in that case.
        POST parameter:
        :param file: the file data for the upload.
        :return:
        """
        form = UploadObjectForm(request.POST, request.FILES)
        if not form.is_valid():
            raise rest_utils.AjaxError(500, 'Invalid request')
        data = form.clean()
        # Trailing slash means a pseudo-folder rather than an object.
        if object_name[-1] == '/':
            result = api.swift.swift_create_pseudo_folder(
                request,
                container,
                object_name
            )
        else:
            result = api.swift.swift_upload_object(
                request,
                container,
                object_name,
                data['file']
            )
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s/object/%s' % (container, result.name)
        )
    @rest_utils.ajax()
    def delete(self, request, container, object_name):
        """Delete the object, or the pseudo-folder if the name ends in '/'."""
        if object_name[-1] == '/':
            try:
                api.swift.swift_delete_folder(request, container, object_name)
            except exceptions.Conflict as e:
                # In case the given object is pseudo folder
                # It cannot be deleted if it's not empty.
                return rest_utils.JSONResponse(str(e), 409)
        else:
            api.swift.swift_delete_object(request, container, object_name)
    def get(self, request, container, object_name):
        """Get the object contents.
        """
        obj = api.swift.swift_get_object(
            request,
            container,
            object_name
        )
        # Add the original file extension back on if it wasn't preserved in the
        # name given to the object.
        filename = object_name.rsplit(api.swift.FOLDER_DELIMITER)[-1]
        if not os.path.splitext(obj.name)[1] and obj.orig_name:
            name, ext = os.path.splitext(obj.orig_name)
            filename = "%s%s" % (filename, ext)
        # Stream the payload back as a download attachment; commas are
        # stripped from the filename so the Content-Disposition header
        # stays parseable.
        response = StreamingHttpResponse(obj.data)
        safe = filename.replace(",", "")
        if six.PY2:
            safe = safe.encode('utf-8')
        response['Content-Disposition'] = 'attachment; filename="%s"' % safe
        response['Content-Type'] = 'application/octet-stream'
        response['Content-Length'] = obj.bytes
        return response
@urls.register
class ObjectMetadata(generic.View):
    """REST endpoint exposing the metadata of a single swift object."""

    url_regex = r'swift/containers/(?P<container>[^/]+)/metadata/' \
        '(?P<object_name>.+)$'

    @rest_utils.ajax()
    def get(self, request, container, object_name):
        """Return the object's metadata (without its data payload)."""
        obj = api.swift.swift_get_object(
            request,
            container_name=container,
            object_name=object_name,
            with_data=False
        )
        return obj.to_dict()
@urls.register
class ObjectCopy(generic.View):
    """REST endpoint to copy a swift object to another container/name."""

    url_regex = r'swift/containers/(?P<container>[^/]+)/copy/' \
        '(?P<object_name>.+)$'

    @rest_utils.ajax()
    def post(self, request, container, object_name):
        """Copy the object to the destination given in the request body.

        Returns 409 Conflict when the destination already exists.
        """
        dest_container = request.DATA['dest_container']
        dest_name = request.DATA['dest_name']
        try:
            copied = api.swift.swift_copy_object(
                request,
                container,
                object_name,
                dest_container,
                dest_name
            )
        except exceptions.AlreadyExists as e:
            return rest_utils.JSONResponse(str(e), 409)
        return rest_utils.CreatedResponse(
            u'/api/swift/containers/%s/object/%s' % (dest_container,
                                                     copied.name)
        )
| StarcoderdataPython |
1722986 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import chess.pgn
import json
import re
import io
def main():
    """Build speech-friendly chess opening lookup tables.

    Reads openings.json (entries with name 'n', ECO code 'c' and UCI move
    list 'm'), renders each line as PGN, rewrites the SAN symbols into
    spoken words, and dumps four JSON files: openings by name, openings by
    spoken PGN, and the key lists of both.
    """
    with open('openings.json', encoding='utf-8') as f:
        openings = json.load(f)
    openings_by_name, openings_by_pgn = {}, {}
    for opening in openings:
        # Get name, ECO and UCI moves from the openings file; the name is
        # normalized for voice lookup (lowercase, no punctuation/accents).
        name = opening['n'].lower().replace("-", " ").replace("'", "").replace(",", "").replace(":", "").replace("é", "e")
        eco = opening['c'].lower()
        moves = opening['m'].split(" ")
        # Start a game and keep adding the moves
        game = chess.pgn.Game()
        node = game.add_variation(chess.Move.from_uci(moves.pop(0)))
        for move in moves:
            node = node.add_variation(chess.Move.from_uci(move))
        # Export the current game as PGN in a string
        exporter = chess.pgn.StringExporter(headers=False, variations=False, comments=False)
        pgn_string = game.accept(exporter)
        # Turn SAN notation into spoken words.  Order matters: piece letters
        # and move decorations first, castling before move-number cleanup.
        pgn_string = pgn_string.replace("N", "knight ")
        pgn_string = pgn_string.replace("B", "bishop ")
        pgn_string = pgn_string.replace("R", "rook ")
        pgn_string = pgn_string.replace("Q", "queen ")
        pgn_string = pgn_string.replace("K", "king ")
        pgn_string = pgn_string.replace("x", " takes ")
        pgn_string = pgn_string.replace("+", " check ")
        pgn_string = pgn_string.replace("#", " checkmate ")
        pgn_string = pgn_string.replace("O-O-O", "castles queenside")
        pgn_string = pgn_string.replace("O-O", "castles kingside")
        pgn_string = pgn_string.replace("1.", "")
        pgn_string = re.sub(r"\d+\.", ".", pgn_string)
        # Fixed: raw string — "[ ]+\." was a non-raw pattern with an invalid
        # escape sequence (DeprecationWarning, SyntaxError in future Pythons).
        pgn_string = re.sub(r"[ ]+\.", ".", pgn_string)
        pgn_string = pgn_string.replace('*', '')
        pgn_string = pgn_string.replace(" ", " ")
        content = {"name": name, "eco": eco, "pgn": pgn_string.strip()}
        openings_by_name[name] = content
        # Secondary index keyed by the spoken PGN with move separators removed.
        new_key = re.sub(r"\.", "", content['pgn'])
        openings_by_pgn[new_key] = content
    keys_by_name = list(openings_by_name.keys())
    keys_by_pgn = list(openings_by_pgn.keys())
    result = [openings_by_name, openings_by_pgn, keys_by_name, keys_by_pgn]
    result_names = ["openings_by_name", "openings_by_pgn", "keys_by_name", "keys_by_pgn"]
    # zip replaces the original manually maintained index counter.
    for out_name, payload in zip(result_names, result):
        with io.open(f"{out_name}.json", 'w', encoding='utf8') as outfile:
            json.dump(payload, outfile, ensure_ascii=False)
if __name__ == "__main__":
main() | StarcoderdataPython |
1850128 | import math
from coord import *
class Bot:
    """Abstract class to represent a swarm robot and its position in the frame."""

    def __init__(self, tl, tr, br, bl, bot_id=None, offset=None):
        """Store the tag corner points, optional ArUco id and tag offset.

        :param tl, tr, br, bl: corner points of the robot's ArUco tag
        :param bot_id: ID decoded from the ArUco tag
        :param offset: angular offset (degrees) between the tag's "up" and
            the robot's physical front
        """
        # Corners
        self.__tl = tl
        self.__tr = tr
        self.__br = br
        self.__bl = bl
        # ID from ArUco tag
        self.__id = bot_id
        # Tag offset
        self.__tag_offset = offset
        # Center point (computed lazily by get_centre)
        self.__centre_point = None
        # Point of front of bot (computed lazily by get_front_point)
        self.__front_point = None

    def get_corners(self):
        """Return the corner points as a (tl, tr, br, bl) tuple."""
        return self.__tl, self.__tr, self.__br, self.__bl

    def set_corners(self, tl, tr, br, bl):
        """Replace all four corner points."""
        self.__tl = tl
        self.__tr = tr
        self.__br = br
        self.__bl = bl

    def get_id(self):
        return self.__id

    def set_id(self, bot_id):
        self.__id = bot_id

    def get_tag_offset(self):
        """Return the tag offset in degrees, defaulting to 0 when unset."""
        # Fixed: identity comparison with None ("is None", not "== None").
        if self.__tag_offset is None:
            return 0
        return self.__tag_offset

    def set_tag_offset(self, offset):
        self.__tag_offset = offset

    def get_centre(self):
        """Compute and return the tag's centre as the mean of its corners."""
        x = int((self.__tl.x + self.__tr.x + self.__br.x + self.__bl.x) / 4)
        y = int((self.__tl.y + self.__tr.y + self.__br.y + self.__bl.y) / 4)
        self.__centre_point = coord(x, y)
        return self.__centre_point

    def set_centre(self, x, y):
        self.__centre_point = coord(x, y)

    def set_front_point(self, x, y):
        self.__front_point = coord(x, y)

    def get_front_point(self):
        """Return the point marking the robot's front.

        Takes the midpoint of the tag's top edge and rotates it about the
        tag centre by the configured tag offset.
        """
        # Get centre of the tag
        centre = self.get_centre()
        cX = centre.x
        cY = centre.y
        # Get corners as coordinates - easier to work with
        tl, tr, br, bl = self.get_corners()
        # Get centre of top of tag
        tX = int((tl.x + tr.x) / 2)
        tY = int((tl.y + tr.y) / 2)
        # Translate top point around negative of centre (pivot) point
        tX = tX - cX
        tY = tY - cY
        # Angle to rotate point by
        theta = math.radians(self.get_tag_offset())
        nX = int(tX * math.cos(theta) - tY * math.sin(theta))
        nY = int(tX * math.sin(theta) + tY * math.cos(theta))
        # Translate back
        nX = nX + cX
        nY = nY + cY
        self.__front_point = coord(nX, nY)
        return self.__front_point
| StarcoderdataPython |
6402730 | <gh_stars>0
from django import forms
from crispy_forms.helper import FormHelper
from crispy_forms.layout import Layout, Submit, Fieldset, Field
from crispy_forms.bootstrap import FormActions
from .models import Etfar
class EtfarForm(forms.ModelForm):
    """Crispy-forms powered form for an Etfar (event/thought/feeling/
    action/result) entry.

    Accepts an optional ``form_id`` keyword argument to override the
    rendered form's HTML id (defaults to 'etfar_tool').
    """

    class Meta:
        model = Etfar
        fields = ('event', 'thought', 'feeling', 'action', 'result')

    def __init__(self, *args, **kwargs):
        # Pop the custom kwarg *before* delegating to ModelForm: passing an
        # unknown 'form_id' kwarg through to super().__init__ raises
        # TypeError, so the original kwargs.get() placed after super()
        # could never observe a caller-supplied value.
        form_id = kwargs.pop('form_id', 'etfar_tool')
        super(EtfarForm, self).__init__(*args, **kwargs)
        self.helper = FormHelper(self)
        self.helper.form_class = 'etfar-form form-horizontal'
        self.helper.form_id = form_id
        self.helper.label_class = 'col-md-3'
        self.helper.field_class = 'col-md-7'
        self.helper.form_action = '.'
        # Render each model field as a compact 2-row textarea with its
        # help text as the placeholder.
        self.helper.layout = Layout(
            Field('event', rows=2, placeholder=self.fields['event'].help_text),
            Field('thought', rows=2, placeholder=self.fields['thought'].help_text),
            Field('feeling', rows=2, placeholder=self.fields['feeling'].help_text),
            Field('action', rows=2, placeholder=self.fields['action'].help_text),
            Field('result', rows=2, placeholder=self.fields['result'].help_text),
            FormActions(
                Submit('submit', 'Proceed', css_class='btn btn-primary')
            )
        )
1793038 | <reponame>GmZhang3/data-science-ipython-notebooks<filename>python/python101/basis/distince_test.py
#!/usr/bin/python
# -*- coding: UTF-8 -*-
import numpy as np
from sklearn.cluster import DBSCAN
def distince(vect1, vect2):
    """Squared Euclidean distance between two row vectors (np.matrix)."""
    diff = vect1 - vect2
    squared = diff * (diff.T)
    return squared[0, 0]
if __name__ == "__main__":
v1 = np.array([1,2])
v2 = np.array([1,1])
print v1 -v2
print (v1 -v2).T
vv = (v1 -v2)*((v1 -v2).T)
print vv
# print type(v1)
# print v2
# print distince(v1,v2) | StarcoderdataPython |
367437 | """
pytest configuration for figcon
"""
from pathlib import Path
import pytest
from figcon import Figcon
# --------- Key filesystem paths used throughout the test suite
TEST_PATH = Path(__file__).parent
PKG_PATH = TEST_PATH.parent
TEST_DATA_PATH = TEST_PATH / 'data'
@pytest.fixture
def default_config_1():
    """Return the path to the first config file (tests/data/config1.py)."""
    return TEST_DATA_PATH / 'config1.py'
@pytest.fixture
def home_cwd(tmpdir):
    """Create two temporary directories simulating cwd and home.

    Returns a dict with 'primary_path' (the cwd dir) and
    'secondary_path' (a 'home' dir nested inside it).
    """
    cwd = Path(tmpdir)
    home = cwd / "home"
    home.mkdir()
    return dict(secondary_path=home, primary_path=cwd)
@pytest.fixture
def figcon(home_cwd, default_config_1):
    """Init a Figcon object wired to the temporary home/cwd directories."""
    return Figcon(default_path=default_config_1, **home_cwd)
@pytest.fixture
def config_paths():
    """Return {filename: path} for all .py configs under tests/data/config."""
    path = Path(TEST_DATA_PATH) / 'config'
    return {x.name: x for x in path.rglob("*.py")}
| StarcoderdataPython |
class Solution(object):
    def isPowerOfFour(self, num):
        """
        :type num: int
        :rtype: bool
        """
        # A power of four is a positive power of two (single set bit)
        # whose predecessor is divisible by 3, since 4^k - 1 = (4-1) * (...).
        is_power_of_two = num > 0 and num & (num - 1) == 0
        return is_power_of_two and (num - 1) % 3 == 0
4887844 | <reponame>tefra/xsdata-w3c-tests
from output.models.sun_data.wildcard.ps_contents.ps_contents00301m.ps_contents00301m1_xsd.ps_contents00301m1 import (
A,
Date,
)
__all__ = [
"A",
"Date",
]
| StarcoderdataPython |
1956681 | from collections import defaultdict
import json
import os
import sys
import argparse
def format_answer_id(doc_id, pass_id, sent_id):
    """Build a '<doc>-C<pass:03>-S<sent:03>' answer identifier."""
    return '{}-C{:03}-S{:03}'.format(doc_id, pass_id, sent_id)
def format_answer_span_id(doc_id, pass_id, sent_start_id, sent_end_id):
    """Join the start and end answer ids into a 'start:end' span id."""
    return '{}:{}'.format(
        format_answer_id(doc_id, pass_id, sent_start_id),
        format_answer_id(doc_id, pass_id, sent_end_id),
    )
def write_results(question_id, question_scores, run_name, f, top_k=1000,
                  multiple_per_doc=False, allow_overlap=False):
    """Write up to *top_k* TREC-style result lines for one question.

    Fixes the original definition, which was named ``write_scores``, took an
    unused ``rerank_scores`` parameter, and referenced seven undefined free
    variables — while the ``__main__`` block calls
    ``write_results(question_id, question_scores, output_name, f, top_k=1000)``.

    :param question_id: query identifier written in column 1.
    :param question_scores: list of (doc_id, pass_id, sent_start_id,
        sent_end_id, score) tuples, assumed sorted by decreasing relevance.
    :param run_name: run tag written in the last column.
    :param f: open writable text file.
    :param top_k: maximum number of lines to emit.
    :param multiple_per_doc: allow several answers from the same document.
    :param allow_overlap: allow answers whose sentence ranges overlap.
        (Defaults assume de-duplication was intended — TODO confirm.)
    """
    num_top = 0
    rel_idx = 0
    seen_docs = set()
    seen_sentences = set()
    while num_top < top_k and rel_idx < len(question_scores):
        doc_id, pass_id, sent_start_id, sent_end_id, score = question_scores[rel_idx]
        sent_ids = [(doc_id, pass_id, sent_id)
                    for sent_id in range(sent_start_id, sent_end_id + 1)]
        if (multiple_per_doc or doc_id not in seen_docs) and (
                allow_overlap or all(x not in seen_sentences for x in sent_ids)):
            answer_id = format_answer_span_id(doc_id, pass_id, sent_start_id, sent_end_id)
            f.write(f'{question_id}\tQ0\t{answer_id}\t{num_top + 1}\t{score}\t{run_name}\n')
            num_top += 1
            seen_docs.add(doc_id)
            seen_sentences.update(sent_ids)
        rel_idx += 1
def read_scores(run_path):
    """Parse a 6-column TREC run file into {question_id: [tuples]}.

    Each well-formed line ``qid Q0 doc-pass-sent... rank score run`` yields a
    (doc_id, pass_id, sent_start_id, sent_end_id, score) tuple; the answer id
    may carry 3, 4 or 5 dash-separated components.
    """
    scores_by_question = defaultdict(list)
    with open(run_path) as handle:
        for raw_line in handle:
            fields = raw_line.strip().split()
            # Silently skip anything that is not a 6-column result line.
            if len(fields) != 6:
                continue
            question_id, _, doc_pass_sent_id, _, score, _ = fields
            parts = doc_pass_sent_id.split('-')
            doc_id = parts[0]
            pass_id = int(parts[1])
            if len(parts) == 3:
                start_id, end_id = parts[2], parts[2]
            elif len(parts) == 4:
                start_id, end_id = parts[2], parts[3]
            else:
                start_id, end_id = parts[2], parts[4]
            scores_by_question[question_id].append(
                (doc_id, pass_id, int(start_id), int(end_id), float(score)))
    return scores_by_question
# CLI entry point: read a reranker prediction file and emit a TREC run file.
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('-p', '--pred_path', required=True)
    parser.add_argument('-o', '--output_path', required=True)
    args = parser.parse_args()
    # 'runs/consumer/pruned_biobert_msmarco_multi_sentence'
    pred_path = args.pred_path
    output_path = args.output_path
    # The run name is the output filename without extension suffixes.
    output_name = output_path.split('/')[-1].replace('.txt', '').replace('.pred', '')
    rerank_scores = read_scores(pred_path)
    # NOTE(review): the helper above is declared as write_scores but called
    # here as write_results with a different signature — confirm/rename.
    with open(output_path, 'w') as f:
        for question_id, question_scores in rerank_scores.items():
            write_results(question_id, question_scores, output_name, f, top_k=1000)
| StarcoderdataPython |
11340553 | # Copyright 2017 <NAME>, <EMAIL>
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with or without fee
# is hereby granted, provided that the above copyright notice and this permission notice appear in all
# copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE
# INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE
# FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION,
# ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
import numpy as np
import pickle
import os
from scipy.sparse import csc_matrix
from py_dp.dispersion.binning import abs_vel_log_bins_low_high, make_theta_bins_linear, make_y_bins_linear
from py_dp.dispersion.binning import make_input_for_binning_v_theta_freq, binning_input_v_theta_freq_y
from py_dp.dispersion.convert_to_time_process_with_freq import get_time_dx_dy_array_with_freq
from py_dp.dispersion.trajectory_count_cython import fill_one_trajectory_sparse_with_freq_cython
from py_dp.dispersion.trajectory_count_cython import fill_one_trajectory_sparse_cython
from py_dp.dispersion.mapping import mapping_v_theta_repeat, mapping_v_theta_y
from py_dp.dispersion.convert_to_time_process_with_freq import remove_duplicate_xy
from py_dp.dispersion.average_trajectories import add_repetitions
from py_dp.dispersion.transition_matrix_fcns import normalize_columns
class TransInfoIndependentVThetaDist(object):
"""
a class for extracting binned trasition information for 2d spatial cases with independent processes
for the velocity p(v2|v1) and angle p(theta|y) in time. The angle process depends on the distance
from injection
"""
def __init__(self, input_folder, n_total_realz, mapping, map_input, average_available=True,
time_step=None, raw_folder=None, train_time=None):
"""
:param input_folder: the folder containing the particle tracking data
:param n_binning_realz: number of realizations used for binning the data
:param n_total_realz: total number of realizations
:param n_absv_class: number of velocity classes
:param n_theta_class: number of angle classes
:param n_y_class: number of classes for the transverse distance from injection
:param time_step: stencil time
:param n_slow_class: number of slow classes (refining the velocity bins for very slow velocity values)
:param max_allowed: max bins size allowed for log(v)
"""
if not time_step:
self.time_step = map_input.time_step
self.raw_folder = input_folder
if raw_folder:
self.raw_folder = raw_folder
self.input_folder = input_folder
self.n_total_realz = n_total_realz
# self.make_velocity_bins = make_1d_abs_vel_bins
self.mapping = mapping
self.init_v_class_count, self.init_theta_class_count = self.get_init_class_count(map_input)
self.average_available = average_available
self.train_time = train_time
def get_init_class_count(self, map_input):
"""
:return:
init_v_class_count: initial count of the velocity class. size (n_velocity_class,)
init_v_theta_count: initial count of the angle class. size (n_theta_class,)
"""
new_v, new_theta, new_f = remove_duplicate_xy(map_input.initial_v, map_input.initial_theta,
map_input.initial_f)
init_v_idx = self.mapping.find_1d_class_idx(np.log(new_v), map_input.v_log_edges)
# all the initial paths have zeros distance from injection
init_theta_idx = self.mapping.find_1d_class_idx(new_theta, map_input.theta_edges)
# initialize the count for each class
init_v_class_count, init_theta_class_count = np.zeros(self.mapping.n_abs_v_classes), \
np.zeros(self.mapping.n_theta_classes)
for v_idx, theta_idx in zip(init_v_idx, init_theta_idx):
init_v_class_count[v_idx] += 1
init_theta_class_count[theta_idx] += 1
return init_v_class_count, init_theta_class_count
def get_trans_matrix(self, lag, print_every = 50, verbose=True, avg_available=None):
if avg_available is None:
avg_available = self.average_available
if avg_available:
return self.get_trans_matrix_from_average(lag, print_every, verbose)
else:
raise('Average data does not exist!')
def get_trans_matrix_from_average(self, lag, print_every=50, verbose=True):
dt = self.time_step
mapping = self.mapping
if self.train_time is not None:
cut_off_train = int(self.train_time/dt)
# get the size of the transition matrices
n_v_class, n_theta_class, n_y_class = mapping.n_abs_v_classes, mapping.n_theta_classes, \
mapping.n_y_classes
# initialize the sparse transition matrices
print 'getting transition matrix from averaged data...'
i_list_v, j_list_v, val_list_v = [[] for _ in range(3)]
ij_set_v = set([])
# initialize the theta matrix, each column is the distribution of theta at a given y
theta_mat = np.zeros((n_theta_class, n_y_class))
for j in range(self.n_total_realz):
start_idx = 0
# load the polar coordinates file
data_path = os.path.join(self.input_folder, 'avg_polar_' + str(j) + '.npz')
data = np.load(data_path)
big_v, big_theta, big_f, ptr_list = data['V'], data['Theta'], data['F'], data['ptr']
for i in ptr_list:
new_v, new_theta, new_f = big_v[start_idx:i], big_theta[start_idx:i], big_f[start_idx:i]
# adding in the repetitions because y is changing
v_r = add_repetitions(new_v, new_f)
theta_r = add_repetitions(new_theta, new_f)
dy = np.multiply(v_r, np.sin(theta_r))*dt
new_y = np.hstack((0.0, np.cumsum(dy)))[:-1]
start_idx = i
if len(new_v)>lag:
# simple process for v: v1, v2, v3, ...
v_process_idx = self.mapping.find_1d_class_idx(np.log(new_v), self.mapping.v_log_edges)
if self.train_time is not None:
# filter part of the trajectory
v_process_idx = v_process_idx[:cut_off_train]
new_f = new_f[:cut_off_train]
# fill the transition matrix for this velocity series
fill_one_trajectory_sparse_with_freq_cython(lag, v_process_idx, new_f, i_list_v, j_list_v,
ij_set_v, val_list_v)
# fill the angle matrix
y_class = mapping.find_1d_class_idx(new_y, mapping.y_edges)
theta_class = mapping.find_1d_class_idx(theta_r, mapping.theta_edges)
for yi, thetai in zip(y_class, theta_class):
theta_mat[thetai, yi] += 1
theta_mat = normalize_columns(theta_mat)
print 'done.'
return csc_matrix((val_list_v, (i_list_v, j_list_v)), shape = (n_v_class, n_v_class)), theta_mat
| StarcoderdataPython |
1935579 | <gh_stars>1-10
"""
Decorators for registering tests in different dictionaries.
This simplifies running all the tests as a group, since different tests may
need to be run in different ways.
"""
from functools import wraps
# Registries mapping a test function's __name__ to the function itself,
# one per test category.
WORD_TESTS = {}
WORD_SET_TESTS = {}
PARAGRAPH_TESTS = {}
def word_test(fn):
    """Decorator: register *fn* in WORD_TESTS and return it unchanged."""
    WORD_TESTS[fn.__name__] = fn
    return fn
def word_set_test(fn):
    """Decorator: register *fn* in WORD_SET_TESTS and return it unchanged."""
    WORD_SET_TESTS[fn.__name__] = fn
    return fn
def paragraph_test(fn):
    """Decorator: register *fn* in PARAGRAPH_TESTS and return it unchanged."""
    PARAGRAPH_TESTS[fn.__name__] = fn
    return fn
| StarcoderdataPython |
4927229 | #!/usr/bin/env python
# encoding:utf8
"""
Watch_Dogs
远程监控客户端api调用
"""
import yaml
import time
import requests
from conf import setting
Setting = setting.Setting()
logger_client = Setting.logger
class Watch_Dogs_Client(object):
"""远程监控客户端"""
def __init__(self, remote_host, remote_port=8000):
"""构造函数"""
self.remote_host = remote_host
self.remote_port = remote_port
self.apt_root = "http://" + remote_host + ":" + str(remote_port) + "/"
self.watched_process_set = set([])
def __str__(self):
return "Watch_Dogs-Client @ " + self.remote_host + ":" + str(self.remote_port)
def watch_process(self, pid):
"""添加监控进程"""
self.watched_process_set.add(int(pid))
r = self.get_api("/proc/watch/add/{}".format(pid))
if type(r) == dict:
logger_client.error("add not exist process" + str(pid))
return r
elif r is True:
self.process_record_cache(pid) # 初始化一次进程数据
return True
def is_process_watched(self, pid):
if int(pid) in self.watched_process_set:
r = self.get_api("/proc/watch/is/{}".format(int(pid)))
if type(r) == dict:
logger_client.error("is_process_watched error" + str(pid) + " -> " + str(r))
return False
elif r is True:
return True
else:
return False
def remove_watched_process(self, pid):
if int(pid) in self.watched_process_set:
self.watched_process_set.remove(int(pid))
r = self.get_api("/proc/watch/remove/{}".format(int(pid)))
if type(r) == dict:
logger_client.error("remove_watched_process error" + str(pid) + " -> " + str(r))
return False
elif r is False:
return True
else:
return False
def get_api(self, url_path, payload=None, timeout=Setting.API_TIME_OUT):
"""调用远程api"""
global logger_client
request_addr = "http://" + self.remote_host + ":" + str(self.remote_port) + url_path
try:
r = requests.get(request_addr, params=payload, timeout=timeout)
except requests.exceptions.ConnectTimeout as err:
logger_client.error("time out : " + request_addr)
return {"Error": "time out -" + request_addr}
except requests.exceptions.ConnectionError as err:
logger_client.error("connect error : " + request_addr)
return {"Error": "connect error at " + request_addr}
except Exception as err:
logger_client.error("connect error : " + request_addr)
return {"Error": "unexpected error at " + request_addr + " details : " + str(err)}
return yaml.safe_load(r.text.encode("utf8"))
def is_error_happen(self, res):
"""检查是否存在错误"""
if isinstance(res, dict):
if "Error" in res:
return True
return False
# -----api-----
def root(self):
"""/"""
return self.get_api("/")
# -----process-----
def process_record_cache(self, pid):
"""进程数据"""
return self.get_api("/proc/{}".format(str(pid)))
def process_info(self, pid):
"""进程信息"""
process_info = self.get_api("/proc/{}/info".format(str(pid)))
return process_info
# -----system-----
def host_info(self):
"""远程主机信息"""
host_info_data = {}
# 收集主机数据
try:
host_root_data = self.root() # 如果出现获取异常, 则不进行下面的数据获取
if "Error" in host_root_data:
return {"Error": "collect system info error (first) : " + str(host_root_data["Error"])}
host_info_data.update(host_root_data)
host_info_data.update(self.sys_info())
host_info_data["CPU_info"] = self.sys_cpu_info()
host_info_data["mem_KB"] = self.sys_mem_size()
host_info_data.update(self.sys_net_ip())
host_info_data["disk_stat"] = self.sys_disk_stat()
host_info_data["default_net_device"] = self.sys_net_default_device()
except Exception as err:
logger_client.error("collect system info error : " + str(err))
return {"Error": "collect system info error : " + str(err)}
# 删除不必要的数据
if "nethogs env" in host_info_data:
host_info_data.pop("nethogs env")
if "time" in host_info_data:
host_info_data.pop("time")
# 添加连接数据
host_info_data["host"] = self.remote_host
host_info_data["port"] = self.remote_port
return host_info_data
def host_record(self):
"""远程主机情况记录"""
host_record_data = {}
# 收集主机记录
try:
host_record_data["CPU"] = self.sys_cpu_percent()
host_record_data["CPUs"] = self.sys_cpu_percents()
host_record_data["mem"] = self.sys_mem_percent()
host_record_data["read_MBs"], host_record_data["write_BMs"] = self.sys_io()
host_record_data["net_upload_kbps"], host_record_data["net_download_kbps"] = self.sys_net()
host_record_data.update(self.sys_loadavg())
host_record_data.update(self.sys_uptime())
except Exception as err:
logger_client.error("collect system info error : " + str(err))
return {"Error": "collect system info error : " + str(err)}
# 添加连接数据
host_record_data["host"] = self.remote_host
return host_record_data
def sys_info(self):
return self.get_api("/sys/info")
def sys_loadavg(self):
return self.get_api("/sys/loadavg")
def sys_uptime(self):
return self.get_api("/sys/uptime")
def sys_cpu_info(self):
return self.get_api("/sys/cpu/info")
def sys_cpu_percent(self):
return self.get_api("/sys/cpu/percent")
def sys_cpu_percents(self):
return self.get_api("/sys/cpu/percents")
def sys_mem_info(self):
return self.get_api("/sys/mem/info")
def sys_mem_size(self):
return self.get_api("/sys/mem/size")
def sys_mem_percent(self):
return self.get_api("/sys/mem/percent")
def sys_net_devices(self):
return self.get_api("/sys/net/devices")
def sys_net_default_device(self):
return self.get_api("/sys/net/default_device")
def sys_net_ip(self):
return self.get_api("/sys/net/ip")
def sys_net(self):
return self.get_api("/sys/net")
def sys_io(self):
return self.get_api("/sys/io")
def sys_disk_stat(self):
return self.get_api("/sys/disk/stat")
# -----log-----
def get_log_exist(self, path):
"""检测日志文件是否存在"""
payload = {"path": path}
return self.get_api("/log/exist", payload=payload)
def get_log_size(self, path):
"""获取日志文件大小"""
payload = {"path": path}
return self.get_api("/log/size", payload=payload)
def get_log_head(self, path, n=100):
"""获取日志前n行"""
payload = {"path": path, "n": n}
return self.get_api("/log/head", payload=payload)
def get_log_tail(self, path, n=100):
"""获取日志后n行"""
payload = {"path": path, "n": n}
return self.get_api("/log/tail", payload=payload)
def get_log_last_update_time(self, path, n=100):
"""获取日志上次更新时间"""
payload = {"path": path, "n": n}
return self.get_api("/log/last_update_time", payload=payload)
def get_keyword_lines(self, path, key_word):
"""搜索日志关键词"""
payload = {"path": path, "key_word": key_word}
return self.get_api("/log/keyword_lines", payload=payload)
def get_keyword_lines_by_tail_n_line(self, path, key_word, n=100):
"""获取最后n行中包含关键词key_word的行"""
payload = {"path": path, "n": n}
return filter(lambda s: s.find(key_word) != -1, self.get_api("/log/tail", payload=payload))
# -----manage-----
# /proc/all_pid_name/
    def get_all_proc_with_name(self):
        """Return all process IDs with their names (GET /proc/all_pid_name/).

        The previous docstring ("搜索日志关键词" / search log keywords) was a
        copy-paste from the log helpers and did not describe this method.
        """
        return self.get_api("/proc/all_pid_name/")
# 有部分API功能尚未用到或改用SSH方式
# 详见 : https://github.com/Watch-Dogs-HIT/Watch_Dogs-Client
| StarcoderdataPython |
3440853 | from vit.formatter.urgency import Urgency
class UrgencyReal(Urgency):
    """Formatter for the raw task urgency value; inherits all behavior from Urgency."""
    pass
| StarcoderdataPython |
156719 | <filename>JugandoCodewars/RomanNumeralsHelper.py
# Create a RomanNumerals class that can convert a roman numeral to and from an integer value.
# It should follow the API demonstrated in the examples below.
# Multiple roman numeral values will be tested for each helper method.
# Modern Roman numerals are written by expressing each digit separately starting with
# the left most digit and skipping any digit with a value of zero. In Roman numerals
# 1990 is rendered: 1000=M, 900=CM, 90=XC; resulting in MCMXC.
# 2008 is written as 2000=MM, 8=VIII; or MMVIII.
# 1666 uses each Roman symbol in descending order: MDCLXVI.
# Examples
# RomanNumerals.to_roman(1000) # should return 'M'
# RomanNumerals.from_roman('M') # should return 1000
# Help
# | Symbol | Value |
# |--------|-------|
# | I      |     1 |
# | V      |     5 |
# | X      |    10 |
# | L      |    50 |
# | C      |   100 |
# | D      |   500 |
# | M      |  1000 |
| StarcoderdataPython |
4947890 | import typing
from abc import ABC, abstractmethod
from tottle.types.methods import *
from tottle.types.methods import chat
if typing.TYPE_CHECKING:
from tottle.api import API
class APICategories(ABC):
    """Groups API method categories (chats, messages, users) behind properties.

    Each property builds a fresh category object wrapping the same underlying
    API instance supplied by the concrete subclass.
    """
    @property
    def chats(self) -> chat.ChatCategory:
        """Chat-related API methods."""
        return chat.ChatCategory(self.api_instance)
    @property
    def messages(self) -> message.MessageCategory:
        """Message-related API methods."""
        return message.MessageCategory(self.api_instance)
    @property
    def users(self) -> user.UserCategory:
        """User-related API methods."""
        return user.UserCategory(self.api_instance)
    @property
    @abstractmethod
    def api_instance(self) -> "API":
        """Concrete subclasses must expose the API instance the categories use."""
        pass
| StarcoderdataPython |
396206 | #!/usr/bin/env python
# encoding: utf-8
'''
asreml.Gmatrix -- shortdesc
asreml.Gmatrix is a description
It defines classes_and_methods
@author: user_name
@copyright: 2020 organization_name. All rights reserved.
@license: license
@contact: user_email
@deffield updated: Updated
'''
import sys
import os
import time
import numpy as np
import array as arr
import pandas as pd
import Utils
from optparse import OptionParser
from logging_utils import setup_logging_to_file, log_exception, log_info,log_warn
__all__ = []
__version__ = 0.1
__date__ = '2020-06-02'
__updated__ = '2020-06-02'
DEBUG = 1
TESTRUN = 0
PROFILE = 0
def getMAFvalue(arr, idx):
    """Re-code a genotype allele pair as a minor-allele dosage.

    *idx* is the index (0 or 1) of the minor allele; homozygous pairs map to
    0 or 2 depending on which allele is minor, heterozygous pairs map to 1,
    and anything unrecognised (e.g. missing [-1, -1]) maps to ''.
    """
    if arr in ([0, 1], [1, 0]):
        return 1
    if arr == [0, 0]:
        return 2 if idx != 1 else 0
    if arr == [1, 1]:
        return 0 if idx != 1 else 2
    return ''
def mafMatrix(infile, outdir, df):
    """Compute allele frequencies and re-code *df* as a minor-allele-dosage matrix.

    Writes freq.alleles.txt, matrix.maf.txt, freq.maf.txt and matrix.P.txt into
    *outdir* and returns the transposed MAF-coded matrix.

    NOTE(review): *infile* is never used in this function — confirm it can be
    dropped from the signature (kept for interface compatibility here).
    Exceptions are logged via log_warn and swallowed, so a failure in either
    phase returns None implicitly.
    """
    log_info('MAF Matrix')
    log_info('Calculate frequencies')
    # One row per marker; columns '0'/'1' hold the frequency of each allele.
    df_freq = pd.DataFrame(columns=['0', '1'], index=df.index)
    try:
        for index, row in df.iterrows():
            freq = [0,0]
            count = 0
            for val in row.values:
                # val is an allele pair list; count 0s, 1s and missing (-1) calls.
                count_0 = np.count_nonzero(np.asarray(val, dtype='i1') == 0)
                count_1 = np.count_nonzero(np.asarray(val, dtype='i1') == 1)
                count_neg = np.count_nonzero(np.asarray(val, dtype='i1') == -1)
                if count_neg > 0:
                    # Skip samples with any missing allele for this marker.
                    continue
                else:
                    freq[0] += count_0
                    freq[1] += count_1
                    count += 2
            # NOTE(review): if every sample is missing, count stays 0 and this
            # divides by zero (caught by the broad except below) — confirm.
            df_freq.loc[index] = [freq[0]/count, freq[1]/count]
            # gt_freq = gt.count_alleles().to_frequencies()
        log_info('Write frequencies')
        Utils.saveFile(df_freq, os.path.join(outdir, "freq.alleles.txt"), index=True)
    except Exception as e:
        log_warn(e)
    log_info('Construct MAF matrix')
    try:
        vector_maf = pd.DataFrame(columns=['MAF'], index=df.index)
        for index, row in df.iterrows():
            arr = df_freq.loc[index].values
            # Index of the minor (least frequent) allele for this marker.
            idx = np.argmin(arr)
            # idx = np.where(arr == np.amin(arr))
            vector_maf.loc[index] = arr[idx]
            # Re-code the row in place as 0/1/2 dosages of the minor allele.
            df.loc[index] = df.loc[index].apply(lambda x: getMAFvalue(x, idx))
        log_info('Write MAF matrix')
        df = df.transpose()
        Utils.saveFile(df, os.path.join(outdir, "matrix.maf.txt"), index=True)
        log_info('Write MAF frequencies')
        # Matrix P: every row repeats the per-marker MAF vector.
        df_maf = pd.DataFrame(columns=df.columns, index=df.index)
        for index, row in df.iterrows():
            df_maf.loc[index] = list(vector_maf['MAF'].values)
        Utils.saveFile(vector_maf, os.path.join(outdir, "freq.maf.txt"), index=True)
        Utils.saveFile(df_maf, os.path.join(outdir, "matrix.P.txt"), index=True)
        log_info("End MAF")
        return df
    except Exception as e:
        log_warn(e)
def codeGenotype(code):
    """Map an rQTL genotype code to an allele pair.

    1 -> [0, 0], 2 -> [0, 1], 3 -> [1, 0], 4 -> [1, 1]; any other (or
    unhashable) value maps to the missing marker [-1, -1].

    Returns a fresh list on every call so downstream mutation is safe. The
    original wrapped the chain in try/except that logged and implicitly
    returned None on error, which would break callers expecting a list.
    """
    mapping = {1: (0, 0), 2: (0, 1), 3: (1, 0), 4: (1, 1)}
    try:
        return list(mapping.get(code, (-1, -1)))
    except TypeError:
        # Unhashable codes cannot be looked up; treat them as missing.
        return [-1, -1]
def loadrQTL(infile):
    """Load one or more comma-separated rQTL genotype files into one DataFrame.

    Each file is indexed by 'ID', the first four rows are dropped
    (NOTE(review): presumably rQTL header/phenotype rows — confirm against the
    input format), codes are mapped to allele pairs via codeGenotype, and the
    result is transposed to samples-by-markers. Multiple files are
    concatenated column-wise. On any failure the exception is logged and the
    function implicitly returns None.
    """
    log_info('Load rQTL')
    files = infile.split(',')
    try:
        df_final = None
        for f in files:
            df = Utils.loadFile(f)
            df = df.set_index('ID', drop=True)
            df = df.iloc[4:]
            df = df.astype(dtype='int32')
            df = df.applymap(lambda x: codeGenotype(x))
            df = df.transpose()
            if df_final is None:
                df_final = df
            else:
                df_final = pd.concat([df_final, df], axis=1, sort=False)
        return df_final
    except Exception as e:
        log_exception(e)
def calculateSimilarity(lst0, lst1):
    """Return the fraction of positions at which lst0 and lst1 agree.

    Matches the original's edge behavior explicitly instead of via caught
    exceptions: an empty lst0 (was ZeroDivisionError -> 0) or a shorter lst1
    (was IndexError -> 0) returns 0; a longer lst1 is compared only over the
    first len(lst0) positions.
    """
    if not lst0 or len(lst1) < len(lst0):
        return 0
    matches = sum(1 for a, b in zip(lst0, lst1) if a == b)
    return matches / len(lst0)
def tabulate(df_maf, outdir):
    """Build and save a symmetric relationship matrix by pairwise tabulation.

    For every pair of samples in *df_maf* the fraction of identical marker
    codes is computed with calculateSimilarity and written symmetrically into
    an n-by-n DataFrame, which is saved as grm.tabular.txt in *outdir*.
    Failures are logged and swallowed.
    """
    log_info('Construct Relationship Matrix using Tabulation method')
    sample_ids = df_maf.index
    df_grm = pd.DataFrame(columns=sample_ids, index=sample_ids)
    try:
        total = len(sample_ids)
        for i in range(total):
            row_i = df_maf.iloc[i].to_list()
            for j in range(i, total):
                similarity = calculateSimilarity(row_i, df_maf.iloc[j].to_list())
                # Fill both halves so the matrix stays symmetric.
                df_grm.iloc[i, j] = similarity
                df_grm.iloc[j, i] = similarity
        Utils.saveFile(df_grm, os.path.join(outdir, "grm.tabular.txt"), index=True)
        log_info('End Relationship Matrix')
    except Exception as e:
        log_warn(e)
def main(argv=None):
    '''Parse command-line options and run the load -> MAF -> tabulation pipeline.

    Returns 2 on error (after writing the error to stderr); None on success.
    '''
    program_name = os.path.basename(sys.argv[0])
    program_version = "v0.1"
    program_build_date = "%s" % __updated__
    program_version_string = '%%prog %s (%s)' % (program_version, program_build_date)
    program_longdesc = '''''' # optional - give further explanation about what the program does
    program_license = "Copyright 2020 user_name (organization_name) \
                Licensed under the Apache License 2.0\nhttp://www.apache.org/licenses/LICENSE-2.0"
    if argv is None:
        argv = sys.argv[1:]
    try:
        # setup option parser
        parser = OptionParser(version=program_version_string, epilog=program_longdesc, description=program_license)
        parser.add_option("-i", "--in", dest="infile", help="set input path [default: %default]", metavar="FILE")
        parser.add_option("-o", "--out", dest="outdir", help="set output path [default: %default]", metavar="FILE")
        # BUG FIX: the original called set_defaults(outfile=...), but no option
        # has dest="outfile", so opts.outdir stayed None when -o was omitted
        # and os.path.exists(None) below raised TypeError.
        parser.set_defaults(outdir="./out", infile="./in.txt")
        # process options
        (opts, args) = parser.parse_args(argv)
        # MAIN BODY #
        if not os.path.exists(opts.outdir):
            os.makedirs(opts.outdir)
        log_file = os.path.join(opts.outdir, 'log.'+ time.strftime("%Y-%m-%d") + '.txt')
        setup_logging_to_file(log_file)
        df = loadrQTL(opts.infile)
        matrix_maf = mafMatrix(opts.infile, opts.outdir, df)
        tabulate(matrix_maf, opts.outdir)
        log_info("Exit program")
    except Exception as e:
        indent = len(program_name) * " "
        sys.stderr.write(program_name + ": " + repr(e) + "\n")
        sys.stderr.write(indent + "  for help use --help")
        return 2
if __name__ == "__main__":
    # if DEBUG:
    # sys.argv.append("-h")
    if TESTRUN:
        import doctest
        doctest.testmod()
    if PROFILE:
        # Profiling mode: run main() under cProfile, dump cumulative stats to
        # profile_stats.txt, and exit before the normal (unprofiled) run below.
        import cProfile
        import pstats
        profile_filename = 'asreml.Gmatrix_profile.txt'
        cProfile.run('main()', profile_filename)
        statsfile = open("profile_stats.txt", "wb")
        p = pstats.Stats(profile_filename, stream=statsfile)
        stats = p.strip_dirs().sort_stats('cumulative')
        stats.print_stats()
        statsfile.close()
        sys.exit(0)
    # Normal entry point: exit with main()'s return code (2 on failure).
    sys.exit(main())
8134565 | <gh_stars>100-1000
__author__ = '<NAME>'
| StarcoderdataPython |
8199846 | import typing as tp
import uvicorn
from fastapi import Depends, FastAPI, HTTPException, status
from fastapi.middleware.cors import CORSMiddleware
from loguru import logger
from _logging import CONSOLE_LOGGING_CONFIG, FILE_LOGGING_CONFIG
from dependencies import (
get_employee_by_card_id,
get_revision_pending_units,
get_schema_by_id,
get_unit_by_internal_id,
identify_sender,
)
from feecc_workbench import models as mdl
from feecc_workbench.Employee import Employee
from feecc_workbench.Unit import Unit
from feecc_workbench.WorkBench import WorkBench
from feecc_workbench.database import MongoDbWrapper
from feecc_workbench.exceptions import EmployeeNotFoundError, StateForbiddenError, UnitNotFoundError
from feecc_workbench.states import State
# apply logging configuration
logger.configure(handlers=[CONSOLE_LOGGING_CONFIG, FILE_LOGGING_CONFIG])
# global variables
app = FastAPI(title="Feecc Workbench daemon")
app.add_middleware(
    CORSMiddleware,
    # NOTE(review): wildcard origins combined with allow_credentials=True is
    # very permissive — confirm this daemon is only reachable on a trusted LAN.
    allow_origins=["*"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Single module-level workbench state machine shared by every request handler.
WORKBENCH = WorkBench()
@app.on_event("startup")
def startup_event() -> None:
    # Instantiate the Mongo wrapper once at startup so the database connection
    # is established before the first request arrives.
    MongoDbWrapper()
@app.post("/unit/new/{schema_id}", response_model=tp.Union[mdl.UnitOut, mdl.GenericResponse], tags=["unit"])  # type: ignore
async def create_unit(
    schema: mdl.ProductionSchema = Depends(get_schema_by_id),
) -> tp.Union[mdl.UnitOut, mdl.GenericResponse]:
    """handle new Unit creation

    Creates a new Unit from the resolved production schema and returns its
    internal ID; any failure is reported as a 500 GenericResponse.
    """
    try:
        unit: Unit = await WORKBENCH.create_new_unit(schema)
        logger.info(f"Initialized new unit with internal ID {unit.internal_id}")
        return mdl.UnitOut(
            status_code=status.HTTP_200_OK,
            detail="New unit created successfully",
            unit_internal_id=unit.internal_id,
        )
    except Exception as e:
        logger.error(f"Exception occurred while creating new Unit: {e}")
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e))
@app.get("/unit/{unit_internal_id}/info", response_model=mdl.UnitInfo, tags=["unit"])
def get_unit_data(unit: Unit = Depends(get_unit_by_internal_id)) -> mdl.UnitInfo:
    """return data for a Unit with matching ID

    The unit's biography is split into completed and pending stage lists by
    filtering the same stage sequence on its `completed` flag.
    """
    return mdl.UnitInfo(
        status_code=status.HTTP_200_OK,
        detail="Unit data retrieved successfully",
        unit_internal_id=unit.internal_id,
        unit_status=unit.status.value,
        unit_biography_completed=[
            mdl.BiographyStage(
                stage_name=stage.name,
                stage_schema_entry_id=stage.schema_stage_id,
            )
            for stage in unit.biography
            if stage.completed
        ],
        unit_biography_pending=[
            mdl.BiographyStage(
                stage_name=stage.name,
                stage_schema_entry_id=stage.schema_stage_id,
            )
            for stage in unit.biography
            if not stage.completed
        ],
        # None (not an empty list) when the unit has no component schemas.
        unit_components=unit.components_schema_ids or None,
        schema_id=unit.schema.schema_id,
    )
@app.get("/unit/pending_revision", response_model=mdl.UnitOutPending, tags=["unit"])
def get_revision_pending(units: tp.List[Unit] = Depends(get_revision_pending_units)) -> mdl.UnitOutPending:
    """return all units staged for revision

    The dependency resolves the pending units; this handler only maps them to
    the (internal_id, unit_name) response entries.
    """
    return mdl.UnitOutPending(
        status_code=status.HTTP_200_OK,
        detail=f"{len(units)} units awaiting revision.",
        units=[
            mdl.UnitOutPendingEntry(unit_internal_id=unit.internal_id, unit_name=unit.schema.unit_name)
            for unit in units
        ],
    )
@app.post("/unit/upload", response_model=mdl.GenericResponse, tags=["unit"])
async def unit_upload_record() -> mdl.GenericResponse:
    """handle Unit lifecycle end

    Uploads the currently assigned unit's passport. Requires an authorized
    employee; any failure (including the auth check) is returned as a 500.
    """
    try:
        if WORKBENCH.employee is None:
            raise StateForbiddenError("Employee is not authorized on the workbench")
        await WORKBENCH.upload_unit_passport()
        return mdl.GenericResponse(
            status_code=status.HTTP_200_OK, detail=f"Uploaded data for unit {WORKBENCH.unit.internal_id}"
        )
    except Exception as e:
        message: str = f"Can't handle unit upload. An error occurred: {e}"
        logger.error(message)
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=message)
@app.post("/unit/assign-component/{unit_internal_id}", response_model=mdl.GenericResponse, tags=["unit"])
async def assign_component(unit: Unit = Depends(get_unit_by_internal_id)) -> mdl.GenericResponse:
    """assign a unit as a component to the composite unit

    Only valid while the workbench is in the GatherComponents state; the state
    check is done before the try so the 403 is raised as an HTTPException
    rather than being converted into a 500 below.
    """
    if WORKBENCH.state != State.GATHER_COMPONENTS_STATE:
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Component assignment can only be done while the workbench is in state 'GatherComponents'",
        )
    try:
        await WORKBENCH.assign_component_to_unit(unit)
        return mdl.GenericResponse(status_code=status.HTTP_200_OK, detail="Component has been assigned")
    except Exception as e:
        message: str = f"An error occurred during component assignment: {e}"
        logger.error(message)
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=message)
@app.post("/employee/info", response_model=mdl.EmployeeOut, tags=["employee"])
def get_employee_data(employee: mdl.EmployeeWCardModel = Depends(get_employee_by_card_id)) -> mdl.EmployeeOut:
    """return data for an Employee with matching ID card

    Lookup (and 404 handling) happens in the dependency; this handler only
    wraps the resolved employee into the response model.
    """
    return mdl.EmployeeOut(
        status_code=status.HTTP_200_OK, detail="Employee retrieved successfully", employee_data=employee
    )
@app.post("/employee/log-in", response_model=tp.Union[mdl.EmployeeOut, mdl.GenericResponse], tags=["employee"])  # type: ignore
def log_in_employee(
    employee: mdl.EmployeeWCardModel = Depends(get_employee_by_card_id),
) -> tp.Union[mdl.EmployeeOut, mdl.GenericResponse]:
    """handle logging in the Employee at a given Workbench

    A StateForbiddenError (e.g. someone already logged in) is mapped to a 403
    GenericResponse; other exceptions propagate to the framework.
    """
    try:
        WORKBENCH.log_in(Employee(rfid_card_id=employee.rfid_card_id, name=employee.name, position=employee.position))
        return mdl.EmployeeOut(
            status_code=status.HTTP_200_OK, detail="Employee logged in successfully", employee_data=employee
        )
    except StateForbiddenError as e:
        return mdl.GenericResponse(status_code=status.HTTP_403_FORBIDDEN, detail=str(e))
@app.post("/employee/log-out", response_model=mdl.GenericResponse, tags=["employee"])
def log_out_employee() -> mdl.GenericResponse:
    """handle logging out the Employee at a given Workbench

    After log_out() the employee slot is sanity-checked; a still-present
    employee is treated as a failure and reported as a 500.
    """
    try:
        WORKBENCH.log_out()
        if WORKBENCH.employee is not None:
            raise ValueError("Unable to logout employee")
        return mdl.GenericResponse(status_code=status.HTTP_200_OK, detail="Employee logged out successfully")
    except Exception as e:
        message: str = f"An error occurred while logging out the Employee: {e}"
        logger.error(message)
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=message)
@app.get("/workbench/status", response_model=mdl.WorkbenchOut, tags=["workbench"])
def get_workbench_status() -> mdl.WorkbenchOut:
    """handle providing status of the given Workbench

    Snapshot of the shared WORKBENCH singleton; all unit fields are None when
    no unit is currently assigned.
    """
    unit = WORKBENCH.unit
    return mdl.WorkbenchOut(
        state=WORKBENCH.state.value,
        employee_logged_in=bool(WORKBENCH.employee),
        employee=WORKBENCH.employee.data if WORKBENCH.employee else None,
        operation_ongoing=WORKBENCH.state.value == State.PRODUCTION_STAGE_ONGOING_STATE.value,
        unit_internal_id=unit.internal_id if unit else None,
        unit_status=unit.status.value if unit else None,
        unit_biography=[stage.name for stage in unit.biography] if unit else None,
        unit_components=unit.assigned_components() if unit else None,
    )
@app.post("/workbench/assign-unit/{unit_internal_id}", response_model=mdl.GenericResponse, tags=["workbench"])
def assign_unit(unit: Unit = Depends(get_unit_by_internal_id)) -> mdl.GenericResponse:
    """assign the provided unit to the workbench

    The unit is resolved (with 404 on miss) by the dependency; assignment
    failures are reported as a 500 GenericResponse.
    """
    try:
        WORKBENCH.assign_unit(unit)
        return mdl.GenericResponse(status_code=status.HTTP_200_OK, detail=f"Unit {unit.internal_id} has been assigned")
    except Exception as e:
        message: str = f"An error occurred during unit assignment: {e}"
        logger.error(message)
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=message)
@app.post("/workbench/remove-unit", response_model=mdl.GenericResponse, tags=["workbench"])
def remove_unit() -> mdl.GenericResponse:
    """remove the unit from the workbench

    Delegates to the state machine; failures (e.g. forbidden in the current
    state) come back as a 500 GenericResponse.
    """
    try:
        WORKBENCH.remove_unit()
        return mdl.GenericResponse(status_code=status.HTTP_200_OK, detail="Unit has been removed")
    except Exception as e:
        message: str = f"An error occurred during unit removal: {e}"
        logger.error(message)
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=message)
@app.post("/workbench/start-operation", response_model=mdl.GenericResponse, tags=["workbench"])
async def start_operation(workbench_details: mdl.WorkbenchExtraDetails) -> mdl.GenericResponse:
    """handle start recording operation on a Unit

    Starts the unit's next pending operation with the supplied extra info;
    the success message is built after the state transition so it reflects
    the operation actually started.
    """
    try:
        await WORKBENCH.start_operation(workbench_details.additional_info)
        unit = WORKBENCH.unit
        message: str = f"Started operation '{unit.next_pending_operation.name}' on Unit {unit.internal_id}"
        logger.info(message)
        return mdl.GenericResponse(status_code=status.HTTP_200_OK, detail=message)
    except Exception as e:
        message = f"Couldn't handle request. An error occurred: {e}"
        logger.error(message)
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=message)
@app.post("/workbench/end-operation", response_model=mdl.GenericResponse, tags=["workbench"])
async def end_operation(workbench_data: mdl.WorkbenchExtraDetailsWithoutStage) -> mdl.GenericResponse:
    """handle end recording operation on a Unit

    Ends the ongoing operation, optionally marking it as prematurely ended;
    failures are reported as a 500 GenericResponse.
    """
    try:
        await WORKBENCH.end_operation(workbench_data.additional_info, workbench_data.premature_ending)
        unit = WORKBENCH.unit
        message: str = f"Ended current operation on unit {unit.internal_id}"
        logger.info(message)
        return mdl.GenericResponse(status_code=status.HTTP_200_OK, detail=message)
    except Exception as e:
        message = f"Couldn't handle end record request. An error occurred: {e}"
        logger.error(message)
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=message)
@app.get("/workbench/production-schemas/names", response_model=mdl.SchemasList, tags=["workbench"])
async def get_schemas() -> mdl.SchemasList:
    """get all available schemas

    Builds a forest of schema entries: composite schemas recursively embed
    the schemas of their required components. handled_schemas tracks which
    IDs were already embedded so they are not repeated at the top level.
    """
    all_schemas = {schema.schema_id: schema for schema in await MongoDbWrapper().get_all_schemas()}
    handled_schemas = set()
    def get_schema_list_entry(schema: mdl.ProductionSchema) -> mdl.SchemaListEntry:
        nonlocal all_schemas, handled_schemas
        # Recurse into component schemas for composites; leaves get None.
        included_schemas: tp.Optional[tp.List[mdl.SchemaListEntry]] = (
            [get_schema_list_entry(all_schemas[s_id]) for s_id in schema.required_components_schema_ids]
            if schema.is_composite
            else None
        )
        handled_schemas.add(schema.schema_id)
        return mdl.SchemaListEntry(
            schema_id=schema.schema_id,
            schema_name=schema.unit_name,
            included_schemas=included_schemas,
        )
    # Composites are processed first so their components get claimed by
    # handled_schemas before they would appear as standalone entries.
    available_schemas = [
        get_schema_list_entry(schema)
        for schema in sorted(all_schemas.values(), key=lambda s: bool(s.is_composite), reverse=True)
        if schema.schema_id not in handled_schemas
    ]
    return mdl.SchemasList(
        status_code=status.HTTP_200_OK,
        detail=f"Gathered {len(all_schemas)} schemas",
        available_schemas=available_schemas,
    )
@app.get(
    "/workbench/production-schemas/{schema_id}",
    response_model=tp.Union[mdl.ProductionSchemaResponse, mdl.GenericResponse],  # type: ignore
    tags=["workbench"],
)
async def get_schema(
    schema: mdl.ProductionSchema = Depends(get_schema_by_id),
) -> tp.Union[mdl.ProductionSchemaResponse, mdl.GenericResponse]:
    """get schema by it's ID

    Resolution and 404 handling live in the dependency; this handler only
    wraps the schema into the response model.
    """
    return mdl.ProductionSchemaResponse(
        status_code=status.HTTP_200_OK,
        detail=f"Found schema {schema.schema_id}",
        production_schema=schema,
    )
@app.post("/workbench/hid-event", response_model=mdl.GenericResponse, tags=["workbench"])
async def handle_hid_event(event: mdl.HidEvent = Depends(identify_sender)) -> mdl.GenericResponse:
    """Dispatch an HID (RFID / barcode reader) event to the workbench state machine.

    An RFID scan toggles employee login; a barcode either ends the ongoing
    operation or resolves the scanned unit and assigns it according to the
    current workbench state.
    """
    logger.debug(f"Received event dict:\n{event.json()}")
    try:
        if event.name == "rfid_reader":
            logger.debug(f"Handling RFID event. String: {event.string}")
            if WORKBENCH.employee is not None:
                WORKBENCH.log_out()
            else:
                try:
                    employee: Employee = await MongoDbWrapper().get_employee_by_card_id(event.string)
                except EmployeeNotFoundError as e:
                    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e))
                WORKBENCH.log_in(employee)
        elif event.name == "barcode_reader":
            logger.debug(f"Handling barcode event. String: {event.string}")
            if WORKBENCH.state == State.PRODUCTION_STAGE_ONGOING_STATE:
                await WORKBENCH.end_operation()
            else:
                try:
                    unit = await get_unit_by_internal_id(event.string)
                except UnitNotFoundError as e:
                    raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail=str(e))
                if WORKBENCH.state == State.AUTHORIZED_IDLING_STATE:
                    WORKBENCH.assign_unit(unit)
                elif WORKBENCH.state == State.UNIT_ASSIGNED_IDLING_STATE:
                    WORKBENCH.remove_unit()
                    WORKBENCH.assign_unit(unit)
                elif WORKBENCH.state == State.GATHER_COMPONENTS_STATE:
                    await WORKBENCH.assign_component_to_unit(unit)
        else:
            logger.error(f"Received input {event.string}. Ignoring event since no one is authorized.")
        return mdl.GenericResponse(status_code=status.HTTP_200_OK, detail="Hid event has been handled as expected")
    except HTTPException:
        # BUG FIX: HTTPException subclasses Exception, so the deliberate 404s
        # raised above used to be swallowed by the broad handler below and
        # converted into generic 500 responses. Re-raise them untouched.
        raise
    except StateForbiddenError as e:
        logger.error(e)
        return mdl.GenericResponse(status_code=status.HTTP_403_FORBIDDEN, detail=str(e))
    except Exception as e:
        logger.error(e)
        return mdl.GenericResponse(status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, detail=str(e))
if __name__ == "__main__":
    # Development entry point; serve the app with uvicorn on port 5000.
    uvicorn.run("app:app", port=5000)
| StarcoderdataPython |
6638794 | <filename>reviewboard/reviews/evolutions/file_attachment_comment_diff_id.py
from django_evolution.mutations import AddField
from django.db import models
# Evolution: add an optional FK on FileAttachmentComment pointing at the
# FileAttachment the comment was diffed against (nullable for old rows).
MUTATIONS = [
    AddField('FileAttachmentComment', 'diff_against_file_attachment',
             models.ForeignKey, null=True,
             related_model='attachments.FileAttachment')
]
| StarcoderdataPython |
6507759 | <gh_stars>0
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
sys.path.append("./src")
from setup_snicar import *
from classes import *
from column_OPs import *
from biooptical_funcs import *
from toon_rt_solver import toon_solver
from adding_doubling_solver import adding_doubling_solver
from validate_inputs import *
from display import *
# define input file
input_file = "./src/inputs.yaml"
###################
# BIO-OPTICAL MODEL
###################
# optionally run the bio-optical model to add new impurity optical properties to
# the BioSNICAR database. Commented out by default as we expect our default lap
# database to be sufficient for most users.
#run_biooptical_model(input_file)
###########################
# RADIATIVE TRANSFER MODEL
###########################
# first build classes from config file and validate their contents
(
    ice,
    illumination,
    rt_config,
    model_config,
    plot_config,
    impurities,
) = setup_snicar(input_file)
# validate inputs to ensure no invalid combinations have been chosen
# NOTE(review): `status` is never inspected afterwards — presumably
# validate_inputs raises on failure; confirm.
status = validate_inputs(ice, rt_config, model_config, illumination, impurities)
# now get the optical properties of the ice column
ssa_snw, g_snw, mac_snw = get_layer_OPs(ice, model_config)
tau, ssa, g, L_snw = mix_in_impurities(
    ssa_snw, g_snw, mac_snw, ice, impurities, model_config
)
# now run one or both of the radiative transfer solvers
outputs1 = adding_doubling_solver(tau, ssa, g, L_snw, ice, illumination, model_config)
outputs2 = toon_solver(tau, ssa, g, L_snw, ice, illumination, model_config, rt_config)
# plot and print output data
# NOTE(review): only the adding-doubling outputs are plotted/printed;
# outputs2 (Toon solver) is computed but unused here — confirm intent.
plot_albedo(plot_config, model_config, outputs1.albedo)
display_out_data(outputs1)
| StarcoderdataPython |
11282993 | <filename>jack/readers/knowledge_base_population/shared.py
import tensorflow as tf
from jack.core import TensorPort
class KBPPorts:
    """Extra TensorPorts used by knowledge-base population readers."""

    # Per-candidate triple scores. NOTE(review): the port is declared rank-2
    # ([None, None]) but its own doc string says "[batch_size]" — confirm
    # which shape downstream models actually expect.
    triple_logits = TensorPort(tf.float32, [None, None], "triple_logits",
                               "Represents output scores for each candidate", "[batch_size]")
| StarcoderdataPython |
9657364 | """Let users access their own personal Spotify account."""
__requires__ = ['plumeria.core.oauth']
import random
from plumeria import config
from plumeria.command import commands, CommandError
from plumeria.message.lists import build_list
from plumeria.core.oauth import oauth_manager, catch_token_expiration
from plumeria.perms import direct_only
from plumeria.plugin import PluginSetupError
from plumeria.transport import User
from plumeria.util import http
from plumeria.util.ratelimit import rate_limit
from plumeria.util.string import get_best_matching
# Config entries registered at import time; their values are validated in setup().
client_id = config.create("spotify", "client_id",
                          fallback="",
                          comment="A Spotify OAuth client ID")
client_secret = config.create("spotify", "client_secret",
                              fallback="",
                              comment="A Spotify OAuth client secret")
# OAuth2 endpoint descriptor for Spotify's Authorization Code flow; it is
# registered with the manager in setup().
spotify_endpoint = oauth_manager.create_oauth2(
    name="spotify",
    client_id=client_id,
    client_secret=client_secret,
    auth_url="https://accounts.spotify.com/authorize",
    token_url="https://accounts.spotify.com/api/token",
    requested_scopes=('playlist-read-private', 'playlist-read-collaborative',
                      'playlist-modify-public', 'playlist-modify-private',
                      'user-follow-modify', 'user-follow-read',
                      'user-library-read', 'user-library-modify',
                      'user-top-read'))
@catch_token_expiration(spotify_endpoint)
async def fetch_api(user: User, *args, **kwargs):
    """GET a Spotify Web API endpoint on behalf of *user* and return parsed JSON.

    Adds the user's OAuth bearer header without mutating the caller's kwargs
    (the original appended to a caller-supplied headers list in place).
    Raises CommandError with Spotify's error message when the API reports one.
    """
    headers = list(kwargs.get('headers') or [])
    headers.append(('Authorization', await spotify_endpoint.get_auth_header(user)))
    kwargs['headers'] = headers
    r = await http.get(*args, **kwargs)
    data = r.json()
    if 'error' in data:
        # Spotify wraps errors as {"error": {"status": ..., "message": ...}};
        # the original read data['message'], which would KeyError instead of
        # surfacing the real error text.
        err = data['error']
        message = err.get('message', str(err)) if isinstance(err, dict) else str(err)
        raise CommandError(message)
    return data
async def fetch_paged_list(*args, limit=20, max_page_count=1, **kwargs):
    """Fetch up to *max_page_count* pages of a paged Spotify listing.

    Follows Spotify's offset/limit paging: concatenates every page's 'items'
    and stops early when the API reports no next page.
    """
    base_params = dict(kwargs['params'] if 'params' in kwargs else [])
    base_params['limit'] = limit
    items = []
    offset = 0
    for i in range(max_page_count):
        kwargs['params'] = dict(base_params)
        kwargs['params']['offset'] = offset
        data = await fetch_api(*args, **kwargs)
        items += data['items']
        if data['next'] is None:
            break
        else:
            # Advance by the server-reported offset/limit rather than the loop
            # counter so partial pages are handled correctly.
            offset = data['offset'] + data['limit']
    return items
@commands.create("spotify playlists", category="Music")
@direct_only
@rate_limit()
async def playlists(message):
    """
    Get a list of your public Spotify playlists.

    Only playlists marked public are listed; up to the first 50 playlists
    are considered.

    Example::

        /spotify playlists

    """
    data = await fetch_api(message.author, "https://api.spotify.com/v1/me/playlists", params=[
        ('limit', '50'),
    ])
    public_playlists = [e for e in data['items'] if e['public']]
    if not len(public_playlists):
        raise CommandError("You have no public playlists.")
    return build_list(["**{name}** - <{url}>".format(
        name=e['name'],
        url=e['external_urls']['spotify'],
    ) for e in public_playlists])
@commands.create("spotify pick", category="Music")
@direct_only
@rate_limit()
async def pick_song(message):
    """
    Pick 5 random songs from one of your playlists searched by name.

    Example::

        /spotify pick classic rock 2010

    The command will only check your first 100 playlists to see if they match
    your query, and then the command will only pick a random song from the first
    200 songs on the playlist.
    """
    query = message.content.strip()
    if not len(query):
        raise CommandError("Supply something to search for in playlist names.")
    playlists = await fetch_paged_list(message.author, "https://api.spotify.com/v1/me/playlists", limit=50,
                                       max_page_count=2)
    if not len(playlists):
        raise CommandError("You have no playlists.")
    best_playlists = get_best_matching(playlists, query, key=lambda item: item['name'])
    if not len(best_playlists):
        raise CommandError("No playlists (out of your first 100) matched your query.")
    # Use only the best-matching playlist; its owner ID is needed because the
    # tracks endpoint is addressed per owner.
    owner_id = best_playlists[0]['owner']['id']
    tracks = await fetch_paged_list(message.author,
                                    "https://api.spotify.com/v1/users/{}/playlists/{}/tracks".format(
                                        owner_id, best_playlists[0]['id']
                                    ),
                                    limit=100, max_page_count=2)
    if not len(tracks):
        raise CommandError("The playlist '{}' has no tracks.".format(best_playlists[0]['name']))
    random.shuffle(tracks)
    # Local files have no Spotify URL; fall back to a plain-text marker.
    return build_list(["**{artist} - {name}** - <{url}>".format(
        artist=e['track']['artists'][0]['name'],
        name=e['track']['name'],
        url=e['track']['external_urls']['spotify'] if 'spotify' in e['track']['external_urls'] else "local track",
    ) for e in tracks[:5]])
def setup():
    """Plugin entry point: validate Spotify credentials, then register the
    OAuth endpoint and the chat commands. Raises PluginSetupError when the
    client ID/secret are missing so the plugin is not loaded half-configured.
    """
    config.add(client_id)
    config.add(client_secret)
    if not client_id() or not client_secret():
        raise PluginSetupError("This plugin requires a client ID and client secret from Spotify. Registration is free. "
                               "Create an application on https://developer.spotify.com/my-applications/ to get "
                               "an ID and secret.")
    oauth_manager.add(spotify_endpoint)
    commands.add(playlists)
    commands.add(pick_song)
| StarcoderdataPython |
6503041 | import cv2
from utils import get_3d_sample, get_2d_sample, BGR_2_gray, get_2d_neighbor
import numpy as np
import cupy as cp
import matplotlib.pyplot as plt
def ncc(X, Y):
    """Normalized cross-correlation of two equal-shaped arrays, computed on GPU.

    Accepts numpy or cupy arrays (numpy inputs are copied to the device) and
    returns a cupy scalar in [-1, 1]. Uses the single-pass formulation:
    (sum(XY) - sum(X)sum(Y)/n) / sqrt((sum(X^2)-sum(X)^2/n)(sum(Y^2)-sum(Y)^2/n)).
    The commented-out mean/variance implementation from the original was dead
    code and has been removed.
    """
    if isinstance(X, np.ndarray):
        X = cp.array(X)
    if isinstance(Y, np.ndarray):
        Y = cp.array(Y)
    n = int(np.prod(X.shape))
    NCC = (cp.sum(X * Y) - (cp.sum(X) * cp.sum(Y) / n)) / cp.power(
        (cp.sum(X * X) - cp.sum(X) ** 2 / n) * (cp.sum(Y * Y) - cp.sum(Y) ** 2 / n), 0.5)
    return NCC
def ncc_field(neigbor, label, n, M):
    """Slide an n*n template over an M*M neighborhood, returning the NCC map.

    Output is an (M-n+1) x (M-n+1) array where entry (i, j) is the normalized
    cross-correlation of the window at offset (i, j) with *label*.
    """
    L = M - n + 1
    ans = np.zeros(shape=(L, L))
    for i in range(L):
        for j in range(L):
            # BUG FIX: correlate the *arguments*; the original body ignored
            # its parameters and used the module-level globals
            # sample_neigbor/sample_label instead.
            ans[i, j] = ncc(neigbor[i:i + n, j:j + n], label)
    return ans
# Template-matching demo: correlate a 32x32 patch from image 2 against a
# 64x64 neighborhood around the same coordinates in image 1, then plot results.
coord_X, coord_Y = 1000, 1000
window_sz = 32
neighbor_sz = 64
input_img1 = cv2.imread('../images/1.png')
input_img2 = cv2.imread('../images/2.png')
# NOTE(review): coord_X is passed for BOTH axes below and coord_Y is never
# used — confirm whether (coord_X, coord_Y) was intended.
sample_neigbor = get_2d_neighbor(BGR_2_gray(input_img1), coord_X, coord_X, window_sz, neighbor_sz )
sample_label = get_2d_sample(BGR_2_gray(input_img2), coord_X, coord_X, window_sz)
ans = ncc_field(sample_neigbor, sample_label, window_sz, neighbor_sz)
print(ans)
plt.imshow(sample_label)
plt.show()
plt.imshow(sample_neigbor)
plt.show()
plt.imshow(ans)
plt.show()
print('finish')
6615057 | from typing import Dict, List, Optional, Set
from omnilingual import LanguageCode
from pydantic import BaseModel
class SourceWord(BaseModel):
    """A word in a source language that a dictionary sense derives from."""

    language: LanguageCode
    word: Optional[str]  # may be None when only the language of origin is known
    # NOTE(review): presumably "the cited word is the full source form" — confirm.
    full: bool
    tags: Set[str] = set()  # pydantic copies mutable defaults per instance
class Sense(BaseModel):
    """One sense of a dictionary entry: per-language definitions plus metadata."""

    # Definitions keyed by the language they are written in.
    definitions: Dict[LanguageCode, List[str]]
    tags: Set[str] = set()
    information: List[str] = []
    references: List[str] = []
    antonyms: List[str] = []
    synonyms: List[str] = []
    source_language_words: List[SourceWord] = []
    def to_bson(self):
        """Serialize the sense for MongoDB, flattening enum language keys.

        NOTE(review): source_language_words is emitted as SourceWord model
        objects, not plain dicts, unlike every other field — confirm the BSON
        encoder handles them (or whether .dict() conversion is missing).
        """
        return {
            "definitions": {
                language.value: definitions
                for language, definitions in self.definitions.items()
            },
            "tags": list(self.tags),
            "information": self.information,
            "references": self.references,
            "antonyms": self.antonyms,
            "synonyms": self.synonyms,
            "source_language_words": self.source_language_words,
        }
| StarcoderdataPython |
1848781 | <reponame>arthurguerra/cursoemvideo-python<filename>exercises/CursoemVideo/ex113.py
def leiaInt(msg):
    """Keep prompting with *msg* until the user types a valid integer; return it.

    Fixes from the original: converts the input once instead of twice
    (validate then re-convert after the loop), catches only ValueError instead
    of all exceptions, and drops the unused exception binding.
    """
    while True:
        n = str(input(msg))
        try:
            return int(n)
        except ValueError:
            # int() raises ValueError for non-numeric text; anything else
            # (e.g. KeyboardInterrupt) should propagate.
            print('\033[1;31mErro: por favor, digite um número inteiro válido.\033[m')
def leiaFloat(msg):
    """Keep prompting with *msg* until the user types a valid float; return it.

    A blank / whitespace-only answer is accepted and returned as 0.0 (matching
    the original behavior). Fixes from the original: returns directly from the
    try instead of re-converting after the loop, and replaces the bare
    ``except:`` with a narrow ValueError catch.
    """
    while True:
        n = str(input(msg))
        if n.strip() == '':
            n = 0
        try:
            return float(n)
        except ValueError:
            print('ERRO: por favor, digite um número real válido.')
# Demo: read one validated integer and one validated float, then echo both.
inteiro = leiaInt('Digite um número Inteiro: ')
# BUG FIX: user-facing prompt typo 'Didige' corrected to 'Digite'.
real = leiaFloat('Digite um número Real:')
print(f'O valor inteiro digitado foi {inteiro} e o real foi {real}')
3597067 | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'pin-generator.ui'
#
# Created: Mon Jul 12 16:10:58 2010
# by: PyQt4 UI code generator 4.7.2
#
# WARNING! All changes made in this file will be lost!
from PyQt4 import QtCore, QtGui
class Ui_PinGenerator(object):
    """pyuic4-generated UI class for the pin-generator main window.

    Generated from 'pin-generator.ui'; changes here are lost on
    regeneration, so prefer editing the .ui file.
    """

    def setupUi(self, PinGenerator):
        """Create all widgets and layouts and attach them to *PinGenerator*."""
        PinGenerator.setObjectName("PinGenerator")
        PinGenerator.resize(925, 602)
        self.centralwidget = QtGui.QWidget(PinGenerator)
        self.centralwidget.setObjectName("centralwidget")
        # Output text area (left side)
        self.textEdit = QtGui.QTextEdit(self.centralwidget)
        self.textEdit.setGeometry(QtCore.QRect(20, 60, 301, 511))
        self.textEdit.setObjectName("textEdit")
        # Pin-layout parameter form (count, origin, pitch, direction)
        self.formLayoutWidget = QtGui.QWidget(self.centralwidget)
        self.formLayoutWidget.setGeometry(QtCore.QRect(330, 240, 194, 181))
        self.formLayoutWidget.setObjectName("formLayoutWidget")
        self.formLayout = QtGui.QFormLayout(self.formLayoutWidget)
        self.formLayout.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout.setObjectName("formLayout")
        self.label = QtGui.QLabel(self.formLayoutWidget)
        self.label.setObjectName("label")
        self.formLayout.setWidget(0, QtGui.QFormLayout.LabelRole, self.label)
        self.number_pins = QtGui.QLineEdit(self.formLayoutWidget)
        self.number_pins.setObjectName("number_pins")
        self.formLayout.setWidget(0, QtGui.QFormLayout.FieldRole, self.number_pins)
        self.label_2 = QtGui.QLabel(self.formLayoutWidget)
        self.label_2.setObjectName("label_2")
        self.formLayout.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_2)
        self.start_x = QtGui.QLineEdit(self.formLayoutWidget)
        self.start_x.setObjectName("start_x")
        self.formLayout.setWidget(1, QtGui.QFormLayout.FieldRole, self.start_x)
        self.label_3 = QtGui.QLabel(self.formLayoutWidget)
        self.label_3.setObjectName("label_3")
        self.formLayout.setWidget(2, QtGui.QFormLayout.LabelRole, self.label_3)
        self.start_y = QtGui.QLineEdit(self.formLayoutWidget)
        self.start_y.setObjectName("start_y")
        self.formLayout.setWidget(2, QtGui.QFormLayout.FieldRole, self.start_y)
        self.label_6 = QtGui.QLabel(self.formLayoutWidget)
        self.label_6.setObjectName("label_6")
        self.formLayout.setWidget(4, QtGui.QFormLayout.LabelRole, self.label_6)
        self.direction = QtGui.QComboBox(self.formLayoutWidget)
        self.direction.setObjectName("direction")
        self.formLayout.setWidget(4, QtGui.QFormLayout.FieldRole, self.direction)
        self.pitch = QtGui.QLineEdit(self.formLayoutWidget)
        self.pitch.setObjectName("pitch")
        self.formLayout.setWidget(3, QtGui.QFormLayout.FieldRole, self.pitch)
        self.label_7 = QtGui.QLabel(self.formLayoutWidget)
        self.label_7.setObjectName("label_7")
        self.formLayout.setWidget(3, QtGui.QFormLayout.LabelRole, self.label_7)
        # Pad-size parameter form (X/Y dimensions)
        self.formLayoutWidget_2 = QtGui.QWidget(self.centralwidget)
        self.formLayoutWidget_2.setGeometry(QtCore.QRect(330, 150, 191, 71))
        self.formLayoutWidget_2.setObjectName("formLayoutWidget_2")
        self.formLayout_2 = QtGui.QFormLayout(self.formLayoutWidget_2)
        self.formLayout_2.setFieldGrowthPolicy(QtGui.QFormLayout.AllNonFixedFieldsGrow)
        self.formLayout_2.setObjectName("formLayout_2")
        self.label_4 = QtGui.QLabel(self.formLayoutWidget_2)
        self.label_4.setObjectName("label_4")
        self.formLayout_2.setWidget(0, QtGui.QFormLayout.LabelRole, self.label_4)
        self.label_5 = QtGui.QLabel(self.formLayoutWidget_2)
        self.label_5.setObjectName("label_5")
        self.formLayout_2.setWidget(1, QtGui.QFormLayout.LabelRole, self.label_5)
        self.pad_size_x = QtGui.QLineEdit(self.formLayoutWidget_2)
        self.pad_size_x.setObjectName("pad_size_x")
        self.formLayout_2.setWidget(0, QtGui.QFormLayout.FieldRole, self.pad_size_x)
        self.pad_size_y = QtGui.QLineEdit(self.formLayoutWidget_2)
        self.pad_size_y.setObjectName("pad_size_y")
        self.formLayout_2.setWidget(1, QtGui.QFormLayout.FieldRole, self.pad_size_y)
        # Action button
        self.generate = QtGui.QPushButton(self.centralwidget)
        self.generate.setGeometry(QtCore.QRect(360, 440, 131, 27))
        self.generate.setObjectName("generate")
        PinGenerator.setCentralWidget(self.centralwidget)
        self.statusbar = QtGui.QStatusBar(PinGenerator)
        self.statusbar.setObjectName("statusbar")
        PinGenerator.setStatusBar(self.statusbar)
        self.retranslateUi(PinGenerator)
        QtCore.QMetaObject.connectSlotsByName(PinGenerator)

    def retranslateUi(self, PinGenerator):
        """Set all user-visible strings (hook for Qt translation)."""
        PinGenerator.setWindowTitle(QtGui.QApplication.translate("PinGenerator", "MainWindow", None, QtGui.QApplication.UnicodeUTF8))
        self.label.setText(QtGui.QApplication.translate("PinGenerator", "Number of Pins", None, QtGui.QApplication.UnicodeUTF8))
        self.label_2.setText(QtGui.QApplication.translate("PinGenerator", "Start X", None, QtGui.QApplication.UnicodeUTF8))
        self.label_3.setText(QtGui.QApplication.translate("PinGenerator", "Start Y", None, QtGui.QApplication.UnicodeUTF8))
        self.label_6.setText(QtGui.QApplication.translate("PinGenerator", "Direction", None, QtGui.QApplication.UnicodeUTF8))
        self.label_7.setText(QtGui.QApplication.translate("PinGenerator", "Pitch", None, QtGui.QApplication.UnicodeUTF8))
        self.label_4.setText(QtGui.QApplication.translate("PinGenerator", "Pad Size X", None, QtGui.QApplication.UnicodeUTF8))
        self.label_5.setText(QtGui.QApplication.translate("PinGenerator", "Pad Size Y", None, QtGui.QApplication.UnicodeUTF8))
        self.generate.setText(QtGui.QApplication.translate("PinGenerator", "Generate", None, QtGui.QApplication.UnicodeUTF8))
| StarcoderdataPython |
4879019 | <reponame>jimmymalhan/Coding_Interview_Questions_Python_algoexpert<filename>4.hacker_rank/A.30 Days of Code/012_inheritance.py<gh_stars>1-10
# Grading Scale
# O | 90 <= a <= 100
# E | 80 <= a < 90
# A | 70 <= a < 80
# P | 55 <= a < 70
# D | 40 <= a < 55
# T | a < 40
class Person:
def __init__(self, firstName, lastName, idNumber):
self.firstName = firstName
self.lastName = lastName
self.idNumber = idNumber
def printPerson(self):
print("Name:", self.lastName + ",", self.firstName)
print("ID:", self.idNumber)
class Student(Person):
# Class Constructor
def __init__(self, firstName, lastName, idNumber, scores):
Person.__init__(self, firstName, lastName, idNumber)
self.scores = scores
# Write your function here
def calculate(self):
sum = 0
for score in scores:
sum += score
average = sum/len(scores)
if average < 40:
return 'T'
elif average < 55:
return 'D'
elif average < 70:
return 'P'
elif average < 80:
return 'A'
elif average < 90:
return 'E'
else:
return 'O' | StarcoderdataPython |
5128163 | #!/usr/bin/env python
# NOTE: Python 2 / PyGTK script (uses the print statement).
import sys
import pygtk
pygtk.require('2.0')
import gtk
# get the clipboard
clipboard = gtk.clipboard_get()
# read the clipboard text data. you can also read image and
# rich text clipboard data with the
# wait_for_image and wait_for_rich_text methods.
text = clipboard.wait_for_text()
# Print whatever text was on the clipboard (None if no text available).
print text
| StarcoderdataPython |
6579845 | '''
General-purpose numerical routines, relating to angular functions defined on
surfaces of spheres, used in other parts of the module.
'''
# Copyright (c) 2015 <NAME>. All rights reserved.
# Restrictions are listed in the LICENSE file distributed with this package.
import math, numpy as np
from scipy import special as spec, sparse
from itertools import count
from . import cutil, poly, quad
class HarmonicSpline(object):
    '''
    Use cubic basis splines to interpolate a harmonic function defined on
    the surface of a sphere.
    '''
    def __init__(self, thetas, tol = 1e-7):
        '''
        Prepare to interpolate functions defined on a coarse grid, with
        polar samples specified in thetas[0], onto a fine grid whose
        polar samples are specified in thetas[1]. For each of the
        coarse grid and the fine grid, the azimuthal samples occur at
        regular intervals with cardinality
        nphi[i] = 2 * (len(thetas[i]) - 2).
        The first and last samples of each element in the thetas list
        correspond to values at the poles and, therefore, only have one
        associated (but arbitrary) azimuthal angle.
        For efficiency, the coefficients are only computed to within a
        tolerance specified by tol. Set tol < 0 for full precision.
        '''
        # Compute the number of angular samples
        self.nthetas = [len(th) for th in thetas]
        self.nphis = [2 * (nt - 2) for nt in self.nthetas]
        # Compute the number of output samples
        osamp = 2 + (self.nthetas[1] - 2) * self.nphis[1]
        # Split the polar arrays
        thetac, thetaf = thetas
        # Ensure the polar samples are increasing
        if thetac[0] > thetac[-1]:
            self.reverse = True
            thetac = thetac[::-1]
        else: self.reverse = False
        # Compute the azimuthal interval width
        # NOTE(review): dphi is computed but never used below.
        dphi = 2. * math.pi / self.nphis[0]
        # The dimensions of the coefficient grid at the coarse level
        n, m = 2 * (self.nthetas[0] - 1), self.nphis[0] // 2
        # Precompute the pole for the causal and anti-causal filters
        # (sqrt(3) - 2 is the standard cubic b-spline filter pole)
        zp = math.sqrt(3) - 2.
        # Also precompute all of the necessary powers of the pole
        self.zpn = zp**np.arange(n + 1)
        # Restrict the precision, if desired; self.precision bounds the
        # number of terms retained in the recursive filter sums
        if tol > 0:
            self.precision = int(math.log(tol) / math.log(abs(zp)))
        else: self.precision = max(n, m)
        # Initialize the list of weights and indices
        weights, idx, rval = [], [], 0
        # Loop through all polar samples
        for thi, rtheta in enumerate(thetaf):
            # Find the interval containing the finer polar sample
            i = cutil.rlocate(thetac, rtheta)
            # Ensure the interval doesn't wrap around the poles
            i = min(max(i, 0), self.nthetas[0] - 2)
            # Grab the fractional distance into the interval
            dtheta = thetac[i + 1] - thetac[i]
            alpha = (rtheta - thetac[i]) / dtheta
            # Compute the cubic b-spline weights
            w = self.weights(alpha)
            # The azimuthal samples degenerate at the poles
            if thi == 0 or thi == self.nthetas[1] - 1: nphi = 1
            else: nphi = self.nphis[1]
            # Loop through the azimuthal samples
            for jf in range(nphi):
                # Find the fractional coarse interval
                pa = float(jf * self.nphis[0]) / self.nphis[1]
                # Ensure that the interval doesn't wrap
                j = min(max(int(pa), 0), self.nphis[0] - 1)
                # Grab the fractional distance into the interval
                beta = pa - j
                # Compute the cubic b-spline weights
                u = self.weights(beta)
                # Handle wrapping in the second hemisphere
                if j >= m:
                    thwts = zip(reversed(w), count(-i - 2))
                    j -= m
                else: thwts = zip(w, count(i - 1))
                for wv, iv in thwts:
                    for uv, jv in zip(u, count(j - 1)):
                        # Compute the contributing weight
                        weights.append(wv * uv)
                        # Compute its (wrapped) index
                        iw = iv if (0 <= jv < m) else -iv
                        ij = (iw % n) + n * (jv % m)
                        idx.append([rval, ij])
                rval += 1
        # Create a CSR matrix representation of the interpolator; it maps
        # the flattened (n x m) coefficient grid to the osamp output values
        self.matrix = sparse.csr_matrix((weights, list(zip(*idx))), shape=(osamp, n * m))

    def getcoeff(self, f):
        '''
        Given a function f defined on the unit sphere at the coarse
        sampling rate defined in the constructor, compute and return a
        2-D array of coefficients that expand the function in terms of
        the cubic b-spline basis.
        The coefficients have the polar angle along the rows and the
        azimuthal angle along the colums, with the polar angle in the
        interval [0, 2 * pi] and the azimuthal angle in [0, pi].
        '''
        # Note the dimensions of the input grid
        ntheta, nphi = self.nthetas[0], self.nphis[0]
        # Store the poles
        poles = f[0], f[-1]
        # Reshape the remaining samples, theta along the rows
        f = np.reshape(f[1:-1], (ntheta - 2, nphi), order='C')
        # Ensure samples of the polar angle are increasing
        if self.reverse:
            f = f[::-1, :]
            poles = poles[::-1]
        # Grab the pole and its powers
        zpn = self.zpn
        zp = zpn[1]
        # Create the coefficient grid
        n, m, k = 2 * (ntheta - 1), nphi // 2, ntheta - 1
        c = np.zeros((n, m), dtype=f.dtype)
        # Copy the first hemisphere of data
        c[1:k, :] = f[:, :m]
        # Copy the second hemisphere of data with flipped polar angle
        c[k+1:, :] = f[-1::-1, m:]
        # Copy the poles into the appropriate rows
        c[0, :] = poles[0]
        c[k, :] = poles[-1]
        # Compute the filter coefficients
        l = 6. / (1 - zpn[n]), zp / (zpn[n] - 1)
        # Limit the number of terms in the sum
        p = min(n - 1, self.precision)
        # Compute the initial causal polar coefficient
        # c[0] is never in the dot product since p < n
        c[0] = l[0] * (c[0] + (c[-p:].T @ zpn[p:0:-1]).T)
        # Compute the remaining causal polar coefficients
        for i in range(1, c.shape[0]):
            c[i, :] = 6. * c[i, :] + zp * c[i - 1, :]
        # Compute the initial anti-causal polar coefficient
        # c[-1] is never in the dot product since p < n
        c[-1] = l[1] * (c[-1] + (c[:p,].T @ zpn[1:p+1]).T)
        # Compute the remaining anti-causal polar coefficients
        for i in reversed(range(c.shape[0] - 1)):
            c[i, :] = zp * (c[i + 1, :] - c[i,:])
        # Correct the length and coefficients for the azimuthal angle
        n, m, k = nphi, ntheta - 1, nphi // 2
        l = 6. / (1 - zpn[n]), zp / (zpn[n] - 1)
        # Limit the number of terms in the sum
        p = min(n - 1, self.precision)
        pk = min(k, self.precision)
        # The initial causal azimuthal coefficients from the second hemisphere
        c[1:m, 0] = l[0] * (c[1:m, 0] + (c[:-m:-1, -pk:] @ zpn[pk:0:-1]))
        # High precision may require terms from the first hemisphere
        if (p > k): c[1:m, 0] += l[0] * (c[1:m, k-p:] @ zpn[p:k:-1])
        # Compute the remaining coefficients of the first hemisphere
        for i in range(1, c.shape[1]):
            c[1:m, i] = 6. * c[1:m, i] + zp * c[1:m, i - 1]
        # Populate the initial coefficients of the second hemisphere
        c[:-m:-1, 0] = 6. * c[:-m:-1, 0] + zp * c[1:m, -1]
        # Compute the remaining coefficients of the second hemisphere
        for i in range(1, c.shape[1]):
            c[-m+1:, i] = 6. * c[-m+1:, i] + zp * c[-m+1:, i - 1]
        # The initial anti-causal azimuthal coefficients from the first hemisphere
        c[:-m:-1, -1] = l[1] * (c[:-m:-1, -1] + (c[1:m, :pk] @ zpn[1:pk+1]))
        # High precision may require terms from the second hemisphere
        if (p > k): c[:-m:-1, -1] += l[1] * (c[:-m:-1, :p-k] @ zpn[k+1:p+1])
        # Compute the remaining coefficients of the second hemisphere
        for i in reversed(range(c.shape[1] - 1)):
            c[-m+1:, i] = zp * (c[-m+1:, i + 1] - c[-m+1:, i])
        # Populate the initial coefficients of the first hemisphere
        c[1:m, -1] = zp * (c[:-m:-1, 0] - c[1:m, -1])
        # Compute the remaining coefficients of the first hemisphere
        for i in reversed(range(c.shape[1] - 1)):
            c[1:m, i] = zp * (c[1:m, i + 1] - c[1:m, i])
        # The polar azimuthal coefficients are special cases in which
        # the period degenerates to pi, rather than 2 pi.
        n = nphi // 2
        l = 6. / (1. - zpn[n]), zp / (zpn[n] - 1.)
        # Limit the number of terms in the sum
        p = min(n - 1, self.precision)
        # Compute the coefficients for each pole
        for i in [0, m]:
            # Compute the initial causal azimuthal coefficient
            c[i, 0] = l[0] * (c[i, 0] + (c[i, -p:] @ zpn[p:0:-1]))
            # Compute the remaining causal azimuthal coefficients
            for j in range(1, c.shape[1]):
                c[i, j] = 6. * c[i, j] + zp * c[i, j - 1]
            # Compute the initial anti-causal azimuthal coefficient
            c[i, -1] = l[1] * (c[i, -1] + (c[i, :p] @ zpn[1:p+1]))
            # Compute the remaining anti-causal azimuthal coefficients
            for j in reversed(range(c.shape[1] - 1)):
                c[i, j] = zp * (c[i, j + 1] - c[i, j])
        return c

    def weights(self, x):
        '''
        Evaluate the cubic b-spline interpolation weights for a
        fractional coordinate 0 <= x <= 1.
        '''
        tail = lambda y: (2. - abs(y))**3 / 6.
        hump = lambda y: (2. / 3.) - 0.5 * abs(y)**2 * (2 - abs(y))
        return [tail(1 + x), hump(x), hump(1 - x), tail(2 - x)]

    def interpolate(self, f):
        '''
        Given the angular function f, convert it to cubic b-spline
        coefficients and interpolate it on the previously defined grid.
        '''
        # Grab the cubic b-spline coefficients
        c = self.getcoeff(f)
        # Return the output (coefficients flattened in Fortran order to
        # match the column-major index used to build self.matrix)
        return self.matrix * c.ravel('F')
class SphericalInterpolator(object):
    '''
    Build a sparse matrix that can be used to interpolate an angular
    function defined on the surface of a sphere.
    '''
    def __init__(self, thetas, order=4):
        '''
        Build the Lagrange interpolation matrix of a specified order
        for a regularly sampled angular function. Interpolation windows
        wrap around the pole.
        The 2-element list of lists thetas specifies the locations of
        polar samples for the coarse (thetas[0]) and fine (thetas[1])
        grids. The azimuthal samples at each of the levels have
        nphi[i] = 2 * (len(thetas[i]) - 2)
        regularly spaced values. The elements thetas[i][0] and
        thetas[i][-1] correspond to polar values that will only be
        sampled once.
        '''
        if order > len(thetas[0]):
            raise ValueError('Order should not exceed number of coarse samples.')
        # Grab the total number of polar samples
        ntheta = [len(t) for t in thetas]
        # Double the number of samples away form the poles
        nphi = [2 * (n - 2) for n in ntheta]
        # Don't duplicate azimuthal values at the poles
        nsamp = [2 + (nt - 2) * nph for nt, nph in zip(ntheta, nphi)]
        # Initialize the sparse matrix to copy the first polar value
        data = [1]
        ij = [[0, 0]]
        rval = 0
        # Grab the azimuthal step size
        dphi = [2 * math.pi / n for n in nphi]
        # Half the Lagrange interval width
        offset = (order - 1) // 2
        # Adjust wrapping shifts depending on direction of polar samples
        if thetas[0][0] < thetas[0][-1]: wraps = 0, 2 * math.pi
        else: wraps = 2 * math.pi, 0.
        # Adjust indices running off the right of the polar array
        tflip = lambda t: (ntheta[0] - t - 2) % ntheta[0]
        # Loop through all polar samples away from the poles
        for rtheta in thetas[1][1:-1]:
            # Find the starting interpolation interval
            tbase = cutil.rlocate(thetas[0], rtheta) - offset
            # Enumerate all polar indices involved in interpolation
            rows = [tbase + l for l in range(order)]
            # Build the corresponding angular positions; indices that run
            # off either end of the polar array wrap over the pole
            tharr = []
            for i, ti in enumerate(rows):
                if ti < 0:
                    tharr.append(wraps[0] - thetas[0][-ti])
                elif ti >= ntheta[0]:
                    tharr.append(wraps[1] - thetas[0][tflip(ti)])
                else: tharr.append(thetas[0][ti])
            # Build the Lagrange interpolation coefficients
            twts = poly.lagrange(rtheta, tharr)
            # Loop over the higher azimuthal sampling rate
            for j in range(nphi[1]):
                # Increment the row pointer
                rval += 1
                # Take care of each theta sample
                for tw, rv in zip(twts, rows):
                    # Pole samples are not azimuthally interpolated
                    if rv == 0:
                        data.append(tw)
                        ij.append([rval, 0])
                        continue
                    elif rv == ntheta[0] - 1:
                        data.append(tw)
                        ij.append([rval, nsamp[0] - 1])
                        continue
                    # Compute the angular position
                    rphi = j * dphi[1]
                    # Find the starting interpolation interval
                    k = (j * nphi[0]) // nphi[1] - offset
                    # Samples wrapped over a pole pick up a pi shift in phi
                    if rv < 0 or rv >= ntheta[0]:
                        rphi += math.pi
                        k += nphi[0] // 2
                    # Properly wrap the polar values
                    if rv < 0: rv = -rv
                    elif rv >= ntheta[0]: rv = tflip(rv)
                    # Build the wrapped phi indices
                    cols = [(k + m + nphi[0]) % nphi[0]
                            for m in range(order)]
                    # Build the unwrapped phi values
                    pharr = [(k + m) * dphi[0] for m in range(order)]
                    # Build the Lagrange interpolation coefficients
                    pwts = poly.lagrange(rphi, pharr)
                    # Populate the columns of the sparse array
                    for pw, cv in zip(pwts, cols):
                        vpos = 1 + (rv - 1) * nphi[0] + cv
                        data.append(pw * tw)
                        ij.append([rval, vpos])
        # Add the last pole value
        rval += 1
        data.append(1)
        ij.append([rval, nsamp[0] - 1])
        # Create a CSR matrix representation of the interpolator
        self.matrix = sparse.csr_matrix((data, list(zip(*ij))), shape=nsamp[::-1])

    def interpolate(self, f):
        '''
        Interpolate the coarsely sampled angular function f.
        '''
        # Compute the output
        return self.matrix * f
def polararray(ntheta, lobatto=True):
    '''
    Return an array of ntheta polar angular samples.

    With lobatto True (the default), the samples are the Gauss-Lobatto
    quadrature nodes (poles included), returned in decreasing order.
    Otherwise, the samples are regularly spaced over [0, pi] in
    increasing order, poles included:
    theta = math.pi * np.arange(ntheta) / (ntheta - 1.).
    '''
    if lobatto:
        # Gauss-Lobatto nodes, reversed into decreasing order
        return quad.gausslob(ntheta)[0][::-1]
    # Uniform spacing including both poles
    return math.pi * np.arange(ntheta) / (ntheta - 1.)
def harmorder(maxdeg):
    '''
    A generator that enumerates the orders of a harmonic of a specified
    degree in the order expected by Fastsphere:
    0, 1, ..., maxdeg, -maxdeg, ..., -1.
    '''
    # Positive orders (including zero) first
    for order in range(maxdeg + 1):
        yield order
    # Then the negative orders, in increasing order
    for order in range(-maxdeg, 0):
        yield order
    # Simply returning ends the generator. The original code ended with
    # "raise StopIteration", which PEP 479 (Python 3.7+) converts to a
    # RuntimeError, crashing every complete iteration of this generator.
def sh2fld (k, clm, r, t, p, reg = True):
    '''
    Expand spherical harmonic coefficients clm for a wave number k over
    the grid range specified by spherical coordinates (r,t,p). Each
    coordinate should be a single-dimension array. If reg is False, use
    a singular expansion. Otherwise, use a regular one.
    '''
    # Pull out the maximum degree and the required matrix leading dimension
    deg, lda = clm.shape[1], 2 * clm.shape[1] - 1
    # If there are not enough harmonic orders, raise an exception
    if clm.shape[0] < lda:
        raise IndexError('Not enough harmonic coefficients.')
    # Otherwise, compress the coefficient matrix to eliminate excess values
    if clm.shape[0] > lda:
        clm = np.array([[clm[i,j] for j in range(deg)]
            for i in harmorder(deg-1)])
    # Degrees 0 through deg-1 for the radial functions
    degrees = np.arange(deg)
    # Compute the radial term. Note: scipy.special.sph_jn/sph_yn were
    # removed in SciPy 1.0; spherical_jn/spherical_yn are vectorized in
    # the degree argument and return only the function values.
    if reg:
        # Perform a regular expansion (spherical Bessel, first kind)
        jlr = np.array([spec.spherical_jn(degrees, k * rx) for rx in r])
    else:
        # Perform a singular expansion (spherical Hankel, first kind)
        jlr = np.array([spec.spherical_jn(degrees, k * rx)
            + 1j * spec.spherical_yn(degrees, k * rx) for rx in r])
    # Compute the azimuthal term
    epm = np.array([[np.exp(1j * m * px) for px in p] for m in harmorder(deg-1)])
    shxp = lambda c, y: np.array([[c[m,l] * y[abs(m),l]
        for l in range(deg)] for m in harmorder(deg-1)])
    # Compute the polar term and multiply by harmonic coefficients
    ytlm = np.array([shxp(clm,poly.legassoc(deg-1,deg-1,tx)) for tx in t])
    # Return the product on the specified grid
    fld = np.tensordot(jlr, np.tensordot(ytlm, epm, axes=(1,0)), axes=(1,1))
    return fld.squeeze()
def translator (r, s, phi, theta, l):
    '''
    Compute the diagonal translator for a translation distance r, a
    translation direction s, azimuthal samples specified in the array phi,
    polar samples specified in the array theta, and a truncation point l.

    Bug fixes: the module imports numpy as np, so the original numpy.*
    references raised NameError; and scipy.special.sph_jn/sph_yn were
    removed in SciPy 1.0 in favor of spherical_jn/spherical_yn.
    '''
    # The radial argument
    kr = 2. * math.pi * r
    # Degrees 0 through l of the expansion
    m = np.arange(l + 1)
    # Compute the radial component: spherical Hankel of the first kind
    hl = spec.spherical_jn(m, kr) + 1j * spec.spherical_yn(m, kr)
    # Multiply the radial component by scale factors in the translator
    hl *= (1j / 4. / math.pi) * (1j)**m * (2. * m + 1.)
    # Compute Legendre angle argument dot(s,sd) for sample directions sd
    stheta = np.sin(theta)[:,np.newaxis]
    sds = (s[0] * stheta * np.cos(phi)[np.newaxis,:]
            + s[1] * stheta * np.sin(phi)[np.newaxis,:]
            + s[2] * np.cos(theta)[:,np.newaxis])
    # Initialize the translator
    tr = 0
    # Sum the terms of the translator
    for hv, pv in zip(hl, poly.legpoly(sds, l)): tr += hv * pv
    return tr
def exband (a, tol = 1e-6):
    '''
    Estimate the excess bandwidth required for an object of radius a,
    to a tolerance tol.
    '''
    # Number of accurate digits requested
    digits = -math.log10(tol)
    # Electrical size of the object
    ka = 2. * math.pi * a
    # Standard excess-bandwidth estimate: ka plus a cube-root correction
    return int(ka + 1.8 * (digits * digits * ka)**(1. / 3.))
| StarcoderdataPython |
12839452 | # -*- coding: utf-8 -*-
"""
Test base class with commonly used methods and variables
"""
import json
import re
import unittest
import httpretty
class TestGithubBase(unittest.TestCase):
    """Test Github actions and backing library.

    Provides shared constants plus httpretty callback and registration
    helpers used by concrete test cases.
    """

    OAUTH2_TOKEN = '<PASSWORD>'
    ORG = 'NOT_REAL'
    URL = 'http://localhost/'
    TEST_COURSE = 'devops.001'
    TEST_TERM = 'Spring_2999'
    TEST_NEW_TERM = 'Spring_9999'
    TEST_DESCRIPTION = 'foo'
    TEST_PREFIX = 'testo'
    TEST_REPO = '{0}-{1}-{2}'.format(
        TEST_PREFIX, TEST_COURSE.replace('.', ''), TEST_TERM
    )
    TEST_RERUN_REPO = '{0}-{1}-{2}'.format(
        TEST_PREFIX, TEST_COURSE.replace('.', ''), TEST_NEW_TERM
    )
    TEST_TEAM = 'Test-Deploy'
    TEST_TEAM_ID = 1
    TEST_TEAM_MEMBERS = ['archlight', 'bizarnage', 'chemistro', 'dreadnought']
    TEST_STAGING_GR = 'http://gr/'
    TEST_PRODUCTION_GR = 'http://prod-gr/'

    def callback_repo_check(self, request, uri, headers, status_code=404):
        """Handle mocked API request for repo existence check."""
        self.assertEqual(
            request.headers['Authorization'],
            'token {0}'.format(self.OAUTH2_TOKEN)
        )
        # Handle the new "rerun" repo differently
        if self.TEST_RERUN_REPO in uri:
            status_code = 404
        return (status_code, headers, json.dumps({'message': 'testing'}))

    def callback_repo_create(self, request, uri, headers, status_code=201):
        """Mock repo creation API call."""
        # Disabling unused-argument because this is a callback with
        # required method signature.
        # pylint: disable=unused-argument
        self.assertEqual(
            request.headers['Authorization'],
            'token {0}'.format(self.OAUTH2_TOKEN)
        )
        repo_dict = json.loads(request.body)
        self.assertTrue(
            repo_dict['name'] in [self.TEST_REPO, self.TEST_RERUN_REPO]
        )
        self.assertEqual(repo_dict['description'], self.TEST_DESCRIPTION)
        self.assertEqual(repo_dict['private'], True)
        return (status_code, headers, json.dumps({'html_url': 'testing'}))

    def callback_team_list(
            self, request, uri, headers, status_code=200, more=False
    ):
        """Mock team listing API call."""
        # All arguments needed for tests
        # pylint: disable=too-many-arguments
        self.assertEqual(
            request.headers['Authorization'],
            'token {0}'.format(self.OAUTH2_TOKEN)
        )
        page1 = [
            {
                'id': 1,
                'name': self.TEST_TEAM
            },
            {
                'id': 1,
                'name': self.TEST_REPO
            }
        ]
        page2 = [
            {
                'id': 3,
                'name': 'Other Team'
            },
        ]
        current_page = request.querystring.get('page', [u'1'])
        current_page = int(current_page[0])
        if current_page == 2:
            body = page2
        else:
            body = page1
        if more and current_page == 1:
            headers['Link'] = (
                '<{uri}?page=2>; rel="next",'
                '<{uri}?page=2>; rel="last"'
            ).format(uri=uri)
        if status_code == 404:
            return (status_code, headers, json.dumps({'error': 'error'}))
        return (status_code, headers, json.dumps(body))

    def callback_team_members(
            self, request, uri, headers,
            status_code=200, members=None
    ):
        """
        Return team membership list
        """
        # Disabling unused-argument because this is a callback with
        # required method signature.
        # pylint: disable=unused-argument,too-many-arguments
        if members is None:
            members = self.TEST_TEAM_MEMBERS
        self.assertEqual(
            request.headers['Authorization'],
            'token {0}'.format(self.OAUTH2_TOKEN)
        )
        return (status_code, headers, json.dumps(
            [dict(login=x) for x in members]
        ))

    def callback_team_create(
            self, request, uri, headers, status_code=201, read_only=True
    ):
        """
        Create a new team as requested
        """
        # Disabling unused-argument because this is a callback with
        # required method signature.
        # pylint: disable=unused-argument,too-many-arguments
        self.assertEqual(
            request.headers['Authorization'],
            'token {0}'.format(self.OAUTH2_TOKEN)
        )
        json_body = json.loads(request.body)
        for item in ['name', 'permission']:
            self.assertTrue(item in json_body.keys())
        if read_only:
            self.assertEqual(json_body['permission'], 'pull')
        else:
            self.assertEqual(json_body['permission'], 'push')
        return (status_code, headers, json.dumps({'id': 2}))

    @staticmethod
    def callback_team_membership(
            request, uri, headers, success=True, action_list=None
    ):
        """Manage both add and delete of team membership.

        ``action_list`` is a list of tuples with (``username``,
        ``added (bool)``) to track state of membership since this will
        get called multiple times in one library call.
        """
        # pylint: disable=too-many-arguments
        username = uri.rsplit('/', 1)[1]
        # Bug fix: default to failure up front. The original assigned
        # status_code only conditionally, which raised UnboundLocalError
        # for success=True with an unexpected HTTP method, and the PUT
        # branch unconditionally overwrote a failure with 200.
        status_code = 500
        if request.method == 'DELETE':
            if success:
                status_code = 204
            action_list.append((username, False))
        if request.method == 'PUT':
            if success:
                status_code = 200
            action_list.append((username, True))
        return (status_code, headers, '')

    def callback_team_repo(self, request, uri, headers, status_code=204):
        """Mock adding a repo to a team API call."""
        self.assertEqual(
            request.headers['Authorization'],
            'token {0}'.format(self.OAUTH2_TOKEN)
        )
        self.assertIsNotNone(re.match(
            '{url}teams/[13]/repos/{org}/({repo}|{rerun_repo})'.format(
                url=re.escape(self.URL),
                org=self.ORG,
                repo=re.escape(self.TEST_REPO),
                rerun_repo=re.escape(self.TEST_RERUN_REPO)
            ),
            uri
        ))
        if status_code == 422:
            return (status_code, headers, json.dumps({
                "message": "Validation Failed",
            }))
        return (status_code, headers, '')

    def register_repo_check(self, body):
        """Register repo check URL and method."""
        httpretty.register_uri(
            httpretty.GET,
            re.compile(
                '^{url}repos/{org}/({repo}|{repo_rerun})$'.format(
                    url=self.URL,
                    org=self.ORG,
                    repo=re.escape(self.TEST_REPO),
                    repo_rerun=re.escape(self.TEST_RERUN_REPO)
                )
            ),
            body=body
        )

    def register_repo_create(self, body):
        """Register url for repo create."""
        httpretty.register_uri(
            httpretty.POST,
            '{url}orgs/{org}/repos'.format(
                url=self.URL,
                org=self.ORG,
            ),
            body=body
        )

    def register_hook_create(self, body, status):
        """
        Simple hook creation URL registration.
        """
        test_url = '{url}repos/{org}/{repo}/hooks'.format(
            url=self.URL,
            org=self.ORG,
            repo=self.TEST_REPO
        )
        # Register for hook endpoint
        httpretty.register_uri(
            httpretty.POST,
            test_url,
            body=body,
            status=status
        )

    def register_hook_list(self, body=None, status=200):
        """
        Simple hook list URL.
        """
        if body is None:
            body = json.dumps(
                [{
                    'url': '{url}repos/{org}/{repo}/hooks/1'.format(
                        url=self.URL, org=self.ORG, repo=self.TEST_REPO
                    )
                }]
            )
        test_url = '{url}repos/{org}/{repo}/hooks'.format(
            url=self.URL,
            org=self.ORG,
            repo=self.TEST_REPO
        )
        # Register for hook endpoint
        httpretty.register_uri(
            httpretty.GET,
            test_url,
            body=body,
            status=status
        )

    def register_hook_delete(self, status=204):
        """
        Simple hook list URL.
        """
        test_url = '{url}repos/{org}/{repo}/hooks/1'.format(
            url=self.URL,
            org=self.ORG,
            repo=self.TEST_REPO
        )
        # Register for hook endpoint
        httpretty.register_uri(
            httpretty.DELETE,
            test_url,
            body='',
            status=status
        )

    def register_team_list(self, body):
        """
        Team listing API.
        """
        httpretty.register_uri(
            httpretty.GET,
            '{url}orgs/{org}/teams'.format(
                url=self.URL,
                org=self.ORG,
            ),
            body=body
        )

    def register_team_create(self, body):
        """
        Create team URL/method
        """
        httpretty.register_uri(
            httpretty.POST,
            '{url}orgs/{org}/teams'.format(
                url=self.URL,
                org=self.ORG,
            ),
            body=body
        )

    def register_team_members(self, body):
        """
        Team membership list API.
        """
        httpretty.register_uri(
            httpretty.GET,
            re.compile(
                r'^{url}teams/\d+/members$'.format(
                    url=re.escape(self.URL)
                )
            ),
            body=body
        )

    def register_team_membership(self, body):
        """
        Register adding and removing team members.
        """
        url_regex = re.compile(r'^{url}teams/\d+/memberships/\w+$'.format(
            url=re.escape(self.URL),
        ))
        httpretty.register_uri(
            httpretty.PUT, url_regex, body=body
        )
        httpretty.register_uri(
            httpretty.DELETE, url_regex, body=body
        )

    def register_team_repo_add(self, body):
        """
        Register team repo addition.
        """
        httpretty.register_uri(
            httpretty.PUT,
            re.compile(
                r'^{url}teams/\d+/repos/{org}/({repo}|{rerun_repo})$'.format(
                    url=self.URL,
                    org=self.ORG,
                    repo=re.escape(self.TEST_REPO),
                    rerun_repo=re.escape(self.TEST_RERUN_REPO)
                )
            ),
            body=body
        )

    def register_create_file(self, status=201):
        """
        File creation API
        """
        httpretty.register_uri(
            httpretty.PUT,
            re.compile(
                r'^{url}repos/{org}/{repo}/contents/.+$'.format(
                    url=re.escape(self.URL),
                    org=re.escape(self.ORG),
                    repo=re.escape(self.TEST_REPO),
                )
            ),
            status=status
        )
| StarcoderdataPython |
271769 | # by amounra 0613 : http://www.aumhaa.com
import Live
import os, __builtin__, __main__, _ast, _codecs, _functools, _md5, _random, _sha, _sha256, _sha512, _socket, _sre, _ssl, _struct, _symtable, _weakref, binascii, cStringIO, collections, datetime, errno, exceptions, gc, imp, itertools, marshal, math, sys, time #_types
#modules = [__builtin__, __main__, _ast, _codecs, _functools, _md5, _random, _sha, _sha256, _sha512, _socket, _sre, _ssl, _struct, _symtable, _types, _weakref, binascii, cStringIO, collections, datetime, errno, exceptions, fcntl, gc, imp, itertools, marshal, math, operator, posix, pwd, select, signal, sys, thread, time, unicodedata, xxsubtype, zipimport, zlib]
#modules = []
#DIRS_TO_REBUILD = ['Debug', 'AumPC20_b995_9', 'AumPC40_b995_9', 'AumPush_b995', 'AumTroll_b995_9', 'AumTroll_b995_9_G', 'Base_9_LE', 'BlockMod_b995_9', 'Codec_b995_9', 'Codex', 'LaunchMod_b995_9', 'Lemur256_b995_9', 'LemurPad_b995_9', 'Livid_Alias8', 'Livid_Base', 'Livid_Block', 'Livid_CNTRLR', 'Livid_CodeGriid', 'Livid_CodeRemoteScriptLinked', 'Livid_Ohm64', 'Livid_OhmModes', 'MonOhm_b995_9', 'Monomodular_b995_9']
#MODS_TO_REBUILD = ['Debug', 'AumPC20', 'AumPC40', 'AumPush', 'AumTroll', 'AumTroll_G', 'Base', 'BlockMod', 'Codec', 'LaunchMod', 'Lemur256', 'LemurPad', 'Alias8', 'Block', 'CNTRLR', 'CodeGriid', 'Ohm64', 'MonOhm', 'Monomodular']
#from re import *
import re
from _Framework.ControlSurface import *
from _Framework.ControlSurfaceComponent import ControlSurfaceComponent
#mod_path = "/Users/amounra/Documents/Max/Packages/mod/Python Scripts"
#livid_path = "/Users/amounra/monomodular_git/Livid Python Scripts"
"""if not (mod_path) in sys.path:
if os.path.isdir(mod_path):
sys.path.append(mod_path)
if not (livid_path) in sys.path:
if os.path.isdir(livid_path):
sys.path.append(livid_path)"""
DEBUG = True  # global debug switch (not referenced in the visible portion of this file)
def _normalize_filename(filename):
if filename is not None:
if filename.endswith('.pyc') or filename.endswith('.pyo'):
filename = filename[:-1]
elif filename.endswith('$py.class'):
filename = filename[:-9] + '.py'
return filename
def rebuild_sys():
    """Ask the first installed Debug control surface to rebuild sys.

    Always returns an empty list (kept for symmetry with the other
    module-level helpers).
    """
    modnames = []
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            surface.rebuild_sys()
            break
    return modnames
def list_new_modules():
    """Return the rollback importer's newModules of the first Debug surface.

    Returns an empty list when no Debug control surface is loaded.
    """
    modnames = []
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            modnames = surface.rollbackImporter.newModules
            break
    return modnames
def rollback_is_enabled():
    """Return the module names tracked by the Debug surface's rollback importer.

    Fixed: the original indexed the control-surface list like a dict
    (``control_surfaces['Debug']``) and raised NameError on ``modnames``
    when no Debug surface was present.  Rewritten to match the sibling
    helpers above; returns an empty list when no Debug surface is loaded.
    """
    modnames = []
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            modnames = list(surface.rollbackImporter.newModules.keys())
            break
    return modnames
def log_sys_modules():
    """Ask the first loaded Debug control surface to dump sys.modules to its log."""
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            surface._log_sys_modules()
            break
def print_debug(message):
    """Write *message* to the log of the first loaded Debug control surface."""
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            surface.log_message(message)
            break
def no_debug(*a, **k):
    """No-op logger used when no Debug control surface is loaded."""
    return None
def initialize_debug():
    """Return the Debug surface's log_message, or the no-op logger when absent."""
    logger = no_debug
    for surface in get_control_surfaces():
        if isinstance(surface, Debug):
            logger = surface.log_message
    return logger
# ---------------------------------------------------------------------------
# Module-level state for the Reloader import hook below.
# ---------------------------------------------------------------------------
# Resolve the builtins module under both Python 3 and Python 2 so the
# original __import__ can be saved here and swapped by Reloader.enable().
try:
    import builtins
except ImportError:
    import __builtin__ as builtins

_baseimport = builtins.__import__   # original import hook, restored by disable()
_blacklist = None                   # iterable of module names never reloaded
_dependencies = dict()              # module name -> list of imported submodules
_parent = None                      # name of the module currently importing

# Jython doesn't have imp.reload(); fall back to the Python 2 builtin reload().
if not hasattr(imp, 'reload'):
    imp.reload = reload

# PEP 328 changed the default relative-import level to 0 in Python 3.3.
_default_level = -1 if sys.version_info < (3, 3) else 0
class Reloader(object):
    """Import hook that tracks module dependencies and reloads them recursively.

    Works against the module-level ``_baseimport`` / ``_blacklist`` /
    ``_dependencies`` / ``_parent`` state declared above.
    """

    def enable(self, blacklist=None):
        """Enable global module dependency tracking.

        A blacklist can be specified to exclude specific modules (and their import
        hierachies) from the reloading process. The blacklist can be any iterable
        listing the fully-qualified names of modules that should be ignored. Note
        that blacklisted modules will still appear in the dependency graph; they
        will just not be reloaded.
        """
        global _blacklist
        # Default: never reload this Debug script itself.  An explicit
        # blacklist argument replaces this default below.
        _blacklist = ['Debug']
        builtins.__import__ = self._import
        if blacklist is not None:
            _blacklist = frozenset(blacklist)

    def disable(self):
        """Disable global module dependency tracking."""
        global _blacklist, _parent
        builtins.__import__ = _baseimport
        _blacklist = None
        _dependencies.clear()
        _parent = None

    def get_dependencies(self, m):
        """Get the dependency list for the given imported module (or None).

        *m* may be a module object or a plain module-name string.
        """
        try:
            name = m.__name__
        except AttributeError:
            # Not a module object -- assume it's already the module name.
            name = m
        #name = m.__name__ if isinstance(m, _types.ModuleType) else m
        return _dependencies.get(name, None)

    def _deepcopy_module_dict(self, m):
        """Make a deep copy of a module's dictionary."""
        import copy
        # We can't deepcopy() everything in the module's dictionary because some
        # items, such as '__builtins__', aren't deepcopy()-able. To work around
        # that, we start by making a shallow copy of the dictionary, giving us a
        # way to remove keys before performing the deep copy.
        d = vars(m).copy()
        del d['__builtins__']
        return copy.deepcopy(d)

    def _reload(self, m, visited):
        """Internal module reloading routine."""
        name = m.__name__
        #print_debug('reloading: ' + str(m))
        # If this module's name appears in our blacklist, skip its entire
        # dependency hierarchy.
        if _blacklist and name in _blacklist:
            return
        # Start by adding this module to our set of visited modules. We use this
        # set to avoid running into infinite recursion while walking the module
        # dependency graph.
        visited.add(m)
        # Start by reloading all of our dependencies in reverse order. Note that
        # we recursively call ourself to perform the nested reloads.
        deps = _dependencies.get(name, None)
        if deps is not None:
            for dep in reversed(deps):
                if dep not in visited:
                    self._reload(dep, visited)
        # Clear this module's list of dependencies. Some import statements may
        # have been removed. We'll rebuild the dependency list as part of the
        # reload operation below.
        try:
            del _dependencies[name]
        except KeyError:
            pass
        # Because we're triggering a reload and not an import, the module itself
        # won't run through our _import hook below. In order for this module's
        # dependencies (which will pass through the _import hook) to be associated
        # with this module, we need to set our parent pointer beforehand.
        global _parent
        _parent = name
        # If the module has a __reload__(d) function, we'll call it with a copy of
        # the original module's dictionary after it's been reloaded.
        callback = getattr(m, '__reload__', None)
        if callback is not None:
            d = self._deepcopy_module_dict(m)
            imp.reload(m)
            callback(d)
        else:
            imp.reload(m)
        # Reset our parent pointer now that the reloading operation is complete.
        _parent = None

    def reload(self, m):
        """Reload an existing module.

        Any known dependencies of the module will also be reloaded.
        If a module has a __reload__(d) function, it will be called with a copy of
        the original module's dictionary after the module is reloaded.
        """
        self._reload(m, set())

    def _import(self, name, globals=None, locals=None, fromlist=None, level=_default_level):
        """__import__() replacement function that tracks module dependencies."""
        # Track our current parent module. This is used to find our current place
        # in the dependency graph.
        global _parent
        parent = _parent
        _parent = name
        # Perform the actual import work using the base import function.
        base = _baseimport(name, globals, locals, fromlist, level)
        if base is not None and parent is not None:
            m = base
            # We manually walk through the imported hierarchy because the import
            # function only returns the top-level package reference for a nested
            # import statement (e.g. 'package' for `import package.module`) when
            # no fromlist has been specified. It's possible that the package
            # might not have all of its descendents as attributes, in which case
            # we fall back to using the immediate ancestor of the module instead.
            if fromlist is None:
                for component in name.split('.')[1:]:
                    try:
                        m = getattr(m, component)
                    except AttributeError:
                        m = sys.modules[m.__name__ + '.' + component]
            # If this is a nested import for a reloadable (source-based) module,
            # we append ourself to our parent's dependency list.
            if hasattr(m, '__file__'):
                l = _dependencies.setdefault(parent, [])
                l.append(m)
        # Lastly, we always restore our global _parent pointer.
        _parent = parent
        return base
class Debug(ControlSurface):
    """Control-surface script used purely for logging and live-reload debugging."""

    def __init__(self, *a, **k):
        super(Debug, self).__init__(*a, **k)
        #self.mtimes = {}
        #self.changed_files = []
        #self.reloader = Reloader()
        #self.reloader.enable()
        #self._log_version_data()
        #self._log_sys_modules()
        #self._log_paths()
        #self._log_dirs()
        #self.log_filenames()
        self.log_message('_^_^_^_^_^_^_^_^_^_^_^_^_^_^_^_^_ OLD DEBUG ON _^_^_^_^_^_^_^_^_^_^_^_^_^_^_^_^_')
        #self._scripts = []
        #self._scan()

    def log_filenames(self):
        """Log the __file__ of every file-backed module in sys.modules."""
        modules = [m.__file__ for m in sys.modules.values() if m and getattr(m, '__file__', None)]
        for mod in modules:
            self.log_message('module:' + str(mod))

    def _log_paths(self):
        """Register script subdirectories found on sys.path with the rollback importer.

        NOTE(review): ``self.rollbackImporter`` is never assigned in this class
        as shown -- confirm it is set elsewhere before this is called.
        """
        for path in sys.path:
            #if 'MIDI Remote Scripts' in path:
            #    self.log_message('path: ' + str(path) + ' is: ' + str(os.listdir(path)))
            if not self.rollbackImporter is None:
                if 'Python Scripts' in path:
                    #self.log_message('amounra path: ' + str(path) + ' is: ' + str(os.listdir(path)))
                    for subdir in os.listdir(path):
                        self.rollbackImporter._included_rebuild_paths.append(subdir)
                # NOTE(review): 'Livid Python Scripts' also contains the substring
                # 'Python Scripts', so its entries are appended twice -- confirm intended.
                if 'Livid Python Scripts' in path:
                    #self.log_message('Livid path: ' + str(path) + ' is: ' + str(os.listdir(path)))
                    for subdir in os.listdir(path):
                        self.rollbackImporter._included_rebuild_paths.append(subdir)
        self.log_message('_included_rebuild_paths: ' + str(self.rollbackImporter._included_rebuild_paths))

    def _log_dirs(self):
        """Log the interpreter search path."""
        self.log_message(str(sys.path))
        #self.log_message(str(__file__) + ' working dir: ' + str(os.listdir(sys.path[5])))

    def _log_version_data(self):
        """Log builtin module names, interpreter version and sys.path."""
        self.log_message('modules: ' + str(sys.builtin_module_names))
        self.log_message('version: ' + str(sys.version))
        self.log_message('sys.path: ' + str(sys.path))

    def _log_builtins(self):
        """Log every name available in the builtins module.

        Fixed: the original iterated ``dir(module)`` and logged ``item``, both
        undefined names (guaranteed NameError on every call).
        """
        for item in dir(builtins):
            self.log_message('--- %s' % (item,))

    def _log_C_modules(self):
        """Log name and docstring for each module in the module-level `modules` list.

        NOTE(review): `modules` is not defined at module scope in this file (the
        candidate list near the top is commented out), so calling this raises
        NameError until that list is restored.
        """
        for item in modules:
            self.log_message('Module Name: %s' % (item.__name__))
            self.log_message('--- %s' % (item.__doc__))

    def _log_sys_modules(self):
        """Dump sys.modules, sorted, plus each entry's repr, to the log."""
        # iteritems() yields (name, module); the (v, k) naming is misleading but
        # the emitted tuples are still (name, module) pairs.  Python 2 API.
        pairs = ((v, k) for (v, k) in sys.modules.iteritems())
        for module in sorted(pairs):
            self.log_message('---' + str(module))
        for mod in sys.modules.keys():
            self.log_message('---------path' + str(sys.modules[mod]))
        #for item in dir(gc):
        #    self.log_message(str(item))
        #looks_at = gc.get_referrers(self)
        #for item in looks_at:
        #    self.log_message(str(item))

    def _reimport_loaded_modules(self):
        """Swap freshly-imported Livid modules into sys.modules."""
        self.log_message('reimporting loaded modules.')
        for module in sys.modules.keys():
            self.log_message('preexisting: ' + str(module))
            # Fixed: the original compared strings with `is`, which only works
            # by accident of CPython interning.
            if module == 'Livid_Base':
                # NOTE(review): Livid_Base is not imported in this file; this
                # branch raises NameError if ever taken -- confirm against the
                # full script package.
                newBase = Livid_Base
                sys.modules[module] = newBase
                self.log_message('replaced Livid_Base with new version!')
            if module == 'Livid_Alias8':
                import Livid_Alias8
                newAlias = Livid_Alias8
                sys.modules[module] = newAlias

    def _clean_sys(self):
        """Purge dead entries and user-script modules from sys.modules."""
        # Drop entries whose value is None (Python 2 relative-import
        # placeholders).  On Python 2, items()/keys() return lists, so
        # deleting while iterating is safe here.
        for key, value in sys.modules.items():
            if value is None:
                del sys.modules[key]
        for path in sys.path:
            if 'MIDI Remote Scripts' in path:
                name_list = os.listdir(path)
                for name in name_list:
                    # Skip private directories except _Mono_Framework.
                    if name[0] != '_' or '_Mono_Framework' == name[:15]:
                        for key in sys.modules.keys():
                            if name == key[:len(name)]:
                                del sys.modules[key]
                                #self.log_message('deleting key---' + str(key))
        #self._log_sys_modules()

    def _scan(self):
        """Record mtimes of all file-based modules; queue files changed since last scan.

        NOTE(review): relies on self.mtimes / self.changed_files, which are only
        initialized in the commented-out __init__ lines -- re-enable them before use.
        """
        # We're only interested in file-based modules (not C extensions).
        modules = [m.__file__ for m in sys.modules.values()
                   if m and getattr(m, '__file__', None)]
        for filename in modules:
            # We're only interested in the source .py files.
            filename = _normalize_filename(filename)
            # stat() the file. This might fail if the module is part of a
            # bundle (.egg). We simply skip those modules because they're
            # not really reloadable anyway.
            try:
                stat = os.stat(filename)
            except OSError:
                continue
            # Check the modification time. We need to adjust on Windows.
            mtime = stat.st_mtime
            # Check if we've seen this file before. We don't need to do
            # anything for new files.
            if filename in self.mtimes:
                # If this file's mtime has changed, queue it for reload.
                if mtime != self.mtimes[filename]:
                    #self.queue.put(filename)
                    if not filename in self.changed_files:
                        self.changed_files.append(filename)
                    self.log_message('changed time:' + str(filename))
            # Record this filename's current mtime.
            self.mtimes[filename] = mtime
        #self.log_message('changed files:' + str(self.changed_files))
        #self.schedule_message(100, self._scan)

    def rebuild_sys(self):
        """Log every module whose source changed since the last scan, then reset the queue."""
        filenames = self.changed_files
        modules = [m for m in sys.modules.values()
                   if _normalize_filename(getattr(m, '__file__', None)) in filenames]
        for mod in modules:
            self.log_message('reloading:' + str(mod) + ' dependencies are: ' + str(self.reloader.get_dependencies(mod)))
            #self.reloader.reload(mod)
        self.changed_files = []
        #self._reimport_loaded_modules()
        #try:
        #    del sys.modules['OhmModes']
        #    self.log_message('cant del OhmModes')
        #except:
        #    self.log_message('cant del OhmModes')

    """
    def connect_script_instances(self, instanciated_scripts):
        new_scripts = get_control_surfaces()
        removed_scripts = []
        for script in self._scripts:
            self.log_message('script: ' + str(script))
            if not script in new_scripts:
                removed_scripts.append(script)
        self._scripts = new_scripts
    """
    #modulenames = set(sys.modules)&set(globals())
    #allmodules = [sys.modules[name] for name in modulenames]
    #self.log_message('Debug-> module names' + str(allmodules))
    #self.log_message('Debug-> removed scripts' + str(removed_scripts))

    def disconnect(self):
        """Standard ControlSurface teardown."""
        #self.reloader.disable()
        super(Debug, self).disconnect()
| StarcoderdataPython |
8125240 | <reponame>runette/jump-195016
#!/usr/bin/env python
# Copyright 2018 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# [START imports]
import webapp2
from data import *
# [END Imports]
# [START Global variables]
#[END Global variables]
class Kiosk(webapp2.RequestHandler):
    """Renders the kiosk display page for a dropzone."""

    def get(self):
        """Render kiosk.html for the requested dropzone, falling back to the default."""
        # GET PARAMETERS
        dropzone_key = int(self.request.get('dropzone', DEFAULT_DROPZONE_ID))
        # Resolve the dropzone *before* reading its display attributes.
        # Fixed: the original read dropzone.kiosk_rows/kiosk_cols first and
        # raised AttributeError when the key did not resolve (get_by_id -> None).
        dropzone = Dropzone.get_by_id(dropzone_key)
        if dropzone:
            load_struct = LoadStructure(dropzone_key)
        else:
            dropzone = Dropzone.get_by_id(DEFAULT_DROPZONE_ID)
            load_struct = LoadStructure(DEFAULT_DROPZONE_ID)
        # Kiosk layout: use the dropzone's own row/column counts when set,
        # otherwise the application defaults.
        if dropzone.kiosk_rows:
            slice_size = dropzone.kiosk_rows
        else:
            slice_size = DEFAULT_SLICE_SIZE
        if dropzone.kiosk_cols:
            load_len = dropzone.kiosk_cols
        else:
            load_len = DEFAULT_KIOSK_NUMBER_OF_COLUMNS
        loads = load_struct.loads
        slot_mega = load_struct.slot_mega
        # Only show loads that have not yet landed.
        next_loads = [load for load in loads if load.status != LANDED]
        load_len = min(len(next_loads), load_len)
        template_values = {
            'dropzone': dropzone,
            'next_loads': next_loads,
            'slot_mega': slot_mega,
            'slotsize': load_struct.freeslots(),
            'load_len': load_len,
            'slice': slice_size,
            'dropzone_status': DROPZONE_STATUS,
            'load_status': LOAD_STATUS,
            'load_colours': LOAD_COLOURS,
        }
        template = JINJA_ENVIRONMENT.get_template('kiosk.html')
        self.response.write(template.render(template_values))
class UpdateKiosk(webapp2.RequestHandler):
    """Persists kiosk layout (rows/columns) changes for a dropzone."""

    def post(self):
        """Store the posted kiosk dimensions and bounce back to the config page."""
        dropzone_key = int(self.request.get('dropzone'))
        dropzone = Dropzone.get_by_id(dropzone_key)
        # Fall back to the application defaults when the form omits a value.
        dropzone.kiosk_cols = int(self.request.get('cols', str(DEFAULT_KIOSK_NUMBER_OF_COLUMNS)))
        dropzone.kiosk_rows = int(self.request.get('rows', str(DEFAULT_SLICE_SIZE)))
        dropzone.put()
        self.redirect('/configdz?dropzone=' + str(dropzone_key) + '&action=kiosk')
| StarcoderdataPython |
6505026 | <gh_stars>1000+
#!/usr/bin/python
#
# This example shows how to use MITIE's text_categorizer from Python.
#
#
import sys, os
# Make sure you put the mitielib folder into the python search path. There are
# a lot of ways to do this, here we do it programmatically with the following
# two statements:
# The mitielib folder is located relative to this script's own directory.
parent = os.path.dirname(os.path.realpath(__file__))
sys.path.append(parent + '/../../mitielib')
# Star-import provides text_categorizer used below.
from mitie import *
# We will have MITIE predict which of these two sentences express positive sentiments.
# Inputs are pre-tokenized word lists, one sentence each.
test_tokens = ["What","a","black","and","bad","day"]
test_tokens_2 = ["I","am","so","happy"]
# Load a pre-trained text categorizer. This model is generated by
# train_text_categorizer.py so run that example first to get the file.
# NOTE(review): assumes new_text_categorizer.dat exists in the current
# working directory -- run the training example first.
cat = text_categorizer("new_text_categorizer.dat")
# Call the categorizer with a list of tokens, the response is a label (a string)
# and a score (a number) indicating the confidence of the categorizer
label, score = cat(test_tokens)
print(label,score)
label, score = cat(test_tokens_2)
print(label,score)
| StarcoderdataPython |
11274190 | <reponame>saucetray/st2<gh_stars>1-10
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import eventlet
from six.moves import http_client
from st2common.constants import action as action_constants
from st2common.models.db.execution import ActionExecutionDB
from st2common.models.db.execution import ActionExecutionOutputDB
from st2common.persistence.execution import ActionExecution
from st2common.persistence.execution import ActionExecutionOutput
from st2common.util import date as date_utils
from st2common.stream.listener import get_listener
from .base import FunctionalTest
__all__ = [
'ActionExecutionOutputStreamControllerTestCase'
]
class ActionExecutionOutputStreamControllerTestCase(FunctionalTest):
    """Tests for the /v1/executions/<id>/output event-stream API endpoint."""

    def test_get_one_id_last_no_executions_in_the_database(self):
        # The "last" shorthand must fail cleanly when there is nothing to resolve.
        ActionExecution.query().delete()
        resp = self.app.get('/v1/executions/last/output', expect_errors=True)
        self.assertEqual(resp.status_int, http_client.BAD_REQUEST)
        self.assertEqual(resp.json['faultstring'], 'No executions found in the database')

    def test_get_output_running_execution(self):
        # Retrieve lister instance to avoid race with listener connection not being established
        # early enough for tests to pass.
        # NOTE: This only affects tests where listeners are not pre-initialized.
        listener = get_listener(name='execution_output')
        eventlet.sleep(1.0)

        # Test the execution output API endpoint for execution which is running (blocking)
        status = action_constants.LIVEACTION_STATUS_RUNNING
        timestamp = date_utils.get_datetime_utc_now()
        action_execution_db = ActionExecutionDB(start_timestamp=timestamp,
                                                end_timestamp=timestamp,
                                                status=status,
                                                action={'ref': 'core.local'},
                                                runner={'name': 'local-shell-cmd'},
                                                liveaction={'ref': 'foo'})
        action_execution_db = ActionExecution.add_or_update(action_execution_db)

        # Template for the mock output objects inserted below; only 'data' varies.
        output_params = dict(execution_id=str(action_execution_db.id),
                             action_ref='core.local',
                             runner_ref='dummy',
                             timestamp=timestamp,
                             output_type='stdout',
                             data='stdout before start\n')

        # Insert mock output object
        output_db = ActionExecutionOutputDB(**output_params)
        ActionExecutionOutput.add_or_update(output_db, publish=False)

        def insert_mock_data():
            # Published insert; should be streamed to the open connection.
            output_params['data'] = 'stdout mid 1\n'
            output_db = ActionExecutionOutputDB(**output_params)
            ActionExecutionOutput.add_or_update(output_db)

        # Since the API endpoint is blocking (connection is kept open until action finishes), we
        # spawn an eventlet which eventually finishes the action.
        def publish_action_finished(action_execution_db):
            # Insert mock output object
            output_params['data'] = 'stdout pre finish 1\n'
            output_db = ActionExecutionOutputDB(**output_params)
            ActionExecutionOutput.add_or_update(output_db)

            eventlet.sleep(1.0)

            # Transition execution to completed state so the connection closes
            action_execution_db.status = action_constants.LIVEACTION_STATUS_SUCCEEDED
            action_execution_db = ActionExecution.add_or_update(action_execution_db)

        eventlet.spawn_after(0.2, insert_mock_data)
        eventlet.spawn_after(1.5, publish_action_finished, action_execution_db)

        # Retrieve data while execution is running - endpoint return new data once it's available
        # and block until the execution finishes
        resp = self.app.get('/v1/executions/%s/output' % (str(action_execution_db.id)),
                            expect_errors=False)
        self.assertEqual(resp.status_int, 200)

        events = self._parse_response(resp.text)
        self.assertEqual(len(events), 4)
        self.assertEqual(events[0][1]['data'], 'stdout before start\n')
        self.assertEqual(events[1][1]['data'], 'stdout mid 1\n')
        self.assertEqual(events[2][1]['data'], 'stdout pre finish 1\n')
        self.assertEqual(events[3][0], 'EOF')

        # Once the execution is in completed state, existing output should be returned immediately
        resp = self.app.get('/v1/executions/%s/output' % (str(action_execution_db.id)),
                            expect_errors=False)
        self.assertEqual(resp.status_int, 200)

        events = self._parse_response(resp.text)
        self.assertEqual(len(events), 4)
        self.assertEqual(events[0][1]['data'], 'stdout before start\n')
        self.assertEqual(events[1][1]['data'], 'stdout mid 1\n')
        self.assertEqual(events[2][1]['data'], 'stdout pre finish 1\n')
        self.assertEqual(events[3][0], 'EOF')

        listener.shutdown()

    def test_get_output_finished_execution(self):
        # Test the execution output API endpoint for execution which has finished
        for status in action_constants.LIVEACTION_COMPLETED_STATES:
            # NOTE(review): the loop variable is immediately overwritten below,
            # so every iteration actually exercises the SUCCEEDED status.
            # Preserved as-is to keep behavior identical -- confirm intent.
            status = action_constants.LIVEACTION_STATUS_SUCCEEDED

            # Insert mock execution and output objects
            timestamp = date_utils.get_datetime_utc_now()
            action_execution_db = ActionExecutionDB(start_timestamp=timestamp,
                                                    end_timestamp=timestamp,
                                                    status=status,
                                                    action={'ref': 'core.local'},
                                                    runner={'name': 'local-shell-cmd'},
                                                    liveaction={'ref': 'foo'})
            action_execution_db = ActionExecution.add_or_update(action_execution_db)

            for i in range(1, 6):
                stdout_db = ActionExecutionOutputDB(execution_id=str(action_execution_db.id),
                                                    action_ref='core.local',
                                                    runner_ref='dummy',
                                                    timestamp=timestamp,
                                                    output_type='stdout',
                                                    data='stdout %s\n' % (i))
                ActionExecutionOutput.add_or_update(stdout_db)

            for i in range(10, 15):
                stderr_db = ActionExecutionOutputDB(execution_id=str(action_execution_db.id),
                                                    action_ref='core.local',
                                                    runner_ref='dummy',
                                                    timestamp=timestamp,
                                                    output_type='stderr',
                                                    data='stderr %s\n' % (i))
                ActionExecutionOutput.add_or_update(stderr_db)

            resp = self.app.get('/v1/executions/%s/output' % (str(action_execution_db.id)),
                                expect_errors=False)
            self.assertEqual(resp.status_int, 200)

            events = self._parse_response(resp.text)
            self.assertEqual(len(events), 11)
            self.assertEqual(events[0][1]['data'], 'stdout 1\n')
            self.assertEqual(events[9][1]['data'], 'stderr 14\n')
            self.assertEqual(events[10][0], 'EOF')

            # Verify "last" short-hand id works
            resp = self.app.get('/v1/executions/last/output', expect_errors=False)
            self.assertEqual(resp.status_int, 200)

            events = self._parse_response(resp.text)
            self.assertEqual(len(events), 11)
            self.assertEqual(events[10][0], 'EOF')

    def _parse_response(self, response):
        """
        Parse event stream response and return a list of (event_name, event_data) tuples.
        """
        events = []
        lines = response.strip().split('\n')
        for index, line in enumerate(lines):
            if 'data:' in line:
                # The "event: <name>" line immediately precedes each "data:" line.
                e_line = lines[index - 1]
                event_name = e_line[e_line.find('event: ') + len('event:'):].strip()
                # len('data :') happens to equal len('data: '); kept as-is.
                event_data = line[line.find('data: ') + len('data :'):].strip()
                event_data = json.loads(event_data) if len(event_data) > 2 else {}
                events.append((event_name, event_data))
        return events
| StarcoderdataPython |
6647954 | from __future__ import print_function
from builtins import zip
import os
import pytest
from fasttrips import Run
# TEST OPTIONS
# Dispersion thetas to parameterize the test over, and the demand size.
test_thetas = [1.0, 0.5, 0.2]
test_size = 5
# Multiplier linking dispersion rate to the utility conversion factor.
# (Name keeps the original's 'disperson' spelling; referenced below.)
disperson_rate_util_multiplier_factor = 10.0
# DIRECTORY LOCATIONS
EXAMPLE_DIR = os.path.join(os.getcwd(), 'fasttrips', 'Examples', 'Springfield')
INPUT_NETWORK = os.path.join(EXAMPLE_DIR, 'networks', 'vermont')
INPUT_DEMAND = os.path.join(EXAMPLE_DIR, 'demand', 'general')
INPUT_CONFIG = os.path.join(EXAMPLE_DIR, 'configs', 'A')
OUTPUT_DIR = os.path.join(EXAMPLE_DIR, 'output')
@pytest.fixture(scope='module', params=test_thetas)
def dispersion_rate(request):
    """Parameterized fixture yielding each theta from test_thetas."""
    return request.param
@pytest.fixture(scope='module')
def passengers_arrived(dispersion_rate):
    """Expected arrival count for the given dispersion rate.

    Every theta maps to the same expected count (test_size); dict.fromkeys
    replaces the original redundant dict(list(zip(...))) construction
    without changing the mapping.
    """
    arrived = dict.fromkeys(test_thetas, test_size)
    return arrived[dispersion_rate]
@pytest.fixture(scope='module')
def utils_conversion_factor(dispersion_rate):
    """Utility conversion factor scaled from the dispersion rate."""
    factor = dispersion_rate * disperson_rate_util_multiplier_factor
    return factor
@pytest.mark.travis
def test_dispersion(dispersion_rate, utils_conversion_factor, passengers_arrived):
    """Run Fast-Trips with the given dispersion theta and check arrivals."""
    r = Run.run_fasttrips(
        input_network_dir=INPUT_NETWORK,
        input_demand_dir=INPUT_DEMAND,
        run_config=os.path.join(INPUT_CONFIG, "config_ft.txt"),
        input_weights=os.path.join(INPUT_CONFIG, "pathweight_ft.txt"),
        output_dir=OUTPUT_DIR,
        output_folder="test_dispers_%4.2f" % dispersion_rate,
        max_stop_process_count=2,
        utils_conversion_factor=utils_conversion_factor,
        pf_iters=2,
        overlap_variable="None",
        pathfinding_type="stochastic",
        iters=1,
        dispersion=dispersion_rate,
        num_trips=test_size)
    # Every queued trip is expected to arrive regardless of theta.
    assert passengers_arrived == r["passengers_arrived"]
if __name__ == "__main__":
    # Manual runner: exercise every theta without pytest's fixture machinery.
    for dr in test_thetas:
        util_factor = dr * disperson_rate_util_multiplier_factor
        print("Running test_dispersion.py with: disperson: %f1.2, util_factor: %f2.2, test_size: %d" % (dr, util_factor, test_size))
        test_dispersion(dr, util_factor, test_size)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.