hexsha stringlengths 40 40 | size int64 5 2.06M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 248 | max_stars_repo_name stringlengths 5 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 248 | max_issues_repo_name stringlengths 5 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringdate 2015-01-01 00:00:47 2022-03-31 23:42:18 ⌀ | max_issues_repo_issues_event_max_datetime stringdate 2015-01-01 17:43:30 2022-03-31 23:59:58 ⌀ | max_forks_repo_path stringlengths 3 248 | max_forks_repo_name stringlengths 5 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 5 2.06M | avg_line_length float64 1 1.02M | max_line_length int64 3 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
118ff67c852ea38f217b5c566e77f4efa9b7fe30 | 9,368 | py | Python | FirmsLocations/Retrieve/density_assignation.py | tgquintela/Firms_locations | 476680cbc3eb1308811633d24810049e215101a0 | [
"MIT"
] | null | null | null | FirmsLocations/Retrieve/density_assignation.py | tgquintela/Firms_locations | 476680cbc3eb1308811633d24810049e215101a0 | [
"MIT"
] | null | null | null | FirmsLocations/Retrieve/density_assignation.py | tgquintela/Firms_locations | 476680cbc3eb1308811633d24810049e215101a0 | [
"MIT"
] | null | null | null |
"""
Assign geographically density value to a points.
"""
from scipy.spatial import KDTree
from scipy.spatial.distance import cdist
from scipy.stats import norm
from scipy.optimize import minimize
import numpy as np
def general_density_assignation(locs, parameters, values=None, locs2=None):
    """Assign a geographic density value to each point of ``locs``.

    Parameters
    ----------
    locs : np.ndarray, shape (n, d)
        Points for which the density measure is computed.
    parameters : dict
        Nested parameter dict; completed in place by
        ``preparation_parameters`` (a kernel scale is added when missing).
    values : np.ndarray or None
        Per-point values used by the weighted-average measure.
    locs2 : np.ndarray or None
        Optional alternative point cloud the KDTree is built from.

    Returns
    -------
    np.ndarray
        Density measure, one entry per point of ``locs``.
    """
    # Creation of the kdtree for retrieving neighbours.
    # Bug fix: the previous ``int(n / 10.)`` yields 0 for clouds with
    # fewer than 10 points, and KDTree requires leafsize >= 1.
    ref_locs = locs if locs2 is None else locs2
    leafsize = max(1, int(ref_locs.shape[0] // 10))
    kdtree = KDTree(ref_locs, leafsize=leafsize)

    parameters = preparation_parameters(parameters)
    M = compute_measure(locs=locs, kdtree=kdtree, values=values, **parameters)
    ## Recurrent measure (TODO)[better before with the population?]
    return M
# method, params (weitghted count, ...)
# method, params (linear, trapezoid,...)
###############################################################################
############################### Compute measure ###############################
###############################################################################
def compute_measure(locs, kdtree, max_r, values, method, params):
    """Compute the per-point density measure.

    For each point, retrieve its neighbours within ``max_r`` and combine
    their distances (and optionally ``values``) into a scalar measure.
    """
    M = np.zeros(locs.shape[0])
    # Bug fix: the original iterated ``range(locs)``, which raises a
    # TypeError on an ndarray; iterate over the point count instead.
    for i in range(locs.shape[0]):
        neighs, dist = get_self_neighs_i(locs, kdtree, max_r, i)
        # ``values`` is optional (weighted_count does not use it).
        vals = values[neighs] if values is not None else None
        M[i] = compute_measure_i(neighs, dist, vals, method, params)
    return M
def get_self_neighs_i(locs, kdtree, max_r, i):
    """Return the indices of the neighbours of point ``i`` within radius
    ``max_r`` (the point itself excluded) and the distances to them.
    """
    loc = locs[i, :]
    neighs = kdtree.query_ball_point(loc, max_r)
    neighs.remove(i)  # a point is always within max_r of itself
    # Bug fix: cdist requires 2-D inputs; ``locs[i, :]`` is 1-D and made
    # this raise a ValueError. Use a one-row slice and flatten the result.
    dist = cdist(locs[i:i + 1, :], locs[neighs, :])[0]
    return neighs, dist
def compute_measure_i(neighs, dist, values, method, params):
    """Switcher between the available density measures for one point.

    Raises
    ------
    ValueError
        If ``method`` is not a known measure (previously this fell
        through and raised a confusing NameError on ``measure``).
    """
    if method == 'weighted_count':
        measure = compute_measure_wcount(neighs, dist, params)
    elif method == 'weighted_avg':
        # Bug fix: the weighted average also needs the neighbour values;
        # the original call omitted ``values`` and raised a TypeError.
        measure = compute_measure_wavg(neighs, dist, values, params)
    else:
        raise ValueError("Unknown density method: %s" % method)
    return measure
def compute_measure_wcount(neighs, dist, params):
    """Density as the weighted count of the selected elements around the
    point considered (sum of the distance-derived weights).
    """
    return np.sum(from_distance_to_weights(dist, **params))
def compute_measure_wavg(neighs, dist, values, params):
    """Density as the weighted sum of the values of the selected elements
    around the point considered.

    NOTE(review): despite the name, the result is not normalised by the
    total weight — confirm whether a true average was intended.
    """
    kernel_weights = from_distance_to_weights(dist, **params)
    return np.sum(kernel_weights * values)
###############################################################################
############################# Distance to weights #############################
###############################################################################
def from_distance_to_weights(dist, method, params):
    """Transform distances into weights with the selected kernel.

    Parameters
    ----------
    dist : float or np.ndarray
        Distance(s) to transform.
    method : str
        Kernel name; note the historical capitalised ``'Trapezoid'`` key.
    params : dict
        Keyword arguments forwarded to the kernel function.

    Raises
    ------
    ValueError
        For an unknown ``method`` (previously this fell through and
        raised a NameError on ``weights``).
    """
    kernels = {
        'linear': dist2weights_linear,
        'Trapezoid': dist2weights_trapez,
        'inverse_prop': dist2weights_invers,
        'exponential': dist2weights_exp,
        'gaussian': dist2weights_gauss,
        'surgaussian': dist2weights_surgauss,
        'sigmoid': dist2weights_sigmoid,
    }
    try:
        kernel = kernels[method]
    except KeyError:
        raise ValueError("Unknown weighting method: %s" % method)
    return kernel(dist, **params)
def dist2weights_linear(dist, max_r, max_w=1, min_w=0):
    """Linear distance weighting: ``max_w`` at distance 0 decaying to
    ``min_w`` at ``max_r``.

    Bug fix: the original expression ``(max_w - dist) * ((max_w - min_w)
    / max_r) + min_w`` mixed ``max_w`` into the distance term, so the
    weight at distance 0 was not ``max_w``.
    """
    weights = max_w - dist * ((max_w - min_w) / float(max_r))
    return weights
def dist2weights_trapez(dist, max_r, r2, max_w=1, min_w=0):
    """Trapezoidal distance weighting: flat at ``max_w`` up to ``r2``,
    then a linear decay between ``r2`` and ``max_r`` (delegated to
    ``dist2weights_linear``).
    """
    shifted = dist - r2
    span = max_r - r2
    if type(dist) == np.ndarray:
        weights = dist2weights_linear(shifted, span, max_w, min_w)
        # Inside the plateau the kernel is flat at max_w.
        weights[dist <= r2] = max_w
        return weights
    if dist <= r2:
        return max_w
    return dist2weights_linear(shifted, span, max_w, min_w)
def dist2weights_invers(dist, max_r, max_w=1, min_w=1e-8, rescale=True):
    """Inverse-proportional distance weighting.

    ``tau`` controls how fast ``1 / (1 + tau * d)`` decays; it is chosen
    so the raw kernel reaches ``min_w`` at ``max_r`` (``tau = 1`` when
    ``min_w == 0``). With ``rescale`` the kernel is shifted and scaled so
    it spans exactly [0, max_w] on [max_r, 0].
    """
    tau = 1. if min_w == 0 else (max_w / min_w - 1) / max_r
    if not rescale:
        return max_w / float(1. + tau * dist)
    floor_f = 1. / float(1. + tau * max_r)
    return max_w / (1. - floor_f) * (1. / float(1. + tau * dist) - floor_f)
def dist2weights_exp(dist, max_r, max_w=1, min_w=1e-8, rescale=True):
    """Exponential distance weighting.

    The decay rate ``C`` is chosen so that the raw kernel drops to
    ``min_w / max_w`` at ``max_r`` (``C = 1`` when ``min_w == 0``).
    """
    C = 1. if min_w == 0 else -np.log(min_w / max_w)
    raw = max_w * np.exp(-C * dist / max_r)
    if rescale:
        return raw / (1. - np.exp(-C))
    return raw
def dist2weights_gauss(dist, max_r, max_w=1, min_w=1e-3, S=None, rescale=True):
    """Gaussian distance weighting with scale ``S``.

    Bug fixes:
    * the scale was previously solved with ``set_scale_surgauss`` (the
      survival-function setter) instead of ``set_scale_gauss``;
    * ``norm.pdf(0)`` was evaluated at the default scale 1 while the
      other terms used ``scale=S``, so the weight at distance 0 was
      ``max_w / S`` rather than ``max_w``. Now the kernel matches the
      ``dist2weights_surgauss`` normalisation pattern: ``max_w`` at 0
      and (when rescaled) 0 at ``max_r``.
    """
    if S is None:
        S = set_scale_gauss(max_r, max_w, min_w)
    if rescale:
        A = max_w / (norm.pdf(0, scale=S) - norm.pdf(max_r, scale=S))
        weights = A * (norm.pdf(dist, scale=S) - norm.pdf(max_r, scale=S))
    else:
        A = max_w / norm.pdf(0, scale=S)
        weights = A * norm.pdf(dist, scale=S)
    return weights
def dist2weights_surgauss(dist, max_r, max_w=1, min_w=1e-3, S=None,
                          rescale=True):
    """Survival-function (Gaussian) distance weighting with scale ``S``.

    When ``rescale`` is set the kernel is shifted so it is exactly
    ``max_w`` at distance 0 and 0 at ``max_r``.
    """
    if S is None:
        S = set_scale_surgauss(max_r, max_w, min_w)
    if rescale:
        sf_floor = norm.sf(max_r, scale=S)
        norm_const = max_w / (norm.sf(0, scale=S) - sf_floor)
        return norm_const * (norm.sf(dist, scale=S) - sf_floor)
    return (max_w / norm.sf(0)) * norm.sf(dist, scale=S)
def dist2weights_sigmoid(dist, max_r, max_w=1, min_w=1e-3, r_char=0, B=None,
                         rescale=True):
    """Sigmoid-like distance weighting with steepness ``B`` and
    characteristic-radius offset ``C = r_char * max_r``.
    """
    C = r_char * max_r
    if B is None:
        B = set_scale_sigmoid(max_r, max_w, min_w, r_char)

    def sigmoid(x):
        return 1. / (1. + B * np.exp(x + C))

    if not rescale:
        return sigmoid(dist)
    # Shift and scale so the kernel spans [0, max_w] on [max_r, 0].
    floor_f = sigmoid(max_r)
    return max_w / (sigmoid(0) - floor_f) * (sigmoid(dist) - floor_f)
###############################################################################
############################# Set scale functions #############################
###############################################################################
def set_scales_kernel(method, max_r, max_w, min_w, r_char=None):
    """Switcher for the kernel scale-setting functions.

    Raises
    ------
    ValueError
        For an unknown ``method`` (previously this fell through and
        raised a NameError on ``scale``).
    """
    if method == 'surgaussian':
        scale = set_scale_surgauss(max_r, max_w, min_w)
    elif method == 'gaussian':
        scale = set_scale_gauss(max_r, max_w, min_w)
    elif method == 'sigmoid':
        scale = set_scale_sigmoid(max_r, max_w, min_w, r_char)
    else:
        raise ValueError("Unknown kernel method: %s" % method)
    return scale
def set_scale_surgauss(max_r, max_w, min_w):
    """Solve for the Gaussian scale so the (unrescaled) survival kernel
    equals ``min_w`` at ``max_r``.

    Bug fix: ``method='BFGS'`` ignores bounds and ``bounds=(0, None)``
    is not a valid bounds sequence; use L-BFGS-B with a strictly
    positive lower bound (``scale`` must be > 0 for ``norm.sf``).
    """
    A = max_w / norm.sf(0)
    res = minimize(lambda x: float((A * norm.sf(max_r, scale=x[0]) - min_w) ** 2),
                   x0=np.array([max_r]), method='L-BFGS-B',
                   tol=1e-8, bounds=[(1e-8, None)])
    return res['x'][0]
def set_scale_gauss(max_r, max_w, min_w):
"Set the scale factor of the gauss kernel."
A = max_w/norm.pdf(0)
scale = minimize(lambda x: (A*norm.pdf(max_r, scale=x)-min_w)**2,
x0=np.array([max_r]), method='BFGS',
tol=1e-8, bounds=(0, None))
scale = scale['x'][0]
return scale
def set_scale_sigmoid(max_r, max_w, min_w, r_char):
    """Solve for the sigmoid steepness ``B`` so the (unrescaled) kernel
    equals ``min_w`` at ``max_r``.

    Bug fixes: the original passed ``(sigmoid_c) ** 2`` to ``minimize``,
    which squares the function *object* and raises a TypeError; it used
    'BFGS', which ignores bounds (and ``bounds=(0, None)`` is malformed);
    and it returned the whole OptimizeResult instead of the scalar ``B``
    that callers (e.g. ``dist2weights_sigmoid``) use numerically.
    """
    C = r_char * max_r

    def residual(B):
        return float((1. / (1. + B[0] * np.exp(max_r + C)) - min_w) ** 2)

    res = minimize(residual, x0=np.array([1.]), method='L-BFGS-B',
                   tol=1e-8, bounds=[(0., None)])
    return res['x'][0]
###############################################################################
############################# Preparation inputs #############################
###############################################################################
def preparation_parameters(parameters):
    """Put the selected parameters into coherence: if the chosen kernel
    needs a scale ('S' for the Gaussian kernels, 'B' for the sigmoid) and
    none was supplied, solve for it and store it back into
    ``parameters['params']['params']``. The dict is mutated in place and
    also returned.
    """
    method = parameters['params']['method']
    params = parameters['params']['params']

    scale_key = None
    scale = None
    if method == 'gaussian' and 'S' not in params:
        scale_key = 'S'
        scale = set_scale_gauss(params['max_r'], params['max_w'],
                                params['min_w'])
    elif method == 'surgaussian' and 'S' not in params:
        scale_key = 'S'
        scale = set_scale_surgauss(params['max_r'], params['max_w'],
                                   params['min_w'])
    elif method == 'sigmoid' and 'B' not in params:
        scale_key = 'B'
        scale = set_scale_sigmoid(params['max_r'], params['max_w'],
                                  params['min_w'], params['r_char'])

    if scale_key is not None:
        parameters['params']['params'][scale_key] = scale
    return parameters
| 34.315018 | 79 | 0.570773 |
1190f1b038385208afbc477445817bdebba87dc5 | 561 | py | Python | nexus/pylon/sources/specific/biorxiv.py | leoll2/hyperboria | 30a0ae466b290208f690560160ef1f5c16e4a744 | [
"Unlicense"
] | null | null | null | nexus/pylon/sources/specific/biorxiv.py | leoll2/hyperboria | 30a0ae466b290208f690560160ef1f5c16e4a744 | [
"Unlicense"
] | null | null | null | nexus/pylon/sources/specific/biorxiv.py | leoll2/hyperboria | 30a0ae466b290208f690560160ef1f5c16e4a744 | [
"Unlicense"
] | null | null | null | from typing import AsyncIterable
from nexus.pylon.sources.base import (
DoiSource,
PreparedRequest,
)
class BiorxivSource(DoiSource):
    """DOI resolver for bioRxiv preprints.

    Resolves a DOI through dx.doi.org and yields a request for the
    ``.full.pdf`` variant of the final (post-redirect) URL.
    """
    base_url = 'https://dx.doi.org'

    async def resolve(self) -> AsyncIterable[PreparedRequest]:
        # self.doi, get_resolve_session() and resolve_timeout come from
        # the DoiSource base class (not visible in this file).
        async with self.get_resolve_session() as session:
            url = f'{self.base_url}/{self.doi}'
            async with session.get(
                url,
                timeout=self.resolve_timeout
            ) as resp:
                # resp.url reflects the redirect target; bioRxiv serves the
                # PDF at that landing URL with '.full.pdf' appended.
                yield PreparedRequest(method='get', url=str(resp.url) + '.full.pdf')
| 28.05 | 84 | 0.616756 |
1193d717cb8b9aa0587bf651757ef62435cc6b62 | 3,645 | py | Python | addLatLon.py | amnh-sciviz/amnh-time-machine | c75c75c6bd3ee91d81cb4b0181a292de27eab9c8 | [
"MIT"
] | null | null | null | addLatLon.py | amnh-sciviz/amnh-time-machine | c75c75c6bd3ee91d81cb4b0181a292de27eab9c8 | [
"MIT"
] | null | null | null | addLatLon.py | amnh-sciviz/amnh-time-machine | c75c75c6bd3ee91d81cb4b0181a292de27eab9c8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
from difflib import SequenceMatcher
import os
from pprint import pprint
import sys
import lib.eac_utils as eac
import lib.io_utils as io
# Command-line interface: input/output CSV paths and the entry keys that
# are scanned for place names (first key has the highest priority).
parser = argparse.ArgumentParser()
parser.add_argument('-in', dest="INPUT_FILE", default="data/eac_dates.csv", help="File with EAC data (from collectDates.py)")
parser.add_argument('-countries', dest="COUNTRIES_FILE", default="data/countries.csv", help="File with countries data")
parser.add_argument('-states', dest="STATES_FILE", default="data/states.csv", help="File with states data")
parser.add_argument('-keys', dest="KEYS", default="name,dateplace,dateevent", help="List of keys to check in order of priority (first is highest priority)")
parser.add_argument('-out', dest="OUTPUT_FILE", default="data/eac_expeditions.csv", help="File for output")
a = parser.parse_args()

# A common substring shorter than this is ignored when matching place names.
MIN_MATCH_LEN = 4
placeKeys = a.KEYS.strip().split(",")
# Columns appended to the output CSV for matched entries.
keysToAdd = ["lon", "lat", "match"]

# Make sure output dirs exist
io.makeDirectories(a.OUTPUT_FILE)
# Reference gazetteers: io.readCsv returns (fieldnames, rows); only rows kept.
_, countries = io.readCsv(a.COUNTRIES_FILE)
_, states = io.readCsv(a.STATES_FILE)
# https://stackoverflow.com/questions/18715688/find-common-substring-between-two-strings
def findLongestCommonSubstring(string1, string2):
    """Return the longest contiguous substring shared by the two strings
    (the empty string when they share nothing).
    """
    m = SequenceMatcher(None, string1, string2).find_longest_match(
        0, len(string1), 0, len(string2))
    if not m:
        return None
    return string1[m.a:m.a + m.size]
def listIntersection(a, b):
    """Return the elements common to both iterables (order not guaranteed)."""
    return list(set(a) & set(b))
def isValidMatch(candidate, match):
    """Decide whether ``match`` shares at least one meaningful word with
    ``candidate``: a common word must not be a generic stop word and must
    be longer than three characters.
    """
    if match is None:
        return False
    stop_words = ["and", "the", "to", "of", "united", "american", "island",
                  "islands", "north", "south", "southern", "northern",
                  "east", "west", "eastern", "western", "central",
                  "columbia", "african"]
    cand_words = [w.strip('[]()') for w in candidate.lower().split()]
    match_words = [w.strip('[]()') for w in match.lower().split()]
    common = set(cand_words) & set(match_words)
    meaningful = [w for w in common if w not in stop_words and len(w) > 3]
    return len(meaningful) > 0
def findPlace(value, pool):
    """Find the entry of ``pool`` whose ``name`` best overlaps ``value``.

    Returns a ``(longitude, latitude, name)`` tuple of the best match, or
    ``(None, None, None)`` when no candidate shares a long-enough
    substring with ``value``. Prints each successful match.
    """
    query = value.lower()
    candidates = []
    for idx, entry in enumerate(pool):
        overlap = findLongestCommonSubstring(query, entry["name"].lower())
        if isValidMatch(query, entry["name"]):
            candidates.append((idx, overlap))
    # Discard matches whose common substring is too short to be meaningful.
    candidates = [c for c in candidates if len(c[1]) >= MIN_MATCH_LEN]
    if not candidates:
        return (None, None, None)
    # Longest overlap wins; ties keep the earliest pool entry.
    best_idx, _ = max(candidates, key=lambda c: len(c[1]))
    place = pool[best_idx]
    print("%s = %s" % (query, place["name"]))
    return (place["longitude"], place["latitude"], place["name"])
# retrieve expeditions
expeditions = []
fieldNames, eacData = io.readCsv(a.INPUT_FILE)
# Extend the output header with the new geolocation columns if missing.
for key in keysToAdd:
    if key not in fieldNames:
        fieldNames.append(key)
# eacData = [e for e in eacData if e["type"]=="Expedition"]
entryCount = len(eacData)
for i, entry in enumerate(eacData):
    if entry["type"]=="Expedition":
        # Try each place-bearing key in priority order; stop at the first
        # key that yields a gazetteer match.
        for key in placeKeys:
            lon, lat, match = findPlace(entry[key], countries+states)
            if match:
                eacData[i].update({
                    "lon": lon,
                    "lat": lat,
                    "match": match
                })
                break
    # Progress indicator (percentage, overwritten in place).
    sys.stdout.write('\r')
    sys.stdout.write("%s%%" % round(1.0*(i+1)/entryCount*100,2))
    sys.stdout.flush()
io.writeCsv(a.OUTPUT_FILE, eacData, fieldNames)
119493a292813a2fe3956c473600e488537134ab | 502 | py | Python | Turtle/MovingPen.py | Williano/Tutorials | c5fb841ba94783ac3b239dbd2be9d04dd6e8d962 | [
"MIT"
] | null | null | null | Turtle/MovingPen.py | Williano/Tutorials | c5fb841ba94783ac3b239dbd2be9d04dd6e8d962 | [
"MIT"
] | null | null | null | Turtle/MovingPen.py | Williano/Tutorials | c5fb841ba94783ac3b239dbd2be9d04dd6e8d962 | [
"MIT"
] | null | null | null | # Script: MovingPen.py
# Description: This program uses python's turtle graphics module to draw shapes,lines,
# circles and text.
# Programmer: William Kpabitey Kwabla.
# Date: 27.05.17
# Importing the turtle module.
import turtle
# It moves the pen to (0, 50) from (0, 0).
turtle.goto(0, 50)
# It moves the pen to (50, -50)
turtle.penup()
turtle.goto(50, -50)
turtle.pendown()
# It changes the color to red and draws a circle with radius 50.
turtle.color("red")
turtle.circle(50)
| 20.08 | 86 | 0.695219 |
11958f77466ed28b0ddf34aab10041bc97b2f55d | 912 | py | Python | Solutions/problem07.py | WalrusCow/euler | b5bfa67c87c7043f521cde32e7212c0fffdbacd9 | [
"MIT"
] | null | null | null | Solutions/problem07.py | WalrusCow/euler | b5bfa67c87c7043f521cde32e7212c0fffdbacd9 | [
"MIT"
] | null | null | null | Solutions/problem07.py | WalrusCow/euler | b5bfa67c87c7043f521cde32e7212c0fffdbacd9 | [
"MIT"
] | null | null | null | # Project Euler Problem 7
# Created on: 2012-06-13
# Created by: William McDonald
import math
import time
# Short list of prime numbers under 20.
# Bug fix: 9 (= 3 * 3) is not prime; its presence shifted every index in
# getPrime's 1-based lookup, so non-primes could be returned as primes.
primeList = [2, 3, 5, 7, 11, 13, 17, 19]
# Returns True if n is prime, otherwise False
def isPrime(n):
    """Trial division against the cached primes in ``primeList``.

    Assumes ``primeList`` already contains every prime up to sqrt(n),
    which ``getPrime`` guarantees by growing the list sequentially.
    """
    limit = math.floor(math.sqrt(n))
    for i in primeList:
        # No prime factor <= sqrt(n) was found: n is prime.
        if i > limit:
            return True
        if n % i == 0:
            # Bug fix: the original returned False when n itself was a
            # listed prime (e.g. isPrime(2)); a prime divides itself.
            return n == i
    return True
# Returns the nth prime number
def getPrime(n):
    """Return the nth prime (1-based), growing the cached primeList as needed.

    Bug fix: the original loop ran ``while len(primeList) <= n`` and then
    returned the last element, i.e. the (n+1)-th prime instead of the
    n-th; it also bypassed the cache when ``n == len(primeList)``.
    """
    if n <= len(primeList):
        return primeList[n - 1]
    # Continue the search from the next odd candidate after the last
    # cached prime (all cached primes after 2 are odd).
    p = primeList[-1] + 2
    while len(primeList) < n:
        if isPrime(p):
            primeList.append(p)
        p += 2
    return primeList[n - 1]
# Time the computation of the 10001st prime (Project Euler problem 7).
start = time.time()
ans = getPrime(10001)
cost = time.time() - start
print(ans)
print("Time: {}".format(cost))
119622f084f9dfff43411b649a9c89be1e105982 | 1,820 | py | Python | cogs/ban.py | QuentiumYT/QuentiumBot | 1673d24d93f13f464b1175424529c4d58abb5c00 | [
"MIT"
] | 9 | 2019-11-14T10:12:00.000Z | 2021-12-17T13:05:40.000Z | cogs/ban.py | QuentiumYT/QuentiumBot | 1673d24d93f13f464b1175424529c4d58abb5c00 | [
"MIT"
] | null | null | null | cogs/ban.py | QuentiumYT/QuentiumBot | 1673d24d93f13f464b1175424529c4d58abb5c00 | [
"MIT"
] | 4 | 2020-08-20T21:24:52.000Z | 2021-12-17T13:05:17.000Z | import discord
from discord.ext import commands
from QuentiumBot import HandleData, get_translations
# Basic command configs: command name plus its localized aliases.
cmd_name = "ban"
tran = get_translations()
# The French translation stores aliases as a '/'-separated string; an empty
# value means the command has no aliases.
aliases = [] if not tran[cmd_name]["fr"]["aliases"] else tran[cmd_name]["fr"]["aliases"].split("/")
class BanAdminRights(commands.Cog):
    """Ban command in Administration Rights section.

    Bans a mentioned guild member after checking that both the invoking
    user and the bot hold the ban_members permission. All user-facing
    messages come from the translation table for the guild's language.
    """
    def __init__(self, client):
        # The bot/client instance this cog is attached to.
        self.client = client

    @commands.command(
        name=cmd_name,
        aliases=aliases,
        pass_context=True
    )
    @commands.guild_only()
    async def ban_cmd(self, ctx, *, member: discord.Member = None):
        # Get specific server data (language); DMs fall back to English.
        if isinstance(ctx.channel, discord.TextChannel):
            data = await HandleData.retrieve_data(self, ctx.message.guild)
            lang_server = data[0]
        else:
            lang_server = "en"
        cmd_tran = tran[cmd_name][lang_server]

        # Doesn't respond to bots
        if not ctx.message.author.bot == True:
            # Check user perms
            if not ctx.message.author.guild_permissions.ban_members:
                return await ctx.send(cmd_tran["msg_perm_ban_user"].format(ctx.message.author.name))
            # Check bot perms
            if not ctx.message.guild.me.guild_permissions.ban_members:
                return await ctx.send(cmd_tran["msg_perm_ban_bot"])
            # No member, aborting
            if not member:
                return await ctx.send(cmd_tran["msg_mention_user"].format(ctx.message.author.name))

            # Ban the member and confirm with a red embed.
            await member.ban()
            embed = discord.Embed(color=0xFF1111)
            embed.description = cmd_tran["msg_user_baned"].format(member.name)
            await ctx.send(embed=embed)
def setup(client):
    # Entry point used by discord.py's load_extension to register the cog.
    client.add_cog(BanAdminRights(client))
| 36.4 | 100 | 0.637912 |
1197aab149d31aabbae1ade4691c48988b0c9030 | 778 | py | Python | robotframework-ls/src/robotframework_debug_adapter/listeners.py | anton264/robotframework-lsp | 6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | robotframework-ls/src/robotframework_debug_adapter/listeners.py | anton264/robotframework-lsp | 6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-09-30T15:40:29.000Z | 2021-09-30T15:40:29.000Z | robotframework-ls/src/robotframework_debug_adapter/listeners.py | anton264/robotframework-lsp | 6f8f89b88ec56b767f6d5e9cf0d3fb58847e5844 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | class _Callback(object):
def __init__(self):
self._callbacks = []
def register(self, callback):
self._callbacks.append(callback)
def __call__(self, *args, **kwargs):
for c in self._callbacks:
c(*args, **kwargs)
class DebugListener(object):
    """Robot Framework listener (API v3) that fans suite/test start and
    end events out to externally registered callbacks.

    NOTE: the _Callback instances are class attributes, so registrations
    are shared by every DebugListener instance — presumably intentional
    for a process-wide debug adapter; confirm before instantiating more
    than one listener.
    """
    ROBOT_LISTENER_API_VERSION = 3
    on_start_suite = _Callback()
    on_end_suite = _Callback()
    on_start_test = _Callback()
    on_end_test = _Callback()

    def start_suite(self, data, result):
        self.on_start_suite(data, result)

    def end_suite(self, data, result):
        self.on_end_suite(data, result)

    def start_test(self, data, result):
        self.on_start_test(data, result)

    def end_test(self, data, result):
        self.on_end_test(data, result)
119816fffb1e07970d7a5eddf0acda8930b0e4b4 | 3,057 | py | Python | tpp_tensorflow/models/semisparse.py | gfrogat/tpp_tensorflow | 711dd8cc0a8155ce6b6e5663afb2331b55748d30 | [
"MIT"
] | null | null | null | tpp_tensorflow/models/semisparse.py | gfrogat/tpp_tensorflow | 711dd8cc0a8155ce6b6e5663afb2331b55748d30 | [
"MIT"
] | null | null | null | tpp_tensorflow/models/semisparse.py | gfrogat/tpp_tensorflow | 711dd8cc0a8155ce6b6e5663afb2331b55748d30 | [
"MIT"
] | null | null | null | from tensorflow.keras import Model, layers, regularizers
class SemiSparseInput(Model):
    """Keras model that embeds several molecular-fingerprint inputs.

    Each named feature ("maccs_fp", "rdkit_fp", "pubchem_fp", "shed",
    "cats2d") gets its own (dropout +) dense projection; the projections
    are concatenated along the feature axis in ``call``.
    """
    def __init__(self, params):
        super(SemiSparseInput, self).__init__()
        # Correctly handle SELU: SELU needs AlphaDropout and the
        # lecun_normal initializer to keep its self-normalizing property.
        dropout = layers.AlphaDropout if params.activation == "selu" else layers.Dropout
        kernel_init = (
            "lecun_normal" if params.activation == "selu" else params.kernel_init
        )
        # Optional L2 weight decay applied to every dense kernel.
        kernel_reg = (
            regularizers.l2(params.reg_l2_rate)
            if params.reg_l2_rate is not None
            else None
        )

        # MACCS fingerprint branch -> 256 units.
        self.input_dropout_maccs_fp = dropout(
            rate=params.input_dropout_rate,
            seed=params.input_dropout_seed,
            name="input_dropout_maccs_fp",
        )
        self.input_maccs_fp = layers.Dense(
            256,
            activation=params.activation,
            kernel_initializer=kernel_init,
            kernel_regularizer=kernel_reg,
            name="dense_maccs_fp",
        )

        # RDKit fingerprint branch -> 2048 units.
        self.input_dropout_rdkit_fp = dropout(
            rate=params.input_dropout_rate,
            seed=params.input_dropout_seed,
            name="input_dropout_rdkit_fp",
        )
        self.input_rdkit_fp = layers.Dense(
            2048,
            activation=params.activation,
            kernel_initializer=kernel_init,
            kernel_regularizer=kernel_reg,
            name="dense_rdkit_fp",
        )

        # PubChem fingerprint branch -> 1024 units.
        self.input_dropout_pubchem_fp = dropout(
            rate=params.input_dropout_rate,
            seed=params.input_dropout_seed,
            name="input_dropout_pubchem_fp",
        )
        self.input_pubchem_fp = layers.Dense(
            1024,
            activation=params.activation,
            kernel_initializer=kernel_init,
            kernel_regularizer=kernel_reg,
            name="dense_pubchem_fp",
        )

        # SHED descriptor branch -> 8 units (no input dropout).
        self.input_shed = layers.Dense(
            8,
            activation=params.activation,
            kernel_initializer=kernel_init,
            kernel_regularizer=kernel_reg,
            name="dense_shed",
        )

        # CATS2D descriptor branch -> 32 units.
        self.input_dropout_cats2d = dropout(
            rate=params.input_dropout_rate,
            seed=params.input_dropout_seed,
            name="input_dropout_cats2d",
        )
        self.input_cats2d = layers.Dense(
            32,
            activation=params.activation,
            kernel_initializer=kernel_init,
            kernel_regularizer=kernel_reg,
            name="dense_cats2d",
        )

    def call(self, features, training=False):
        """Project each feature branch and concatenate along axis 1.

        ``features`` is a mapping with the five fingerprint/descriptor
        keys used below.

        NOTE(review): ``training`` is not forwarded to the dropout
        layers, so dropout behavior relies on Keras' implicit learning
        phase — confirm this is intended.
        """
        x1 = self.input_dropout_maccs_fp(features["maccs_fp"])
        x1 = self.input_maccs_fp(x1)
        x2 = self.input_dropout_rdkit_fp(features["rdkit_fp"])
        x2 = self.input_rdkit_fp(x2)
        x3 = self.input_dropout_pubchem_fp(features["pubchem_fp"])
        x3 = self.input_pubchem_fp(x3)
        x4 = self.input_shed(features["shed"])
        x5 = self.input_dropout_cats2d(features["cats2d"])
        x5 = self.input_cats2d(x5)
        x = layers.concatenate([x1, x2, x3, x4, x5], axis=1)
        return x
| 31.515464 | 88 | 0.59928 |
119876ff369ecd32448c59ea7ad56ae2b54cfef2 | 4,628 | py | Python | AT/neo4j_functions.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | AT/neo4j_functions.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | AT/neo4j_functions.py | seakers/daphne-brain | 1d703d468cd503a21395f986dd72e67b6e556451 | [
"MIT"
] | null | null | null | # Testing for neo4j query functions
from neo4j import GraphDatabase, basic_auth
# setup neo4j database connection
driver = GraphDatabase.driver("bolt://13.58.54.49:7687", auth=basic_auth("neo4j", "goSEAKers!"))
session = driver.session()
# Function that can take the intersection of multiple symptom queries
# Fabricate detection skill output (list of dictionaries)
symptom1, symptom2, symptom3 = {}, {}, {}
symptom1['measurement'], symptom1['relationship'] = 'ppO2', 'Exceeds_LWL'
symptom2['measurement'], symptom2['relationship'] = 'ppCO2', 'Exceeds_UWL'
symptom3['measurement'], symptom3['relationship'] = 'Water Level', 'Exceeds_UWL'
symptoms = [symptom1, symptom2, symptom3]
def diagnose_symptoms_neo4j(symptoms, session):
    """Return the distinct anomaly titles linked to *all* given symptoms.

    Builds one MATCH clause per symptom (anchored on the same anomaly
    node ``g``) and ANDs the measurement-name conditions, so only
    anomalies consistent with every symptom are returned.

    NOTE(review): the query is built by string concatenation from the
    symptom values — fine for trusted input, but consider Cypher
    parameters if symptom data can come from outside.
    """
    match_part = ''.join(
        "MATCH (m%d:Measurement)-[r%d:%s]->(g:Anomaly) "
        % (i, i, symp['relationship'])
        for i, symp in enumerate(symptoms))
    where_part = ' and '.join(
        "m%d.Name='%s'" % (i, symp['measurement'])
        for i, symp in enumerate(symptoms))
    query = match_part + 'WHERE ' + where_part + ' RETURN DISTINCT g.Title'
    print(query)
    # query the database
    result = session.run(query)
    return [record[0] for record in result]
# Smoke-test the diagnosis query against the fabricated symptoms above.
print(diagnose_symptoms_neo4j(symptoms, session))
# Function that can take an anomaly or list of anomalies (names) and query the related procedures
def get_related_procedures(anomaly, session):
    """Return the distinct procedure titles that solve the given anomaly.

    ``anomaly`` may be a single anomaly title or a list of titles; with a
    list, only procedures related to *all* the anomalies are returned.
    Returns ``None`` when no procedure matches.
    """
    if type(anomaly) is list:
        match_part = ''.join(
            "MATCH (a%d:Anomaly)-[r%d:Solution]->(p:Procedure) " % (i, i)
            for i in range(len(anomaly)))
        where_part = ' and '.join(
            "a%d.Title='%s'" % (i, name) for i, name in enumerate(anomaly))
        query = match_part + 'WHERE ' + where_part + ' RETURN DISTINCT p.Title'
    else:
        query = ("MATCH (a:Anomaly)-[r:Solution]->(p:Procedure) WHERE a.Title='"
                 + str(anomaly) + "' RETURN DISTINCT p.Title")
    print(query)
    procedures = [record[0] for record in session.run(query)]
    return procedures if procedures else None
# Smoke-test procedure lookup for a single anomaly and an anomaly list.
print(get_related_procedures('CDRA Failure', session))
print(get_related_procedures(['CDRA Failure', 'Excess CO2 in Cabin'], session))
# Function that can take a specific procedure and return all steps, substeps, and subsubsteps as an ordered list
def get_procedure_steps(procedure, session, detail=3):
    """Return the ordered '<title> - <action>' lines of a procedure.

    ``detail`` selects the depth: 1 = top-level steps only, 2 = steps and
    substeps, 3 (default) = everything including subsubsteps. Returns
    ``None`` when the procedure has no matching steps.
    """
    # Restrict the matched nodes to the requested level of detail.
    if detail == 1:
        label_filter = " and (s:Step)"
    elif detail == 2:
        label_filter = " and (s:Step OR s:SubStep)"
    else:
        label_filter = ""
    order_by = " ORDER BY s.Step, s.SubStep, s.SubSubStep, s.Note"
    base = ("MATCH(p:Procedure)-[r:Has]->(s) WHERE p.Title='"
            + procedure + "'" + label_filter)
    # Titles and actions are fetched with the same ordering so the two
    # result lists line up row for row.
    step_titles = [record[0] for record in
                   session.run(base + " RETURN s.Title" + order_by)]
    step_actions = [record[0] for record in
                    session.run(base + " RETURN s.Action" + order_by)]
    steps = [step_titles[k] + ' - ' + step_actions[k]
             for k in range(len(step_titles))]
    return steps if steps else None
print(get_procedure_steps('Zeolite Filter Swapout', session, 3))
'''
# Function that returns the equipment required for a procedure or list of procedures
def get_procedure_equipment(procedure, session):
return equipment
'''
| 46.747475 | 177 | 0.642178 |
119a0cbab86f26fb6ab15f22092ddf49f71f6f94 | 5,486 | py | Python | build.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | [
"MIT"
] | 2 | 2018-07-14T19:45:38.000Z | 2019-04-21T07:17:20.000Z | build.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | [
"MIT"
] | 155 | 2018-07-07T00:33:31.000Z | 2021-08-16T17:55:05.000Z | build.py | refaim/wots | dad9918c603293982a598fb5d6c73ade1a6080e1 | [
"MIT"
] | null | null | null | import datetime
import math
import os
import sys
import PyQt5
import dotenv
from PyInstaller.archive.pyz_crypto import PyiBlockCipher
from PyInstaller.building.api import PYZ, EXE, COLLECT
from PyInstaller.building.build_main import Analysis
from app import version
from app.core.utils import OsUtils, PathUtils
# Build layout: all paths are derived from the location of this script.
APP_NAME = 'wizard'
PROJECT_ROOT = os.path.dirname(os.path.realpath(__file__))
BUILD_DIRECTORY = os.path.join(PROJECT_ROOT, 'build')
DISTR_DIRECTORY = os.path.join(PROJECT_ROOT, 'dist', APP_NAME)
RESOURCES_DIRECTORY = os.path.join(PROJECT_ROOT, 'res')
# Resources with these extensions are bundled as binaries, the rest as data.
BINARY_RESOURCE_EXTENSIONS = {'.png'}

# Load optional build secrets (e.g. PYINSTALLER_CIPHER_KEY) from .env.
dotenv.load_dotenv(os.path.join(PROJECT_ROOT, '.env'))
# Extra DLL search paths (Windows only) and the generated exe version file.
extra_path = []
app_version_file = None
app_version_ints = None
if OsUtils.is_windows():
    # Qt and interpreter DLLs must be discoverable by PyInstaller.
    extra_path.append(os.path.join(os.path.dirname(PyQt5.__file__), 'Qt', 'bin'))
    extra_path.append(os.path.dirname(sys.executable))
    if OsUtils.is_win10():
        # Universal CRT redistributable DLLs live under the Windows 10 SDK.
        for program_files_var in ['ProgramFiles', 'ProgramFiles(x86)']:
            for arch in ['x86', 'x64']:
                dll_path = os.path.join(os.getenv(program_files_var), 'Windows Kits\\10\\Redist\\ucrt\\DLLs', arch)
                if os.path.isdir(dll_path):
                    extra_path.append(dll_path)

# Pad the dotted version to the 4 integers Windows version resources expect.
app_version = version.VERSION
app_version_ints = [int(x) for x in app_version.split('.')]
while len(app_version_ints) < 4:
    app_version_ints.append(0)
# Render the exe version-info file from its template.
app_version_file = os.path.join(BUILD_DIRECTORY, 'exe_version.txt')
with open(os.path.join(PROJECT_ROOT, 'exe_version.template.txt')) as version_template_fobj:
    with open(app_version_file, 'w') as version_fobj:
        version_fobj.write(version_template_fobj.read().format(
            version_string=str(app_version),
            version_tuple=tuple(app_version_ints),
            current_year=datetime.datetime.today().year))
# Collect data/binary resources to bundle into the distribution.
txt_resources = []
if os.path.exists('.env'):
    txt_resources.append(('.env', '.'))
bin_resources = []
for filename in os.listdir(RESOURCES_DIRECTORY):
    target = txt_resources
    if os.path.splitext(filename)[1] in BINARY_RESOURCE_EXTENSIONS:
        target = bin_resources
    target.append((os.path.join(RESOURCES_DIRECTORY, filename), os.path.relpath(RESOURCES_DIRECTORY, PROJECT_ROOT)))

# Optionally encrypt the bundled bytecode when a cipher key is configured.
block_cipher = None
cipher_key = os.getenv('PYINSTALLER_CIPHER_KEY')
if cipher_key:
    block_cipher = PyiBlockCipher(key=cipher_key)

# Standard PyInstaller pipeline: analyze -> archive -> exe -> collect dir.
a = Analysis([os.path.join(PROJECT_ROOT, 'app', 'wizard.py')],
             pathex=extra_path, binaries=bin_resources, datas=txt_resources, hiddenimports=[], hookspath=[],
             runtime_hooks=[], excludes=[], win_no_prefer_redirects=True, win_private_assemblies=True,
             cipher=block_cipher)
pyz = PYZ(a.pure, a.zipped_data, cipher=block_cipher)
exe = EXE(pyz, a.scripts, exclude_binaries=True, name=APP_NAME, debug=False, strip=False, upx=False, console=False, version=app_version_file)
COLLECT(exe, a.binaries, a.zipfiles, a.datas, strip=False, upx=False, name=APP_NAME)
# Windows only: render the NSIS installer script from its template.
if OsUtils.is_windows():
    # NSIS wants CRLF line endings in the license text.
    nsis_license = os.path.join(BUILD_DIRECTORY, 'license.txt')
    with open(os.path.join(PROJECT_ROOT, 'LICENSE')) as src_license_fobj:
        with open(nsis_license, 'w') as dst_license_fobj:
            dst_license_fobj.write(src_license_fobj.read().replace('\n', '\r\n'))
    with open(os.path.join(PROJECT_ROOT, 'setup.template.nsi')) as nsis_template_fobj:
        config = nsis_template_fobj.read()
        # Walk the collected distribution to enumerate everything the
        # installer must create and the uninstaller must remove.
        distr_directories = []
        distr_files = []
        for root, dirs, files in os.walk(DISTR_DIRECTORY):
            for dir_name in dirs:
                distr_directories.append(os.path.join(root, dir_name))
            for file_name in files:
                distr_files.append(os.path.join(root, file_name))

        def make_inst_path(path: str) -> str:
            # Quote a distribution-relative path anchored at $INSTDIR.
            return PathUtils.quote(os.path.join('$INSTDIR', os.path.relpath(path, DISTR_DIRECTORY)))

        def add_command(commands: list, command: str) -> None:
            # NSIS script lines are indented by four spaces.
            commands.append((' ' * 4) + command)

        indent = ' ' * 4  # NOTE(review): unused; add_command builds its own indent
        install_commands = []
        for path in distr_directories:
            add_command(install_commands, 'CreateDirectory {}'.format(make_inst_path(path)))
        for path in distr_files:
            add_command(install_commands, 'File {} {}'.format(PathUtils.quote('/oname={}'.format(os.path.relpath(path, DISTR_DIRECTORY))), PathUtils.quote(path)))
        uninstall_commands = []
        for path in distr_files:
            add_command(uninstall_commands, 'Delete {}'.format(make_inst_path(path)))
        # Remove directories deepest-first so RMDir finds them empty.
        for path in reversed(distr_directories):
            add_command(uninstall_commands, 'RMDir {}'.format(make_inst_path(path)))
        arch = 'x64' if OsUtils.is_x64() else 'x86'
        # Template placeholders -> concrete values substituted below.
        NSIS_VARS = {
            '%license_file%': os.path.basename(nsis_license),
            '%version_major%': str(app_version_ints[0]),
            '%version_minor%': str(app_version_ints[1]),
            '%version_build%': str(app_version_ints[2]),
            '%install_size_kb%': str(math.ceil(PathUtils.get_folder_size(DISTR_DIRECTORY) / 1024)),
            '%program_arch%': arch,
            '%exe_name%': APP_NAME,
            '%setup_name%': 'WizardOfTheSearch_v{}_Setup_{}'.format(version.VERSION, arch),
            '%distr_directory%': DISTR_DIRECTORY,
            '%install_commands%': '\r\n'.join(install_commands),
            '%uninstall_commands%': '\r\n'.join(uninstall_commands),
        }
        for k, v in NSIS_VARS.items():
            config = config.replace(k, v)
        with open(os.path.join(BUILD_DIRECTORY, 'setup.nsi'), 'w') as nsis_fobj:
            nsis_fobj.write(config)
119a349c2ca5822591f4b6677156eec1b27631d0 | 1,939 | py | Python | server/constants.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | [
"Apache-2.0"
] | 148 | 2018-07-03T02:08:30.000Z | 2022-03-26T04:03:35.000Z | server/constants.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | [
"Apache-2.0"
] | 856 | 2015-01-10T04:27:20.000Z | 2018-06-27T14:43:23.000Z | server/constants.py | chrononyan/ok | 1c83e419dd8d5ef64c1e03a7f8a218e65a9fb7cf | [
"Apache-2.0"
] | 69 | 2015-01-26T08:06:55.000Z | 2018-06-25T12:46:03.000Z | """App constants"""
import os
# Role identifiers as stored and compared throughout the server (lowercase).
STUDENT_ROLE = 'student'
GRADER_ROLE = 'grader'
STAFF_ROLE = 'staff'
INSTRUCTOR_ROLE = 'instructor'
LAB_ASSISTANT_ROLE = 'lab assistant'
# Human-readable label shown in the UI for each role.
ROLE_DISPLAY_NAMES = {
    STUDENT_ROLE: 'Student',
    GRADER_ROLE: 'Reader',
    STAFF_ROLE: 'Teaching Assistant',
    INSTRUCTOR_ROLE: 'Instructor',
    LAB_ASSISTANT_ROLE: 'Lab Assistant',
}
# Every role a course member may hold.
VALID_ROLES = [STUDENT_ROLE, LAB_ASSISTANT_ROLE, GRADER_ROLE, STAFF_ROLE,
               INSTRUCTOR_ROLE]
# Roles treated as course staff.
STAFF_ROLES = [GRADER_ROLE, STAFF_ROLE, INSTRUCTOR_ROLE]
# Kinds of scores that can be recorded for a submission.
SCORE_KINDS = ['composition', 'correctness', 'effort', 'total', 'partner a', 'partner b',
               'regrade', 'revision', 'checkpoint 1', 'checkpoint 2',
               'private', 'autograder', 'error']
# URL prefix under which the API is mounted.
API_PREFIX = '/api'
# OAuth scopes this server grants.
OAUTH_SCOPES = ['all', 'email']
# OAuth 2.0 out-of-band redirect URI (for clients without a redirect endpoint).
OAUTH_OUT_OF_BAND_URI = 'urn:ietf:wg:oauth:2.0:oob'
# NOTE(review): presumably the languages selectable for assignments or syntax
# highlighting -- confirm against the call sites.
COMMON_LANGUAGES = ['python', 'java', 'c', 'scheme', 'lisp', 'javascript']
# Course endpoint pattern, e.g. 'cal/cs61a/fa18' (two-letter term + 2-digit year).
COURSE_ENDPOINT_FORMAT = '^[\w\-]+/[\w\-]+/(fa|sp|su|wi|au|yr)\d\d$'
# Course endpoint followed by '/<assignment>', e.g. 'cal/cs61a/fa18/proj1'.
ASSIGNMENT_ENDPOINT_FORMAT = COURSE_ENDPOINT_FORMAT[:-1] + '/\w+$'
# NOTE(review): looks like a cloud-storage bucket name for grade exports -- confirm.
GRADES_BUCKET = 'ok_grades_bucket'
# Display timezone and datetime format used when rendering timestamps.
TIMEZONE = 'America/Los_Angeles'
ISO_DATETIME_FMT = '%Y-%m-%d %H:%M:%S'
# Root path the application is served from (overridable via environment).
APPLICATION_ROOT = os.getenv('APPLICATION_ROOT', '/')
# The default autograder url
# Each course can configure their own autograder url in course.edit view
AUTOGRADER_URL = os.getenv('AUTOGRADER_URL', 'https://autograder.cs61a.org')
# API key for outgoing mail via SendGrid; None when the variable is unset.
SENDGRID_KEY = os.getenv("SENDGRID_KEY")
# Names that may not be used as URL route segments (they would shadow
# existing application routes).
FORBIDDEN_ROUTE_NAMES = [
    'about',
    'admin',
    'api',
    'comments',
    'login',
    'logout',
    'oauth',
    'rq',
    'testing-login',
]
# Reserved assignment names; currently none.
FORBIDDEN_ASSIGNMENT_NAMES = []
# Service Providers
GOOGLE = "GOOGLE"
MICROSOFT = "MICROSOFT"
# Maximum file size to show in browser, in characters
DIFF_SIZE_LIMIT = 64 * 1024 # 64KB
SOURCE_SIZE_LIMIT = 10 * 1024 * 1024 # 10MB
MAX_UPLOAD_FILE_SIZE = 25 * 1024 * 1024 # 25MB
# Email client format for to field
EMAIL_FORMAT = "{name} <{email}>"
| 28.101449 | 89 | 0.684373 |
119ba308325a28e7115e3336760ea5459e34bcae | 31,721 | py | Python | SRTG-Scheduler/SRTG-ResultAnalysis/scripts/aperiodic-jobs-resultAnalyzer.py | kiritigowda/RTG-scheduler | 4aa3d66e011e6a0d16e19719f940c5cc0a6559ba | [
"MIT"
] | 2 | 2021-10-15T12:00:51.000Z | 2021-11-23T04:50:58.000Z | SRTG-Scheduler/SRTG-ResultAnalysis/scripts/aperiodic-jobs-resultAnalyzer.py | kiritigowda/RTG-scheduler | 4aa3d66e011e6a0d16e19719f940c5cc0a6559ba | [
"MIT"
] | 45 | 2018-01-24T15:38:11.000Z | 2020-10-31T19:50:19.000Z | SRTG-Scheduler/SRTG-ResultAnalysis/scripts/aperiodic-jobs-resultAnalyzer.py | kiritigowda/RTG-scheduler | 4aa3d66e011e6a0d16e19719f940c5cc0a6559ba | [
"MIT"
] | 2 | 2018-05-23T17:13:44.000Z | 2020-09-18T15:06:17.000Z | # Copyright (c) 2017 - 2020 Kiriti Nagesh Gowda, Inc. All rights reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.

"""SRTG-ResultAnalysis -- aperiodic job result accumulator and analyzer.

Reads the per-mode summary CSVs (RTGS-Mode-<N>-Summary.csv) produced by the
SRTG scheduler, appends per-mode averages to mode-<N>-accum-results.csv in the
output directory, and writes an HTML report with Google line charts comparing
the five scheduler modes.

Rewritten from five copy-pasted per-mode sections into shared helpers.  This
also fixes a copy-paste defect in the original: the average release-time
lambda was computed only from mode 1 and written into every mode's
accum-results CSV; each mode now reports its own average lambda.
"""

import os
import argparse
import csv
from datetime import date

__author__ = "Kiriti Nagesh Gowda"
__copyright__ = "Copyright 2018 - 2020, Kiriti Nagesh Gowda - SRTG-Scheduler"
__license__ = "MIT"
__version__ = "1.0.1"
__maintainer__ = "Kiriti Nagesh Gowda"
__email__ = "Kiritigowda@gmail.com"
__status__ = "Shipping"

# Scheduler modes compared by this report.
MODES = [1, 2, 3, 4, 5]

# Column layout of one summary row (and of the averages computed from it).
COL_JOBS = 0            # jobs released
COL_LAMBDA = 1          # release-time lambda
COL_ACCEPTED = 2        # jobs accepted
COL_ACCEPTED_PCT = 3    # jobs accepted percentage
COL_GCUS = 4            # avg GCUs requested (accepted jobs)
COL_EXEC = 5            # avg execution time (accepted jobs)
COL_RESP_EXEC = 6       # response by execution time
COL_RESP_DEADLINE = 7   # response by relative deadline
COL_GPU_ACCEPTED = 8    # total GPU usage time (accepted jobs)
COL_GPU_REQUESTED = 9   # total GPU usage time requested (all jobs)
COL_OVH_ACCEPTED = 10   # scheduler overhead (accepted jobs)
COL_OVH_ALL = 11        # scheduler overhead (all jobs)
NUM_COLUMNS = 12

# Header row written once at the top of every accum-results CSV; the run date
# is appended when the file is created.
ACCUM_HEADER = (
    'AVG Lambda, AVG Jobs Released, AVG Jobs Accepted, AVG Jobs Accepted Percentage, '
    'Avg GCUs Requested - Accepted Jobs, Avg Exec Time - Accepted Jobs, '
    'Avg Response by Execution Time, Avg Response by Relative deadline, '
    'AVG Total GPU Usage Time - Accepted Jobs, AVG Total GPU Usage Time Requested - All Jobs, '
    'Avg Scheduler OverHead - Accepted Jobs, Avg Scheduler OverHead - All Jobs, Num Job Sets, ')

# Column labels of the HTML summary table (the first, empty cell holds the
# mode name); TABLE_VALUE_COLUMNS maps each label to its summary column.
TABLE_COLUMN_LABELS = [
    'AVG Jobs Released', 'AVG Jobs Accepted', 'AVG Jobs Accepted Percentage',
    'Avg GCUs Requested - Accepted Jobs', 'Avg Exec Time - Accepted Jobs',
    'Avg Response by Execution Time', 'Avg Response by Relative deadline',
    'AVG Total GPU Usage Time - Accepted Jobs',
    'AVG Total GPU Usage Time Requested - All Jobs',
    'Avg Scheduler OverHead - Accepted Jobs', 'Avg Scheduler OverHead - All Jobs']
TABLE_VALUE_COLUMNS = [COL_JOBS, COL_ACCEPTED, COL_ACCEPTED_PCT, COL_GCUS, COL_EXEC,
                       COL_RESP_EXEC, COL_RESP_DEADLINE, COL_GPU_ACCEPTED,
                       COL_GPU_REQUESTED, COL_OVH_ACCEPTED, COL_OVH_ALL]


def parse_arguments():
    """Parse the command line; print an error and exit when arguments are missing.

    Returns (input_directory, output_directory, results_filename); creates the
    output directory when it does not exist yet.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_directory', type=str, default='',
                        help='Directory - RTGS_summary directory')
    parser.add_argument('--output_directory', type=str, default='',
                        help='Directory - directory to save results')
    parser.add_argument('--results_filename', type=str, default='',
                        help='Results File prefix - results .html file prefix')
    args = parser.parse_args()
    if args.input_directory == '' or args.output_directory == '' or args.results_filename == '':
        print("ERROR - NO Arguments Passed, use --h option")
        exit()
    if not os.path.exists(args.input_directory):
        print("ERROR Invalid Input Directory")
        exit()
    if not os.path.exists(args.output_directory):
        os.makedirs(args.output_directory)
    return args.input_directory, args.output_directory, args.results_filename


def read_mode_summary(input_directory, mode):
    """Read RTGS-Mode-<mode>-Summary.csv, dropping the header row."""
    path = os.path.join(input_directory, 'RTGS-Mode-%d-Summary.csv' % mode)
    with open(path) as summary_file:
        reader = csv.reader(summary_file)
        next(reader)  # skip the column-name header
        return [row for row in reader]


def average_columns(rows):
    """Return the per-column average of the first NUM_COLUMNS fields of *rows*."""
    sums = [0.0] * NUM_COLUMNS
    for row in rows:
        for col in range(NUM_COLUMNS):
            sums[col] += float(row[col])
    return [total / len(rows) for total in sums]


def accum_row(avg, row_count):
    """Format one averaged summary as a row for the accum-results CSV.

    The separator layout (', ' for the first fields, ',' for the last three)
    matches the files produced by earlier versions of this script.
    """
    return (str(avg[COL_LAMBDA]) + ', ' + str(avg[COL_JOBS]) + ', '
            + str(avg[COL_ACCEPTED]) + ', ' + str(avg[COL_ACCEPTED_PCT]) + ', '
            + str(avg[COL_GCUS]) + ', ' + str(avg[COL_EXEC]) + ', '
            + str(avg[COL_RESP_EXEC]) + ', ' + str(avg[COL_RESP_DEADLINE]) + ', '
            + str(avg[COL_GPU_ACCEPTED]) + ', ' + str(avg[COL_GPU_REQUESTED]) + ', '
            + str(avg[COL_OVH_ACCEPTED]) + ',' + str(avg[COL_OVH_ALL]) + ',' + str(row_count))


def append_accum_result(output_directory, mode, row, date_created):
    """Append *row* to mode-<mode>-accum-results.csv, creating it with a header first."""
    path = os.path.join(output_directory, 'mode-%d-accum-results.csv' % mode)
    if not os.path.isfile(path):
        with open(path, 'w') as accum_file:
            accum_file.write(ACCUM_HEADER + date_created + '\n')
    with open(path, 'a') as accum_file:
        accum_file.write(row + '\n')


def simple_options(title, v_title):
    """Google Charts options for the plain line charts."""
    return ("title:'%s', hAxis: { title: 'JobSet ID'}, vAxis: {title: '%s'}, "
            "series: { 0.01: {curveType: 'function'} }, width:1600, height:1000"
            % (title, v_title))


def styled_options(title, v_title, v_extra):
    """Google Charts options for the styled charts (dashed series, big legend).

    *v_extra* is appended inside the vAxis object, e.g. ', minValue: 0, maxValue: 100'.
    """
    return ("title:'%s', titleTextStyle: { fontSize: 28, bold: true}, "
            "hAxis:{ title: 'JobSet ID', titleTextStyle: { fontSize: 24, bold: true}, marginTop: '5'}, "
            "vAxis:{ title: '%s', titleTextStyle:{ fontSize: 24, bold: true}%s }, "
            "series:{ 0:{lineDashStyle: [1, 1]}, 1:{lineDashStyle: [2, 2]}, 2:{lineDashStyle: [4, 4]}, "
            "3:{lineDashStyle: [5, 1, 3] }, 4:{ lineDashStyle: [5, 5]}}, "
            "legend:{ position: 'top', alignment: 'center', textStyle:{ fontSize: 26}}, "
            "width:1600, height:1000" % (title, v_title, v_extra))


def chart_function(function_name, div_id, column, options, mode_data, row_count):
    """Return the JavaScript for one line chart comparing all modes on *column*."""
    lines = []
    lines.append("\t\t\tgoogle.charts.load('current', {packages: ['corechart', 'line']});")
    lines.append("\t\t\tgoogle.charts.setOnLoadCallback(%s);" % function_name)
    lines.append("\t\t\tfunction %s() {" % function_name)
    lines.append("\t\t\tvar data = new google.visualization.DataTable();")
    lines.append("\t\t\tdata.addColumn('number', 'X');")
    for mode in MODES:
        lines.append("\t\t\tdata.addColumn('number', 'Mode %d');" % mode)
    lines.append("\t\t\tdata.addRows([")
    for x in range(row_count):
        values = ','.join([str(mode_data[mode][x][column]) for mode in MODES])
        suffix = ',' if x < row_count - 1 else ''
        lines.append('\t\t\t\t[' + str(x) + ',' + values + ']' + suffix)
    lines.append("\t\t\t]);")
    lines.append("\t\t\tvar options = { %s };" % options)
    lines.append("\t\t\tvar chart = new google.visualization.LineChart("
                 "document.getElementById('%s'));" % div_id)
    lines.append("\t\t\tchart.draw(data, options);}")
    return '\n'.join(lines) + '\n\n\n\n'


def main():
    input_directory, output_directory, file_name = parse_arguments()

    print("\nSRTG-ResultAnalysis - Aperiodic Job Result Accumulator and Analyzer V-"
          + __version__ + "\n")
    date_created = date.today().strftime("%b-%d-%Y")

    # Read every mode summary and check they describe the same job sets.
    mode_data = {}
    for mode in MODES:
        mode_data[mode] = read_mode_summary(input_directory, mode)
    row_count = len(mode_data[MODES[0]])
    for mode in MODES:
        if len(mode_data[mode]) != row_count:
            print("ERROR: Number of entries in Summary File are different")
            exit()

    # Per-mode averages, appended to the per-mode accum CSVs (each mode now
    # reports its own average lambda -- see module docstring).
    mode_averages = {}
    for mode in MODES:
        avg = average_columns(mode_data[mode])
        mode_averages[mode] = avg
        append_accum_result(output_directory, mode, accum_row(avg, row_count), date_created)

    # Chart definitions: JS callback name, target div, summary column, options.
    charts = [
        ('jobScheduledGraph', 'jobScheduled_chart', COL_ACCEPTED,
         styled_options('Average Jobs Accepted for GPU Schedule',
                        'Number of Jobs Scheduled', '')),
        ('jobScheduledPercentageGraph', 'JobScheduledPercentage_chart', COL_ACCEPTED_PCT,
         styled_options('Average Jobs Accepted Percentage for GPU Schedule',
                        'Avg Jobs Scheduled %', ', minValue: 0, maxValue: 100')),
        ('avgResponseTimeGraph', 'responseByExecTime_chart', COL_RESP_EXEC,
         simple_options('Average Response by Execution Time',
                        'Response by Execution Time')),
        ('avgResponseFactorGraph', 'responseByRelativeDeadline_chart', COL_RESP_DEADLINE,
         simple_options('Average Response by Relative Deadline',
                        'Response by Relative Deadline')),
        ('GPUUsagePercentageGraph', 'GPUUsage_accepted_chart', COL_GPU_ACCEPTED,
         simple_options('GPU Usage Jobs Accepted', 'GPU Usage Jobs Accepted')),
        ('GPUUsageGraph', 'GPUUsage_requested_chart', COL_GPU_REQUESTED,
         simple_options('Total GPU Usage Requested', 'Total GPU Usage Requested')),
    ]

    html_path = os.path.join(output_directory, file_name + '-SchedulerResults.html')
    with open(html_path, 'w') as html:
        html.write("<html>\n")
        html.write("\t<head>\n")
        html.write("\t\t<script type=\"text/javascript\" "
                   "src=\"https://www.gstatic.com/charts/loader.js\"></script>\n")
        html.write("\n\n")
        html.write("\t\t<script type=\"text/javascript\">\n")
        html.write("\n\n")
        for function_name, div_id, column, options in charts:
            html.write(chart_function(function_name, div_id, column, options,
                                      mode_data, row_count))
        html.write("\t\t</script>\n")
        html.write("\t</head>\n")
        html.write("\t<body>\n")
        html.write('\t\t<br><br><h1><center>SRTG-ResultAnalysis: A-Periodic Job '
                   'Schedule Summary</center></h2><br>\n')
        html.write('\t\t<br><br><h3><center>Created on: ' + date_created
                   + '</center></h3><br>\n')
        # Summary table: one row per scheduler mode.
        html.write("\t\t<table align=\"center\" style=\"width: 95%\">\n")
        html.write("\t\t\t<tr>\n")
        html.write("\t\t\t\t<td><center></center></td>\n")
        for label in TABLE_COLUMN_LABELS:
            html.write("\t\t\t\t<td><center>" + label + "</center></td>\n")
        html.write("\t\t\t</tr>\n")
        for mode in MODES:
            avg = mode_averages[mode]
            html.write("\t\t\t<tr>\n")
            html.write("\t\t\t\t<td><center>Mode " + str(mode) + "</center></td>\n")
            for column in TABLE_VALUE_COLUMNS:
                html.write('\t\t\t\t<td><center>' + str(avg[column]) + '</center></td>\n')
            html.write("\t\t\t</tr>\n")
        html.write("\t\t</table>\n")
        # The release-time lambda is a property of the released job sets, so
        # mode 1's average is reported here as before.
        html.write('\t\t<br><br><h2><center> Avg Release Time Lambda:'
                   + str(mode_averages[1][COL_LAMBDA]) + '</center></h2><br>\n')
        for div_id in ['JobScheduledPercentage_chart', 'jobScheduled_chart',
                       'GPUUsage_accepted_chart', 'GPUUsage_requested_chart',
                       'responseByExecTime_chart', 'responseByRelativeDeadline_chart']:
            html.write("\t\t<center><div id=\"" + div_id
                       + "\" style=\"border: 1px solid #ccc\"></div></center>\n")
        html.write("\t</body>\n")
        html.write("</html>\n")


if __name__ == '__main__':
    main()
| 49.027821 | 225 | 0.694871 |
119bf6a968d6fb73503071ca0eb21d892d8e76bd | 342 | py | Python | legacy_codes/app/my_exception.py | gomjellie/SoongSiri | 251a8259f47e3f0a7b42faa20ea9223f125a42a2 | [
"MIT"
] | 8 | 2018-09-22T05:29:51.000Z | 2021-11-17T03:23:23.000Z | legacy_codes/app/my_exception.py | gomjellie/SoongSiri | 251a8259f47e3f0a7b42faa20ea9223f125a42a2 | [
"MIT"
] | 7 | 2017-12-30T08:02:32.000Z | 2020-08-26T01:59:20.000Z | legacy_codes/app/my_exception.py | gomjellie/SoongSiri | 251a8259f47e3f0a7b42faa20ea9223f125a42a2 | [
"MIT"
] | 3 | 2018-11-13T14:04:04.000Z | 2019-09-01T18:53:11.000Z |
class FoodNotFound(Exception):
def __init__(self, msg=None):
self.msg = msg
def __str__(self):
return "식단을 불러올 수 없습니다.\n {}".format(self.msg)
class FoodRateDuplicate(Exception):
def __init__(self, msg=None):
self.msg = msg
def __str__(self):
return "이미 평가한 항목입니다.\n {}".format(self.msg)
| 19 | 54 | 0.614035 |
119cfe73411d8df272a8a84fca4c6e3deff92e8f | 939 | py | Python | estudiantes/funciones.py | tabris2015/intro_python_data_science | 784f0b35988676248c4ba6c7999164dfb51ee631 | [
"MIT"
] | null | null | null | estudiantes/funciones.py | tabris2015/intro_python_data_science | 784f0b35988676248c4ba6c7999164dfb51ee631 | [
"MIT"
] | null | null | null | estudiantes/funciones.py | tabris2015/intro_python_data_science | 784f0b35988676248c4ba6c7999164dfb51ee631 | [
"MIT"
] | null | null | null | def registrar_estudiantes(n):
# registro de datos
estudiantes = []
for i in range(n):
estudiante = {}
print(f'estudiante {i+1}:')
estudiante['nombre'] = input('nombre del estudiante: ')
in_edad = input('edad del estudiante: ')
while not in_edad.isdigit():
print('ingrese una edad valida')
in_edad = input('edad del estudiante: ')
estudiante['edad'] = int(in_edad)
estudiantes.append(estudiante)
return estudiantes
def guardar_estudiantes(estudiantes, archivo='estudiantes.txt'):
# abrir un archivo y almacenar todos los datos
# estudiante = {
# 'nombre': 'dfalkjasdf',
# 'edad': 123123
# }
# estudiante["nombre"] => 'dfalkjasdf'
with open(archivo, 'w') as f:
for est in estudiantes:
fila = est['nombre'] + ',' + str(est['edad']) + '\n'
print(fila)
f.write(fila)
| 31.3 | 64 | 0.57295 |
119d8e43e77baac65ef5c6b64d016a19f85843ba | 6,741 | py | Python | malaya/_utils/_softmax_class.py | leowmjw/Malaya | 33f39835eca08c238d2dd68aeca3b09c5d0a45ab | [
"MIT"
] | null | null | null | malaya/_utils/_softmax_class.py | leowmjw/Malaya | 33f39835eca08c238d2dd68aeca3b09c5d0a45ab | [
"MIT"
] | null | null | null | malaya/_utils/_softmax_class.py | leowmjw/Malaya | 33f39835eca08c238d2dd68aeca3b09c5d0a45ab | [
"MIT"
] | null | null | null | import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter('ignore')
import json
import os
import pickle
from ._utils import check_file, load_graph, check_available, generate_session
from ..stem import _classification_textcleaning_stemmer
from .._models._sklearn_model import (
BINARY_XGB,
BINARY_BAYES,
MULTICLASS_XGB,
MULTICLASS_BAYES,
)
from .._models._tensorflow_model import (
BINARY_SOFTMAX,
MULTICLASS_SOFTMAX,
SPARSE_SOFTMAX,
BINARY_BERT,
MULTICLASS_BERT,
)
def sparse_deep_model(
path,
s3_path,
class_name,
label,
output_size,
embedded_size = 128,
model = 'fast-text-char',
validate = True,
):
if not isinstance(model, str):
raise ValueError('model must be a string')
model = model.lower()
if model == 'fast-text-char':
if validate:
check_file(path[model], s3_path[model])
else:
if not check_available(path[model]):
raise Exception(
'%s/%s is not available, please `validate = True`'
% (class_name, model)
)
try:
with open(path[model]['vector'], 'rb') as fopen:
vector = pickle.load(fopen)
return SPARSE_SOFTMAX(
path = os.path.dirname(path[model]['model']),
vectorizer = vector,
label = label,
output_size = output_size,
embedded_size = embedded_size,
vocab_size = len(vector.vocabulary_),
)
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('%s/%s') and try again"
% (class_name, model)
)
else:
raise Exception(
'model subjectivity not supported, please check supported models from malaya.%s.available_sparse_deep_model()'
% (class_name)
)
def deep_model(
path, s3_path, class_name, label, model = 'luong', validate = True
):
if validate:
check_file(path[model], s3_path[model])
else:
if not check_available(path[model]):
raise Exception(
'%s/%s is not available, please `validate = True`'
% (class_name, model)
)
try:
with open(path[model]['setting'], 'r') as fopen:
dictionary = json.load(fopen)['dictionary']
g = load_graph(path[model]['model'])
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('%s/%s') and try again"
% (class_name, model)
)
if len(label) > 2:
selected_class = MULTICLASS_SOFTMAX
else:
selected_class = BINARY_SOFTMAX
return selected_class(
X = g.get_tensor_by_name('import/Placeholder:0'),
logits = g.get_tensor_by_name('import/logits:0'),
logits_seq = g.get_tensor_by_name('import/logits_seq:0'),
alphas = g.get_tensor_by_name('import/alphas:0'),
sess = generate_session(graph = g),
dictionary = dictionary,
class_name = class_name,
label = label,
)
def multinomial(path, s3_path, class_name, label, validate = True):
if validate:
check_file(path['multinomial'], s3_path['multinomial'])
else:
if not check_available(path['multinomial']):
raise Exception(
'%s/multinomial is not available, please `validate = True`'
% (class_name)
)
try:
with open(path['multinomial']['model'], 'rb') as fopen:
multinomial = pickle.load(fopen)
with open(path['multinomial']['vector'], 'rb') as fopen:
vectorize = pickle.load(fopen)
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('%s/multinomial') and try again"
% (class_name)
)
if len(label) > 2:
selected_class = MULTICLASS_BAYES
else:
selected_class = BINARY_BAYES
return selected_class(
multinomial = multinomial,
label = label,
vectorize = vectorize,
cleaning = _classification_textcleaning_stemmer,
)
def xgb(path, s3_path, class_name, label, validate = True):
if validate:
check_file(path['xgb'], s3_path['xgb'])
else:
if not check_available(path['xgb']):
raise Exception(
'%s/xgb is not available, please `validate = True`'
% (class_name)
)
try:
with open(path['xgb']['model'], 'rb') as fopen:
xgb = pickle.load(fopen)
with open(path['xgb']['vector'], 'rb') as fopen:
vectorize = pickle.load(fopen)
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('%s/xgb') and try again"
% (class_name)
)
if len(label) > 2:
selected_class = MULTICLASS_XGB
else:
selected_class = BINARY_XGB
return selected_class(
xgb = xgb,
label = label,
vectorize = vectorize,
cleaning = _classification_textcleaning_stemmer,
)
def bert(path, s3_path, class_name, label, validate = True):
try:
from bert import tokenization
except:
raise Exception(
'bert-tensorflow not installed. Please install it using `pip3 install bert-tensorflow` and try again.'
)
if validate:
check_file(path['bert'], s3_path['bert'])
else:
if not check_available(path['bert']):
raise Exception(
'%s/bert is not available, please `validate = True`'
% (class_name)
)
tokenization.validate_case_matches_checkpoint(False, '')
tokenizer = tokenization.FullTokenizer(
vocab_file = path['bert']['vocab'], do_lower_case = False
)
try:
g = load_graph(path['bert']['model'])
except:
raise Exception(
"model corrupted due to some reasons, please run malaya.clear_cache('%s/bert') and try again"
% (class_name)
)
if len(label) > 2:
selected_class = MULTICLASS_BERT
else:
selected_class = BINARY_BERT
return selected_class(
X = g.get_tensor_by_name('import/Placeholder:0'),
segment_ids = g.get_tensor_by_name('import/Placeholder_1:0'),
input_masks = g.get_tensor_by_name('import/Placeholder_2:0'),
logits = g.get_tensor_by_name('import/logits:0'),
sess = generate_session(graph = g),
tokenizer = tokenizer,
maxlen = 100,
label = label,
)
| 30.640909 | 122 | 0.58745 |
119ea88acc92144d46ec6203fbc4f95f170a347e | 1,580 | py | Python | src/otest/test_setup.py | rohe/otest | 8983db8abfa63eda4e8a35bbe193ac80793c14bb | [
"Apache-2.0"
] | 2 | 2016-08-26T07:42:19.000Z | 2017-09-06T02:13:02.000Z | src/otest/test_setup.py | rohe/otest | 8983db8abfa63eda4e8a35bbe193ac80793c14bb | [
"Apache-2.0"
] | 3 | 2017-06-15T06:07:18.000Z | 2018-06-28T07:43:21.000Z | src/otest/test_setup.py | rohe/otest | 8983db8abfa63eda4e8a35bbe193ac80793c14bb | [
"Apache-2.0"
] | 5 | 2016-07-22T21:38:40.000Z | 2019-04-05T19:20:23.000Z | from oidctest.op import func
from oidctest.op import oper
from oidctest.op.client import Client
from oidctest.session import SessionHandler
from otest.aus.handling_ph import WebIh
from otest.conf_setup import OP_ORDER
from otest.conversation import Conversation
from otest.events import Events
from otest.flow import FlowState
from otest.prof_util import ProfileHandler
from oic.oic.message import factory
from oic.oic.message import ProviderConfigurationResponse
from oic.utils.authn.client import CLIENT_AUTHN_METHOD
def setup_conv():
entity = Client(client_authn_method=CLIENT_AUTHN_METHOD,
verify_ssl=False)
entity.provider_info = ProviderConfigurationResponse(
authorization_endpoint="https://example.com",
)
cls_factories = {'': oper.factory}
func_factory = func.factory
flow_state = FlowState('flows', profile_handler=ProfileHandler,
cls_factories=cls_factories,
func_factory=func_factory,
display_order=OP_ORDER)
iss = 'https://example.org'
tag = 'foobar'
session_handler = SessionHandler(iss, tag,
flows=flow_state,
tool_conf={}) # , rest=rest, **webenv)
session_handler.iss = iss
session_handler.tag = tag
info = WebIh(session=session_handler, profile_handler=ProfileHandler)
conv = Conversation([], entity, factory, callback_uris=[])
conv.events = Events()
conv.tool_config = {}
return {'conv': conv, 'io': info}
| 34.347826 | 76 | 0.681646 |
119efd61f102f9d7b866310597894dc025bd5e5a | 466 | py | Python | AlgoritimoRandomize.py | falluk/algoritimoDeBuscas | 6dbca79ef60f2820f5e81110bc4104bdc46496b1 | [
"MIT"
] | 1 | 2021-07-05T13:24:04.000Z | 2021-07-05T13:24:04.000Z | AlgoritimoRandomize.py | falluk/algoritimoDeBuscas | 6dbca79ef60f2820f5e81110bc4104bdc46496b1 | [
"MIT"
] | null | null | null | AlgoritimoRandomize.py | falluk/algoritimoDeBuscas | 6dbca79ef60f2820f5e81110bc4104bdc46496b1 | [
"MIT"
] | null | null | null | #Criado para randomizar uma lsita de 20mil Colaboradores retornando apenas 1000 colaboradores vários cargos distintos.
import pandas as pd
import random
base = pd.read_excel("usuarios - energisa.xlsx", encoding="ISO-8859-1",error_bad_lines=False)
sort1 = base.sample(15000)
sort2 = sort1.sample(10000)
sort3 = sort2.sample(7500)
sort4 = sort3.sample(5000)
sort5 = sort4.sample(2500)
sorteado = sort5.sample(1000)
sorteado.to_excel("Lista Randomizada.xlsx")
| 22.190476 | 118 | 0.776824 |
11a158752080d596792054d55693dc41df752af9 | 7,625 | py | Python | app.py | Build-Week-2106FT-AirBnB-3/front-end | 0df6f9814387a36002a1aaa8feff1f17fcb30b78 | [
"CC0-1.0"
] | null | null | null | app.py | Build-Week-2106FT-AirBnB-3/front-end | 0df6f9814387a36002a1aaa8feff1f17fcb30b78 | [
"CC0-1.0"
] | 1 | 2021-06-24T00:17:40.000Z | 2021-06-24T00:18:42.000Z | app.py | Build-Week-2106FT-AirBnB-3/pricing | 0df6f9814387a36002a1aaa8feff1f17fcb30b78 | [
"CC0-1.0"
] | null | null | null | # -*- coding: utf-8 -*-
# https://towardsdatascience.com/build-a-machine-learning-simulation-tool-with-dash-b3f6fd512ad6
# We start with the import of standard ML librairies
import pandas as pd
import numpy as np
from sklearn.datasets import make_regression
from sklearn.ensemble import RandomForestRegressor
# We add all Plotly and Dash necessary librairies
import plotly.graph_objects as go
import pickle
import dash
import dash_core_components as dcc
import dash_html_components as html
import dash_daq as daq
from dash.dependencies import Input, Output
from sklearn.pipeline import make_pipeline
from sklearn.impute import SimpleImputer
from category_encoders import OneHotEncoder
from sklearn.model_selection import train_test_split
df = pd.read_csv('clean_data.csv')
# importing our model
# target='price'
# X = df.drop(columns=target)
# y = df[target]
# # Let's split into a test and
# X_train, X_t, y_train, y_t = train_test_split(X,y, test_size=.2, random_state=7)
# # Let's split our test data into validation and test
# X_val, X_test, y_val, y_test = train_test_split(X_t,y_t, test_size=.2, random_state=7)
# model = make_pipeline(OneHotEncoder(use_cat_names=True),
# SimpleImputer(),
# RandomForestRegressor(random_state=70))
# model.fit(X_train,y_train)
infile = open('random_forest_model', 'rb')
model = pickle.load(infile)
infile.close()
# # # We create a DataFrame to store the features' importance and their corresponding label
f_impor = model.named_steps['randomforestregressor'].feature_importances_
col_names = model.named_steps['onehotencoder'].get_feature_names()
df_feature_importances = pd.DataFrame(f_impor, columns=["Importance"], index=col_names)
df_feature_importances = df_feature_importances.sort_values("Importance", ascending=False).head(10)
# Create the bar chart and limit it to the top 10 features
# # We create a Features Importance Bar Chart
fig_features_importance = go.Figure()
fig_features_importance.add_trace(go.Bar(x=df_feature_importances.index,
y=df_feature_importances["Importance"],
marker_color='rgb(171, 226, 251)')
)
fig_features_importance.update_layout(title_text='<b>Features Importance of the model<b>', title_x=0.5)
# The command below can be activated in a standard notebook to display the chart
# fig_features_importance.show()
# We record the name, min, mean and max of the three most important features
dropdown_1_label = df_feature_importances.index[0]
dropdown_1_min = round(df[dropdown_1_label].min(),5)
dropdown_1_mean = round(df[dropdown_1_label].mean(),5)
dropdown_1_max = round(df[dropdown_1_label].max(),5)
dropdown_2_label = df_feature_importances.index[1]
dropdown_2_min = round(df[dropdown_2_label].min(),5)
dropdown_2_mean = round(df[dropdown_2_label].mean(),5)
dropdown_2_max = round(df[dropdown_2_label].max(),5)
dropdown_3_label = df_feature_importances.index[5]
dropdown_3_min = round(df[dropdown_3_label].min(),5)
dropdown_3_mean = round(df[dropdown_3_label].mean(),5)
dropdown_3_max = round(df[dropdown_3_label].max(),5)
###############################################################################
app = dash.Dash()
server = app.server
# The page structure will be:
# Features Importance Chart
# <H4> Feature #1 name
# Slider to update Feature #1 value
# <H4> Feature #2 name
# Slider to update Feature #2 value
# <H4> Feature #3 name
# Slider to update Feature #3 value
# <H2> Updated Prediction
# Callback fuction with Sliders values as inputs and Prediction as Output
# We apply basic HTML formatting to the layout
app.layout = html.Div(style={'textAlign': 'center', 'width': '800px', 'font-family': 'Verdana'},
children=[
# The same logic is applied to the following names / sliders
html.H1(children="Simulation Tool"),
#Dash Graph Component calls the fig_features_importance parameters
dcc.Graph(figure=fig_features_importance),
# We display the most important feature's name
html.H4(children=dropdown_1_label),
# The Dash Slider is built according to Feature #1 ranges
dcc.Slider(
id='X1_slider',
min=dropdown_1_min,
max=dropdown_1_max,
step=0.029311,
value=dropdown_1_mean,
marks={i: '{}°'.format(i) for i in np.arange(dropdown_1_min, dropdown_1_max)}
),
# The same logic is applied to the following names / sliders
html.H4(children=dropdown_2_label),
dcc.Slider(
id='X2_slider',
min=dropdown_2_min,
max=dropdown_2_max,
step=0.080384,
value=dropdown_2_mean,
marks={i: '{}°'.format(i) for i in np.arange(dropdown_2_min, dropdown_2_max)}
),
html.H4(children=dropdown_3_label),
dcc.Slider(
id='X3_slider',
min=dropdown_3_min,
max=dropdown_3_max,
step=0.6,
value=dropdown_3_mean,
marks={i: '{}people'.format(i) for i in np.arange(dropdown_2_min, dropdown_2_max)},
),
# The prediction result will be displayed and updated here
html.H2(id="prediction_result")
])
# The callback function will provide one "Ouput" in the form of a string (=children)
@app.callback(Output(component_id="prediction_result",component_property="children"),
# The values correspnding to the three sliders are obtained by calling their id and value property
[Input("X1_slider","value"), Input("X2_slider","value"), Input("X3_slider","value")])
# The input variable are set in the same order as the callback Inputs
def update_prediction(X1, X2, X3):
# We create a NumPy array in the form of the original features
# ["Pressure","Viscosity","Particles_size", "Temperature","Inlet_flow", "Rotating_Speed","pH","Color_density"]
# Except for the X1, X2 and X3, all other non-influencing parameters are set to their mean
input_X = np.array([258668827,
1,
1,
1,
X1,
X2,
X3,
df["bedrooms"].mean(),
df['beds'].mean(),
df['number_of_reviews'].mean(),
df["review_scores_rating"].mean(),
1,
1]).reshape(1, -1)
# Prediction is calculated based on the input_X array
prediction = model.named_steps['randomforestregressor'].predict(input_X)
# And retuned to the Output of the callback function
return "Prediction in Yen: {}".format(round(prediction[0]))
# return 'this is working'
if __name__ == "__main__":
app.run_server()
| 39.921466 | 114 | 0.6 |
11a170a7d9d4be2747fc2f885fdbdea415898a39 | 356 | py | Python | others/python/exceptions/except.py | bt3gl/Resources-Numerical_Methods_for_Physics | 8668215f107230fafd9bdeb0061d353328cf03e8 | [
"Apache-2.0"
] | 17 | 2019-10-28T03:13:07.000Z | 2020-11-21T17:38:06.000Z | others/python/exceptions/except.py | bt3gl/Resources-Numerical_Methods_for_Physics | 8668215f107230fafd9bdeb0061d353328cf03e8 | [
"Apache-2.0"
] | null | null | null | others/python/exceptions/except.py | bt3gl/Resources-Numerical_Methods_for_Physics | 8668215f107230fafd9bdeb0061d353328cf03e8 | [
"Apache-2.0"
] | 5 | 2020-05-09T07:55:32.000Z | 2020-12-12T11:05:42.000Z | # try openning a file for reading
try: f = open("file.txt", "r")
except IOError:
print "I/O Error"
# undefined variable
x = 1.0
try: x + y
except NameError:
print "undefined variable"
# example from tutorial
def this_fails():
x = 1/0
try:
this_fails()
except ZeroDivisionError as detail:
print 'Handling run-time error:', detail
| 14.833333 | 44 | 0.671348 |
11a1d2a8f067924755b1bb004f5652117e69edcd | 1,787 | py | Python | balloon_learning_environment/env/gym.py | johannah/balloon-learning-environment | cdb2e582f2b03c41f037bf76142d31611f5e0316 | [
"Apache-2.0"
] | 64 | 2021-11-09T08:49:02.000Z | 2022-03-30T17:33:54.000Z | balloon_learning_environment/env/gym.py | johannah/balloon-learning-environment | cdb2e582f2b03c41f037bf76142d31611f5e0316 | [
"Apache-2.0"
] | null | null | null | balloon_learning_environment/env/gym.py | johannah/balloon-learning-environment | cdb2e582f2b03c41f037bf76142d31611f5e0316 | [
"Apache-2.0"
] | 5 | 2021-11-14T18:56:42.000Z | 2022-03-18T16:22:31.000Z | # coding=utf-8
# Copyright 2022 The Balloon Learning Environment Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Balloon Learning Environment gym utilities."""
import contextlib
def register_env() -> None:
"""Register the Gym environment."""
# We need to import Gym's registration module inline or else we'll
# get a circular dependency that will result in an error when importing gym
from gym.envs import registration # pylint: disable=g-import-not-at-top
env_id = 'BalloonLearningEnvironment-v0'
env_entry_point = 'balloon_learning_environment.env.balloon_env:BalloonEnv'
# We guard registration by checking if our env is already registered
# This is necesarry because the plugin system will load our module
# which also calls this function. If multiple `register()` calls are
# made this will result in a warning to the user.
registered = env_id in registration.registry.env_specs
if not registered:
with contextlib.ExitStack() as stack:
# This is a workaround for Gym 0.21 which didn't support
# registering into the root namespace with the plugin system.
if hasattr(registration, 'namespace'):
stack.enter_context(registration.namespace(None))
registration.register(id=env_id, entry_point=env_entry_point)
| 43.585366 | 77 | 0.758254 |
11a1e1b730dc4e433d8e1358594ee3d9a8526d1b | 15,181 | py | Python | models/multimodal_transformer.py | XiaoJake/MTTR | c383c5b151e3c97aeb45cd2fb4bf08719016498b | [
"Apache-2.0"
] | 516 | 2021-11-30T03:22:41.000Z | 2022-03-31T19:48:59.000Z | models/multimodal_transformer.py | codwest/MTTR | c383c5b151e3c97aeb45cd2fb4bf08719016498b | [
"Apache-2.0"
] | 15 | 2021-12-07T02:43:24.000Z | 2022-03-27T15:59:32.000Z | models/multimodal_transformer.py | codwest/MTTR | c383c5b151e3c97aeb45cd2fb4bf08719016498b | [
"Apache-2.0"
] | 57 | 2021-11-30T08:49:51.000Z | 2022-03-25T19:41:08.000Z | """
MTTR Multimodal Transformer class.
Modified from DETR https://github.com/facebookresearch/detr
"""
import copy
import os
from typing import Optional
import torch
import torch.nn.functional as F
from torch import nn, Tensor
from einops import rearrange, repeat
from transformers import RobertaModel, RobertaTokenizerFast
from models.position_encoding_2d import PositionEmbeddingSine2D
os.environ["TOKENIZERS_PARALLELISM"] = "false" # this disables a huggingface tokenizer warning (printed every epoch)
class MultimodalTransformer(nn.Module):
def __init__(self, num_encoder_layers=3, num_decoder_layers=3,
text_encoder_type="roberta-base", freeze_text_encoder=True, **kwargs):
super().__init__()
self.d_model = kwargs['d_model']
encoder_layer = TransformerEncoderLayer(**kwargs)
self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers)
decoder_layer = TransformerDecoderLayer(**kwargs)
self.decoder = TransformerDecoder(decoder_layer, num_decoder_layers, norm=nn.LayerNorm(self.d_model),
return_intermediate=True)
self.pos_encoder_2d = PositionEmbeddingSine2D()
self._reset_parameters()
self.text_encoder = RobertaModel.from_pretrained(text_encoder_type)
self.text_encoder.pooler = None # this pooler is never used, this is a hack to avoid DDP problems...
self.tokenizer = RobertaTokenizerFast.from_pretrained(text_encoder_type)
self.freeze_text_encoder = freeze_text_encoder
if freeze_text_encoder:
for p in self.text_encoder.parameters():
p.requires_grad_(False)
self.txt_proj = FeatureResizer(
input_feat_size=self.text_encoder.config.hidden_size,
output_feat_size=self.d_model,
dropout=kwargs['dropout'],
)
def _reset_parameters(self):
for p in self.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
def forward(self, vid_embeds, vid_pad_mask, text_queries, obj_queries):
device = vid_embeds.device
t, b, _, h, w = vid_embeds.shape
txt_memory, txt_pad_mask = self.forward_text(text_queries, device)
# add temporal dim to txt memory & padding mask:
txt_memory = repeat(txt_memory, 's b c -> s (t b) c', t=t)
txt_pad_mask = repeat(txt_pad_mask, 'b s -> (t b) s', t=t)
vid_embeds = rearrange(vid_embeds, 't b c h w -> (h w) (t b) c')
# Concat the image & text embeddings on the sequence dimension
encoder_src_seq = torch.cat((vid_embeds, txt_memory), dim=0)
seq_mask = torch.cat((rearrange(vid_pad_mask, 't b h w -> (t b) (h w)'), txt_pad_mask), dim=1)
# vid_pos_embed is: [T*B, H, W, d_model]
vid_pos_embed = self.pos_encoder_2d(rearrange(vid_pad_mask, 't b h w -> (t b) h w'), self.d_model)
# use zeros in place of pos embeds for the text sequence:
pos_embed = torch.cat((rearrange(vid_pos_embed, 't_b h w c -> (h w) t_b c'), torch.zeros_like(txt_memory)), dim=0)
memory = self.encoder(encoder_src_seq, src_key_padding_mask=seq_mask, pos=pos_embed) # [S, T*B, C]
vid_memory = rearrange(memory[:h*w, :, :], '(h w) (t b) c -> t b c h w', h=h, w=w, t=t, b=b)
txt_memory = memory[h*w:, :, :]
txt_memory = rearrange(txt_memory, 's t_b c -> t_b s c')
txt_memory = [t_mem[~pad_mask] for t_mem, pad_mask in zip(txt_memory, txt_pad_mask)] # remove padding
# add T*B dims to query embeds (was: [N, C], where N is the number of object queries):
obj_queries = repeat(obj_queries, 'n c -> n (t b) c', t=t, b=b)
tgt = torch.zeros_like(obj_queries) # [N, T*B, C]
# hs is [L, N, T*B, C] where L is number of layers in the decoder
hs = self.decoder(tgt, memory, memory_key_padding_mask=seq_mask, pos=pos_embed, query_pos=obj_queries)
hs = rearrange(hs, 'l n (t b) c -> l t b n c', t=t, b=b)
return hs, vid_memory, txt_memory
def forward_text(self, text_queries, device):
tokenized_queries = self.tokenizer.batch_encode_plus(text_queries, padding='longest', return_tensors='pt')
tokenized_queries = tokenized_queries.to(device)
with torch.inference_mode(mode=self.freeze_text_encoder):
encoded_text = self.text_encoder(**tokenized_queries)
# Transpose memory because pytorch's attention expects sequence first
txt_memory = rearrange(encoded_text.last_hidden_state, 'b s c -> s b c')
txt_memory = self.txt_proj(txt_memory) # change text embeddings dim to model dim
# Invert attention mask that we get from huggingface because its the opposite in pytorch transformer
txt_pad_mask = tokenized_queries.attention_mask.ne(1).bool() # [B, S]
return txt_memory, txt_pad_mask
def num_parameters(self):
return sum(p.numel() for p in self.parameters() if p.requires_grad)
class TransformerEncoder(nn.Module):
def __init__(self, encoder_layer, num_layers, norm=None):
super().__init__()
self.layers = _get_clones(encoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
def forward(self, src,
mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
output = src
for layer in self.layers:
output = layer(output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos)
if self.norm is not None:
output = self.norm(output)
return output
class TransformerDecoder(nn.Module):
def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
super().__init__()
self.layers = _get_clones(decoder_layer, num_layers)
self.num_layers = num_layers
self.norm = norm
self.return_intermediate = return_intermediate
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
output = tgt
intermediate = []
for layer in self.layers:
output = layer(output, memory, tgt_mask=tgt_mask,
memory_mask=memory_mask,
tgt_key_padding_mask=tgt_key_padding_mask,
memory_key_padding_mask=memory_key_padding_mask,
pos=pos, query_pos=query_pos)
if self.return_intermediate:
intermediate.append(self.norm(output))
if self.norm is not None:
output = self.norm(output)
if self.return_intermediate:
intermediate.pop()
intermediate.append(output)
if self.return_intermediate:
return torch.stack(intermediate)
return output.unsqueeze(0)
class TransformerEncoderLayer(nn.Module):
def __init__(self, d_model, nheads, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False, **kwargs):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nheads, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self,
src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(src, pos)
src2 = self.self_attn(q, k, value=src, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src = self.norm1(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
src = src + self.dropout2(src2)
src = self.norm2(src)
return src
def forward_pre(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
src2 = self.norm1(src)
q = k = self.with_pos_embed(src2, pos)
src2 = self.self_attn(q, k, value=src2, attn_mask=src_mask,
key_padding_mask=src_key_padding_mask)[0]
src = src + self.dropout1(src2)
src2 = self.norm2(src)
src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
src = src + self.dropout2(src2)
return src
def forward(self, src,
src_mask: Optional[Tensor] = None,
src_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
return self.forward_post(src, src_mask, src_key_padding_mask, pos)
class TransformerDecoderLayer(nn.Module):
def __init__(self, d_model, nheads, dim_feedforward=2048, dropout=0.1,
activation="relu", normalize_before=False, **kwargs):
super().__init__()
self.self_attn = nn.MultiheadAttention(d_model, nheads, dropout=dropout)
self.multihead_attn = nn.MultiheadAttention(d_model, nheads, dropout=dropout)
# Implementation of Feedforward model
self.linear1 = nn.Linear(d_model, dim_feedforward)
self.dropout = nn.Dropout(dropout)
self.linear2 = nn.Linear(dim_feedforward, d_model)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.norm3 = nn.LayerNorm(d_model)
self.dropout1 = nn.Dropout(dropout)
self.dropout2 = nn.Dropout(dropout)
self.dropout3 = nn.Dropout(dropout)
self.activation = _get_activation_fn(activation)
self.normalize_before = normalize_before
def with_pos_embed(self, tensor, pos: Optional[Tensor]):
return tensor if pos is None else tensor + pos
def forward_post(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
q = k = self.with_pos_embed(tgt, query_pos)
tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt = self.norm1(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt = self.norm2(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
tgt = tgt + self.dropout3(tgt2)
tgt = self.norm3(tgt)
return tgt
def forward_pre(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
tgt2 = self.norm1(tgt)
q = k = self.with_pos_embed(tgt2, query_pos)
tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
key_padding_mask=tgt_key_padding_mask)[0]
tgt = tgt + self.dropout1(tgt2)
tgt2 = self.norm2(tgt)
tgt2 = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
key=self.with_pos_embed(memory, pos),
value=memory, attn_mask=memory_mask,
key_padding_mask=memory_key_padding_mask)[0]
tgt = tgt + self.dropout2(tgt2)
tgt2 = self.norm3(tgt)
tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
tgt = tgt + self.dropout3(tgt2)
return tgt
def forward(self, tgt, memory,
tgt_mask: Optional[Tensor] = None,
memory_mask: Optional[Tensor] = None,
tgt_key_padding_mask: Optional[Tensor] = None,
memory_key_padding_mask: Optional[Tensor] = None,
pos: Optional[Tensor] = None,
query_pos: Optional[Tensor] = None):
if self.normalize_before:
return self.forward_pre(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
return self.forward_post(tgt, memory, tgt_mask, memory_mask,
tgt_key_padding_mask, memory_key_padding_mask, pos, query_pos)
def _get_clones(module, N):
return nn.ModuleList([copy.deepcopy(module) for i in range(N)])
class FeatureResizer(nn.Module):
    """
    Maps a set of embeddings of dimension C1 to dimension C2 via a linear
    projection, optionally followed by LayerNorm, then dropout.
    """

    def __init__(self, input_feat_size, output_feat_size, dropout, do_ln=True):
        super().__init__()
        self.do_ln = do_ln
        # Object feature encoding: linear projection C1 -> C2.
        self.fc = nn.Linear(input_feat_size, output_feat_size, bias=True)
        self.layer_norm = nn.LayerNorm(output_feat_size, eps=1e-12)
        self.dropout = nn.Dropout(dropout)

    def forward(self, encoder_features):
        # Project, normalize (if enabled), and regularize.
        projected = self.fc(encoder_features)
        if self.do_ln:
            projected = self.layer_norm(projected)
        return self.dropout(projected)
def _get_activation_fn(activation):
"""Return an activation function given a string"""
if activation == "relu":
return F.relu
if activation == "gelu":
return F.gelu
if activation == "glu":
return F.glu
raise RuntimeError(F"activation should be relu/gelu, not {activation}.")
| 44.259475 | 122 | 0.625585 |
11a22b195401a97025bc1265b213cb97ff210032 | 403 | py | Python | docker_sdk_api/shared/helpers/get_model_zip.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | [
"Apache-2.0"
] | 20 | 2021-07-13T13:08:57.000Z | 2022-03-29T09:38:00.000Z | docker_sdk_api/shared/helpers/get_model_zip.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | [
"Apache-2.0"
] | null | null | null | docker_sdk_api/shared/helpers/get_model_zip.py | BMW-InnovationLab/BMW-Semantic-Segmentation-Training-GUI | 902f35a7e367e635898f687b16a830db892fbaa5 | [
"Apache-2.0"
] | 2 | 2021-07-12T08:42:53.000Z | 2022-03-04T18:41:25.000Z | import os
from typing import Dict
def get_downloadable_zip(folder_path: str) -> Dict[str, str]:
    """Map each ``*.zip`` file name to the name of the directory that
    directly contains it, scanning every directory under *folder_path*.

    Note: zip files sitting directly in *folder_path* itself are not
    reported (only entries inside subdirectories are inspected).
    """
    servable_models: Dict[str, str] = {}
    for parent, subdirs, _files in os.walk(folder_path):
        for subdir in subdirs:
            entries = os.listdir(os.path.join(parent, subdir))
            for entry in entries:
                if entry.endswith(".zip"):
                    servable_models[entry] = subdir
    return servable_models
| 28.785714 | 63 | 0.622829 |
11a302e0300bce122a82770aa16b84ca6e8d73b5 | 6,065 | py | Python | groups/views.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | [
"WTFPL"
] | 1 | 2020-11-19T14:49:41.000Z | 2020-11-19T14:49:41.000Z | groups/views.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | [
"WTFPL"
] | null | null | null | groups/views.py | 3crabs/class-book | f5de12be816aa9be889d8413007be8eb4abdf45f | [
"WTFPL"
] | null | null | null | from django.core.mail import EmailMessage
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from accounting.models import Attendance, Result
from accounting.templatetags import my_tags
from class_book import settings
from groups.models import Group, Student
from subjects.models import Subject
import xlwt
def groups(request):
    # List all groups; on POST, create a new group from the submitted name.
    # NOTE: locals() is passed to the template, so the local variable names
    # (object_list, item) are part of the template contract.
    if request.POST:
        item = Group(name=request.POST['name'])
        item.save()
    object_list = Group.objects.all().order_by("name")
    return render(request, 'groups/index.html', locals())
def group(request, pk):
    # Show one group's detail page; a POST containing 'delete' removes the
    # group and falls back to the full group list.
    group = Group.objects.get(id=pk)
    if 'delete' in request.POST:
        group.delete()
        object_list = Group.objects.all().order_by("name")
        return render(request, 'groups/index.html', locals())
    # Re-fetch after the branch; locals() exposes group/subject_list to the
    # template.
    group = Group.objects.get(id=pk)
    subject_list = Subject.objects.all().order_by("name")
    return render(request, 'groups/info.html', locals())
def group_students(request, pk):
    # On POST, add a student to the group and backfill an Attendance row for
    # every existing lesson and a Result row for every existing task of each
    # subject the group is enrolled in.
    if request.POST:
        item = Student(
            name=request.POST['name'],
            email=request.POST['email'],
            group_id=pk,
        )
        item.save()
        group = Group.objects.get(id=pk)
        subjects = group.subjects.all()
        for subject in subjects:
            for lesson in subject.lesson_set.all():
                attendance = Attendance()
                attendance.student = item
                attendance.lesson = lesson
                attendance.save()
            for task in subject.task_set.all():
                result = Result()
                result.student = item
                result.task = task
                result.save()
    group = Group.objects.get(id=pk)
    return render(request, 'groups/info.html', locals())
def group_student(request, pk, id):
    # Delete a student (when the POST carries 'delete') and re-render the
    # group's info page.
    student = Student.objects.get(id=id)
    if 'delete' in request.POST:
        student.delete()
    group = Group.objects.get(id=pk)
    subject_list = Subject.objects.all().order_by("name")
    return render(request, 'groups/info.html', locals())
def group_subjects(request, pk):
    # On POST, enroll the group into a subject and backfill Attendance and
    # Result rows for every current student of the group.
    if request.POST:
        group = Group.objects.get(id=pk)
        subject = Subject.objects.get(id=request.POST['subject'])
        group.subjects.add(subject)
        group.save()
        group = Group.objects.get(id=pk)
        for student in group.student_set.all():
            for lesson in subject.lesson_set.all():
                attendance = Attendance()
                attendance.student = student
                attendance.lesson = lesson
                attendance.save()
            for task in subject.task_set.all():
                result = Result()
                result.student = student
                result.task = task
                result.save()
    group = Group.objects.get(id=pk)
    subject_list = Subject.objects.all().order_by("name")
    return render(request, 'groups/info.html', locals())
def group_subject(request, pk, id):
    """Detach a subject from a group (POST with 'delete') or show the
    accounting page for the group/subject pair.

    Fix: removed a leftover debug ``print(itogs)`` statement.
    """
    subject = Subject.objects.get(id=id)
    if 'delete' in request.POST:
        group = Group.objects.get(id=pk)
        group.subjects.remove(subject)
        group.save()
        group = Group.objects.get(id=pk)
        subject_list = Subject.objects.all().order_by("name")
        return render(request, 'groups/info.html', locals())
    else:
        group = Group.objects.get(id=pk)
        # NOTE(review): itogs maps each student id to id + 1, which looks
        # like placeholder data for the accounting template — confirm intent.
        itogs = {}
        for student in group.student_set.all():
            itogs[student.id] = student.id + 1
        return render(request, 'accouting/index.html', locals())
def create_xls_(group, subject):
    # Build an .xlsx workbook with two sections — attendance and results —
    # for the given group/subject pair, save it under groups/static/docs/,
    # and return the saved file's path.
    book = xlwt.Workbook(encoding="utf-8")
    sheet = book.add_sheet(group.name)
    sheet.write(0, 0, "Успеваемость группы " + group.name + " по предмету " + subject.name)
    # Attendance section: header row of lesson names, then one row per
    # student with per-lesson visit flags and an aggregate column.
    row = 1
    col = 0
    sheet.write(row, col, "Посещаемость")
    row += 1
    sheet.write(row, col, "Студент")
    col += 1
    for lesson in subject.lesson_set.all():
        sheet.write(row, col, lesson.name)
        col += 1
    sheet.write(row, col, "Посещаемость")
    row += 1
    col = 0
    for student in group.student_set.all():
        sheet.write(row, col, student.name)
        col += 1
        for attendance in student.attendance_set.filter(lesson__subject_id=subject.id):
            sheet.write(row, col, attendance.visit)
            col += 1
        sheet.write(row, col, my_tags.lessons(student, subject))
        row += 1
        col = 0
    # Results section: header row of task names, then one row per student
    # with per-task ratings and an aggregate column.
    sheet.write(row, col, "Результаты")
    row += 1
    sheet.write(row, col, "Студент")
    col += 1
    for task in subject.task_set.all():
        sheet.write(row, col, task.name)
        col += 1
    sheet.write(row, col, "Успеваемость")
    row += 1
    col = 0
    for student in group.student_set.all():
        sheet.write(row, col, student.name)
        col += 1
        for result in student.result_set.filter(task__subject_id=subject.id):
            sheet.write(row, col, result.rating)
            col += 1
        sheet.write(row, col, my_tags.tasks(student, subject))
        row += 1
        col = 0
    # File name encodes the (group, subject) pair so reports do not collide.
    path = "groups/static/docs/spreadsheet-" + str(group.id) + "-" + str(subject.id) + ".xlsx"
    book.save(path)
    return path
def create_xls(request, pk, id):
    """Build the grade spreadsheet for (group, subject) and return it as an
    attachment download.

    Fix: the original left the file handle open (resource leak); the file is
    now read inside a context manager before building the response.
    """
    group = Group.objects.get(id=pk)
    subject = group.subjects.get(id=id)
    path = create_xls_(group, subject)
    with open(path, 'rb') as file:
        response = HttpResponse(
            file.read(),
            content_type='application/vnd.openxmlformats-officedocument.spreadsheetml.sheet')
    response['Content-Disposition'] = 'attachment; filename=table.xlsx'
    return response
def sending(request, pk, id):
    """Email the group's grade spreadsheet for a subject to every student,
    then redirect back to the referring page."""
    group = Group.objects.get(id=pk)
    recipients = [student.email for student in group.student_set.all()]
    message = EmailMessage(
        'Результаты',
        'Здравствуй, вот ваша успеваемость',
        settings.EMAIL_HOST_USER,
        recipients
    )
    # Generate the workbook on disk and attach it to the message.
    attachment_path = create_xls_(group, Subject.objects.get(id=id))
    message.attach_file(attachment_path)
    message.send()
    return HttpResponseRedirect(request.META.get('HTTP_REFERER'))
| 32.433155 | 115 | 0.616983 |
11a618f11b6fac52f4355fdfd119636e79cd945a | 1,415 | py | Python | pset_pandas_ext/101problems/solutions/p65.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 5 | 2019-04-08T20:05:37.000Z | 2019-12-04T20:48:45.000Z | pset_pandas_ext/101problems/solutions/p65.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 8 | 2019-04-15T15:16:05.000Z | 2022-02-12T10:33:32.000Z | pset_pandas_ext/101problems/solutions/p65.py | mottaquikarim/pydev-psets | 9749e0d216ee0a5c586d0d3013ef481cc21dee27 | [
"MIT"
] | 2 | 2019-04-10T00:14:42.000Z | 2020-02-26T20:35:21.000Z | """
"""
66. How to replace both the diagonals of dataframe with 0?
"""
"""
Difficulty Level: L2
"""
"""
Replace both values in both diagonals of df with 0.
"""
"""
Input
"""
"""
df = pd.DataFrame(np.random.randint(1,100, 100).reshape(10, -1))
"""
"""
Desired output: the same frame with the main diagonal and the
anti-diagonal replaced by zeros, e.g.

#     0   1   2   3   4   5   6   7   8   9
# 0   0  46  26  44  11  62  18  70  68   0
# 1  87   0  52  50  81  43  83  39   0  59
# ...
# 9   0  80  28  45  77  12  67  80   7   0
"""
# Fix: the original solution used `pd` and `np` without importing them,
# so the script raised NameError when run.
import numpy as np
import pandas as pd

# Input
df = pd.DataFrame(np.random.randint(1, 100, 100).reshape(10, -1))

# Solution: zero the main diagonal and the anti-diagonal in place.
for i in range(df.shape[0]):
    df.iat[i, i] = 0
    df.iat[df.shape[0] - i - 1, i] = 0
11a8090bef6d5fb982bc2e421b4aadbc73c27dfc | 3,861 | py | Python | src/tree/tree_builder.py | rpSebastian/LeducPoker | 5bbdf61d885bcb23490410ef871de924c58bbf01 | [
"MIT"
] | 1 | 2020-05-22T15:45:22.000Z | 2020-05-22T15:45:22.000Z | src/tree/tree_builder.py | rpSebastian/LeducPoker | 5bbdf61d885bcb23490410ef871de924c58bbf01 | [
"MIT"
] | null | null | null | src/tree/tree_builder.py | rpSebastian/LeducPoker | 5bbdf61d885bcb23490410ef871de924c58bbf01 | [
"MIT"
] | 1 | 2020-05-31T03:01:42.000Z | 2020-05-31T03:01:42.000Z | from settings import constants
from game import bet_sizing, card_tools, card_to_string
from base import Node
import torch
class PokerTreeBuilder():
    """Builds the public game tree by depth-first expansion from a root
    description (street, bets, current player, board) given in `params`."""
    def __init__(self):
        pass
    def build_tree(self, params):
        # Copy the root description so the caller's tensors are not shared
        # with the tree (bets/board are cloned).
        root = Node()
        root.street = params.root_node.street
        root.bets = params.root_node.bets.clone()
        root.current_player = params.root_node.current_player
        root.board = params.root_node.board.clone()
        root.board_string = params.root_node.board_string
        self.build_tree_dfs(root)
        return root
    def build_tree_dfs(self, current_node):
        # NOTE(review): the pot is taken as the smaller of the two bet
        # amounts (what both players have committed) — confirm intent.
        current_node.pot = torch.min(current_node.bets).item()
        children = self.get_children_nodes(current_node)
        current_node.children = children
        for child in children:
            self.build_tree_dfs(child)
    def get_children_nodes(self, parent_node):
        # Terminal nodes have no children; otherwise dispatch on whether the
        # acting "player" is the chance player.
        if parent_node.terminal:
            return []
        chance_node = parent_node.current_player == constants.players.chance
        if chance_node:
            return self.get_children_chance_nodes(parent_node)
        else:
            return self.get_children_player_nodes(parent_node)
    def get_children_chance_nodes(self, parent_node):
        # One child per possible second-round board; P1 acts first on the
        # new street and the bet counter resets.
        children = []
        next_boards = card_tools.get_second_round_boards()
        for board in next_boards:
            chance_node = Node(parent_node)
            chance_node.current_player = constants.players.P1
            chance_node.street = parent_node.street + 1
            chance_node.board = board
            chance_node.board_string = card_to_string.cards_to_string(board)
            chance_node.num_bets = 0
            chance_node.action = chance_node.board_string
            children.append(chance_node)
        return children
    def get_children_player_nodes(self, parent_node):
        # Enumerate fold / check / call / raise children for a decision node.
        children = []
        # fold action
        fold_node = Node(parent_node)
        fold_node.terminal = True
        fold_node.action = "fold"
        fold_node.node_type = constants.node_types.terminal_fold
        children.append(fold_node)
        # P1 start check action
        if (parent_node.current_player == constants.players.P1 and
            parent_node.bets[0] == parent_node.bets[1]):
            check_node = Node(parent_node)
            check_node.action = "check"
            children.append(check_node)
        # raise -> ( P1 / P2 call ) -> chance
        # P1 check -> (P2 check ) -> chance
        elif parent_node.street == 0 and (
            parent_node.bets[0] != parent_node.bets[1] or
            parent_node.bets[0] == parent_node.bets[1] and
            parent_node.current_player == constants.players.P2
        ):
            chance_node = Node(parent_node)
            chance_node.current_player = constants.players.chance
            chance_node.bets[:] = chance_node.bets.max()
            chance_node.action = "call" if parent_node.bets[0] != parent_node.bets[1] else "check"
            children.append(chance_node)
        # call -> terminal
        else:
            terminal_call_node = Node(parent_node)
            # NOTE(review): `1 - constants.players.P2` presumably evaluates to
            # P1; confirm it should not be `1 - parent_node.current_player`.
            terminal_call_node.current_player = 1 - constants.players.P2
            terminal_call_node.terminal = True
            terminal_call_node.node_type = constants.node_types.terminal_call
            terminal_call_node.bets[:] = terminal_call_node.bets.max()
            terminal_call_node.action = "call"
            children.append(terminal_call_node)
        # raise action
        possible_bets = bet_sizing.get_possible_bets(parent_node)
        for possible_bet in possible_bets:
            raise_node = Node(parent_node)
            raise_node.bets = possible_bet
            raise_node.num_bets += 1
            raise_node.action = "raise"
            children.append(raise_node)
        return children
# Module-level singleton used by the rest of the code base.
tree_builder = PokerTreeBuilder()
| 38.227723 | 98 | 0.644911 |
11ab85dad8fb08a5c5eee01b9be2f4e803d8712c | 50,062 | py | Python | src/htsql/core/tr/bind.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | null | null | null | src/htsql/core/tr/bind.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | null | null | null | src/htsql/core/tr/bind.py | sirex/htsql | 52275f6a584b412c109822d2ed2a5e69ac522cdf | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2006-2013, Prometheus Research, LLC
#
"""
:mod:`htsql.core.tr.bind`
=========================
This module implements the binding process.
"""
from ..util import maybe, listof, tupleof, similar
from ..adapter import Adapter, Protocol, adapt, adapt_many
from ..domain import (Domain, BooleanDomain, IntegerDomain, DecimalDomain,
FloatDomain, UntypedDomain, EntityDomain, RecordDomain, ListDomain,
IdentityDomain, VoidDomain)
from ..classify import normalize
from ..error import Error, translate_guard, choices_guard, point
from ..syn.syntax import (Syntax, CollectSyntax, SelectSyntax, ApplySyntax,
FunctionSyntax, PipeSyntax, OperatorSyntax, PrefixSyntax,
ProjectSyntax, FilterSyntax, LinkSyntax, DetachSyntax, AttachSyntax,
AssignSyntax, ComposeSyntax, LocateSyntax, IdentitySyntax, GroupSyntax,
IdentifierSyntax, UnpackSyntax, ReferenceSyntax, LiftSyntax,
StringSyntax, LabelSyntax, NumberSyntax, RecordSyntax, DirectSyntax)
from .binding import (Binding, WrappingBinding, CollectBinding, RootBinding,
HomeBinding, TableBinding, ChainBinding, ColumnBinding,
QuotientBinding, KernelBinding, ComplementBinding, LocateBinding,
SieveBinding, AttachBinding, SortBinding, CastBinding, IdentityBinding,
ImplicitCastBinding, RescopingBinding, AssignmentBinding,
DefineBinding, DefineReferenceBinding, DefineCollectionBinding,
DefineLiftBinding, SelectionBinding, WildSelectionBinding,
DirectionBinding, TitleBinding, RerouteBinding,
ReferenceRerouteBinding, AliasBinding, LiteralBinding, FormulaBinding,
VoidBinding, Recipe, LiteralRecipe, SelectionRecipe, FreeTableRecipe,
AttachedTableRecipe, ColumnRecipe, KernelRecipe, ComplementRecipe,
IdentityRecipe, ChainRecipe, SubstitutionRecipe, BindingRecipe,
ClosedRecipe, PinnedRecipe, AmbiguousRecipe)
from .lookup import (lookup_attribute, lookup_reference, lookup_complement,
lookup_attribute_set, lookup_reference_set, expand, direct, guess_tag,
identify, unwrap)
from .signature import IsEqualSig, AndSig
from .coerce import coerce
from .decorate import decorate
class BindingState(object):
    """
    Holds the mutable state of the binding process: the root lookup scope,
    the current lookup scope, a stack of previously active scopes, and an
    optional set of predefined references for the root scope.
    """
    def __init__(self, root, environment=None):
        # `environment`, when given, is an iterable of (name, recipe) pairs
        # that predefine references in the root scope.
        assert isinstance(root, RootBinding)
        # The root lookup scope.
        self.root = root
        # The current lookup scope.
        self.scope = root
        # The stack of previous lookup scopes.
        self.scope_stack = []
        # References in the root scope.
        self.environment = environment
        if self.environment is not None:
            collection = {}
            for name, recipe in self.environment:
                # Normalize the reference name before registering it.
                name = normalize(name)
                collection[name] = recipe
            if collection:
                self.scope = DefineCollectionBinding(
                        self.scope, collection, True, self.scope.syntax)
    def push_scope(self, scope):
        """
        Sets the new lookup scope.
        This function stores the current scope in the stack and makes
        the given binding the new lookup scope.  Use the :attr:`scope`
        attribute to get the current scope; :meth:`pop_scope` to restore
        the previous scope.
        `scope` (:class:`htsql.core.tr.binding.Binding`)
            The new lookup scope.
        """
        # Sanity check on the argument.
        assert isinstance(scope, Binding)
        # Ensure that the root scope was set.
        assert self.root is not None
        # Save the current lookup scope.
        self.scope_stack.append(self.scope)
        # Assign the new lookup scope.
        self.scope = scope
    def pop_scope(self):
        """
        Restores the previous lookup scope.
        This functions restores the previous lookup scope from the stack.
        Use the :attr:`scope` attribute to get the current scope;
        :meth:`push_scope` to change the current scope.
        """
        # Restore the previous lookup scope from the stack.
        self.scope = self.scope_stack.pop()
    def bind(self, syntax, scope=None):
        """
        Binds the given syntax node using the current binding state.
        Returns a binding node.
        `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
            The syntax node to bind.
        `scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
            If set, the lookup scope is set to `scope` when
            binding the syntax node.
        """
        with translate_guard(syntax):
            if scope is not None:
                self.push_scope(scope)
            # Dispatch on the syntax node type via the `Bind` adapter.
            binding = Bind.__prepare__(syntax, self)()
            if scope is not None:
                self.pop_scope()
            return binding
    def use(self, recipe, syntax, scope=None):
        """
        Applies a recipe to produce a binding node.
        Returns a binding node.
        `recipe` (:class:`htsql.core.tr.binding.Recipe`)
            The recipe to apply.
        `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
            The syntax node associated with the recipe.
        `scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
            If set, the lookup scope is set to `scope` when
            binding the syntax node.
        """
        # If passed, set the new lookup scope.
        if scope is not None:
            self.push_scope(scope)
        # Realize and apply `BindByRecipe` adapter.
        with translate_guard(syntax):
            binding = BindByRecipe.__invoke__(recipe, syntax, self)
        # Restore the old lookup scope.
        if scope is not None:
            self.pop_scope()
        # Return the generated binding node.
        return binding
    def call(self, syntax, scope=None):
        """
        Binds a global function or a global identifier.
        Returns a binding node.
        `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
            The syntax node to bind.
        `scope` (:class:`htsql.core.tr.binding.Binding` or ``None``)
            If set, the lookup context is set to `scope` when
            binding the syntax node.
        """
        # If passed, set the new lookup scope.
        if scope is not None:
            self.push_scope(scope)
        # Realize and apply `BindByName` protocol.
        with translate_guard(syntax):
            binding = BindByName.__invoke__(syntax, self)
        # Restore the old lookup scope.
        if scope is not None:
            self.pop_scope()
        # Return the generated binding node.
        return binding
class Bind(Adapter):
    """
    Translates a syntax node to a binding node.
    This is an interface adapter; see subclasses for implementations.
    The binding process resolves identifiers against database objects,
    resolves and validates operators and function calls, and determine
    types of all expression.
    The :class:`Bind` adapter has the following signature::
        Bind: (Syntax, BindingState) -> Binding
    The adapter is polymorphic on the `Syntax` argument.
    `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
        The syntax node to bind.
    `state` (:class:`BindingState`)
        The current state of the binding process.
    """
    # Dispatch on the concrete syntax node type.
    adapt(Syntax)
    def __init__(self, syntax, state):
        assert isinstance(syntax, Syntax)
        assert isinstance(state, BindingState)
        self.syntax = syntax
        self.state = state
    def __call__(self):
        # The default implementation raises an error.  It is actually
        # unreachable since we provide an implementation for all syntax nodes.
        raise Error("Unable to bind a node")
def hint_choices(choices):
    # Build a "did you mean: ..." hint from a list of candidate names;
    # returns None when there is nothing to suggest.
    assert isinstance(choices, listof(unicode))
    if not choices:
        return None
    quoted = ["'%s'" % choice.encode('utf-8') for choice in choices]
    if len(quoted) == 1:
        return "did you mean: %s" % quoted[0]
    # Join all but the last candidate with commas, then add "or <last>".
    return "did you mean: %s or %s" % (", ".join(quoted[:-1]), quoted[-1])
class BindCollect(Bind):
    adapt(CollectSyntax)
    def __call__(self):
        ## FIXME: an empty segment syntax should not be generated.
        #if self.syntax.arm is None:
        #    raise Error("output columns are not specified",
        #                self.syntax.mark)
        # Bind the segment expression.
        if self.syntax.arm is not None:
            seed = self.state.bind(self.syntax.arm)
            # An in-segment assignment (`/x := ...`) defines a name and uses
            # it as the segment seed in one step.
            if isinstance(seed, AssignmentBinding):
                with translate_guard(seed):
                    if len(seed.terms) != 1:
                        raise Error("Qualified definition is not allowed"
                                    " for an in-segment assignment")
                    if seed.parameters is not None:
                        raise Error("Parameterized definition is not allowed"
                                    " for an in-segment assignment")
                name, is_reference = seed.terms[0]
                if is_reference:
                    recipe = BindingRecipe(self.state.bind(seed.body))
                else:
                    recipe = SubstitutionRecipe(self.state.scope, [],
                                                None, seed.body)
                recipe = ClosedRecipe(recipe)
                syntax = seed.syntax
                if isinstance(syntax, AssignSyntax):
                    syntax = syntax.larm
                seed = self.state.use(recipe, syntax)
        else:
            seed = self.state.scope
        # Render the seed as an output column and wrap it in a list.
        seed = Select.__invoke__(seed, self.state)
        domain = ListDomain(seed.domain)
        return CollectBinding(self.state.scope, seed, domain,
                              self.syntax)
class Select(Adapter):
    # Converts a binding into an output column; dispatches on the domain.
    adapt(Domain)
    @classmethod
    def __dispatch__(interface, binding, *args, **kwds):
        # Dispatch on the type of the binding's domain rather than on the
        # binding class itself.
        assert isinstance(binding, Binding)
        return (type(binding.domain),)
    def __init__(self, binding, state):
        self.binding = binding
        self.state = state
    def __call__(self):
        # By default, coerce to a scalar output column.
        domain = coerce(self.binding.domain)
        if domain is None:
            # FIXME: separate implementation for VoidDomain with a better error
            # message.
            raise Error("Output column must be scalar")
        return ImplicitCastBinding(self.binding, domain, self.binding.syntax)
class SelectRecord(Select):
    # Entities and records are rendered by expanding their public fields.
    adapt_many(EntityDomain,
               RecordDomain)
    def __call__(self):
        recipes = expand(self.binding, with_syntax=True, with_wild=True,
                         with_class=True)
        if recipes is None:
            # Nothing to expand: fall back to scalar coercion (which reports
            # an error for non-scalar domains).
            return super(SelectRecord, self).__call__()
        elements = []
        for syntax, recipe in recipes:
            element = self.state.use(recipe, syntax, scope=self.binding)
            # Recursively render each field as an output column.
            element = Select.__invoke__(element, self.state)
            elements.append(element)
        fields = [decorate(element) for element in elements]
        domain = RecordDomain(fields)
        binding = SelectionBinding(self.binding, elements, domain,
                                   self.binding.syntax)
        return binding
class SelectList(Select):
    adapt(ListDomain)
    def __call__(self):
        # Lists are already in output form; pass through unchanged.
        return self.binding
class SelectIdentity(Select):
    adapt(IdentityDomain)
    def __call__(self):
        # Identity values are rendered as-is.
        return self.binding
class SelectUntyped(Select):
    adapt(UntypedDomain)
    def __call__(self):
        # Untyped literals keep their raw form in the output.
        return self.binding
class BindSelect(Bind):
    adapt(SelectSyntax)
    def __call__(self):
        # Bind the right arm within the scope produced by the left arm.
        base = self.state.bind(self.syntax.larm)
        return self.state.bind(self.syntax.rarm, scope=base)
class BindRecord(Bind):
    adapt(RecordSyntax)
    def __call__(self):
        # Binds a selector `{...}`: each arm becomes an output element;
        # in-selector assignments extend the scope seen by later arms.
        # Extract selector elements.
        elements = []
        scope = self.state.scope
        self.state.push_scope(scope)
        for arm in self.syntax.arms:
            binding = self.state.bind(arm)
            # Handle in-selector assignments.
            if isinstance(binding, AssignmentBinding):
                with translate_guard(binding):
                    if len(binding.terms) != 1:
                        raise Error("Qualified definition is not allowed"
                                    " for an in-selector assignment")
                    if binding.parameters is not None:
                        raise Error("Parameterized definition is not allowed"
                                    " for an in-selector assignment")
                name, is_reference = binding.terms[0]
                if is_reference:
                    recipe = BindingRecipe(self.state.bind(binding.body))
                else:
                    recipe = SubstitutionRecipe(scope, [],
                                                None, binding.body)
                recipe = ClosedRecipe(recipe)
                syntax = binding.syntax
                if isinstance(syntax, AssignSyntax):
                    syntax = syntax.larm.larms[0]
                binding = self.state.use(recipe, syntax)
                # Make the new definition visible to the remaining arms.
                if is_reference:
                    scope = DefineReferenceBinding(scope, name,
                                                   recipe, scope.syntax)
                else:
                    scope = DefineBinding(scope, name, None,
                                          recipe, scope.syntax)
                self.state.pop_scope()
                self.state.push_scope(scope)
            # Extract nested selectors, if any.
            bindings = []
            recipes = expand(binding, with_wild=True)
            if recipes is not None:
                seed = binding
                for syntax, recipe in recipes:
                    binding = self.state.use(recipe, syntax)
                    binding = RescopingBinding(binding, seed, binding.syntax)
                    bindings.append(binding)
            else:
                bindings.append(binding)
            # Handle in-selector direction decorators.
            order = []
            for binding in bindings:
                direction = direct(binding)
                if direction is not None:
                    order.append(binding)
            if order:
                # Sorting decorators wrap the scope for the following arms.
                scope = SortBinding(scope, order, None, None, scope.syntax)
                self.state.pop_scope()
                self.state.push_scope(scope)
            elements.extend(bindings)
        self.state.pop_scope()
        # Generate a selection scope.
        fields = [decorate(element) for element in elements]
        domain = RecordDomain(fields)
        return SelectionBinding(scope, elements, domain, self.syntax)
class BindApply(Bind):
    adapt(ApplySyntax)
    def __call__(self):
        # Prefer a parameterized attribute defined in the local scope;
        # otherwise fall back to the global function namespace.
        recipe = lookup_attribute(self.state.scope,
                self.syntax.name, len(self.syntax.arguments))
        if recipe is None:
            return self.state.call(self.syntax)
        return self.state.use(recipe, self.syntax)
class BindOperator(Bind):
    adapt(OperatorSyntax)
    def __call__(self):
        # Operators live only in the global namespace: local scopes cannot
        # define them, so the lookup goes straight to the global scope.
        return self.state.call(self.syntax)
class BindProject(Bind):
    adapt(ProjectSyntax)
    def __call__(self):
        # Get the seed of the quotient.
        seed = self.state.bind(self.syntax.larm)
        # Get the kernel expressions.
        elements = []
        binding = self.state.bind(self.syntax.rarm, scope=seed)
        recipes = expand(binding, with_syntax=True)
        if recipes is not None:
            for syntax, recipe in recipes:
                element = self.state.use(recipe, syntax, scope=binding)
                element = RescopingBinding(element, binding, element.syntax)
                elements.append(element)
        else:
            elements.append(binding)
        # Validate types of the kernel expressions.
        kernels = []
        for element in elements:
            domain = coerce(element.domain)
            with translate_guard(element):
                if domain is None:
                    raise Error("Expected a scalar column")
            kernel = ImplicitCastBinding(element, domain, element.syntax)
            kernels.append(kernel)
        # Generate the quotient scope.
        quotient = QuotientBinding(self.state.scope, seed, kernels,
                                   self.syntax)
        # Assign names to the kernel and the complement links when possible.
        binding = quotient
        name = guess_tag(seed)
        if name is not None:
            recipe = ComplementRecipe(quotient)
            recipe = ClosedRecipe(recipe)
            binding = DefineBinding(binding, name, None, recipe, self.syntax)
        for index, kernel in enumerate(kernels):
            name = guess_tag(kernel)
            if name is not None:
                recipe = KernelRecipe(quotient, index)
                recipe = ClosedRecipe(recipe)
                binding = DefineBinding(binding, name, None, recipe,
                                        self.syntax)
        return binding
class BindFilter(Bind):
    adapt(FilterSyntax)
    def __call__(self):
        # Bind the sieve base, then the predicate in the base's scope;
        # the predicate is forced to the Boolean type.
        base = self.state.bind(self.syntax.larm)
        condition = self.state.bind(self.syntax.rarm, scope=base)
        condition = ImplicitCastBinding(condition, coerce(BooleanDomain()),
                                        condition.syntax)
        return SieveBinding(base, condition, self.syntax)
class BindLink(Bind):
    adapt(LinkSyntax)
    def __call__(self):
        # Binds `origin -> target` linking: origin and target image columns
        # are paired up and coerced to a common type.
        # Bind the origin images.
        origin_images = []
        binding = self.state.bind(self.syntax.larm)
        recipes = expand(binding, with_syntax=True)
        if recipes is not None:
            for syntax, recipe in recipes:
                element = self.state.use(recipe, syntax)
                element = RescopingBinding(element, binding, element.syntax)
                origin_images.append(element)
        else:
            origin_images.append(binding)
        # Bind the target scope.
        home = HomeBinding(self.state.scope, self.syntax)
        seed = self.state.bind(self.syntax.rarm, scope=home)
        # Bind the target images; if not provided, reuse the syntax node
        # of the origin images.
        binding = seed
        target_images = []
        recipes = expand(seed, with_syntax=True)
        if recipes is None:
            binding = self.state.bind(self.syntax.larm, scope=seed)
            recipes = expand(binding, with_syntax=True)
        if recipes is not None:
            for syntax, recipe in recipes:
                element = self.state.use(recipe, syntax, scope=seed)
                element = RescopingBinding(element, binding, element.syntax)
                target_images.append(element)
        else:
            target_images.append(binding)
        # Correlate origin and target images.
        if len(origin_images) != len(target_images):
            raise Error("Found unbalanced origin and target columns")
        images = []
        for origin_image, target_image in zip(origin_images, target_images):
            domain = coerce(origin_image.domain, target_image.domain)
            if domain is None:
                raise Error("Cannot coerce origin and target columns"
                            " to a common type")
            origin_image = ImplicitCastBinding(origin_image, domain,
                                               origin_image.syntax)
            target_image = ImplicitCastBinding(target_image, domain,
                                               target_image.syntax)
            images.append((origin_image, target_image))
        # Generate a link scope.
        return AttachBinding(self.state.scope, seed, images, None, self.syntax)
class BindAttach(Bind):
    adapt(AttachSyntax)
    def __call__(self):
        # Bind the attachment target in a fresh home scope.
        home = HomeBinding(self.state.scope, self.syntax)
        seed = self.state.bind(self.syntax.rarm, scope=home)
        recipe = BindingRecipe(seed)
        # Make the seed reachable (as the lift and, if it has a name, by
        # name) from the scope used to bind the attachment condition.
        scope = self.state.scope
        scope = DefineLiftBinding(scope, recipe, self.syntax)
        name = guess_tag(seed)
        if name is not None:
            scope = DefineBinding(scope, name, None, recipe, self.syntax)
        condition = self.state.bind(self.syntax.larm, scope=scope)
        condition = ImplicitCastBinding(condition, coerce(BooleanDomain()),
                                        condition.syntax)
        return AttachBinding(self.state.scope, seed, [], condition, self.syntax)
class BindDetach(Bind):
    adapt(DetachSyntax)
    def __call__(self):
        # Evaluate the operand against a fresh home scope, detaching it
        # from the current naming context.
        home = HomeBinding(self.state.scope, self.syntax)
        return self.state.bind(self.syntax.arm, scope=home)
class BindAssign(Bind):
    adapt(AssignSyntax)
    def __call__(self):
        # Parse the left side of the assignment.  It takes one of the forms:
        #   $reference := ...
        #   identifier := ...
        #   identifier(parameter,...) := ...
        #   parent. ... .identifier(parameter,...) := ...
        #   parent. ... .$identifier(parameter,...) := ...
        # The dot-separated names and reference indicators.
        terms = []
        parameters = None
        syntax = self.syntax.larm
        for idx, arm in enumerate(syntax.larms):
            if isinstance(arm, ReferenceSyntax):
                with translate_guard(arm):
                    # A reference is only allowed as the last term.
                    if idx < len(syntax.larms)-1:
                        raise Error("Expected an identifier")
                terms.append((arm.identifier.name, True))
            else:
                terms.append((arm.name, False))
        if syntax.rarms is not None:
            parameters = []
            for arm in syntax.rarms:
                if isinstance(arm, ReferenceSyntax):
                    parameters.append((arm.identifier.name, True))
                else:
                    parameters.append((arm.name, False))
        # The right side of the assignment expression.
        body = self.syntax.rarm
        # Generate an assignment node.
        return AssignmentBinding(self.state.scope, terms, parameters, body,
                                 self.syntax)
class BindCompose(Bind):
    adapt(ComposeSyntax)
    def __call__(self):
        # `parent . child`: bind the left arm, then bind the right arm
        # within the scope it produces.
        parent = self.state.bind(self.syntax.larm)
        return self.state.bind(self.syntax.rarm, scope=parent)
class BindLocate(Bind):
    adapt(LocateSyntax)
    def __call__(self):
        # Binds a locator `base[id]`: matches the locator components against
        # the identity structure of the base.
        seed = self.state.bind(self.syntax.larm)
        recipe = identify(seed)
        with translate_guard(seed):
            if recipe is None:
                raise Error("Cannot determine identity")
        identity = self.state.use(recipe, self.syntax.rarm, scope=seed)
        location = self.state.bind(self.syntax.rarm, scope=seed)
        with translate_guard(self.syntax.rarm):
            if identity.domain.width != location.width:
                raise Error("Found ill-formed locator")
        # Recursively aligns locator elements with identity fields, casting
        # each leaf to the field's domain.
        def convert(identity, elements):
            assert isinstance(identity, IdentityBinding)
            images = []
            for field in identity.elements:
                if isinstance(field.domain, IdentityDomain):
                    # Consume enough elements to cover the nested identity.
                    total_width = 0
                    items = []
                    while total_width < field.domain.width:
                        assert elements
                        element = elements.pop(0)
                        if (total_width == 0 and
                                isinstance(element, IdentityBinding) and
                                element.width == field.domain.width):
                            items = element.elements[:]
                            total_width = element.width
                        elif isinstance(element, IdentityBinding):
                            items.append(element)
                            total_width += element.width
                        else:
                            items.append(element)
                            total_width += 1
                    with translate_guard(self.syntax.rarm):
                        if total_width > field.domain.width:
                            raise Error("Found ill-formed locator")
                    images.extend(convert(field, items))
                else:
                    assert elements
                    element = elements.pop(0)
                    with translate_guard(self.syntax.larm):
                        if isinstance(element, IdentityBinding):
                            raise Error("Found ill-formed locator")
                    item = ImplicitCastBinding(element, field.domain,
                                               element.syntax)
                    images.append((item, field))
            return images
        elements = location.elements[:]
        # Unwrap redundant single-element identity nesting.
        while len(elements) == 1 and isinstance(elements[0], IdentityBinding):
            elements = elements[0].elements[:]
        images = convert(identity, elements)
        return LocateBinding(self.state.scope, seed, images, None, self.syntax)
class BindIdentity(Bind):
    """Binds an identity constructor."""

    adapt(IdentitySyntax)

    def __call__(self):
        members = []
        for arm in self.syntax.arms:
            bound = self.state.bind(arm)
            # Flatten a directly nested identity value.
            nested = unwrap(bound, IdentityBinding, is_deep=False)
            members.append(bound if nested is None else nested)
        return IdentityBinding(self.state.scope, members, self.syntax)
class BindGroup(Bind):
    """Binds a parenthesized expression."""

    adapt(GroupSyntax)

    def __call__(self):
        # Keep the inner binding, but attach the parenthesized syntax
        # node to the result.
        inner = self.state.bind(self.syntax.arm)
        return WrappingBinding(inner, self.syntax)
class BindIdentifier(Bind):
    """Binds a bare identifier."""

    adapt(IdentifierSyntax)

    def __call__(self):
        # Prefer an attribute from the current lookup scope; otherwise
        # fall back to the global (function) scope.
        recipe = lookup_attribute(self.state.scope, self.syntax.name)
        if recipe is None:
            return self.state.call(self.syntax)
        return self.state.use(recipe, self.syntax)
class BindUnpack(Bind):
    """Binds the wildcard selector ``*`` (or a positional ``*<index>``)."""

    adapt(UnpackSyntax)

    def __call__(self):
        # Get all public columns in the current lookup scope.
        recipes = expand(self.state.scope, with_syntax=True, with_wild=True,
                         with_class=True, with_link=True)
        if recipes is None:
            raise Error("Cannot expand '*' since output columns"
                        " are not defined")
        # If a position is given, extract a specific element.
        if self.syntax.index is not None:
            index = self.syntax.index
            # Positions are 1-based in the query language.
            index -= 1
            if not (0 <= index < len(recipes)):
                raise Error("Expected value in range 1-%s" % len(recipes))
            syntax, recipe = recipes[index]
            syntax = point(syntax, self.syntax)
            return self.state.use(recipe, syntax)
        # Otherwise, generate a selection node covering every column.
        elements = []
        for syntax, recipe in recipes:
            syntax = point(syntax, self.syntax)
            element = self.state.use(recipe, syntax)
            elements.append(element)
        fields = [decorate(element) for element in elements]
        domain = RecordDomain(fields)
        return WildSelectionBinding(self.state.scope, elements, domain,
                                    self.syntax)
class BindDirect(Bind):
    """Binds a sort-direction decorator (``+`` ascending, ``-`` descending)."""

    adapt(DirectSyntax)

    def __call__(self):
        arm = self.state.bind(self.syntax.arm)
        directions = {u'+': +1, u'-': -1}
        return DirectionBinding(arm, directions[self.syntax.symbol],
                                self.syntax)
class BindReference(Bind):
    """Binds a reference expression ``$name``."""

    adapt(ReferenceSyntax)

    def __call__(self):
        name = self.syntax.identifier.name
        recipe = lookup_reference(self.state.scope, name)
        if recipe is not None:
            return self.state.use(recipe, self.syntax)
        # Not found: suggest similarly named references in the hint.
        model = name.lower()
        known = lookup_reference_set(self.state.scope)
        choices = [u"$" + candidate for candidate in sorted(known)
                   if similar(model, candidate)]
        with choices_guard(choices):
            raise Error("Found unknown reference", self.syntax)
class BindLift(Bind):
    """Binds the complement indicator ``^``."""

    adapt(LiftSyntax)

    def __call__(self):
        # `^` is only meaningful inside a quotient scope.
        recipe = lookup_complement(self.state.scope)
        if recipe is None:
            raise Error("'^' could only be used in a quotient scope")
        return self.state.use(recipe, self.syntax)
class BindString(Bind):
    """Binds a quoted literal."""

    adapt_many(StringSyntax,
               LabelSyntax)

    def __call__(self):
        # A quoted literal does not necessarily represent a string
        # value, so it starts with the untyped domain and is coerced
        # to a concrete type later.
        return LiteralBinding(self.state.scope, self.syntax.text,
                              UntypedDomain(), self.syntax)
class BindNumber(Bind):
    """Binds an unquoted (numeric) literal."""

    adapt(NumberSyntax)

    def __call__(self):
        # Start with an untyped literal, then cast it to the numeric
        # domain indicated by the literal form.
        untyped = LiteralBinding(self.state.scope, self.syntax.text,
                                 UntypedDomain(), self.syntax)
        if self.syntax.is_float:
            domain = coerce(FloatDomain())
        elif self.syntax.is_decimal:
            domain = coerce(DecimalDomain())
        elif self.syntax.is_integer:
            domain = coerce(IntegerDomain())
        return ImplicitCastBinding(untyped, domain, self.syntax)
class BindByName(Protocol):
    """
    Binds an application node.

    This is an abstract protocol interface that provides a mechanism
    for name-based dispatch of application syntax nodes.

    The :class:`BindByName` interface has the following signature::

        BindByName: (ApplicationSyntax, BindingState) -> Binding
        BindByName: (IdentifierSyntax, BindingState) -> Binding

    The protocol is polymorphic on the name and the number of arguments
    of the syntax node.

    To add an implementation of the interface, define a subclass
    of :class:`BindByName` and specify its name and expected number
    of arguments using function :func:`call`.

    Class attributes:

    `names` (a list of names or pairs `(name, length)`)
        List of names the component matches.
        Here `name` is a non-empty string, `length` is an integer or
        ``None``, where ``-1`` indicates any number of arguments, ``None``
        means no arguments are accepted.
    """

    names = []

    @classmethod
    def __dominates__(component, other):
        # Determine if the component dominates another component
        # assuming that they match the same dispatch key.
        # A component implementing a protocol interface dominates
        # another component if one of the following two conditions
        # holds:
        # (1) The component is a subclass of the other component.
        if issubclass(component, other):
            return True
        # (2) The component and the other component match the
        # same name, but the former requires a fixed number of
        # arguments while the latter accepts a node with any
        # number of arguments.
        for name in component.__names__:
            arity = -1
            if isinstance(name, tuple):
                name, arity = name
            name = name.lower()
            for other_name in other.__names__:
                other_arity = -1
                if isinstance(other_name, tuple):
                    other_name, other_arity = other_name
                other_name = other_name.lower()
                if name == other_name:
                    if arity != -1 and other_arity == -1:
                        return True
        return False

    @classmethod
    def __matches__(component, dispatch_key):
        # Check if the component matches the given function name
        # and the number of arguments.
        assert isinstance(dispatch_key, tupleof(unicode, maybe(int)))
        # The name and the number of arguments of the call node.
        key_name, key_arity = dispatch_key
        # We want to compare names case insensitive.  Unfortunately,
        # we cannot use `normalize` from `htsql.core.tr.lookup` since it
        # mangles symbols.
        key_name = key_name.lower()
        # Check if any of the component names matches the given name.
        for name in component.__names__:
            # `name` could be either a string or a pair of a string
            # and an integer.  The former assumes that the component
            # accepts call nodes with any number of arguments.
            arity = -1
            if isinstance(name, tuple):
                name, arity = name
            name = name.lower()
            # Check if the component name matches the node name.
            if name == key_name:
                if ((arity == key_arity) or
                        (arity == -1 and key_arity is not None)):
                    return True
        # None of the names matched the dispatch key.
        return False

    @classmethod
    def __dispatch__(interface, syntax, *args, **kwds):
        assert isinstance(syntax, (ApplySyntax, IdentifierSyntax))
        # We override `dispatch` since, as opposed to regular protocol
        # interfaces, we also want to take into account not only the
        # function name, but also the number of arguments.
        if isinstance(syntax, ApplySyntax):
            name = syntax.name
            arity = len(syntax.arguments)
        elif isinstance(syntax, IdentifierSyntax):
            name = syntax.name
            # `None` arity distinguishes a bare identifier from a
            # zero-argument call.
            arity = None
        return (name, arity)

    def __init__(self, syntax, state):
        assert isinstance(syntax, (ApplySyntax, IdentifierSyntax))
        assert isinstance(state, BindingState)
        self.syntax = syntax
        self.state = state
        # Extract commonly accessed attributes of the call node.
        if isinstance(syntax, ApplySyntax):
            self.name = syntax.name
            self.arguments = syntax.arguments
        elif isinstance(syntax, IdentifierSyntax):
            self.name = syntax.name
            self.arguments = None

    def __call__(self):
        # The default implementation; override in subclasses.
        # Generate a hint with a list of alternative names, then raise
        # an "unknown name" error matching the syntax node kind.
        model = self.name.lower()
        arity = None
        if self.arguments is not None:
            arity = len(self.arguments)
        # Collect candidate names: attributes from the current scope
        # plus all globally registered function names.
        attributes = lookup_attribute_set(self.state.scope)
        global_attributes = set()
        for component_name in BindByName.__catalogue__():
            component_arity = -1
            if isinstance(component_name, tuple):
                component_name, component_arity = component_name
            if isinstance(component_name, str):
                component_name = component_name.decode('utf-8')
            component_name = component_name.lower()
            global_attributes.add((component_name, component_arity))
        all_attributes = sorted(attributes|global_attributes)
        choices = []
        # For an identifier: maybe the user meant a reference `$name`.
        if not choices and arity is None:
            names = lookup_reference_set(self.state.scope)
            if model in names:
                choices = ["a reference '$%s'" % model.encode('utf-8')]
        # ... or a function with the same name.
        if not choices and arity is None:
            if any(model == sample
                   for sample, sample_arity in all_attributes
                   if sample_arity is not None):
                choices = ["a function '%s'" % model.encode('utf-8')]
        # ... or a similarly spelled attribute.
        if not choices and arity is None:
            choices = [sample
                       for sample, sample_arity in all_attributes
                       if sample_arity is None and sample != model
                       and similar(model, sample)]
        # For a call: maybe the function exists with a different arity;
        # if so, report the expected number of arguments directly.
        if not choices and arity is not None \
                and not isinstance(self.syntax, OperatorSyntax):
            arities = [sample_arity
                       for sample, sample_arity in all_attributes
                       if sample == model and
                          sample_arity not in [None, -1, arity]]
            if arities:
                required_arity = []
                arities.sort()
                if len(arities) == 1:
                    required_arity.append(str(arities[0]))
                else:
                    required_arity.append(", ".join(str(sample_arity)
                                          for sample_arity in arities[:-1]))
                    required_arity.append("or")
                    required_arity.append(str(arities[-1]))
                if required_arity[-1] == "1":
                    required_arity.append("argument")
                else:
                    required_arity.append("arguments")
                required_arity = " ".join(required_arity)
                raise Error("Function '%s' requires %s; got %s"
                            % (self.syntax.identifier,
                               required_arity, arity))
        # ... or an attribute with the same name.
        if not choices and arity is not None:
            if any(model == sample
                   for sample, sample_arity in all_attributes
                   if sample_arity is None):
                choices = ["an attribute '%s'" % model.encode('utf-8')]
        # ... or a similarly spelled function.
        if not choices and arity is not None:
            choices = [sample
                       for sample, sample_arity in all_attributes
                       if sample_arity in [-1, arity] and sample != model
                       and similar(model, sample)]
        scope_name = guess_tag(self.state.scope)
        if scope_name is not None:
            scope_name = scope_name.encode('utf-8')
        with choices_guard(choices):
            if isinstance(self.syntax, (FunctionSyntax, PipeSyntax)):
                raise Error("Found unknown function",
                            self.syntax.identifier)
            if isinstance(self.syntax, OperatorSyntax):
                raise Error("Found unknown operator",
                            self.syntax.symbol)
            if isinstance(self.syntax, PrefixSyntax):
                raise Error("Found unknown unary operator",
                            self.syntax.symbol)
            if isinstance(self.syntax, IdentifierSyntax):
                raise Error("Found unknown attribute",
                            "%s.%s" % (scope_name, self.syntax)
                            if scope_name is not None else str(self.syntax))
class BindByRecipe(Adapter):
    """
    Applies a recipe to generate a binding node.

    This is an abstract adapter that generates new binding nodes
    from binding recipes.  The :class:`BindByRecipe` interface
    has the following signature::

        BindByRecipe: (Recipe, Syntax, BindingState) -> Binding

    The adapter is polymorphic by the first argument.

    `recipe` (:class:`htsql.core.tr.binding.Recipe`)
        A recipe to apply.

    `syntax` (:class:`htsql.core.tr.syntax.Syntax`)
        The syntax node associated with the recipe.

    `state` (:class:`BindingState`)
        The current binding state.
    """

    adapt(Recipe)

    def __init__(self, recipe, syntax, state):
        assert isinstance(recipe, Recipe)
        assert isinstance(syntax, Syntax)
        assert isinstance(state, BindingState)
        self.recipe = recipe
        self.syntax = syntax
        self.state = state

    def __call__(self):
        # The default implementation should not be reachable: every
        # concrete recipe type must have its own adapter.
        raise Error("unable to bind a node")
class BindByLiteral(BindByRecipe):
    """Produces a literal binding from a literal recipe."""

    adapt(LiteralRecipe)

    def __call__(self):
        recipe = self.recipe
        return LiteralBinding(self.state.scope, recipe.value,
                              recipe.domain, self.syntax)
class BindBySelection(BindByRecipe):
    """Produces a selection from a list of component recipes."""

    adapt(SelectionRecipe)

    def __call__(self):
        elements = [self.state.use(recipe, self.syntax)
                    for recipe in self.recipe.recipes]
        fields = [decorate(element) for element in elements]
        return SelectionBinding(self.state.scope, elements,
                                RecordDomain(fields), self.syntax)
class BindByFreeTable(BindByRecipe):
    """Produces a scope for a free (unlinked) table."""

    adapt(FreeTableRecipe)

    def __call__(self):
        return TableBinding(self.state.scope, self.recipe.table, self.syntax)
class BindByAttachedTable(BindByRecipe):
    """Produces a chain of joins leading to an attached table."""

    adapt(AttachedTableRecipe)

    def __call__(self):
        return ChainBinding(self.state.scope, self.recipe.joins, self.syntax)
class BindByColumn(BindByRecipe):
    """Produces a column scope, with an optional outgoing link."""

    adapt(ColumnRecipe)

    def __call__(self):
        if self.recipe.link is not None:
            link = self.state.use(self.recipe.link, self.syntax)
        else:
            link = None
        return ColumnBinding(self.state.scope, self.recipe.column,
                             link, self.syntax)
class BindByKernel(BindByRecipe):
    """Produces a kernel expression of a quotient scope."""

    adapt(KernelRecipe)

    def __call__(self):
        recipe = self.recipe
        return KernelBinding(self.state.scope, recipe.quotient,
                             recipe.index, self.syntax)
class BindByComplement(BindByRecipe):
    """Produces a complement link to a quotient scope."""

    adapt(ComplementRecipe)

    def __call__(self):
        return ComplementBinding(self.state.scope, self.recipe.quotient,
                                 self.syntax)
class BindByIdentity(BindByRecipe):
    """Produces an identity value from its element recipes."""

    adapt(IdentityRecipe)

    def __call__(self):
        parts = []
        for recipe in self.recipe.elements:
            parts.append(self.state.use(recipe, self.syntax))
        return IdentityBinding(self.state.scope, parts, self.syntax)
class BindBySubstitution(BindByRecipe):
    """Expands a calculated attribute or reference definition."""

    adapt(SubstitutionRecipe)

    def __call__(self):
        # Bind the given syntax node in place of an identifier
        # or a function call.
        # Check if the recipe has a qualifier.
        if self.recipe.terms:
            # Find the same identifier in the base scope.
            assert isinstance(self.syntax, IdentifierSyntax)
            name, is_reference = self.recipe.terms[0]
            arity = None
            if (len(self.recipe.terms) == 1 and
                    self.recipe.parameters is not None):
                arity = len(self.recipe.parameters)
            recipe = lookup_attribute(self.recipe.base, self.syntax.name)
            if recipe is None:
                raise Error("Found unknown attribute", self.syntax)
            binding = self.state.use(recipe, self.syntax)
            # Check if the term is a reference.
            if is_reference:
                # Must be the last term in the assignment.
                assert len(self.recipe.terms) == 1
                # Bind the reference against the scope where it is defined.
                body = self.state.bind(self.recipe.body, scope=binding)
                recipe = BindingRecipe(body)
            # Augment the scope with the tail of the recipe.
            else:
                recipe = SubstitutionRecipe(binding, self.recipe.terms[1:],
                                            self.recipe.parameters,
                                            self.recipe.body)
            recipe = ClosedRecipe(recipe)
            # Attach the definition to the attribute scope so deeper
            # qualifiers can be resolved against it.
            if is_reference:
                binding = DefineReferenceBinding(binding, name,
                                                recipe, self.syntax)
            else:
                binding = DefineBinding(binding, name, arity,
                                        recipe, self.syntax)
            return binding
        # Otherwise, bind the syntax node associated with the recipe.
        # Bind against the current scope, but route all lookup requests
        # to the scope where the recipe was defined.
        scope = self.state.scope
        scope = RerouteBinding(scope, self.recipe.base, scope.syntax)
        # Bind the parameters.
        if self.recipe.parameters is not None:
            assert isinstance(self.syntax, ApplySyntax)
            assert len(self.syntax.arguments) == len(self.recipe.parameters)
            for (name, is_reference), syntax in zip(self.recipe.parameters,
                                                    self.syntax.arguments):
                # Each actual argument is bound eagerly in the caller's
                # scope and exposed under the formal parameter name.
                binding = self.state.bind(syntax)
                recipe = BindingRecipe(binding)
                recipe = ClosedRecipe(recipe)
                if is_reference:
                    scope = DefineReferenceBinding(scope, name,
                                                   recipe, scope.syntax)
                else:
                    scope = DefineBinding(scope, name, None,
                                          recipe, scope.syntax)
        # Bind the syntax node associated with the recipe.
        binding = self.state.bind(self.recipe.body, scope=scope)
        # Hide all references defined there.
        binding = ReferenceRerouteBinding(binding, self.state.scope,
                                          binding.syntax)
        return binding
class BindByBinding(BindByRecipe):
    """Unwraps a recipe that simply carries a ready binding."""

    adapt(BindingRecipe)

    def __call__(self):
        return self.recipe.binding
class BindByClosed(BindByRecipe):
    """Binds a closed recipe and pins the current syntax on the result."""

    adapt(ClosedRecipe)

    def __call__(self):
        # Bind the inner recipe, then force the current syntax node
        # onto the produced binding.
        inner = self.state.use(self.recipe.recipe, self.syntax)
        return AliasBinding(inner, self.syntax)
class BindByChain(BindByRecipe):
    """Applies a sequence of recipes, each in the scope of the previous."""

    adapt(ChainRecipe)

    def __call__(self):
        scope = self.state.scope
        for recipe in self.recipe.recipes:
            scope = self.state.use(recipe, self.syntax, scope=scope)
        return scope
class BindByPinned(BindByRecipe):
    """Binds a recipe in an explicitly pinned scope."""

    adapt(PinnedRecipe)

    def __call__(self):
        return self.state.use(self.recipe.recipe, self.syntax,
                              scope=self.recipe.scope)
class BindByAmbiguous(BindByRecipe):
    """Reports an ambiguous name, listing the known alternatives."""

    adapt(AmbiguousRecipe)

    def __call__(self):
        # For function calls, point the error at the identifier rather
        # than the whole application node.
        syntax = self.syntax
        if isinstance(self.syntax, (FunctionSyntax, PipeSyntax)):
            syntax = self.syntax.identifier
        # (Removed a dead `int = None` assignment that shadowed the
        # builtin `int` and was never read.)
        choices = []
        if self.recipe.alternatives:
            choices = [str(alternative)
                       for alternative in self.recipe.alternatives]
        with choices_guard(choices):
            raise Error("Found ambiguous name", syntax)
def bind(syntax, environment=None):
    """Binds a syntax tree against the root scope.

    `environment`, when given, maps parameter names to `Value` objects;
    each value is converted to a recipe and exposed as a reference.
    """
    recipes = []
    if environment is not None:
        # Sort names for a deterministic reference order.
        for name in sorted(environment):
            value = environment[name]
            if value.data is None:
                # NULL keeps its declared domain.
                recipe = LiteralRecipe(value.data, value.domain)
            elif isinstance(value.domain, ListDomain):
                item_recipes = [LiteralRecipe(item,
                                              value.domain.item_domain)
                                for item in value.data]
                recipe = SelectionRecipe(item_recipes)
            elif isinstance(value.domain, RecordDomain):
                item_recipes = [LiteralRecipe(item, profile.domain)
                                for item, profile in
                                    zip(value.data, value.domain.fields)]
                recipe = SelectionRecipe(item_recipes)
            elif isinstance(value.domain, IdentityDomain):
                def convert(domain, data):
                    # Recursively mirror a (possibly nested) identity
                    # value as an identity recipe.
                    items = []
                    for element, item in zip(domain.labels, data):
                        if isinstance(element, IdentityDomain):
                            item = convert(element, item)
                        else:
                            item = LiteralRecipe(item, element)
                        items.append(item)
                    return IdentityRecipe(items)
                recipe = convert(value.domain, value.data)
            else:
                # Scalar values become plain literals.
                recipe = LiteralRecipe(value.data, value.domain)
            recipes.append((name, recipe))
    root = RootBinding(syntax)
    state = BindingState(root, recipes)
    if isinstance(syntax, AssignSyntax):
        # `name := expr` produces a titled output column.
        specifier = syntax.larm
        with translate_guard(specifier):
            if specifier.identifier is None:
                raise Error("Expected an identifier")
        identifier = specifier.larms[0]
        binding = state.bind(syntax.rarm)
        binding = Select.__invoke__(binding, state)
        binding = TitleBinding(binding, identifier, binding.syntax)
    else:
        binding = state.bind(syntax)
        binding = Select.__invoke__(binding, state)
    return binding
| 37.028107 | 80 | 0.583716 |
11ad8fe6bba3193be56826f292aa054b4c5199e3 | 2,226 | py | Python | locuszoom_plotting_service/gwas/tests/factories.py | statgen/locuszoom-hosted | ecfcc5f48fefe2869ab277202a661c2575af6abb | [
"MIT"
] | null | null | null | locuszoom_plotting_service/gwas/tests/factories.py | statgen/locuszoom-hosted | ecfcc5f48fefe2869ab277202a661c2575af6abb | [
"MIT"
] | 14 | 2021-01-01T17:16:23.000Z | 2022-02-28T19:37:28.000Z | locuszoom_plotting_service/gwas/tests/factories.py | statgen/locuszoom-hosted | ecfcc5f48fefe2869ab277202a661c2575af6abb | [
"MIT"
] | null | null | null | import os
import random
from django.db.models import signals
from django.utils import timezone
import factory
from factory.django import DjangoModelFactory
from locuszoom_plotting_service.users.tests.factories import UserFactory
from .. import constants as lz_constants
from .. import models as lz_models
def choose_genome_build() -> str:
    """Return the identifier of a randomly chosen genome build."""
    # GENOME_BUILDS entries are (identifier, label)-style tuples;
    # only the identifier is returned.
    options = lz_constants.GENOME_BUILDS
    return random.choice(options)[0]
def choose_consortium() -> str:
    """Return a randomly chosen placeholder study/consortium name."""
    options = (
        'LocusZoom JS',
        'LocusZoom Standalone',
        'LocusZoom Hosted',
        'LocusZoom.org',
    )
    return random.choice(options)
# Signals are muted so that creating fixtures does not trigger the
# post-save hook on the model.
@factory.django.mute_signals(signals.post_save)
class AnalysisFilesetFactory(DjangoModelFactory):
    """Factory for one uploaded set of GWAS files."""
    raw_gwas_file = None  # Only create temp files if has_data trait is True
    ingest_status = 0  # pending (most tests don't run celery tasks, and therefore are "pending" processing)
    ingest_complete = None
    parser_options = factory.Dict({  # Parser options for standard gwas format (1-based column numbers)
        'chrom_col': 1,
        'pos_col': 2,
        'ref_col': 3,
        'alt_col': 4,
        'pvalue_col': 5,
        'is_neg_log_pvalue': False
    })

    class Meta:
        model = lz_models.AnalysisFileset

    class Params:
        # Most samples will be fine with a 0B file. Only provide actual data if explicitly requested.
        has_data = factory.Trait(
            raw_gwas_file=factory.django.FileField(
                from_path=os.path.join(os.path.dirname(__file__), 'fixtures/placeholder.txt'))
        )
        # Marks pipeline complete (without actually running it).
        # NOTE(review): `timezone.now()` here is evaluated once at import
        # time, so every instance shares the same timestamp; consider
        # `factory.LazyFunction(timezone.now)` if per-instance times matter.
        has_completed = factory.Trait(
            ingest_complete=timezone.now(),
            ingest_status=2
        )
class AnalysisInfoFactory(DjangoModelFactory):
    """Factory for analysis metadata, with a fresh owner and fileset."""
    owner = factory.SubFactory(UserFactory)
    label = factory.Faker('sentence', nb_words=2)
    # Randomized per instance via LazyFunction (not fixed at import time).
    study_name = factory.LazyFunction(choose_consortium)
    files = factory.SubFactory(AnalysisFilesetFactory)
    build = factory.LazyFunction(choose_genome_build)
    is_public = False

    class Meta:
        model = lz_models.AnalysisInfo
class ViewLinkFactory(DjangoModelFactory):
    """Factory for a shareable view link attached to an analysis."""
    label = factory.Faker('sentence', nb_words=2)
    gwas = factory.SubFactory(AnalysisInfoFactory)

    class Meta:
        model = lz_models.ViewLink
| 29.68 | 108 | 0.709344 |
11aed6db8dec1d89d1561ef9163cbf9b2aff8920 | 761 | py | Python | utils/api.py | alirzaev/vyatsu-schedule-viber-bot | ff44195742b07c541d67be1e8f4ce0e204cba70b | [
"MIT"
] | 1 | 2020-01-31T16:29:15.000Z | 2020-01-31T16:29:15.000Z | utils/api.py | alirzaev/vyatsu-schedule-viber-bot | ff44195742b07c541d67be1e8f4ce0e204cba70b | [
"MIT"
] | null | null | null | utils/api.py | alirzaev/vyatsu-schedule-viber-bot | ff44195742b07c541d67be1e8f4ce0e204cba70b | [
"MIT"
] | null | null | null | import requests
from os import getenv
from typing import List, Dict
# Base URL of the schedule API, read once at import time from the
# API_URL environment variable (None if unset).
_API_URL = getenv('API_URL')
def get_groups() -> List[Dict]:
    """Return all groups, organized by faculty.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        requests.Timeout: if the API does not respond within 10 seconds.
    """
    # An explicit timeout keeps a stalled API from hanging the caller
    # forever (requests has no default timeout).
    response = requests.get(f'{_API_URL}/api/v2/groups/by_faculty', timeout=10)
    response.raise_for_status()
    return response.json()
def get_season() -> str:
    """Return the current schedule season identifier.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        requests.Timeout: if the API does not respond within 10 seconds.
    """
    # Explicit timeout: requests would otherwise wait indefinitely.
    response = requests.get(f'{_API_URL}/api/v2/season/current', timeout=10)
    response.raise_for_status()
    return response.json()['season']
def get_schedule(group_id: str, season: str) -> Dict:
    """Return the schedule for the given group and season.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        requests.Timeout: if the API does not respond within 10 seconds.
    """
    # Explicit timeout: requests would otherwise wait indefinitely.
    response = requests.get(
        f'{_API_URL}/api/v2/schedule/{group_id}/{season}', timeout=10)
    response.raise_for_status()
    return response.json()
def get_calls() -> List:
    """Return the lesson call (bell) times.

    Raises:
        requests.HTTPError: if the API responds with an error status.
        requests.Timeout: if the API does not respond within 10 seconds.
    """
    # Explicit timeout: requests would otherwise wait indefinitely.
    response = requests.get(f'{_API_URL}/api/v2/calls', timeout=10)
    response.raise_for_status()
    return response.json()
| 21.742857 | 78 | 0.697766 |
11b20ebad8eab479fb6fed2be3f7940e76f88665 | 22,860 | py | Python | lib/modeling/torchResNet.py | Min-Sheng/CA_FSIS_Cell | c24750d860a9417b30819c05613282cd74dc517f | [
"MIT"
] | null | null | null | lib/modeling/torchResNet.py | Min-Sheng/CA_FSIS_Cell | c24750d860a9417b30819c05613282cd74dc517f | [
"MIT"
] | 1 | 2021-03-01T09:16:15.000Z | 2021-03-01T09:34:49.000Z | lib/modeling/torchResNet.py | Min-Sheng/CA_FSIS_Cell | c24750d860a9417b30819c05613282cd74dc517f | [
"MIT"
] | null | null | null | import torch
import torch.nn as nn
import math
import copy
from collections import OrderedDict
import torch.utils.model_zoo as model_zoo
from core.config import cfg
import utils.net as net_utils
from deform.torch_deform_conv.layers import ConvOffset2D
# Download locations of ImageNet-pretrained checkpoints, used as a
# fallback when no local weights file is configured.
model_urls = {
    'resnet50': 'https://s3.amazonaws.com/pytorch/models/resnet50-19c8e357.pth',
    'resnet101': 'https://s3.amazonaws.com/pytorch/models/resnet101-5d3b4d8f.pth',
    'resnet152': 'https://s3.amazonaws.com/pytorch/models/resnet152-b121ed2d.pth',
}
# ---------------------------------------------------------------------------- #
# Helper functions
# ---------------------------------------------------------------------------- #
def weight_mapping(state_dict):
    """Rename torchvision-style checkpoint keys to this model's naming.

    The stem (``conv1``/``bn1``) becomes ``res1.*`` and every
    ``layer<i>`` stage becomes ``res<i+1>``.  Values are deep-copied;
    the input dict is left untouched.
    """
    stem_renames = {
        'conv1.weight': 'res1.conv1.weight',
        'bn1.weight': 'res1.bn1.weight',
        'bn1.bias': 'res1.bn1.bias',
        'bn1.running_mean': 'res1.bn1.running_mean',
        'bn1.running_var': 'res1.bn1.running_var',
        'bn1.num_batches_tracked': 'res1.bn1.num_batches_tracked',
    }
    remapped = copy.deepcopy(state_dict)
    for key in state_dict:
        if key in stem_renames:
            remapped[stem_renames[key]] = remapped.pop(key)
        idx = key.find('layer')
        if idx != -1:
            # The digit right after 'layer' is the stage number.
            stage = int(key[idx + 5])
            new_key = key.replace('layer%d' % stage, 'res%d' % (stage + 1))
            remapped[new_key] = remapped.pop(key)
    return remapped
# ---------------------------------------------------------------------------- #
# Bits for specific architectures (ResNet50, ResNet101, ...)
# ---------------------------------------------------------------------------- #
def ResNet50_conv4_body(pretrained=True, model_path=None):
    """Build a ResNet-50 trunk truncated after res4 (conv4).

    Args:
        pretrained (bool): If True, load ImageNet weights from
            ``model_path`` (or the configured checkpoint), falling back
            to the torchvision model zoo.
        model_path: Optional checkpoint path overriding the config.
    """
    if model_path is None:
        model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
    model = ResNet_convX_body((3, 4, 6, 3), 4)
    if not pretrained:
        return model
    if model_path:
        print("Loading pretrained weights from %s" % (model_path))
        checkpoint = torch.load(model_path)['state_dict']
        # Strip the DataParallel 'module.' prefix from every key.
        renamed = copy.deepcopy(checkpoint)
        for key in checkpoint:
            _, bare = key.split('module.')
            renamed[bare] = renamed.pop(key)
        weights = weight_mapping(renamed)
    else:
        weights = weight_mapping(model_zoo.load_url(model_urls['resnet50']))
    model.load_state_dict(weights, strict=False)
    return model
def ResNet50_conv5_body(pretrained=True, model_path=None):
    """Build a full ResNet-50 trunk (through res5/conv5).

    Args:
        pretrained (bool): If True, load ImageNet weights from
            ``model_path`` (or the configured checkpoint), falling back
            to the torchvision model zoo.
        model_path: Optional checkpoint path overriding the config.
    """
    if model_path is None:
        model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
    model = ResNet_convX_body((3, 4, 6, 3), 5)
    if not pretrained:
        return model
    if model_path:
        print("Loading pretrained weights from %s" % (model_path))
        checkpoint = torch.load(model_path)['state_dict']
        # Strip the DataParallel 'module.' prefix from every key.
        renamed = copy.deepcopy(checkpoint)
        for key in checkpoint:
            _, bare = key.split('module.')
            renamed[bare] = renamed.pop(key)
        weights = weight_mapping(renamed)
    else:
        weights = weight_mapping(model_zoo.load_url(model_urls['resnet50']))
    model.load_state_dict(weights, strict=False)
    return model
def ResNet101_conv4_body(pretrained=True, model_path=None):
    """Build a ResNet-101 trunk truncated after res4 (conv4).

    Args:
        pretrained (bool): If True, load ImageNet weights from
            ``model_path`` (or the configured checkpoint), falling back
            to the torchvision model zoo.
        model_path: Optional checkpoint path overriding the config.
    """
    if model_path is None:
        model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
    model = ResNet_convX_body((3, 4, 23, 3), 4)
    if not pretrained:
        return model
    if model_path:
        print("Loading pretrained weights from %s" % (model_path))
        checkpoint = torch.load(model_path)['state_dict']
        # Strip the DataParallel 'module.' prefix from every key.
        renamed = copy.deepcopy(checkpoint)
        for key in checkpoint:
            _, bare = key.split('module.')
            renamed[bare] = renamed.pop(key)
        weights = weight_mapping(renamed)
    else:
        weights = weight_mapping(model_zoo.load_url(model_urls['resnet101']))
    model.load_state_dict(weights, strict=False)
    return model
def ResNet101_conv5_body(pretrained=True, model_path=None):
    """Build a full ResNet-101 trunk (through res5/conv5).

    Args:
        pretrained (bool): If True, load ImageNet weights from
            ``model_path`` (or the configured checkpoint), falling back
            to the torchvision model zoo.
        model_path: Optional checkpoint path overriding the config.
    """
    if model_path is None:
        model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
    model = ResNet_convX_body((3, 4, 23, 3), 5)
    if not pretrained:
        return model
    if model_path:
        print("Loading pretrained weights from %s" % (model_path))
        checkpoint = torch.load(model_path)['state_dict']
        # Strip the DataParallel 'module.' prefix from every key.
        renamed = copy.deepcopy(checkpoint)
        for key in checkpoint:
            _, bare = key.split('module.')
            renamed[bare] = renamed.pop(key)
        weights = weight_mapping(renamed)
    else:
        weights = weight_mapping(model_zoo.load_url(model_urls['resnet101']))
    model.load_state_dict(weights, strict=False)
    return model
def ResNet152_conv5_body(pretrained=True, model_path=None):
    """Build a full ResNet-152 trunk (through res5/conv5).

    Args:
        pretrained (bool): If True, load ImageNet weights from
            ``model_path`` (or the configured checkpoint), falling back
            to the torchvision model zoo.
        model_path: Optional checkpoint path overriding the config.
    """
    if model_path is None:
        model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
    model = ResNet_convX_body((3, 8, 36, 3), 5)
    if not pretrained:
        return model
    if model_path:
        print("Loading pretrained weights from %s" % (model_path))
        checkpoint = torch.load(model_path)['state_dict']
        # Strip the DataParallel 'module.' prefix from every key.
        renamed = copy.deepcopy(checkpoint)
        for key in checkpoint:
            _, bare = key.split('module.')
            renamed[bare] = renamed.pop(key)
        weights = weight_mapping(renamed)
    else:
        weights = weight_mapping(model_zoo.load_url(model_urls['resnet152']))
    model.load_state_dict(weights, strict=False)
    return model
# ---------------------------------------------------------------------------- #
# Generic ResNet components
# ---------------------------------------------------------------------------- #
class ResNet_convX_body(nn.Module):
    """ResNet trunk consisting of stages res1..res{convX}.

    NOTE(review): res5/avgpool/fc are constructed even when convX == 4;
    `forward` only runs the first `convX` stages — presumably the extra
    modules exist so ImageNet checkpoints load without key mismatches.
    """

    def __init__(self, block_counts, convX):
        super().__init__()
        self.block_counts = block_counts  # residual blocks per stage, e.g. (3, 4, 6, 3)
        self.convX = convX  # number of stages run in forward (4 or 5)
        self.num_layers = (sum(block_counts) + 3 * (self.convX == 4)) * 3 + 2
        self.res1 = globals()[cfg.RESNETS.STEM_FUNC]()
        dim_in = 64
        dim_bottleneck = cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP  # 64
        self.res2, dim_in = add_stage(dim_in, 256, dim_bottleneck, block_counts[0],
                                      dilation=1, stride_init=1)
        if cfg.MODEL.USE_DEFORM:
            # Deformable-convolution variants of res3/res4.
            self.res3, dim_in = add_stage(dim_in, 512, dim_bottleneck * 2, block_counts[1],
                                          dilation=1, stride_init=2, deform=True)
            self.res4, res4_dim_out = add_stage(dim_in, 1024, dim_bottleneck * 4, block_counts[2],
                                                dilation=1, stride_init=2, deform=True)
        else:
            self.res3, dim_in = add_stage(dim_in, 512, dim_bottleneck * 2, block_counts[1],
                                          dilation=1, stride_init=2)
            self.res4, res4_dim_out = add_stage(dim_in, 1024, dim_bottleneck * 4, block_counts[2],
                                                dilation=1, stride_init=2)
        # When res5 is dilated, it keeps stride 1 instead of 2.
        stride_init = 2 if cfg.RESNETS.RES5_DILATION == 1 else 1
        if cfg.MODEL.USE_DEFORM:
            self.res5, res5_dim_out = add_stage(res4_dim_out, 2048, dim_bottleneck * 8, block_counts[3],
                                                cfg.RESNETS.RES5_DILATION, stride_init, deform=True)
        else:
            self.res5, res5_dim_out = add_stage(res4_dim_out, 2048, dim_bottleneck * 8, block_counts[3],
                                                cfg.RESNETS.RES5_DILATION, stride_init)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(res5_dim_out, 1000)
        if self.convX == 5:
            self.spatial_scale = 1 / 32 * cfg.RESNETS.RES5_DILATION
            self.dim_out = res5_dim_out
        else:
            self.spatial_scale = 1 / 16  # final feature scale wrt. original image scale
            self.dim_out = res4_dim_out
        # Initial weights
        self.apply(self._init_weights)
        self._init_modules()

    def _init_weights(self, m):
        # He-style normal init for convs; BatchNorm scale=1, bias=0.
        classname = m.__class__.__name__
        if classname.find('Conv') != -1:
            n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
            m.weight.data.normal_(0, math.sqrt(2. / n))
        elif classname.find('BatchNorm') != -1:
            m.weight.data.fill_(1)
            m.bias.data.zero_()

    def _init_modules(self):
        assert cfg.RESNETS.FREEZE_AT in [0, 2, 3, 4, 5]
        assert cfg.RESNETS.FREEZE_AT <= self.convX
        # Freeze the first FREEZE_AT stages entirely.
        for i in range(1, cfg.RESNETS.FREEZE_AT + 1):
            freeze_params(getattr(self, 'res%d' % i))

        def set_bn_fix(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                for p in m.parameters(): p.requires_grad=False

        # Freeze all bn layers !!!
        self.apply(set_bn_fix)

    def train(self, mode=True):
        # Override: only un-frozen stages switch to training mode.
        self.training = mode
        for i in range(cfg.RESNETS.FREEZE_AT + 1, self.convX + 1):
            getattr(self, 'res%d' % i).train(mode)

        def set_bn_eval(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                m.eval()

        # Set all bn layers to eval
        self.apply(set_bn_eval)

    def forward(self, x):
        # Run the first convX stages sequentially.
        for i in range(self.convX):
            x = getattr(self, 'res%d' % (i + 1))(x)
        return x
class ResNet_roi_conv5_head(nn.Module):
    """RoI head: pools RoIs from the trunk feature map and runs res5."""

    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        dim_bottleneck = cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP
        # Stride chosen so the res5 output stays 7x7 regardless of the
        # configured RoI resolution.
        stride_init = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION // 7
        self.res5, self.dim_out = add_stage(dim_in, 2048, dim_bottleneck * 8, 3,
                                            dilation=1, stride_init=stride_init)
        self.avgpool = nn.AvgPool2d(7)
        self._init_modules()

    def _init_modules(self):
        # Load ImageNet-pretrained res5 weights when configured
        # (non-matching keys are ignored via strict=False).
        model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
        if model_path is not None:
            state_dict = torch.load(model_path)
            state_dict = state_dict['state_dict']
            # Strip the DataParallel 'module.' prefix from every key.
            state_dict_v2 = copy.deepcopy(state_dict)
            for key in state_dict:
                pre, post = key.split('module.')
                state_dict_v2[post] = state_dict_v2.pop(key)
            state_dict_v2 = weight_mapping(state_dict_v2)
            self.load_state_dict(state_dict_v2, strict=False)

        def set_bn_fix(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                for p in m.parameters(): p.requires_grad=False

        # Freeze all bn layers !!!
        self.apply(set_bn_fix)

    def forward(self, x, rpn_ret):
        # Pool per-RoI features from the trunk output, then run res5.
        x = self.roi_xform(
            x, rpn_ret,
            blob_rois='rois',
            method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
            resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION,
            spatial_scale=self.spatial_scale,
            sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO
        )
        res5_feat = self.res5(x)
        x = self.avgpool(res5_feat)
        # During training with a shared res5, also return the feature
        # map for the mask branch.
        if cfg.MODEL.SHARE_RES5 and self.training:
            return x, res5_feat
        else:
            return x
class ResNet_roi_conv5_head_co(nn.Module):
    """Paired variant of the RoI conv5 head: runs both the RoI features and a
    query feature map through the same (weight-shared) res5 stage."""
    def __init__(self, dim_in, roi_xform_func, spatial_scale):
        super().__init__()
        self.roi_xform = roi_xform_func
        self.spatial_scale = spatial_scale
        # Bottleneck inner width (groups * width-per-group, ResNeXt-style).
        dim_bottleneck = cfg.RESNETS.NUM_GROUPS * cfg.RESNETS.WIDTH_PER_GROUP
        # Scale the first-block stride so the 7x7 avgpool matches the RoI size.
        stride_init = cfg.FAST_RCNN.ROI_XFORM_RESOLUTION // 7
        self.res5, self.dim_out = add_stage(dim_in, 2048, dim_bottleneck * 8, 3,
                                            dilation=1, stride_init=stride_init)
        self.avgpool = nn.AvgPool2d(7)
        self._init_modules()
    def _init_modules(self):
        # Optionally load ImageNet-pretrained weights, remapping checkpoint keys.
        model_path = cfg.RESNETS.IMAGENET_PRETRAINED_WEIGHTS
        if model_path is not None:
            state_dict = torch.load(model_path)
            state_dict = state_dict['state_dict']
            state_dict_v2 = copy.deepcopy(state_dict)
            for key in state_dict:
                # Strip the 'module.' prefix added by DataParallel checkpoints.
                # NOTE(review): assumes every key contains 'module.' exactly once.
                pre, post = key.split('module.')
                state_dict_v2[post] = state_dict_v2.pop(key)
            state_dict_v2 = weight_mapping(state_dict_v2)
            self.load_state_dict(state_dict_v2, strict=False)
        # Freeze all bn layers !!!
        def set_bn_fix(m):
            classname = m.__class__.__name__
            if classname.find('BatchNorm') != -1:
                for p in m.parameters(): p.requires_grad=False
        # Freeze all bn layers !!!
        self.apply(set_bn_fix)
    def forward(self, x, y, rpn_ret):
        # The RoI transform returns both the pooled RoI features and the
        # transformed query features (via query_blobs_in=y).
        x, y = self.roi_xform(
            x, rpn_ret,
            blob_rois='rois',
            method=cfg.FAST_RCNN.ROI_XFORM_METHOD,
            resolution=cfg.FAST_RCNN.ROI_XFORM_RESOLUTION,
            spatial_scale=self.spatial_scale,
            sampling_ratio=cfg.FAST_RCNN.ROI_XFORM_SAMPLING_RATIO,
            query_blobs_in=y
        )
        # Both paths share self.res5 weights.
        res5_feat = self.res5(x)
        x = self.avgpool(res5_feat)
        query_res5_feat = self.res5(y)
        y = self.avgpool(query_res5_feat)
        if cfg.MODEL.SHARE_RES5 and self.training:
            return x, y, res5_feat, query_res5_feat
        else:
            return x, y
def add_stage(inplanes, outplanes, innerplanes, nblocks, dilation=1, stride_init=2, deform=False):
    """Build a residual stage of `nblocks` blocks.

    Returns:
        - an nn.Sequential of the residual blocks
        - the stage's final output dimension (== outplanes)
    """
    blocks = []
    in_dim, cur_stride = inplanes, stride_init
    for _ in range(nblocks):
        blocks.append(add_residual_block(
            in_dim, outplanes, innerplanes, dilation, cur_stride, deform=deform))
        # Only the first block may downsample; the rest keep dim and stride 1.
        in_dim, cur_stride = outplanes, 1
    return nn.Sequential(*blocks), outplanes
def add_residual_block(inplanes, outplanes, innerplanes, dilation, stride, deform=False):
    """Build one residual block; adds a projection shortcut when the spatial
    size or channel count changes across the block."""
    needs_projection = stride != 1 or inplanes != outplanes
    if needs_projection:
        # Shortcut/transform constructors are selected by name from this module.
        shortcut_func = globals()[cfg.RESNETS.SHORTCUT_FUNC]
        downsample = shortcut_func(inplanes, outplanes, stride)
    else:
        downsample = None
    trans_func = globals()[cfg.RESNETS.TRANS_FUNC]
    return trans_func(
        inplanes, outplanes, innerplanes, stride,
        dilation=dilation, group=cfg.RESNETS.NUM_GROUPS,
        downsample=downsample, deform=deform)
# ------------------------------------------------------------------------------
# various downsample shortcuts (may expand and may consider a new helper)
# ------------------------------------------------------------------------------
def basic_bn_shortcut(inplanes, outplanes, stride):
    """Projection shortcut: 1x1 conv (no bias) followed by BatchNorm."""
    projection = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride,
                           bias=False)
    return nn.Sequential(projection, nn.BatchNorm2d(outplanes))
def basic_gn_shortcut(inplanes, outplanes, stride):
    """Projection shortcut: 1x1 conv (no bias) followed by GroupNorm."""
    projection = nn.Conv2d(inplanes, outplanes, kernel_size=1, stride=stride,
                           bias=False)
    norm = nn.GroupNorm(net_utils.get_group_gn(outplanes), outplanes,
                        eps=cfg.GROUP_NORM.EPSILON)
    return nn.Sequential(projection, norm)
# ------------------------------------------------------------------------------
# various stems (may expand and may consider a new helper)
# ------------------------------------------------------------------------------
def basic_bn_stem():
    """ResNet stem (BN variant): 7x7/s2 conv -> BN -> ReLU -> 3x3/s2 maxpool.

    Layer names ('conv1', 'bn1', ...) are part of the checkpoint key layout.
    """
    layers = [
        ('conv1', nn.Conv2d(3, 64, 7, stride=2, padding=3, bias=False)),
        ('bn1', nn.BatchNorm2d(64)),
        ('relu', nn.ReLU(inplace=True)),
        # ceil_mode + padding=0 matches the Caffe-style pooling; the padded
        # alternative would be: nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        ('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)),
    ]
    return nn.Sequential(OrderedDict(layers))
def basic_gn_stem():
    """ResNet stem (GroupNorm variant): 7x7/s2 conv -> GN -> ReLU -> maxpool.

    Layer names ('conv1', 'gn1', ...) are part of the checkpoint key layout.
    """
    layers = [
        ('conv1', nn.Conv2d(3, 64, 7, stride=2, padding=3, bias=False)),
        ('gn1', nn.GroupNorm(net_utils.get_group_gn(64), 64,
                             eps=cfg.GROUP_NORM.EPSILON)),
        ('relu', nn.ReLU(inplace=True)),
        ('maxpool', nn.MaxPool2d(kernel_size=3, stride=2, padding=1)),
    ]
    return nn.Sequential(OrderedDict(layers))
# ------------------------------------------------------------------------------
# various transformations (may expand and may consider a new helper)
# ------------------------------------------------------------------------------
class bottleneck_transformation(nn.Module):
    """ Bottleneck Residual Block """
    # 1x1 reduce -> 3x3 -> 1x1 expand, BatchNorm after each conv. When `deform`
    # is True a ConvOffset2D is applied before every conv.
    def __init__(self, inplanes, outplanes, innerplanes, stride=1, dilation=1, group=1,
                 downsample=None, deform=False):
        super().__init__()
        # In original resnet, stride=2 is on 1x1.
        # In fb.torch resnet, stride=2 is on 3x3.
        (str1x1, str3x3) = (stride, 1) if cfg.RESNETS.STRIDE_1X1 else (1, stride)
        self.stride = stride
        self.deform = deform
        if not self.deform:
            self.conv1 = nn.Conv2d(
                inplanes, innerplanes, kernel_size=1, stride=str1x1, bias=False)
            self.bn1 = nn.BatchNorm2d(innerplanes)
            self.conv2 = nn.Conv2d(
                innerplanes, innerplanes, kernel_size=3, stride=str3x3, bias=False,
                padding=1 * dilation, dilation=dilation, groups=group)
            self.bn2 = nn.BatchNorm2d(innerplanes)
            self.conv3 = nn.Conv2d(
                innerplanes, outplanes, kernel_size=1, stride=1, bias=False)
            self.bn3 = nn.BatchNorm2d(outplanes)
            self.downsample = downsample
            self.relu = nn.ReLU(inplace=True)
        else:
            # Deformable path: same conv stack, each conv preceded by an
            # offset predictor. Attribute names are kept identical so both
            # paths share the same checkpoint key layout for conv/bn.
            self.offsets1 = ConvOffset2D(inplanes)
            self.conv1 = nn.Conv2d(
                inplanes, innerplanes, kernel_size=1, stride=str1x1, bias=False)
            self.bn1 = nn.BatchNorm2d(innerplanes)
            self.offsets2 = ConvOffset2D(innerplanes)
            self.conv2 = nn.Conv2d(
                innerplanes, innerplanes, kernel_size=3, stride=str3x3, bias=False,
                padding=1 * dilation, dilation=dilation, groups=group)
            self.bn2 = nn.BatchNorm2d(innerplanes)
            self.offsets3 = ConvOffset2D(innerplanes)
            self.conv3 = nn.Conv2d(
                innerplanes, outplanes, kernel_size=1, stride=1, bias=False)
            self.bn3 = nn.BatchNorm2d(outplanes)
            self.downsample = downsample
            self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        residual = x
        if not self.deform:
            out = self.conv1(x)
            out = self.bn1(out)
            out = self.relu(out)
            out = self.conv2(out)
            out = self.bn2(out)
            out = self.relu(out)
            out = self.conv3(out)
            out = self.bn3(out)
        else:
            out = self.offsets1(x)
            out = self.conv1(out)
            out = self.bn1(out)
            out = self.relu(out)
            out = self.offsets2(out)
            out = self.conv2(out)
            out = self.bn2(out)
            out = self.relu(out)
            out = self.offsets3(out)
            out = self.conv3(out)
            out = self.bn3(out)
        if self.downsample is not None:
            # Project the identity path when shape/stride changed.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
class bottleneck_gn_transformation(nn.Module):
    """Bottleneck residual block with GroupNorm instead of BatchNorm."""
    expansion = 4
    def __init__(self, inplanes, outplanes, innerplanes, stride=1, dilation=1, group=1,
                 downsample=None):
        super().__init__()
        # In original resnet, stride=2 is on 1x1.
        # In fb.torch resnet, stride=2 is on 3x3.
        (str1x1, str3x3) = (stride, 1) if cfg.RESNETS.STRIDE_1X1 else (1, stride)
        self.stride = stride
        self.conv1 = nn.Conv2d(
            inplanes, innerplanes, kernel_size=1, stride=str1x1, bias=False)
        self.gn1 = nn.GroupNorm(net_utils.get_group_gn(innerplanes), innerplanes,
                                eps=cfg.GROUP_NORM.EPSILON)
        self.conv2 = nn.Conv2d(
            innerplanes, innerplanes, kernel_size=3, stride=str3x3, bias=False,
            padding=1 * dilation, dilation=dilation, groups=group)
        self.gn2 = nn.GroupNorm(net_utils.get_group_gn(innerplanes), innerplanes,
                                eps=cfg.GROUP_NORM.EPSILON)
        self.conv3 = nn.Conv2d(
            innerplanes, outplanes, kernel_size=1, stride=1, bias=False)
        self.gn3 = nn.GroupNorm(net_utils.get_group_gn(outplanes), outplanes,
                                eps=cfg.GROUP_NORM.EPSILON)
        self.downsample = downsample
        self.relu = nn.ReLU(inplace=True)
    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.gn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.gn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.gn3(out)
        if self.downsample is not None:
            # Project the identity path when shape/stride changed.
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out
def freeze_params(m):
    """Freeze module `m`: disable gradient updates on all of its parameters."""
    for param in m.parameters():
        param.requires_grad = False
11b5aaf2858fc133d106e1faff9a6c588ffce900 | 1,530 | py | Python | node_Interface.py | robocol-rem-u/master_msgs | fac49cf34a25c16b01ab6014ac47b60c3c5c14a8 | [
"MIT"
] | null | null | null | node_Interface.py | robocol-rem-u/master_msgs | fac49cf34a25c16b01ab6014ac47b60c3c5c14a8 | [
"MIT"
] | null | null | null | node_Interface.py | robocol-rem-u/master_msgs | fac49cf34a25c16b01ab6014ac47b60c3c5c14a8 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import rospy
from master_msgs.msg import traction_Orders, imu_Speed, imu_Magnetism, pots, current, rpm, arm_Orders, goal,connection
def node_Interface():
    """Start the interface node: subscribe to the robot's sensor/actuator
    topics, create the outgoing publishers, and spin at 10 Hz until shutdown.

    The publisher handles are created but not yet used; they are kept here so
    the interface can publish goals/orders later.
    """
    rospy.init_node('node_Interface', anonymous=True)
    rospy.Subscriber('topic_Traction_Orders', traction_Orders, traction_Orders_Callback)
    rospy.Subscriber('topic_IMU_Speed', imu_Speed, IMU_Speed_Callback)
    rospy.Subscriber('topic_IMU_Magnetism', imu_Magnetism, IMU_Magnetism_Callback)
    rospy.Subscriber('topic_Pots', pots, pots_Callback)
    rospy.Subscriber('topic_Current', current, current_Callback)
    # BUG FIX: the message class is imported as `rpm` (not `RPM`) and the
    # defined callback is `RPM_Callback` (not `rpm_Callback`); the original
    # line raised NameError as soon as the node started.
    rospy.Subscriber('topic_RPM', rpm, RPM_Callback)
    rospy.Subscriber('topic_Arm_Orders', arm_Orders, arm_Orders_Callback)
    pub_Goal = rospy.Publisher('topic_Goal', goal, queue_size=10)
    pub_Arm_Orders = rospy.Publisher('topic_Arm_Orders', arm_Orders, queue_size=10)
    pub_Connection = rospy.Publisher('topic_Connection', connection, queue_size=10)
    pub_Traction_Orders = rospy.Publisher('topic_Traction_Orders', traction_Orders, queue_size=10)
    rate = rospy.Rate(10)
    while not rospy.is_shutdown():
        rate.sleep()
# Placeholder message callbacks: currently no-ops. Each receives the incoming
# ROS message (`param`); fill in handling as the interface grows.
def traction_Orders_Callback(param):
    pass
def IMU_Speed_Callback(param):
    pass
def IMU_Magnetism_Callback(param):
    pass
def pots_Callback(param):
    pass
def current_Callback(param):
    pass
def RPM_Callback(param):
    pass
def arm_Orders_Callback(param):
    pass
if __name__ == '__main__':
    try:
        node_Interface()
    except rospy.ROSInterruptException:
        # Normal shutdown path when ROS interrupts the node (e.g. Ctrl-C).
        pass
| 29.423077 | 118 | 0.760784 |
11b627ad398f9ae3625b734210d1a5d1347b9bf2 | 1,700 | py | Python | pantofola_search/management/commands/_private.py | phingage/pantofola.io | f41036d2e568a45f328e2a7ca81d76a27cd134dc | [
"WTFPL"
] | 1 | 2018-06-09T22:20:00.000Z | 2018-06-09T22:20:00.000Z | pantofola_search/management/commands/_private.py | phingage/pantofola.io | f41036d2e568a45f328e2a7ca81d76a27cd134dc | [
"WTFPL"
] | 4 | 2020-02-11T22:01:16.000Z | 2021-06-10T17:38:56.000Z | pantofola_search/management/commands/_private.py | phingage/pantofola.io | f41036d2e568a45f328e2a7ca81d76a27cd134dc | [
"WTFPL"
] | null | null | null | from pantofola_search.models import *
from pantofola_search.tools.imdb_fetcher import ImdbFetcher
def update_new_movie_info(clean_title, imdb_id, torrent, is_imdb=False):
    """Link `torrent` to the Movie identified by `imdb_id`, creating the Movie
    (plus its alternate and foreign titles) from IMDb data on first sight, and
    score how well `clean_title` matches the movie's known titles.

    Saves `torrent` with the computed score and a `broken` flag for weak
    matches; `is_imdb=True` marks the id as trusted (score forced to 1).
    """
    my_imdb = ImdbFetcher()
    if not Movie.objects.filter(pk=imdb_id).exists():
        # movie_info layout (inferred from the indexing below, TODO confirm
        # against ImdbFetcher): [imdb_id, year, max_ratio,
        #                        [original_title, [akas], [(foreign, lang)]]]
        # #[imdb_id,year,max_ratio,[titles[1]]]
        movie_info = my_imdb.query_movie_info(imdb_id, clean_title)
        movie = Movie(imdb_id=movie_info[0],
                      year=movie_info[1],
                      original_title=movie_info[3][0])
        movie.save()
        for aka in movie_info[3][1]:
            movie.title_set.create(title=aka)
        for forg in movie_info[3][2]:
            movie.foreigntitle_set.create(title=forg[0], language=forg[1])
        max_ratio = movie_info[2]
        # print movie_info, tags, lang_tag
    else:
        # Movie already known: recompute the match score against stored titles.
        movie = Movie.objects.get(pk=imdb_id)
        score_title = [movie.original_title]
        for aka_q in movie.title_set.all():
            score_title.append(aka_q.title)
        max_ratio = my_imdb.compute_score(clean_title, score_title)
    # A weak title match flags the torrent as broken, unless the id came from
    # IMDb itself and is therefore trusted.
    alarm_ratio = False
    if float(max_ratio) < 0.5 and not is_imdb:
        alarm_ratio = True
    torrent.movie = movie
    torrent.score = max_ratio
    torrent.broken = alarm_ratio
    # torrent.ready_to_recheck = False
    if is_imdb:
        #torrent.sanitized_name = movie.original_title
        torrent.score = 1
        torrent.broken = False
    torrent.save()
def check_for_title_in_db(clean_title):
    """Return the imdb_id of a healthy, already-checked torrent whose sanitized
    name matches `clean_title` exactly; None when no such torrent exists."""
    match = Torrent.objects.filter(sanitized_name__exact=clean_title,
                                   broken=False, ready_to_recheck=False).first()
    return match.movie.imdb_id if match else None
11b673d3e56e187a96e8ce75c9577f8cea8df161 | 200 | py | Python | pymtl3/passes/rtlir/structural/__init__.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 152 | 2020-06-03T02:34:11.000Z | 2022-03-30T04:16:45.000Z | pymtl3/passes/rtlir/structural/__init__.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 139 | 2019-05-29T00:37:09.000Z | 2020-05-17T16:49:26.000Z | pymtl3/passes/rtlir/structural/__init__.py | kevinyuan/pymtl3 | 5949e6a4acc625c0ccbbb25be3af1d0db683df3c | [
"BSD-3-Clause"
] | 22 | 2020-05-18T13:42:05.000Z | 2022-03-11T08:37:51.000Z | """Expose structural RTLIR generation pass.
PyMTL user should only interact with the passes exposed here.
"""
from .StructuralRTLIRGenL4Pass import StructuralRTLIRGenL4Pass as StructuralRTLIRGenPass
| 33.333333 | 88 | 0.84 |
11b6a22d0d9d730ae6441343ec296d67f55adf10 | 7,663 | py | Python | ArcLint.py | namur007/ArcLint | b17b39cf7fdfeff144339b6f3494d9120eafde90 | [
"MIT"
] | null | null | null | ArcLint.py | namur007/ArcLint | b17b39cf7fdfeff144339b6f3494d9120eafde90 | [
"MIT"
] | 4 | 2020-07-17T18:11:54.000Z | 2020-07-26T12:34:57.000Z | ArcLint.py | namur007/ArcLint | b17b39cf7fdfeff144339b6f3494d9120eafde90 | [
"MIT"
] | null | null | null | import json
import re
import datetime
import os
import arcpy
# Maps the flag names used in the rule JSON to `re` module flag constants.
regex_flag_dict = {
    # 'ASCII' re.A, # this is py3 only so wont work in arcgis desktop
    'IGNORECASE': re.I,
    'LOCALE': re.L,
    "MULTILINE": re.M,
    "DOTMATCH": re.S,  # re.S is DOTALL: '.' also matches newlines
    "UNICODE": re.U,
    "VERBOSE": re.X,
}
def main(json_path, feature, output_location=None, output_file_name=None):
    """Lint `feature` against the rules in `json_path` and write a JSON report.

    The report lands at output_location/output_file_name (defaults to
    ./results.json) and records the run's start timestamp.
    """
    report_path = format_output_file(output_location, output_file_name)
    started_at = datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    compiled = compile_rules(read_json(json_path))
    processed = _arc_process(compiled, feature)
    save_json(format_results(processed, started_at), report_path)
def format_output_file(output_location, output_file_name):
    """Build the report path: default name 'results.json', force a .json suffix.

    Note: only None triggers the defaults -- an empty-string file name still
    becomes '.json', matching the original behavior.
    """
    location = "" if output_location is None else output_location
    name = "results.json" if output_file_name is None else output_file_name
    if not name.endswith(".json"):
        name += ".json"
    return os.path.join(location, name)
def read_json(_json_path):
    """Parse the JSON document at `_json_path` and return the resulting object."""
    with open(_json_path, 'r') as handle:
        return json.load(handle)
def save_json(_json_data, output_file):
    """Serialize `_json_data` as JSON and write it to `output_file`."""
    with open(output_file, 'w') as handle:
        json.dump(_json_data, handle)
def format_results(rule_data, _datetime_str):
    """Shape the processed rule data into the serializable report structure.

    Fields whose rules are all non-output (or that produced no entries) are
    omitted; every group is always reported.
    """
    out_fields = {}
    for field_name, rules in rule_data['Fields'].items():
        entries = [
            {"ruleName": rule['ruleName'], "errorIDs": rule['result']}
            for rule in rules if rule['output']
        ]
        if entries:
            out_fields[field_name] = entries
    out_groups = {
        group_name: {
            "errorIDs": group['result'],
            'description': group['description'],
        }
        for group_name, group in rule_data['Groups'].items()
    }
    return {
        "run_datetime": _datetime_str,
        "fields": out_fields,
        "groups": out_groups,
    }
def _arc_process(rule_data, feature):
    """Evaluate every compiled field rule and rule group against `feature`'s rows.

    Impure: mutates `rule_data` in place, appending each flagged row's OID to
    the corresponding rule's / group's 'result' list.

    rule_data = {
        "Rules": rule_dict,
        "Fields": field_dict,
        "Groups": group_dict
    }
    Returns the (mutated) rule_data dictionary.
    """
    fields = list(rule_data['Fields'])
    with arcpy.da.SearchCursor(feature, ["OID@"] + fields) as sc:
        for row in sc:
            _id = row[0]
            for ix, value in enumerate(row[1:]):
                # Record this OID under every rule whose predicate flags the
                # value. (Was a side-effect list comprehension; a plain loop
                # is the idiomatic form and allocates nothing.)
                for rule in rule_data['Fields'][fields[ix]]:
                    if rule['rule'](value):
                        rule['result'].append(_id)
            for group_name, group in rule_data['Groups'].items():
                # 'any' groups flag the OID if any member rule flagged it;
                # otherwise every member rule must have flagged it.
                group_func = any if group.get('match') == 'any' else all
                if group_func(_id in r['result'] for r in group['rules']):
                    group['result'].append(_id)
    return rule_data
# region Linters
def regex_lint(value, _regex):
    """Return True if compiled pattern `_regex` matches anywhere in str(value).

    Uses `search` instead of the original `len(findall(...)) > 0`: it stops at
    the first match rather than collecting every match, with identical truth
    semantics.
    """
    return _regex.search(str(value)) is not None
def range_lint(value, firstValue, secondValue, outside):
    """True when `value` lies within the inclusive range spanned by the two
    bounds (order-insensitive); the result is inverted when `outside` is set."""
    low, high = min(firstValue, secondValue), max(firstValue, secondValue)
    inside = low <= value <= high
    return not inside if outside else inside
# region builders
def compile_rules(json_obj):
    """Compile the rule config into global rules, per-field rules, and groups."""
    global_rules = _compile_global_rules(json_obj)
    field_rules = _compile_field_rules(json_obj, global_rules)
    group_rules = _compile_group_rules(json_obj, field_rules)
    return {
        "Rules": global_rules,
        "Fields": field_rules,
        "Groups": group_rules,
    }
def _compile_global_rules(json_obj):
    """Compile every rule under 'globalRules'.

    Returns {'global_RULENAME': callable} -- global rules are keyed with a
    'global_' prefix so field rules can reference them by name.
    """
    compiled = {}
    for rule in json_obj.get('globalRules', []):
        key = 'global_{}'.format(rule.get('ruleName', '').upper())
        compiled[key] = _parse_rule(rule)
    return compiled
def _compile_field_rules(json_obj, rule_dict):
    """Compile the per-field rules.

    A field rule with no 'type' that matches a compiled 'global_RULENAME'
    reuses that global callable; otherwise a field-specific rule is compiled
    and registered in `rule_dict` (which is mutated).

    Returns:
        {field_name: [{'result': [], 'ruleName': str,
                       'rule': callable, 'output': bool}, ...]}
    """
    field_dict = {}
    for field in json_obj.get('fields', []):
        field_name = field.get('fieldName')
        compiled_rules = []
        for rule in field.get('rules', []):
            rule_name = rule.get('ruleName', '').upper()
            global_key = 'global_{}'.format(rule_name)
            if rule.get('type') is None and global_key in rule_dict:
                key = global_key
            else:
                key = '{}_{}'.format(field_name, rule_name)
                rule_dict[key] = _parse_rule(rule)
            compiled_rules.append({
                'result': [],
                'ruleName': rule_name,
                'rule': rule_dict[key],
                'output': rule.get('output', True),
            })
        field_dict[field_name] = compiled_rules
    return field_dict
def _compile_group_rules(json_obj, field_dict):
    """Compile 'ruleGroups'.

    Each group holds references to already-compiled field rule dicts (the same
    objects, so field results recorded later are visible here), a 'match' mode
    ('any' or 'all'), and an empty 'result' list for flagged OIDs.

    Returns:
        {group_name: {"result": [], "match": str,
                      "rules": [field rule dicts], "description": str}}
    """
    group_dict = {}
    for group in json_obj.get("ruleGroups", []):
        member_rules = []
        for ref in group.get("rules", []):
            ref_field = ref.get("fieldName")
            wanted = ref.get("ruleName", "").upper()
            member_rules.extend(
                r for r in field_dict[ref_field] if r['ruleName'] == wanted)
        group_dict[group.get("groupName", "")] = {
            "result": [],
            "match": group.get("match", ""),
            "rules": member_rules,
            "description": group.get('description', ''),
        }
    return group_dict
def _parse_rule(rule):
    """Dispatch to the parser matching rule['type'] ('regex' or 'range').

    Raises KeyError for unknown (or missing) types, as before.
    """
    parsers = {'regex': _parse_regex, 'range': _parse_range}
    return parsers[rule.get('type')](rule)
# region parse rules
def _parse_regex(rule):
    """Compile rule['pattern'] with its named flags; return a matcher closure."""
    combined_flags = 0
    for flag_name in rule.get('flags', []):
        if flag_name is None:
            continue
        # Unknown flag names contribute 0 (i.e. are silently ignored).
        combined_flags |= regex_flag_dict.get(flag_name.upper(), 0)
    compiled = re.compile(rule.get('pattern'), combined_flags)
    return lambda value: regex_lint(value, compiled)
def _parse_range(rule):
    """Return a closure testing values against [fromValue, toValue]; the
    'outside' key (default False) inverts the test."""
    low = rule.get('fromValue')
    high = rule.get('toValue')
    invert = rule.get('outside', False)
    return lambda value: range_lint(value, low, high, invert)
if __name__ == "__main__":
feat = r"C:\Users\scody\Desktop\ArcPro Model\AllPipes2020\Data\ModelNetwork.gdb\facility_junction"
main('facil_jct.json', feat)
| 27.66426 | 136 | 0.588542 |
11b7cee72a017b56ab9f447f74c1610717cfe52e | 8,784 | py | Python | tests/st/ops/gpu/test_scatter_nd_func_op.py | PowerOlive/mindspore | bda20724a94113cedd12c3ed9083141012da1f15 | [
"Apache-2.0"
] | 1 | 2021-12-27T13:42:29.000Z | 2021-12-27T13:42:29.000Z | tests/st/ops/gpu/test_scatter_nd_func_op.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | null | null | null | tests/st/ops/gpu/test_scatter_nd_func_op.py | zimo-geek/mindspore | 665ec683d4af85c71b2a1f0d6829356f2bc0e1ff | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import numpy as np
import pytest
import mindspore.context as context
import mindspore.nn as nn
from mindspore import Tensor, Parameter
import mindspore.common.dtype as mstype
import mindspore.ops as ops
# All cases run in graph mode on GPU (these are device-targeted smoke tests).
context.set_context(mode=context.GRAPH_MODE, device_target="GPU")
# Maps the short op name used by the tests to the corresponding ScatterNd op.
func_map = {
    "update": ops.ScatterNdUpdate,
    "add": ops.ScatterNdAdd,
    "sub": ops.ScatterNdSub,
}
class TestScatterNdFuncNet(nn.Cell):
    """Wraps one ScatterNd* op with Parameter inputs for graph-mode testing."""

    def __init__(self, func, lock, inputx, indices, updates):
        super(TestScatterNdFuncNet, self).__init__()
        # Look up the requested scatter op ('update' / 'add' / 'sub').
        self.scatter_func = func_map[func](use_locking=lock)
        self.inputx = Parameter(inputx, name="inputx")
        self.indices = Parameter(indices, name="indices")
        self.updates = Parameter(updates, name="updates")

    def construct(self):
        """Apply the scatter op (in place on inputx) and return the result."""
        return self.scatter_func(self.inputx, self.indices, self.updates)
def scatter_nd_func_net(func, inputx, indices, updates):
    """Build and run the requested scatter op once with use_locking=True."""
    return TestScatterNdFuncNet(func, True, inputx, indices, updates)()
def scatter_nd_func_use_locking_false_net(func, inputx, indices, updates):
    """Build and run the requested scatter op once with use_locking=False."""
    return TestScatterNdFuncNet(func, False, inputx, indices, updates)()
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_traning
@pytest.mark.env_onecard
def test_scatter_nd_func_small_float32():
    """Check update/add/sub on a small 2x3 float32 tensor.

    NOTE(review): marker 'platform_x86_gpu_traning' is misspelled ('training'
    elsewhere in this file); confirm before fixing, CI may select on it.
    """
    # Each helper call builds a fresh net, so the ops do not compound.
    inputx = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mstype.float32)
    indices = Tensor(np.array([[0, 0], [1, 1]]), mstype.int32)
    updates = Tensor(np.array([1.0, 2.2]), mstype.float32)
    # update
    output = scatter_nd_func_net("update", inputx, indices, updates)
    expected = np.array([[1.0, 0.3, 3.6], [0.4, 2.2, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # add
    output = scatter_nd_func_net("add", inputx, indices, updates)
    expected = np.array([[0.9, 0.3, 3.6], [0.4, 2.7, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # sub
    output = scatter_nd_func_net("sub", inputx, indices, updates)
    expected = np.array([[-1.1, 0.3, 3.6], [0.4, -1.7, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_scatter_nd_func_input_updated():
    """Same cases as above, but constructing the net directly with lock=True."""
    inputx = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mstype.float32)
    indices = Tensor(np.array([[0, 0], [1, 1]]), mstype.int32)
    updates = Tensor(np.array([1.0, 2.2]), mstype.float32)
    lock = True
    # update
    net = TestScatterNdFuncNet("update", lock, inputx, indices, updates)
    output = net()
    expected = np.array([[1.0, 0.3, 3.6], [0.4, 2.2, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # add
    net = TestScatterNdFuncNet("add", lock, inputx, indices, updates)
    output = net()
    expected = np.array([[0.9, 0.3, 3.6], [0.4, 2.7, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # sub
    net = TestScatterNdFuncNet("sub", lock, inputx, indices, updates)
    output = net()
    expected = np.array([[-1.1, 0.3, 3.6], [0.4, -1.7, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_traning
@pytest.mark.env_onecard
def test_scatter_nd_func_small_float32_using_locking_false():
    """Same float32 cases with use_locking=False.

    NOTE(review): 'traning' marker typo -- see note on the first test.
    """
    inputx = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mstype.float32)
    indices = Tensor(np.array([[0, 0], [1, 1]]), mstype.int32)
    updates = Tensor(np.array([1.0, 2.2]), mstype.float32)
    # update
    output = scatter_nd_func_use_locking_false_net("update", inputx, indices, updates)
    expected = np.array([[1.0, 0.3, 3.6], [0.4, 2.2, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # add
    output = scatter_nd_func_use_locking_false_net("add", inputx, indices, updates)
    expected = np.array([[0.9, 0.3, 3.6], [0.4, 2.7, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # sub
    output = scatter_nd_func_use_locking_false_net("sub", inputx, indices, updates)
    expected = np.array([[-1.1, 0.3, 3.6], [0.4, -1.7, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_traning
@pytest.mark.env_onecard
def test_scatter_nd_func_small_int32():
    """Check 1-D scatter with rank-1 index vectors (integer-valued float32).

    NOTE(review): 'traning' marker typo -- see note on the first test.
    """
    inputx = Tensor(np.array([1, 2, 3, 4, 5, 6, 7, 8]), mstype.float32)
    indices = Tensor(np.array([[4], [3], [1], [7]]), mstype.int32)
    updates = Tensor(np.array([9, 10, 11, 12]), mstype.float32)
    # update
    output = scatter_nd_func_net("update", inputx, indices, updates)
    expected = np.array([1, 11, 3, 10, 9, 6, 7, 12])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # add
    output = scatter_nd_func_net("add", inputx, indices, updates)
    expected = np.array([1, 13, 3, 14, 14, 6, 7, 20])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # sub
    output = scatter_nd_func_net("sub", inputx, indices, updates)
    expected = np.array([1, -9, 3, -6, -4, 6, 7, -4])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_traning
@pytest.mark.env_onecard
def test_scatter_nd_func_multi_dims():
    """Scatter whole 4x4 slices into a zeroed 4x4x4 tensor at rows 0 and 2.

    NOTE(review): 'traning' marker typo -- see note on the first test.
    """
    inputx = Tensor(np.zeros((4, 4, 4)), mstype.float32)
    indices = Tensor(np.array([[0], [2]]), mstype.int32)
    updates = Tensor(
        np.array(
            [
                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
                [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
            ]
        ),
        mstype.float32,
    )
    # update
    output = scatter_nd_func_net("update", inputx, indices, updates)
    expected = np.array(
        [
            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
            [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
            [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        ]
    )
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # add
    output = scatter_nd_func_net("add", inputx, indices, updates)
    expected = np.array(
        [
            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
            [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
            [[5, 5, 5, 5], [6, 6, 6, 6], [7, 7, 7, 7], [8, 8, 8, 8]],
            [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        ]
    )
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # sub
    output = scatter_nd_func_net("sub", inputx, indices, updates)
    expected = np.array(
        [
            [[-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7], [-8, -8, -8, -8]],
            [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
            [[-5, -5, -5, -5], [-6, -6, -6, -6], [-7, -7, -7, -7], [-8, -8, -8, -8]],
            [[0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0], [0, 0, 0, 0]],
        ]
    )
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_traning
@pytest.mark.env_onecard
def test_scatter_nd_func_one_value():
    """Single-element scatter at index [0, 1].

    NOTE(review): 'traning' marker typo -- see note on the first test.
    """
    inputx = Tensor(np.array([[-0.1, 0.3, 3.6], [0.4, 0.5, -3.2]]), mstype.float32)
    indices = Tensor(np.array([[0, 1]]), mstype.int32)
    updates = Tensor(np.array([1.0]), mstype.float32)
    # update
    output = scatter_nd_func_net("update", inputx, indices, updates)
    expected = np.array([[-0.1, 1.0, 3.6], [0.4, 0.5, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # add
    output = scatter_nd_func_net("add", inputx, indices, updates)
    expected = np.array([[-0.1, 1.3, 3.6], [0.4, 0.5, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
    # sub
    output = scatter_nd_func_net("sub", inputx, indices, updates)
    expected = np.array([[-0.1, -0.7, 3.6], [0.4, 0.5, -3.2]])
    np.testing.assert_array_almost_equal(output.asnumpy(), expected)
| 37.378723 | 86 | 0.612136 |
11b7d8f84ea9074863867abdbc15c4a61c060614 | 1,710 | py | Python | files/persona_dao.py | DaletWolff/Curso_postgresql | a9d716236b1a840f104c98a4982eab9b1ad641ba | [
"Unlicense"
] | null | null | null | files/persona_dao.py | DaletWolff/Curso_postgresql | a9d716236b1a840f104c98a4982eab9b1ad641ba | [
"Unlicense"
] | null | null | null | files/persona_dao.py | DaletWolff/Curso_postgresql | a9d716236b1a840f104c98a4982eab9b1ad641ba | [
"Unlicense"
] | null | null | null | from persona import Persona
from logger_base import log
from cursor import Cursor
class PersonaDAO:
    """Data-access object for the `persona` table (raw-SQL CRUD via Cursor)."""

    _SELECCIONAR = 'SELECT * FROM persona ORDER BY id_persona'
    _INSERTAR = 'INSERT INTO persona(nombre, apellido, email) VALUES(%s, %s, %s)'
    _ACTUALIZAR = 'UPDATE persona SET nombre=%s, apellido=%s, email=%s WHERE id_persona=%s'
    _ELIMINAR = 'DELETE FROM persona WHERE id_persona=%s'

    @classmethod
    def seleccionar(cls):
        """Return every row as a list of Persona objects, ordered by id."""
        with Cursor() as cursor:
            cursor.execute(cls._SELECCIONAR)
            return [Persona(fila[0], fila[1], fila[2], fila[3])
                    for fila in cursor.fetchall()]

    @classmethod
    def insertar(cls, persona):
        """Insert `persona`; return the affected-row count."""
        with Cursor() as cursor:
            cursor.execute(cls._INSERTAR,
                           (persona.nombre, persona.apellido, persona.email))
            log.debug(f"Persona insertada: {persona}")
            return cursor.rowcount

    @classmethod
    def actualizar(cls, persona):
        """Update the row matching persona.id_persona; return the row count."""
        with Cursor() as cursor:
            cursor.execute(cls._ACTUALIZAR,
                           (persona.nombre, persona.apellido, persona.email,
                            persona.id_persona))
            log.debug(f'Persona actualizada: {persona}')
            return cursor.rowcount

    @classmethod
    def eliminar(cls, persona):
        """Delete the row matching persona.id_persona; return the row count."""
        with Cursor() as cursor:
            cursor.execute(cls._ELIMINAR, (persona.id_persona,))
            log.debug(f'Persona eliminada: {persona}')
            return cursor.rowcount
11b95e0f9e7afe8543bf0c3e7be151865cf4b771 | 5,394 | py | Python | tests/serve/mock/end-to-end/opbank/test_opbank.py | dfioravanti/hmt | df79404076ec7acea0cfb12b636d58e3ffc83bc5 | [
"MIT"
] | 25 | 2020-05-14T13:25:42.000Z | 2021-11-09T10:09:27.000Z | tests/serve/mock/end-to-end/opbank/test_opbank.py | dfioravanti/hmt | df79404076ec7acea0cfb12b636d58e3ffc83bc5 | [
"MIT"
] | 19 | 2020-05-05T19:47:41.000Z | 2021-02-05T17:06:53.000Z | tests/serve/mock/end-to-end/opbank/test_opbank.py | dfioravanti/hmt | df79404076ec7acea0cfb12b636d58e3ffc83bc5 | [
"MIT"
] | 6 | 2020-05-16T10:02:48.000Z | 2021-10-04T08:03:49.000Z | import json
import pytest
from tornado.httpclient import AsyncHTTPClient, HTTPRequest
from hmt.serve.mock.log import Log
from hmt.serve.mock.scope import Scope
from hmt.serve.mock.specs import load_specs
from hmt.serve.utils.routing import HeaderRouting
@pytest.fixture
def app(mocking_app):
    """Tornado test app: HMT mock server loaded with the OP bank sandbox
    callbacks/specs and header-based routing."""
    return mocking_app(
        "tests/serve/mock/end-to-end/opbank/callbacks",
        load_specs("tests/serve/mock/end-to-end/opbank/schemas"),
        HeaderRouting(),
        Log(Scope()),
    )
# Request headers for the accounts API. The api-key/token values are
# presumably fixed sandbox credentials recorded with the mock specs -- verify
# before treating them as secrets.
ACCOUNTS_HEADERS = {
    "Host": "sandbox.apis.op-palvelut.fi",
    "x-api-key": "ZoStul8nNuwq1SYCzSrLcO1wAj4Tyf7x",
    "x-request-id": "12345",
    "x-session-id": "12345",
    "authorization": "Bearer 6c18c234b1b18b1d97c7043e2e41135c293d0da9",
    "x-authorization": "6c18c234b1b18b1d97c7043e2e41135c293d0da9",
}
#
# Same headers for the payments API, but the 'authorization' header is left
# commented out below.
PAYMENTS_HEADERS = {
    "Host": "sandbox.apis.op-palvelut.fi",
    "x-api-key": "ZoStul8nNuwq1SYCzSrLcO1wAj4Tyf7x",
    "x-request-id": "12345",
    "x-session-id": "12345",
    # 'authorization': "Bearer 6c18c234b1b18b1d97c7043e2e41135c293d0da9",
    "x-authorization": "6c18c234b1b18b1d97c7043e2e41135c293d0da9",
}
"""
def get_accounts(http_client: AsyncHTTPClient, base_url: str):
req = HTTPRequest(base_url+'/accounts/v3/accounts', headers=ACCOUNTS_HEADERS)
ret = yield http_client.fetch(req)
return json.loads(ret.body)['accounts']
"""
"""
def init_payment(payer_iban, receiver_iban, amount, http_client, base_url):
body = {
"amount": amount,
"subject": "Client Test",
"currency": "EUR",
"payerIban": payer_iban,
"valueDate": "2020-01-27T22:59:34Z",
"receiverBic": "string",
"receiverIban": receiver_iban,
"receiverName": "string"
}
url = base_url + '/v1/payments/initiate'
req = HTTPRequest(url, method='POST', headers=PAYMENTS_HEADERS, body=json.dumps(body))
res = yield http_client.fetch(req)
return json.loads(res.body)
"""
"""
def confirm_payment(payment_id, http_client: AsyncHTTPClient, base_url: str):
body = {
'paymentId': payment_id
}
url = base_url + '/v1/payments/confirm'
req = HTTPRequest(url, headers=PAYMENTS_HEADERS, body=json.dumps(body))
response = yield http_client.fetch(req)
return json.loads(response)
"""
def _pick_accounts(accounts, payer_iban, receiver_iban):
    """Return the (payer, receiver) account dicts matching the given IBANs.

    Raises StopIteration when either IBAN is missing from *accounts*, which
    fails the test loudly rather than asserting on the wrong account.
    """
    payer = next(acc for acc in accounts if acc["identifier"] == payer_iban)
    receiver = next(acc for acc in accounts if acc["identifier"] == receiver_iban)
    return payer, receiver


@pytest.mark.gen_test
def test_opbank(http_client: AsyncHTTPClient, base_url: str):
    """End-to-end flow against the mocked OP bank API.

    Reads balances, initiates a payment (which must NOT move money yet),
    confirms it, then checks the amount moved payer -> receiver.
    """
    # eventually, we will want to test the line below
    # currently, however, pytest.tornado only supports creating
    # one fixture for a mock
    # requests.delete("http://localhost:8888/admin/storage")
    payer_iban = "FI3959986920207073"
    receiver_iban = "FI2350009421535899"
    amount = 5
    # --- balances before the payment ---
    req = HTTPRequest(base_url + "/accounts/v3/accounts", headers=ACCOUNTS_HEADERS)
    ret = yield http_client.fetch(req)
    accounts = json.loads(ret.body)["accounts"]
    print("Account list before payment: {}".format(accounts))
    payer_account, receiver_account = _pick_accounts(
        accounts, payer_iban, receiver_iban
    )
    assert 2215.81 == payer_account["balance"]
    assert 0 == receiver_account["balance"]
    # --- initiate the payment ---
    body = {
        "amount": amount,
        "subject": "Client Test",
        "currency": "EUR",
        "payerIban": payer_iban,
        "valueDate": "2020-01-27T22:59:34Z",
        "receiverBic": "string",
        "receiverIban": receiver_iban,
        "receiverName": "string",
    }
    req = HTTPRequest(
        base_url + "/v1/payments/initiate",
        method="POST",
        headers=PAYMENTS_HEADERS,
        body=json.dumps(body),
    )
    res = yield http_client.fetch(req)
    payment = json.loads(res.body)
    payment_id: str = payment["paymentId"]
    print("Created payment {}".format(payment))
    # --- balances must be unchanged until the payment is confirmed ---
    req = HTTPRequest(base_url + "/accounts/v3/accounts", headers=ACCOUNTS_HEADERS)
    ret = yield http_client.fetch(req)
    accounts = json.loads(ret.body)["accounts"]
    print("Account list after payment initiated: {}".format(accounts))
    payer_account, receiver_account = _pick_accounts(
        accounts, payer_iban, receiver_iban
    )
    assert 2215.81 == payer_account["balance"]
    assert 0 == receiver_account["balance"]
    # --- confirm the payment ---
    req = HTTPRequest(
        base_url + "/v1/payments/confirm",
        method="POST",
        headers=PAYMENTS_HEADERS,
        body=json.dumps({"paymentId": payment_id}),
    )
    yield http_client.fetch(req)
    # --- after confirmation the amount has moved payer -> receiver ---
    req = HTTPRequest(base_url + "/accounts/v3/accounts", headers=ACCOUNTS_HEADERS)
    ret = yield http_client.fetch(req)
    accounts = json.loads(ret.body)["accounts"]
    print("Account list after payment confirmed: {}".format(accounts))
    payer_account, receiver_account = _pick_accounts(
        accounts, payer_iban, receiver_iban
    )
    assert 2210.81 == payer_account["balance"]
    assert 5 == receiver_account["balance"]
| 32.493976 | 90 | 0.670931 |
11ba755db1dbc0aa52b8605bc8949960f9ba11a9 | 346 | py | Python | less3_task5.py | rezapci/Algorithms-with-Python | 5f4faf2d463f33375856f5a5ab525467d303aa24 | [
"MIT"
] | null | null | null | less3_task5.py | rezapci/Algorithms-with-Python | 5f4faf2d463f33375856f5a5ab525467d303aa24 | [
"MIT"
] | null | null | null | less3_task5.py | rezapci/Algorithms-with-Python | 5f4faf2d463f33375856f5a5ab525467d303aa24 | [
"MIT"
] | null | null | null | # Find the maximum negative element in the array.
# Display the maximum (closest to zero) negative element of a random
# array together with its position, or report that none exists.
import random


def max_negative(values):
    """Return (value, index) of the largest negative element of *values*.

    Returns None when *values* contains no negative numbers.  On ties the
    index of the first occurrence is returned (matching list.index).
    """
    best_value = None
    best_index = None
    for index, value in enumerate(values):
        if value < 0 and (best_value is None or value > best_value):
            best_value = value
            best_index = index
    if best_value is None:
        return None
    return best_value, best_index


arr = [random.randint(-50, 50) for _ in range(10)]
print(arr)
found = max_negative(arr)
if found is None:
    # Previously the script crashed with ValueError here whenever the
    # random array happened to contain no negative numbers.
    print('There are no negative elements in the array')
else:
    print('The maximum negative element {}, its position: {}'.format(*found))
| 19.222222 | 87 | 0.656069 |
11bc0b3cb2807ff10941fab0ad8b5ff296d80b41 | 253 | py | Python | pages/wacs.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | pages/wacs.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | pages/wacs.py | irzaip/selevaporum | 05754f2a8152185f550e1135feb94fdc85e4046c | [
"MIT"
] | null | null | null | import collections
from numpy.core.defchararray import lower
import streamlit as st
import numpy as np
import pandas as pd
from pages import utils
def app():
    """Streamlit page entry point for the WhatsApp customer-service demo."""
    # NOTE(review): "WhatApp" looks like a typo for "WhatsApp" -- confirm
    # before changing the user-facing title text.
    st.title("WhatApp Customer Service")
st.subheader("Where automation matters") | 25.3 | 44 | 0.754941 |
11c058f314fcdf27f630e4e67e934c957629b5a4 | 1,000 | py | Python | pype9/cmd/convert.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | null | null | null | pype9/cmd/convert.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | null | null | null | pype9/cmd/convert.py | tclose/Pype9 | 23f96c0885fd9df12d9d11ff800f816520e4b17a | [
"MIT"
] | 1 | 2021-04-08T12:46:21.000Z | 2021-04-08T12:46:21.000Z | """
Tool to convert 9ML files between different supported formats (e.g. XML_,
JSON_, YAML_) and 9ML versions.
"""
from argparse import ArgumentParser
from pype9.utils.arguments import nineml_document
from pype9.utils.logging import logger
def argparser():
    """Build the ``pype9 convert`` command-line parser.

    Defines two positional arguments (input 9ML document, output filename)
    and an optional target 9ML version.
    """
    parser = ArgumentParser(prog='pype9 convert', description=__doc__)
    # (flag names, add_argument options) pairs, in declaration order;
    # the order matters for the two positional arguments.
    argument_specs = [
        (('in_file',),
         {'type': nineml_document, 'help': "9ML file to be converted"}),
        (('out_file',),
         {'help': "Converted filename"}),
        (('--nineml_version', '-v'),
         {'type': str, 'default': None,
          'help': "The version of nineml to output"}),
    ]
    for names, options in argument_specs:
        parser.add_argument(*names, **options)
    return parser
def run(argv):
    """Parse *argv*, clone the loaded 9ML document and write it out.

    *argv* is the argument list (without the program name) handled by
    :func:`argparser`.
    """
    options = argparser().parse_args(argv)
    # write() is applied to a clone of the parsed document.
    document = options.in_file.clone()
    # Only forward a version when one was explicitly requested.
    write_kwargs = {}
    if options.nineml_version is not None:
        write_kwargs['version'] = options.nineml_version
    document.write(options.out_file, **write_kwargs)
    logger.info("Converted '{}' to '{}'".format(options.in_file,
                                                options.out_file))
| 33.333333 | 77 | 0.664 |
11c2627f43e4b6eeb9e8f2281dbb147804505bde | 85 | py | Python | test.py | Wuzhiqiang88/myFirstSpider | 6e964d26038e2937b0f060c1ff6d30b092394ee3 | [
"Apache-2.0"
] | 1 | 2018-09-12T07:13:53.000Z | 2018-09-12T07:13:53.000Z | test.py | Wuzhiqiang88/myFirstSpider | 6e964d26038e2937b0f060c1ff6d30b092394ee3 | [
"Apache-2.0"
] | null | null | null | test.py | Wuzhiqiang88/myFirstSpider | 6e964d26038e2937b0f060c1ff6d30b092394ee3 | [
"Apache-2.0"
] | null | null | null | i=0
# Print "w<i>www" for i in 0..9 while collecting the indices, then show the
# last element.  Fixes from the original: the final print was missing its
# closing parenthesis (SyntaxError), and `s = [50]` followed by `s[i] = i`
# raised IndexError for every i >= 1 -- the list is now grown with append.
s = []
for i in range(0, 10):
    print("w%dwww" % i)
    s.append(i)
print(s[i])
| 7.727273 | 21 | 0.470588 |
11c29c94c567a27034de5cc0c60d69d3debbcc00 | 871 | py | Python | Python_UN_POCO_MAS_AVANZADO.py | FreyderUrbano/Python_Programas | 8a11729d1148c319d8fa145ad18038cc7d63f0d9 | [
"MIT"
] | null | null | null | Python_UN_POCO_MAS_AVANZADO.py | FreyderUrbano/Python_Programas | 8a11729d1148c319d8fa145ad18038cc7d63f0d9 | [
"MIT"
] | null | null | null | Python_UN_POCO_MAS_AVANZADO.py | FreyderUrbano/Python_Programas | 8a11729d1148c319d8fa145ad18038cc7d63f0d9 | [
"MIT"
] | null | null | null | #PYTHON UN POCO MAS AVANZADO METODO DE ABRIR EN CONSOLA
# Beginner tutorial script: strings, keyboard input, conditionals,
# functions and lists.  Several sections are disabled by wrapping them in
# triple-quoted string literals (they parse but never execute).
print("PYTHON MAS AVANZADO")
texto = "TEXTO DE PRUEBA"
nombre = "FREYDER"
altura = "2 metros"
year = 2021
# Commented-out f-string alternative to the concatenation below:
#print(f"{texto}--{nombre}--{altura}--{str(year)}")
print(texto + " " +nombre + " "+ altura +" "+ str(year))
# Keyboard input: prompt the user for their name and echo it back.
sitio = input("CUAL ES TU NOMBRE: ")
print(sitio)
# Conditionals example (disabled: kept inside a string literal).
"""
altura = int(input("Cual es tu altura?: " ))
if altura > 190:
    print("ALT@")
else:
    print("BAJO")
"""
# Functions example (also disabled via a string literal).
"""
#FUNCIONES
var_altura = int(input("Cual es tu altura?: " ))
def mostrarAltura(estatura):
    resultado = ""
    if estatura > 190:
        resultado = ("ALT@")
    else:
        resultado = ("BAJO")
    return resultado
print(mostrarAltura(var_altura))
"""
# Lists: index access and iteration.
personas = ["PACHO", "HUGO", "PEIPEI"]
print(personas[2])
for persona in personas:
    print(persona)
| 17.078431 | 56 | 0.629162 |
11c365d4ccc71a94837656d754364a0fe60f8958 | 3,615 | py | Python | Tools/MakeHDF.py | Kadantte/VideoSuperResolution | 4c86e49d81c7a9bea1fe0780d651afc126768df3 | [
"MIT"
] | 1,447 | 2018-06-04T08:44:07.000Z | 2022-03-29T06:19:10.000Z | Tools/MakeHDF.py | Evergreengyq/VideoSuperResolution | 1d0c54fafaf7a02f0d69408502f90c55f0f76536 | [
"MIT"
] | 96 | 2018-08-29T01:02:45.000Z | 2022-01-12T06:00:01.000Z | Tools/MakeHDF.py | Evergreengyq/VideoSuperResolution | 1d0c54fafaf7a02f0d69408502f90c55f0f76536 | [
"MIT"
] | 307 | 2018-06-26T13:35:54.000Z | 2022-01-21T09:01:54.000Z | # Copyright (c): Wenyi Tang 2017-2019.
# Author: Wenyi Tang
# Email: wenyi.tang@intel.com
# Update Date: 2019/4/3 下午5:03
import argparse
import time
from pathlib import Path
import h5py
import numpy as np
import tqdm
from PIL import Image
# Public task names; doubles as the valid --task_name choices below.
__all__ = ["gather_videos_vqp", "gather_videos", "print_dataset"]
# NOTE: the CLI is parsed at import time so FLAGS is available to every
# task function; parse_known_args tolerates unrecognized extra options.
parser = argparse.ArgumentParser(description="Make HDF5 datasets")
parser.add_argument("input_dir", help="path of the input root folder.")
parser.add_argument("-o", "--output", help="output hdf file path.")
parser.add_argument("-a", "--append", action='store_true')
parser.add_argument("-t", "--task_name", choices=__all__, help="task name")
parser.add_argument("--compression", type=int, default=None)
parser.add_argument("--glob", help="glob pattern to gather files inside input."
                    "For recursively glob, use **/*.")
parser.add_argument("--data_format",
                    choices=('channels_first', 'channels_last'),
                    default='channels_first', help="data format (default: CHW)")
FLAGS, args = parser.parse_known_args()
def make_hdf_header():
  """Open the output HDF5 file and stamp common metadata attributes.

  Returns the open ``h5py.File`` when ``-o/--output`` was given, otherwise
  falls through and returns ``None`` implicitly -- intentional, since the
  ``print_dataset`` task needs no output file and ``flush_hdf`` tolerates
  ``None``.  Mode is append ('a') with ``-a/--append``, else truncate ('w').
  """
  if FLAGS.output:
    if FLAGS.append:
      fd = h5py.File(FLAGS.output, 'a')
    else:
      fd = h5py.File(FLAGS.output, 'w')
    fd.attrs['author'] = 'LoSealL'
    fd.attrs['email'] = 'wenyi.tang@intel.com'
    fd.attrs['date'] = time.strftime("%Y-%m-%d")
    fd.attrs['data_format'] = FLAGS.data_format
    return fd
def flush_hdf(fd: h5py.File):
  """Close *fd* if it is an open h5py File; no-op for None/anything else."""
  if isinstance(fd, h5py.File):
    fd.close()
def gather_videos_vqp(fd: h5py.File):
  """Specified for VQP.

  Like gather_videos, but leaf folder names are treated as
  "<sequence>_<suffix>": all variants sharing one prefix are appended as
  slots of a single resizable dataset.  The fixed maxshape of 52 on the
  leading axis presumably corresponds to one slot per QP value --
  TODO confirm against the VQP data layout.
  """
  root = Path(FLAGS.input_dir)
  # Default to all direct children when no --glob pattern is given.
  glob = FLAGS.glob or '*'
  inputs = sorted(root.glob(glob))
  # Candidate video folders = parents of the globbed files.
  candidates = set(i.parent for i in filter(lambda f: f.is_file(), inputs))
  # NOTE(review): frames_info is collected but, unlike gather_videos,
  # never written to fd.attrs -- confirm whether that is intentional.
  frames_info = {}
  for p in tqdm.tqdm(candidates):
    # Load every frame in the folder, in sorted (temporal) filename order.
    seq = [Image.open(f) for f in
           filter(lambda f: f.is_file(), sorted(p.rglob('*')))]
    cube = np.stack(seq)  # (frames, H, W, C) for RGB inputs
    if FLAGS.data_format == 'channels_first':
      cube = cube.transpose([0, 3, 1, 2])
    # Extra leading axis: each folder fills one slot of the dataset.
    cube = np.expand_dims(cube, 0)
    path = p.relative_to(root)
    # ugly: strip the "_<suffix>" part so variants share one dataset key.
    path = path.parent / path.stem.split('_')[0]
    key = str(path.as_posix())
    if not key in fd:
      fd.create_dataset(key, data=cube,
                        maxshape=(52,) + cube.shape[1:],
                        compression=FLAGS.compression)
      frames_info[key] = len(seq)
    else:
      # Dataset exists: grow the leading axis by one and append this cube.
      d = fd[key]
      cnt = d.shape[0] + 1
      d.resize(cnt, 0)
      d[-1] = cube
    del cube
def gather_videos(fd: h5py.File):
  """Gather videos. Video is defined in a folder containing sequential images.

  Each candidate folder becomes one dataset in *fd*, keyed by its path
  relative to --input_dir; per-video frame counts are stored in the
  file-level 'frames_info' attribute.
  """
  root = Path(FLAGS.input_dir)
  # Default to all direct children when no --glob pattern is given.
  glob = FLAGS.glob or '*'
  inputs = sorted(root.glob(glob))
  # Candidate video folders = parents of the globbed files.
  candidates = set(i.parent for i in filter(lambda f: f.is_file(), inputs))
  frames_info = {}
  for p in tqdm.tqdm(candidates):
    # Frames are read in sorted (temporal) filename order.
    seq = [Image.open(f) for f in
           filter(lambda f: f.is_file(), sorted(p.rglob('*')))]
    cube = np.stack(seq)  # (frames, H, W, C) for RGB inputs
    if FLAGS.data_format == 'channels_first':
      cube = cube.transpose([0, 3, 1, 2])
    path = p.relative_to(root)
    key = str(path.as_posix())
    fd.create_dataset(key, data=cube, compression=FLAGS.compression)
    frames_info[key] = len(seq)
    del cube
  fd.attrs['frames_info'] = list(frames_info.items())
def print_dataset(*args):
  """Print each dataset's key and shape in the HDF5 file at --input_dir.

  Accepts (and ignores) positional arguments so it can be dispatched by
  main() exactly like the other tasks.
  """
  def _show(name, obj):
    print(f"key: [{name}], shape: {obj.shape}")
  source = Path(FLAGS.input_dir)
  if source.exists():
    with h5py.File(str(source), 'r') as hdf_file:
      hdf_file.visititems(_show)
def main():
  """Open the output file, run the selected task, then close the file."""
  fd = make_hdf_header()  # None when no --output was given
  # Dispatch by name; --task_name choices are restricted to __all__ by the
  # parser, so this cannot look up arbitrary globals.
  globals()[FLAGS.task_name](fd)
  flush_hdf(fd)
if __name__ == '__main__':
  main()
| 30.125 | 81 | 0.634302 |
11c45856fc39f00ce8b427bda4629a69a7f9c3b7 | 1,480 | py | Python | modules/ddg_appwv_cookies.py | ItWasDNS/DDG-Parser | fd63099df7b93a603b9fe2ae4259c232f0555a65 | [
"MIT"
] | null | null | null | modules/ddg_appwv_cookies.py | ItWasDNS/DDG-Parser | fd63099df7b93a603b9fe2ae4259c232f0555a65 | [
"MIT"
] | null | null | null | modules/ddg_appwv_cookies.py | ItWasDNS/DDG-Parser | fd63099df7b93a603b9fe2ae4259c232f0555a65 | [
"MIT"
] | null | null | null | """
Process 'com.duckduckgo.mobile.android/app_webview/Cookies'
"""
import os
import sqlite3
from modules.helpers.ddg_path_handler import process_directory_paths
query_cookies = """
SELECT
host_key,
path,
name,
value,
creation_utc,
last_access_utc,
expires_utc,
secure,
httponly,
persistent,
encrypted_value
FROM cookies;
"""
cookies_template = """--
Host: %s
Path: %s
Cookie Name: %s
Cookie Value: %s
Cookie Creation: %s
Cookie Expiration: %s
"""
def process_appwv_cookies(duckduckgo_path, output_path):
    """Process DDG 'Cookies' database.

    Reads the Chromium-style ``app_webview/Cookies`` SQLite database under
    *duckduckgo_path* (assumed to end with a path separator, since the
    subpath is appended by string concatenation) and writes a
    human-readable report to ``appwv_cookies_output.txt`` in *output_path*.

    Always returns None; errors are recorded in the output file.
    """
    with open(os.path.join(output_path, 'appwv_cookies_output.txt'), 'w') as o:
        o.write("Processed: 'com.duckduckgo.mobile.android/app_webview/Cookies'\n")
        try:
            conn = sqlite3.connect(duckduckgo_path + 'app_webview/Cookies')
            answer = conn.execute(query_cookies).fetchall()
            conn.close()
        except sqlite3.OperationalError as e:
            # Database missing/locked/corrupt: record the error and stop.
            o.write("Error: %s" % str(e))
            return None
        if len(answer) == 0:
            o.write("No Cookies Found in app_webview/Cookies")
            return None
        for result in answer:
            # SELECT order: 0 host_key, 1 path, 2 name, 3 value,
            # 4 creation_utc, 5 last_access_utc, 6 expires_utc.
            # Bug fix: "Cookie Expiration" previously printed result[5]
            # (last_access_utc); use result[6] (expires_utc) instead.
            o.write(cookies_template % (result[0], result[1], result[2],
                                        result[3], result[4], result[6]))
if __name__ == '__main__':
    # Resolve the DDG application data path and output directory, then
    # parse the Cookies database and write the report.
    ddg_path, out_path = process_directory_paths()
    process_appwv_cookies(ddg_path, out_path)
| 25.964912 | 107 | 0.663514 |
11c4b04fb594071b02b7ee34e2b0b343fa536a12 | 3,382 | py | Python | scripts/redis_performance_test.py | Robbybp/IDAES-CLC | 5498aeab070afe5f3dc57be4cd198250f0f88ff9 | [
"MIT"
] | null | null | null | scripts/redis_performance_test.py | Robbybp/IDAES-CLC | 5498aeab070afe5f3dc57be4cd198250f0f88ff9 | [
"MIT"
] | 1 | 2021-06-01T23:42:14.000Z | 2021-06-01T23:42:14.000Z | scripts/redis_performance_test.py | Robbybp/IDAES-CLC | 5498aeab070afe5f3dc57be4cd198250f0f88ff9 | [
"MIT"
] | null | null | null | """
A simple and short Redis performance test.
"""
__author__ = 'Dan Gunter <dkgunter@lbl.gov>'
__date__ = '8/8/16'
import argparse
import logging
import os
import redis
import subprocess
import sys
import time
_log = logging.getLogger(__name__)
_h = logging.StreamHandler()
_h.setFormatter(logging.Formatter('%(asctime)s %(levelname)10s - %(message)s'))
_log.addHandler(_h)
def run_server(binpath=None):
_log.info("Run Redis server")
if binpath:
server_cmd = os.path.join(binpath, 'redis-server')
else:
server_cmd = 'redis-server'
retcode = subprocess.Popen([server_cmd])
return retcode
def run_performance_test(cmd='set', num_items=1000, list_len=5):
_log.info("Run Performance test")
r = redis.StrictRedis(host='localhost', port=6379, db=0)
data = ['bar'] * list_len
if cmd == 'set':
t0 = time.time()
t1 = redis_set(r, num_items, data)
elif cmd == 'get':
redis_set(r, num_items, data)
t0 = time.time()
t1 = redis_get(r, num_items)
elif cmd == 'mix':
t0 = time.time()
t1 = redis_getset(r, num_items, data)
else:
_log.error('Bad command: {}'.format(cmd))
return
report_timing(True, cmd, t1 - t0, num_items, ['list-length'], ['{}'.format(list_len)])
def redis_set(r, num_items, data):
i = 0
while i < num_items:
key = 'foo' + str(i)
r.set(key, data)
i += 1
return time.time()
def redis_get(r, num_items):
i = 0
while i < num_items:
key = 'foo' + str(i)
data = r.get(key)
i += 1
return time.time()
def redis_getset(r, num_items, data):
i = 0
while i < num_items:
key = 'foo' + str(i)
r.set(key, data)
data2 = r.get(key)
i += 1
return time.time()
def report_timing(readable, mode, dt, n, info_hdr, info):
rate = 1. * n / dt
gap = 1. * dt / n
if readable:
kvp = ', '.join(['{}={}'.format(k, v) for k, v in zip(info_hdr, info)])
print("{}: Processed {:d} items in {:.3f} seconds: {:.1f} items/sec <-> {:.6f} seconds/item. {}"
.format(mode, n, dt, rate, gap, kvp))
else:
print('blah')
def verbose_add(parser):
"""Add a verbosity argument to an ArgumentParser.
"""
parser.add_argument('-v', '--verbose', dest='vb',
action='count', default=0)
def verbose_set_log(vb, log):
"""Set logging level from verbosity level.
"""
if vb >= 2:
log.setLevel(logging.DEBUG)
elif vb >= 1:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.WARN)
def main():
parser = argparse.ArgumentParser()
parser.add_argument('-m', '--mode', dest='mode', help='mode: get, set, mix')
parser.add_argument('-n', '--count', dest='count', type=int, help='iterations', default=1000)
parser.add_argument('-z', '--length', dest='len', type=int, help='list length', default=5)
parser.add_argument('-s', '--server', dest='server', action='store_true')
verbose_add(parser)
args = parser.parse_args()
verbose_set_log(args.vb, _log)
if args.server:
retcode = run_server()
_log.info("Redis server stopped")
return retcode
else:
run_performance_test(cmd=args.mode, num_items=args.count, list_len=args.len)
if __name__ == '__main__':
sys.exit(main())
| 28.661017 | 104 | 0.59521 |
11c756cc812aa8aa64b2f69c97b3ae507b530f8b | 1,323 | py | Python | Question-1.py | sowmyamanojna/CS6910-Deep-Learning-Assignment-1 | e46d3a82bdfb61d7527ed3daf9250bb4ce228854 | [
"MIT"
] | null | null | null | Question-1.py | sowmyamanojna/CS6910-Deep-Learning-Assignment-1 | e46d3a82bdfb61d7527ed3daf9250bb4ce228854 | [
"MIT"
] | null | null | null | Question-1.py | sowmyamanojna/CS6910-Deep-Learning-Assignment-1 | e46d3a82bdfb61d7527ed3daf9250bb4ce228854 | [
"MIT"
] | null | null | null | print("Importing packages... ", end="")
##############################################################################
import wandb
import numpy as np
from keras.datasets import fashion_mnist
import matplotlib.pyplot as plt
wandb.init(project="trail-1")
print("Done!")
##############################################################################
print("Loading data... ", end="")
# Load the dataset
[(x_train, y_train), (x_test, y_test)] = fashion_mnist.load_data()
# Get the number of classes and their name mappings
num_classes = 10
class_mapping = {0: "T-shirt/top", 1: "Trouser", 2: "Pullover", 3: "Dress", 4: "Coat", 5: "Sandal", 6: "Shirt", 7: "Sneaker", 8: "Bag", 9: "Ankle boot"}
print("Done!")
##############################################################################
# Plotting a figure from each class
plt.figure(figsize=[12, 5])
img_list = []
class_list = []
for i in range(num_classes):
position = np.argmax(y_train==i)
image = x_train[position,:,:]
plt.subplot(2, 5, i+1)
plt.imshow(image)
plt.title(class_mapping[i])
img_list.append(image)
class_list.append(class_mapping[i])
wandb.log({"Question 1": [wandb.Image(img, caption=caption) for img, caption in zip(img_list, class_list)]})
##############################################################################
| 35.756757 | 152 | 0.517763 |
11c77e0e125890c44783034eeeb3c9b9a0ff0a7d | 1,386 | py | Python | app/api/v1/task.py | coder-yuan/vue-template-api | 135f13d7c32b4a2830366fc0b79a1e2a1eda6923 | [
"MIT"
] | null | null | null | app/api/v1/task.py | coder-yuan/vue-template-api | 135f13d7c32b4a2830366fc0b79a1e2a1eda6923 | [
"MIT"
] | null | null | null | app/api/v1/task.py | coder-yuan/vue-template-api | 135f13d7c32b4a2830366fc0b79a1e2a1eda6923 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Project : icode_flask_be
# @Package : task
# @Author : jackeroo
# @Time : 2019/11/29 5:25 下午
# @File : task.py
# @Contact :
# @Software : PyCharm
# @Desc :
from app.extensions import celery
from flask_jwt_extended import jwt_required
from app.helper.HttpHelper import HttpHelper
from app.libs.redprint import RedPrint
api = RedPrint('task')
@api.route('/<task_id>', methods=['GET'])
@jwt_required
def get_task_result(task_id):
task = celery.AsyncResult(task_id)
if task.state == 'PENDING':
# job did not start yet
response = {
'state': task.state,
'current': 0,
'total': 1,
'status': 'Pending...'
}
elif task.state != 'FAILURE':
response = {
'state': task.state,
'current': task.info.get('current', 0),
'total': task.info.get('total', 1),
'status': task.info.get('status', '')
}
if 'result' in task.info:
response['result'] = task.info['result']
else:
# something went wrong in the background job
response = {
'state': task.state,
'current': 1,
'total': 1,
'status': str(task.info), # this is the exception raised
}
return HttpHelper.normal_handler(response)
| 27.72 | 69 | 0.554113 |
11c82b11914ac9b51ec458c369a7893fadc1d1d2 | 1,851 | bzl | Python | config/infra/buildkite/deps.bzl | corypaik/labtools | 1d9d75eff40e8bf258e8de6d4377bbea073e109d | [
"Apache-2.0"
] | 1 | 2021-09-16T11:57:35.000Z | 2021-09-16T11:57:35.000Z | config/infra/buildkite/deps.bzl | corypaik/labtools | 1d9d75eff40e8bf258e8de6d4377bbea073e109d | [
"Apache-2.0"
] | null | null | null | config/infra/buildkite/deps.bzl | corypaik/labtools | 1d9d75eff40e8bf258e8de6d4377bbea073e109d | [
"Apache-2.0"
] | null | null | null | # Copyright 2021 The LabTools Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" defines dependencies for building a buildkite deployment """
load("@bazel_tools//tools/build_defs/repo:http.bzl", "http_archive", "http_file")
load("@io_bazel_rules_docker//container:container.bzl", "container_pull")
def buildkite_deps():
""" download buildkite deps """
http_file(
name = "com_github_krallin_tini",
downloaded_file_path = "tini",
urls = ["https://github.com/krallin/tini/releases/download/v0.19.0/tini"],
sha256 = "93dcc18adc78c65a028a84799ecf8ad40c936fdfc5f2a57b1acda5a8117fa82c",
executable = True,
)
http_archive(
name = "com_github_buildkite_agent",
url = "https://github.com/buildkite/agent/releases/download/v3.31.0/buildkite-agent-linux-amd64-3.31.0.tar.gz",
build_file_content = "exports_files([\"buildkite-agent\", \"buildkite-agent.cfg\"])",
sha256 = "f8b3b59d1c27e7e2ccc46819e4cafedb6d58ee1fdbfd006b22f34950558e4a27",
)
container_pull(
name = "bazel_ubuntu_2004",
registry = "gcr.io",
repository = "bazel-public/ubuntu2004-nojava",
digest = "sha256:4fceaeb1734849aa3d08168e1845165c98b3acfc69901cd4bf097f7512764d8f",
)
| 43.046512 | 119 | 0.688817 |
11cb6ae52142719479c56b52ce2a6eeaa8a094de | 3,855 | py | Python | Tests/Data/Parabolic/T/3D_3BHEs_array/pre/3bhes.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | null | null | null | Tests/Data/Parabolic/T/3D_3BHEs_array/pre/3bhes.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | 1 | 2021-09-02T14:21:33.000Z | 2021-09-02T14:21:33.000Z | Tests/Data/Parabolic/T/3D_3BHEs_array/pre/3bhes.py | jbathmann/ogs | a79e95d7521a841ffebd441a6100562847e03ab5 | [
"BSD-4-Clause"
] | null | null | null | ###
# Copyright (c) 2012-2020, OpenGeoSys Community (http://www.opengeosys.org)
# Distributed under a Modified BSD License.
# See accompanying file LICENSE.txt or
# http://www.opengeosys.org/project/license
###
# Execute this file to generate TESPy network csv files
from tespy import cmp, con, nwk, hlp
from tespy import nwkr
import numpy as np
import pandas as pd
# %% network
btes = nwk.network(fluids=['water'],
T_unit='K',
p_unit='bar',
h_unit='kJ / kg',
T_range=[273.25, 373.15],
p_range=[1, 20],
h_range=[1, 1000])
# components
fc_in = cmp.source('from consumer inflow')
fc_out = cmp.sink('from consumer outflow')
pu = cmp.pump('pump')
sp = cmp.splitter('splitter', num_out=3)
# bhe:
bhe_name = 'BHE1'
assert 'BHE1' in bhe_name, "BHE should be named with 'BHE1'"
bhe1 = cmp.heat_exchanger_simple(bhe_name)
bhe_name = 'BHE2'
assert 'BHE2' in bhe_name, "BHE should be named with 'BHE2'"
bhe2 = cmp.heat_exchanger_simple(bhe_name)
bhe_name = 'BHE3'
assert 'BHE3' in bhe_name, "BHE should be named with 'BHE3'"
bhe3 = cmp.heat_exchanger_simple(bhe_name)
mg = cmp.merge('merge', num_in=3)
cons = cmp.heat_exchanger_simple('consumer')
# connections
# inlet
fc_pu = con.connection(fc_in, 'out1', pu, 'in1')
pu_sp = con.connection(pu, 'out1', sp, 'in1')
sp_bhe1 = con.connection(sp, 'out1', bhe1, 'in1')
sp_bhe2 = con.connection(sp, 'out2', bhe2, 'in1')
sp_bhe3 = con.connection(sp, 'out3', bhe3, 'in1')
bhe1_mg = con.connection(bhe1, 'out1', mg, 'in1')
bhe2_mg = con.connection(bhe2, 'out1', mg, 'in2')
bhe3_mg = con.connection(bhe3, 'out1', mg, 'in3')
mg_cons = con.connection(mg, 'out1', cons, 'in1')
cons_fc = con.connection(cons, 'out1', fc_out, 'in1')
btes.add_conns(fc_pu, pu_sp, sp_bhe1, sp_bhe2, sp_bhe3, bhe1_mg, bhe2_mg,
bhe3_mg, mg_cons, cons_fc)
# busses
heat = con.bus('consumer heat demand')
heat.add_comps({'c': cons, 'p': 'P'})
btes.add_busses(heat)
# flow_char
# provide volumetric flow in m^3 / s
x = np.array([
0.00, 0.00001952885971862, 0.00390577194372, 0.005858657915586,
0.007811543887448, 0.00976442985931, 0.011717315831173, 0.013670201803035,
0.015623087774897, 0.017575973746759, 0.019528859718621, 0.021481745690483,
0.023434631662345, 0.025387517634207, 0.027340403606069, 0.029293289577931,
0.031246175549793, 0.033199061521655, 0.035151947493517, 0.037104833465379,
0.039057719437241, 0.041010605409104, 0.042963491380966, 0.044916377352828,
0.04686926332469, 0.048822149296552, 0.050775035268414, 0.052727921240276,
0.054680807212138, 0.056633693184
])
# provide head in Pa
y = np.array([
0.47782539, 0.47725723, 0.47555274, 0.47271192, 0.46873478, 0.46362130,
0.45737151, 0.44998538, 0.44146293, 0.43180416, 0.4220905, 0.40907762,
0.39600986, 0.38180578, 0.36646537, 0.34998863, 0.33237557, 0.31362618,
0.29374046, 0.27271841, 0.25056004, 0.22726535, 0.20283432, 0.17726697,
0.15056329, 0.12272329, 0.09374696, 0.06363430, 0.03238531, 0.00000000
]) * 1e5
f = hlp.dc_cc(x=x, y=y, is_set=True)
pu.set_attr(flow_char=f)
# components paramerization
# system inlet
inflow_head = 2 # bar
fc_pu.set_attr(p=inflow_head, m=0.6, fluid={'water': 1})
# pump
pu.set_attr(eta_s=0.90)
# bhes
bhe1.set_attr(D=0.02733, L=100, ks=0.00001)
bhe2.set_attr(D=0.02733, L=100, ks=0.00001)
bhe3.set_attr(D=0.02733, L=100, ks=0.00001)
# consumer
cons.set_attr(D=0.2, L=20, ks=0.00001)
# connection parametrization
# Tin:
pu_sp.set_attr(h=con.ref(cons_fc, 1, 0))
# for BHEs:
# Tout:
bhe1_mg.set_attr(T=303.15)
bhe2_mg.set_attr(T=303.15)
bhe3_mg.set_attr(T=303.15)
# consumer heat demand
heat.set_attr(P=-3000) # W
# solve
btes.set_printoptions(print_level='info')
btes.solve('design')
# save to csv:
btes.save('tespy_nw', structure=True)
| 29.204545 | 79 | 0.692866 |
11cc4762ea46108968ee8aa2c98fc1627da5eca3 | 981 | py | Python | pypy/jit/codegen/ppc/test/test_rgenop.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 12 | 2016-01-06T07:10:28.000Z | 2021-05-13T23:02:02.000Z | pypy/jit/codegen/ppc/test/test_rgenop.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | null | null | null | pypy/jit/codegen/ppc/test/test_rgenop.py | camillobruni/pygirl | ddbd442d53061d6ff4af831c1eab153bcc771b5a | [
"MIT"
] | 2 | 2016-07-29T07:09:50.000Z | 2016-10-16T08:50:26.000Z | import py
from pypy.jit.codegen.ppc.rgenop import RPPCGenOp
from pypy.rpython.lltypesystem import lltype
from pypy.jit.codegen.test.rgenop_tests import AbstractRGenOpTests, FUNC, FUNC2
from ctypes import cast, c_int, c_void_p, CFUNCTYPE
from pypy.jit.codegen.ppc import instruction as insn
# for the individual tests see
# ====> ../../test/rgenop_tests.py
class FewRegisters(RPPCGenOp):
freeregs = {
insn.GP_REGISTER:insn.gprs[3:6],
insn.FP_REGISTER:insn.fprs,
insn.CR_FIELD:insn.crfs[:1],
insn.CT_REGISTER:[insn.ctr]}
class FewRegistersAndScribble(FewRegisters):
DEBUG_SCRIBBLE = True
class TestRPPCGenop(AbstractRGenOpTests):
RGenOp = RPPCGenOp
class TestRPPCGenopNoRegs(TestRPPCGenop):
RGenOp = FewRegisters
def compile(self, runner, argtypes):
py.test.skip("Skip compiled tests w/ restricted register allocator")
class TestRPPCGenopNoRegsAndScribble(TestRPPCGenopNoRegs):
RGenOp = FewRegistersAndScribble
| 29.727273 | 79 | 0.755352 |
11cd2cba6c6fa6a758300d6008e0f69f4e32d609 | 996 | py | Python | app/someapp/views.py | artas728/monitoring-example-prometheus-grafana | 2d72f29c19e8a280eca82ca1f25a7fa88453559c | [
"MIT"
] | null | null | null | app/someapp/views.py | artas728/monitoring-example-prometheus-grafana | 2d72f29c19e8a280eca82ca1f25a7fa88453559c | [
"MIT"
] | null | null | null | app/someapp/views.py | artas728/monitoring-example-prometheus-grafana | 2d72f29c19e8a280eca82ca1f25a7fa88453559c | [
"MIT"
] | null | null | null | from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from django.http import JsonResponse
from .models import TestModel
import json
import redis
import time
redis_cli = redis.Redis(host='127.0.0.1', port=6379, db=0)
@csrf_exempt
def save_to_redis(request):
data = json.loads(request.body.decode())
for key, value in data.items():
redis_cli.rpush(key, value)
return JsonResponse({'success': True, 'data': 'Saved in Redis'})
@csrf_exempt
def endpoint(request):
time.sleep(0.1)
return JsonResponse({'success': True, 'data': 'Request processed'})
@csrf_exempt
def write_to_db(request):
data = json.loads(request.body.decode())
for row in data:
TestModel.objects.create(key1=row['key1'],
key2=row['key2'],
key3=row['key3'],
key4=row['key4'])
return JsonResponse({'success': True, 'data': 'Data has been saved'})
| 29.294118 | 73 | 0.638554 |
11cf52ea9a3f1fafc7387cfc0418073a1858bb56 | 1,114 | py | Python | scripts/feedforwardness_algo_compare.py | neurodata/maggot_connectome | 7a1d5dcf3a01c0d60e287efeac6b50f7ccb29cdf | [
"MIT"
] | 1 | 2021-01-20T00:37:31.000Z | 2021-01-20T00:37:31.000Z | scripts/feedforwardness_algo_compare.py | neurodata/maggot_connectome | 7a1d5dcf3a01c0d60e287efeac6b50f7ccb29cdf | [
"MIT"
] | 17 | 2021-03-03T14:48:54.000Z | 2021-09-08T15:52:50.000Z | scripts/feedforwardness_algo_compare.py | neurodata/maggot_connectome | 7a1d5dcf3a01c0d60e287efeac6b50f7ccb29cdf | [
"MIT"
] | 2 | 2021-03-05T12:23:20.000Z | 2021-03-29T11:49:53.000Z | #%% [markdown]
# # Comparing approaches to feedforwardness ordering
# For evaluating feedforwardness, we have:
# - 4 networks
# - Axo-dendritic (AD)
# - Axo-axonic (AA)
# - Dendro-dendritic (DD)
# - Dendro-axonic (DA)
# - 4+ algorithms for finding an ordering
# - Signal flow (SF)
# - Spring rank (SR)
# - Graph match flow (GMF)
# - Linear regression flow (LRF)
# - Others...
# - SyncRank
# - SVD based, introduced in SyncRank paper, and a few regularized follow ups
# - 1+ test statistic for feedforwardness
# - Proportion of edges in upper triangle after sorting ($p_{upper}$)
# - Others...
# - Spring rank energy
#
# This notebook compares the performance of the different ordering algorithms on the
# same data, as well as the ordering for each of the 4 networks predicted by a single
# algorithm.
#%% [markdown]
# ## Different algorithms, same dataset
#%% [markdown]
# ### Plot pairsplots of the ranks from each algorithm
#%% [markdown]
# ## Different datasets, same algorithm
#%% [markdown]
# ## Plot the adjacency matrices sorted by each algorithm
| 29.315789 | 85 | 0.672352 |
11d1192c076a5c79df7f15736899d5d72fa6cb5f | 1,401 | py | Python | NewEventReporter/blockmanager/blockmanager.py | Deofex/GETNFTBOTV3 | 0b8f1a77925b8f87224b2eaae93560e154b881b8 | [
"MIT"
] | null | null | null | NewEventReporter/blockmanager/blockmanager.py | Deofex/GETNFTBOTV3 | 0b8f1a77925b8f87224b2eaae93560e154b881b8 | [
"MIT"
] | null | null | null | NewEventReporter/blockmanager/blockmanager.py | Deofex/GETNFTBOTV3 | 0b8f1a77925b8f87224b2eaae93560e154b881b8 | [
"MIT"
] | null | null | null | import logging
import json
import os
# Initialize logger
logger = logging.getLogger(__name__)
class BlockManager():
    """Tracks the highest blockchain block number processed so far.

    The value is persisted as JSON in a config file and is monotonic:
    attempts to set a lower or equal block number are ignored.
    """
    def __init__(self, config, processedblock=0):
        # config: path to the JSON state file; processedblock: initial height.
        logger.info('Initialize Block Manager')
        self.processedblock = int(processedblock)
        self.config = config
        # An existing state file overrides the initial value -- but only if
        # the stored value is higher (see set_processedblock's guard).
        if os.path.isfile(self.config):
            self.load_config()
    def set_processedblock(self,processedblock):
        # Monotonic guard: never move the processed-block marker backwards.
        if int(processedblock) <= self.processedblock:
            logger.warning('Block will not be set because block is '
                           'lower or equal than the previous block')
            return
        logger.info('Set processed block on: {}'.format(processedblock))
        self.processedblock = int(processedblock)
        # Persist the new value so it survives restarts.
        blockconfig = {
            'processedblock': processedblock
        }
        with open(self.config, 'w') as config:
            json.dump(blockconfig, config)
    def load_config(self):
        # Restore the persisted block number. Routed through
        # set_processedblock, so a stored value that is not strictly greater
        # than the current one is ignored (and a warning is logged).
        logger.info('Loading config')
        with open(self.config, 'r') as config:
            loadconfig = json.load(config)
        self.set_processedblock(loadconfig['processedblock'])
    def get_processedblock(self):
        # Current (highest) processed block number as an int.
        return self.processedblock
if __name__ == '__main__':
    # Manual smoke test: start at block 300000, then try to set a *lower*
    # block (242443); the monotonic guard should reject it, so the printed
    # value remains 300000.
    blockprocessedconfig = './config/blockprocessed.json'
    bm = BlockManager(blockprocessedconfig,300000)
    bm.set_processedblock(242443)
    print(bm.get_processedblock())
| 31.133333 | 72 | 0.660243 |
11d3d683bc5376ecd600cfbd620489e72ca787ca | 5,299 | py | Python | nmf_eval.py | logan-wright/INMF | 611ccdfd4608ec37629975d04e013ab97e05ff31 | [
"Apache-2.0"
] | 2 | 2017-06-16T19:18:53.000Z | 2019-04-18T02:11:45.000Z | nmf_eval.py | logan-wright/INMF | 611ccdfd4608ec37629975d04e013ab97e05ff31 | [
"Apache-2.0"
] | null | null | null | nmf_eval.py | logan-wright/INMF | 611ccdfd4608ec37629975d04e013ab97e05ff31 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 23 20:35:49 2017
@author: wrightad
"""
import numpy as N
import matplotlib.pyplot as plt
def rmse(v1,v2):
    '''
    rmse(v1,v2) - Calculates the root mean square error between two arrays

    Author: Logan Wright, logan.wright@colorado.edu

    Description:
        - Root-Mean-Square-Error between two numpy arrays of the same shape.
        - Generalized from the original 1-D-only version: the mean is now
          taken over *all* elements (diff.size) instead of dividing by the
          first dimension only, so 2-D and higher inputs are averaged
          correctly. For 1-D inputs the result is unchanged.

    Inputs:
        v1 - Numpy array of arbitrary shape
        v2 - Numpy array with the same shape as v1
    Output:
        rmse value, or None (with a printed warning) if the shapes differ
    '''
    if v1.shape == v2.shape:
        diff = v1 - v2
        # Mean over every element, not just the leading axis.
        rms = N.sqrt(N.sum(diff ** 2) / diff.size)
    else:
        # Preserve the original best-effort behavior on shape mismatch.
        print('Dimension Mismatch: v1.shape ~= v2.shape!')
        rms = None
    return rms
def sid(v1,v2):
    '''
    sid(v1,v2) - Spectral Information Divergence (SID) between two spectra

    Author: Logan Wright, logan.wright@colorado.edu

    Description:
        Normalizes each input to a probability distribution and returns the
        sum of the two directed Kullback-Leibler divergences between them
        (a symmetric similarity measure). Inputs must be the same length
        and strictly positive.

    Reference:
        Chang, C.-I. (2000), An information-theoretic approach to spectral
        variability, similarity, and discrimination for hyperspectral image
        analysis, Inf. Theory, IEEE Trans., 46(5), 1927-1932,
        doi:10.1109/18.857802.

    Inputs:
        v1 - Numpy 1-dimensional array of arbitrary length
        v2 - Numpy 1-dimensional array with a length equal to that of v1
    Output:
        SID, the SID value for the comparison of the two vectors
    '''
    # Normalize each spectrum so it sums to one.
    p, q = v1 / N.sum(v1), v2 / N.sum(v2)
    log_ratio = N.log(p / q)
    # KL(p||q) + KL(q||p); note log(q/p) == -log(p/q).
    return N.sum(p * log_ratio) + N.sum(q * -log_ratio)
def scattering_fit(data, function, sigma = 1e-9):
    '''
    Linear least-squares fit for a function of the form y = a * f(x)

    Author: Logan Wright, logan.wright@colorado.edu

    Description:
        Iteratively searches for the scalar amplitude "a" that minimizes
        sum((data - a * function)**2). Each pass evaluates 100 evenly spaced
        candidate amplitudes, then narrows the search interval around the
        best one; iteration stops when the squared error improves by less
        than `sigma`.

    Inputs:
        data, the y data that the function is to be fit to. Should be a vector
            (N,) or a 2D array with one single dimension.
        function, the function to be scaled with a linear factor to fit the
            data. Must have the same length as data.
        OPTIONAL:
        sigma, convergence threshold on the decrease of the squared error
    Output:
        result, the best-fit *curve* a * function (note: the scalar "a"
        itself is not returned)

    NOTE(review): relies on N.matlib (numpy.matlib) being available through
    the module-level `import numpy as N` -- confirm numpy.matlib is imported.
    Also assumes function has no zeros, otherwise (data/function).max() is
    not finite -- verify against callers.
    '''
    # Initialize parametrs, including change and the initial minimum
    change = 100 # Arbitrary value greater than sigma
    minval = N.sum((data - function) ** 2) # Initial Min
    # Calculate the intial multiplicative factor between the data and function,
    # and use to set range for calculating minimums
    Amin = 0
    Amax = (data/function).max()
    # Iterate
    while change > sigma:
        # Create Array of Amplitudes for the fit
        Arr = N.linspace(Amin,Amax,100)
        Test = N.matmul(N.reshape(Arr,(-1,1)),function)
        # Calculate the square difference between the data and the fit guess
        diff = Test - N.matlib.repmat(N.reshape(data,(1,-1)),100,1)
        # Find Minimum, and calculate the change and difference.
        val = N.sum(diff ** 2, axis = 1)
        vali = N.argmin(val)
        change = minval - val.min()
        minval = val.min()
        # Calculate New range of "a" for next iteration
        Amin = Arr[max(vali-2,0)]
        Amax = Arr[min(vali+2,len(Arr)-1)]
    result = N.squeeze(Arr[vali] * function)
    return result
def bodhaine(wvl):
    '''
    bodhaine(wvl) - Bodhaine approximation of the Rayleigh optical depth

    Author: Logan Wright, logan.wright@colorado.edu

    Reference:
        Bodhaine, B. A., N. B. Wood, E. G. Dutton, and J. R. Slusser (1999),
        On Rayleigh optical depth calculations, J. Atmos. Ocean. Technol.,
        16(11 PART 2), 1854-1861,
        doi:10.1175/1520-0426(1999)016<1854:ORODC>2.0.CO;2.

    Inputs:
        wvl - wavelength(s), in MICROMETERS, at which to evaluate the
              Rayleigh optical depth (scalar or numpy array)
    Output:
        tr - Rayleigh optical depth(s) corresponding to the input wavelengths
    '''
    # Rational-polynomial fit coefficients from Bodhaine et al. (1999).
    coeff_s = 0.0021520
    coeff_a = 1.0455996
    coeff_b = 341.29061
    coeff_c = 0.90230850
    coeff_d = 0.0027059889
    coeff_e = 85.968563
    inv_sq = wvl ** -2
    sq = wvl ** 2
    numerator = coeff_s * (coeff_a - coeff_b * inv_sq - coeff_c * sq)
    denominator = 1 + coeff_d * inv_sq - coeff_e * sq
    tr = numerator / denominator
    return tr
11d4d6356bac4be3d9c387ca7446a41aec22d1ea | 89 | py | Python | navedex/apps.py | SousaPedro11/navedex-api | f9b2bc0284ebf27de368ece718434a94704c3876 | [
"MIT"
] | null | null | null | navedex/apps.py | SousaPedro11/navedex-api | f9b2bc0284ebf27de368ece718434a94704c3876 | [
"MIT"
] | null | null | null | navedex/apps.py | SousaPedro11/navedex-api | f9b2bc0284ebf27de368ece718434a94704c3876 | [
"MIT"
] | null | null | null | from django.apps import AppConfig
class NavedexConfig(AppConfig):
name = 'navedex'
| 14.833333 | 33 | 0.752809 |
11d5570c1f5104f2732b1bf852cd1144b65ea155 | 61 | py | Python | fastISM/__init__.py | kundajelab/fastISM | 1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b | [
"MIT"
] | 12 | 2020-09-20T17:03:48.000Z | 2022-03-16T06:51:52.000Z | fastISM/__init__.py | kundajelab/fastISM | 1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b | [
"MIT"
] | 5 | 2020-10-24T20:43:45.000Z | 2022-02-25T19:40:47.000Z | fastISM/__init__.py | kundajelab/fastISM | 1573feccba1ad5d9f1cee508f5bb03c4aa09bb2b | [
"MIT"
] | 2 | 2020-10-14T05:18:55.000Z | 2022-02-21T07:34:14.000Z | from .fast_ism import FastISM
from .ism_base import NaiveISM
| 20.333333 | 30 | 0.836066 |
11d7cc28fca1672a8acd01df3e20ebc2577f0edc | 3,127 | py | Python | dipy/reconst/tests/test_odf.py | Garyfallidis/dipy | 4341b734995d6f51ac9c16df26a7de00c46f57ef | [
"BSD-3-Clause"
] | 3 | 2015-07-31T20:43:18.000Z | 2019-07-26T13:58:07.000Z | dipy/reconst/tests/test_odf.py | Garyfallidis/dipy | 4341b734995d6f51ac9c16df26a7de00c46f57ef | [
"BSD-3-Clause"
] | 9 | 2015-05-13T17:44:42.000Z | 2018-05-27T20:09:55.000Z | dipy/reconst/tests/test_odf.py | Garyfallidis/dipy | 4341b734995d6f51ac9c16df26a7de00c46f57ef | [
"BSD-3-Clause"
] | 3 | 2016-08-05T22:43:16.000Z | 2017-06-23T18:35:13.000Z | import numpy as np
from numpy.testing import assert_array_equal
from ..odf import OdfFit, OdfModel, gfa
from dipy.core.triangle_subdivide import (create_half_unit_sphere,
disperse_charges)
from nose.tools import (assert_almost_equal, assert_equal, assert_raises,
assert_true)
class SimpleOdfModel(OdfModel):
def __init__(self):
v, e, f = create_half_unit_sphere(4)
self.set_odf_vertices(v, e)
self.odf = (v * [1, 2, 3]).sum(-1)
def evaluate_odf(self, signal):
return self.odf
def test_OdfModelfit():
data = np.zeros((10,2))
# Test basic case
model = SimpleOdfModel()
odffit = model.fit(data, normalize_peaks=1)
assert_array_equal(odffit.gfa, gfa(model.odf))
assert_array_equal(odffit.peak_values[:, 0], 1.)
assert_array_equal(odffit.peak_values[:, 1:], 0.)
mn, mx = model.odf.min(), model.odf.max()
assert_array_equal(odffit.qa[:, 0], (mx - mn) / mx)
assert_array_equal(odffit.qa[:, 1:], 0.)
assert_array_equal(odffit.peak_indices[:, 0], 53)
assert_array_equal(odffit.peak_indices[:, 1:], -1)
# Test that odf array matches and is right shape
odffit = model.fit(data, return_odf=True)
expected_shape = (len(data), len(model.odf))
assert_equal(odffit.odf.shape, expected_shape)
assert_true((model.odf == odffit.odf).all())
assert_array_equal(odffit.peak_values[:, 0], model.odf.max())
# Test mask
mask = (np.arange(10) % 2) == 1
odffit = model.fit(data, mask=mask, normalize_peaks=True)
assert_array_equal(odffit.gfa[~mask], 0)
assert_array_equal(odffit.qa[~mask], 0)
assert_array_equal(odffit.peak_values[~mask], 0)
assert_array_equal(odffit.peak_indices[~mask], -1)
assert_array_equal(odffit.gfa[mask], gfa(model.odf))
assert_array_equal(odffit.peak_values[mask, 0], 1.)
assert_array_equal(odffit.peak_values[mask, 1:], 0.)
mn, mx = model.odf.min(), model.odf.max()
assert_array_equal(odffit.qa[mask, 0], (mx - mn) / mx)
assert_array_equal(odffit.qa[mask, 1:], 0.)
assert_array_equal(odffit.peak_indices[mask, 0], 53)
assert_array_equal(odffit.peak_indices[mask, 1:], -1)
def test_OdfModelgetpeaks():
model = SimpleOdfModel()
peaks = model.get_directions(None)
assert_array_equal(peaks, model.odf_vertices[[53]])
class TestOdfModel():
def test_angular_distance(self):
model = SimpleOdfModel()
assert_almost_equal(model.angular_distance_threshold, 45)
model.angular_distance_threshold = 60
assert_almost_equal(model.angular_distance_threshold, 60)
assert_almost_equal(model._cos_distance_threshold, .5)
def test_set_odf_vertices(self):
model = OdfModel()
v, e, f = create_half_unit_sphere(4)
model.set_odf_vertices(v, e)
assert_array_equal(v, model.odf_vertices)
assert_array_equal(e, model.odf_edges)
assert_array_equal(abs(v.dot(v.T)), model._distance_matrix)
assert_raises(ValueError, model.set_odf_vertices, v[:, :2], e)
assert_raises(ValueError, model.set_odf_vertices, v/2, e)
| 37.674699 | 73 | 0.688839 |
11d8b360dafd771af3d50fb23f126c256bc27cc5 | 423 | py | Python | recieve.py | RyuYamamoto/inter-process-communication-py | 377c73833f230ba1132006c2cda86decd3580a5b | [
"MIT"
] | null | null | null | recieve.py | RyuYamamoto/inter-process-communication-py | 377c73833f230ba1132006c2cda86decd3580a5b | [
"MIT"
] | null | null | null | recieve.py | RyuYamamoto/inter-process-communication-py | 377c73833f230ba1132006c2cda86decd3580a5b | [
"MIT"
] | null | null | null | import socket
with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
    # Minimal TCP echo server: listen on localhost:50007 and, for each
    # client in turn, print every received chunk and echo it back with a
    # b'Recieved: ' prefix. Runs forever (one client at a time).
    s.bind(('127.0.0.1', 50007))
    s.listen(1)
    while True:
        conn, addr = s.accept()
        with conn:
            while True:
                data = conn.recv(1024)
                if not data:
                    # Client closed the connection; go wait for the next one.
                    break
                print('data: {}, add: {}'.format(data, addr))
                conn.sendall(b'Recieved: ' + data)
| 28.2 | 61 | 0.486998 |
11dc5601e32f2a14e2e6dbd6c443d6cb0fdbc322 | 4,503 | py | Python | utils.py | bbpp222006/elec_nose_plus | d79faa47d3fbb63c697501dd521e834bcc8e4814 | [
"MIT"
] | 1 | 2021-04-08T04:17:04.000Z | 2021-04-08T04:17:04.000Z | utils.py | bbpp222006/elec_nose_plus | d79faa47d3fbb63c697501dd521e834bcc8e4814 | [
"MIT"
] | null | null | null | utils.py | bbpp222006/elec_nose_plus | d79faa47d3fbb63c697501dd521e834bcc8e4814 | [
"MIT"
] | null | null | null | #!/usr/bin/python
# encoding: utf-8
#!/usr/bin/python
# encoding: utf-8
import torch
import torch.nn as nn
from torch.autograd import Variable
import collections
from tqdm import tqdm
import numpy as np
import cv2
import os
import random
from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
class strLabelConverter(object):
    """Convert between str and label.

    NOTE:
        Insert `blank` to the alphabet for CTC.

    Args:
        alphabet (list): set of the possible characters; the symbol '基线'
            is reserved as the CTC blank and always maps to index 0.
    """

    def __init__(self, alphabet):
        # Build the lookup WITHOUT mutating the caller's list. The original
        # implementation called alphabet.remove('基线') in place, which both
        # mutated the argument and raised ValueError when '基线' was absent.
        alphabet = [char for char in alphabet if char != '基线']
        self.dict = {}
        for i, char in enumerate(alphabet):
            # NOTE: 0 is reserved for 'blank' required by ctc
            self.dict[char] = i + 1
        self.dict['基线'] = 0
        # Reverse lookup used by decode().
        self.dict_reverse = {value: key for key, value in self.dict.items()}

    def encode(self, text):
        """Support batch or single str.

        Args:
            text (str or list of str): symbols to convert.

        Returns:
            torch.IntTensor [n]: encoded labels, one per input symbol.
            torch.IntTensor [n]: length of each symbol (always 1).
        """
        length = []
        result = []
        for char in text:
            length.append(1)
            result.append(self.dict[char])
        return (torch.IntTensor(result), torch.IntTensor(length))

    def decode(self, t, length=1, raw=False):
        """Decode encoded labels back into symbols.

        Args:
            t (torch.IntTensor): encoded labels.
            length, raw: kept for interface compatibility (unused).

        Returns:
            list of str: the decoded symbols.
        """
        return [self.dict_reverse[value] for value in t.numpy()]
class averager(object):
    """Running average of values fed through add(); accepts torch Tensors
    and autograd Variables."""

    def __init__(self):
        self.reset()

    def add(self, v):
        # Unwrap a Variable to its underlying tensor, then reduce the tensor
        # to an (element count, scalar sum) pair before accumulating.
        if isinstance(v, Variable):
            v = v.data
        if isinstance(v, torch.Tensor):
            count = v.numel()
            v = v.sum()
        self.n_count += count
        self.sum += v

    def reset(self):
        # Clear both accumulators.
        self.n_count = 0
        self.sum = 0

    def val(self):
        # Average of everything added since the last reset; 0 when empty.
        if self.n_count == 0:
            return 0
        return self.sum / float(self.n_count)
def props_to_onehot(props):
    """Convert a 2-D array of class probabilities to one-hot rows
    (1 at each row's argmax, 0 elsewhere)."""
    arr = np.array(props) if isinstance(props, list) else props
    winners = arr.argmax(axis=1)
    onehot = np.zeros((len(winners), arr.shape[1]))
    for row, col in enumerate(winners):
        onehot[row, col] = 1
    return onehot
def onehot_to_num(onehot):
    """Map each one-hot row to its class index, returned as an (n, 1) float column.

    Replaces the original per-row np.argwhere loop, which raised when a row
    did not contain exactly one 1 (e.g. an all-zero row) and was O(n*m) in
    Python; argmax is equivalent for valid one-hot input and vectorized.
    """
    if isinstance(onehot, list):
        onehot = np.array(onehot)
    # argmax finds the position of the single 1 in each row; keep the float
    # dtype and (n, 1) shape of the original implementation.
    return np.argmax(onehot, axis=1).astype(float).reshape(-1, 1)
def draw(preds, x_train_batch,x_label,ax):
    """Scatter-plot predicted class indices against a 2-cluster k-means
    baseline on the matplotlib axes `ax`.

    Args:
        preds: torch tensor of per-sample class probabilities.
        x_train_batch: torch tensor of the raw input samples.
        x_label: torch tensor scaling factor for the k-means baseline rows.
        ax: matplotlib Axes to draw on.

    NOTE(review): assumes KMeans (sklearn) and np/plt are imported at module
    level; preds is presumably (batch, n_classes) -- confirm with callers.
    """
    predsnp = preds.cpu().detach().numpy()
    x_train_batchnp = x_train_batch.cpu().detach().numpy()
    x_label = x_label.cpu().detach().numpy()
    # print(predsnp.shape, x_train_batchnp.shape) # (2000,6)
    # Collapse probabilities to a single class index per sample.
    predsnp = props_to_onehot(predsnp)
    # print(predsnp)
    predsnp = onehot_to_num(predsnp)
    # print(max(predsnp))
    # Run k-means on the raw data as a reference classification.
    estimator = KMeans(n_clusters=2) # build the clusterer
    estimator.fit(x_train_batchnp) # cluster
    label_pred = estimator.labels_ # fetch the cluster labels
    # Plot the k-means result.
    if label_pred[0]==1:
        # Flip labels so the first sample is always cluster 0 (stable colors).
        label_pred = 1-label_pred
    # plt.plot(np.argwhere(label_pred == 0), np.zeros(len(np.argwhere(label_pred == 0)))*x_label,'go-')
    # plt.plot(np.argwhere(label_pred == 1), np.ones(len(np.argwhere(label_pred == 1))) * x_label,'go-')
    ax.scatter(np.argwhere(label_pred == 0), np.zeros(len(np.argwhere(label_pred == 0)))*x_label, c="green", marker='o',s = 10, label='kmeans')
    ax.scatter(np.argwhere(label_pred == 1), np.ones(len(np.argwhere(label_pred == 1)))*x_label, c="green", marker='o',s = 10, label='kmeans')
    # Overlay the model predictions, one horizontal band per class index.
    for i in range(int(max(predsnp))+1):
        x= np.argwhere(predsnp == i)[:,0]
        y = np.ones(len(x))*i
        # plt.plot(x, y, c = "red")
        ax.scatter(x, y, c = "red", marker='.', label='pred',s = 5)
| 27.457317 | 143 | 0.584055 |
11dc7d7484bc78800544b03df7488f722be7a5ea | 2,729 | py | Python | down.py | pcahan1/CellNet_Cloud | a228953946b81ccb304fbd068e33766e134103b6 | [
"MIT"
] | 1 | 2020-11-13T10:53:27.000Z | 2020-11-13T10:53:27.000Z | down.py | pcahan1/CellNet_Cloud | a228953946b81ccb304fbd068e33766e134103b6 | [
"MIT"
] | 2 | 2020-06-28T18:17:59.000Z | 2020-12-18T14:11:29.000Z | down.py | pcahan1/CellNet_Cloud | a228953946b81ccb304fbd068e33766e134103b6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
import random
import argparse
import os
# Down-sample every FASTQ file in a directory to a fixed number of reads,
# writing the subsets to "subset_<input>/" and archiving them as a .tgz.
parser = argparse.ArgumentParser()
parser.add_argument("input", help="input FASTQ Directory")
parser.add_argument("-n", "--number", type=int, help="number of reads to sample")
args = parser.parse_args()
# Fixed seed so repeated runs pick the same records.
random.seed(12)
if not args.number:
    print("No sample size specified. Defaulting to five million reads.")
    args.number = 5000000
# LIST FILES TO BE DOWN-SAMPLED
fastq_files = os.listdir(args.input)
if int(len(fastq_files)) <= 0:
    print("No files in listed directory")
    exit()
# CREATE OUTPUT DIRECTORY
output_dir = "subset_"+args.input
os.mkdir(output_dir)
for fastq in fastq_files:
    # First pass: count lines to derive the number of 4-line FASTQ records.
    print("\tcounting records....")
    with open(args.input+"/"+fastq) as inRead:
        num_lines = sum([1 for line in inRead])
    print("Num lines:" + str(num_lines) )
    if int(num_lines % 4) != 0:
        print("FILE " + fastq + " CORRUPTED: Number of lines in FASTQ file not divisible by 4. Is file decompressed?")
        exit()
    total_records = int(num_lines / 4)
    number_to_sample = args.number
    print("\tsampling " + str(number_to_sample) + " out of " + str(total_records) + " records")
    try:
        # Choose which record indices to keep, then stream the file again,
        # copying only the selected 4-line records.
        records_to_keep = set(random.sample(range(total_records), number_to_sample))
        record_number = 0
        with open(args.input+"/"+fastq) as inFile:
            with open(output_dir+"/"+"subset_"+fastq, "w") as output:
                for tag in inFile:
                    # A FASTQ record is 4 consecutive lines: tag, bases,
                    # separator ('+'), and quality string.
                    bases = next(inFile)
                    sign = next(inFile)
                    quality = next(inFile)
                    if record_number in records_to_keep:
                        output.write(tag)
                        output.write(bases)
                        output.write(sign)
                        output.write(quality)
                    record_number += 1
    except ValueError as e:
        # random.sample raises ValueError when asked for more records than
        # exist; the exact message differs across Python versions, hence the
        # two string comparisons.
        if str(e) == "Sample larger than population or is negative":
            print("Desired number of reads is greater than number of reads in original file.")
            print("No down-sampling is necessary.")
        elif str(e) == "sample larger than population":
            print("Desired number of reads is greater than number of reads in original file.")
            print("No down-sampling is necessary.")
        else:
            raise
print("Compressing downsampled reads")
os.system("COPYFILE_DISABLE=1 tar cvfz compressed_reads.tgz "+output_dir)
if os.path.getsize("compressed_reads.tgz") >= 4000000000:
    print("WARNING: Your archive contains too many FASTQ files. Max size is 4GB.")
else:
    print("Archive file size is ~"+str(os.path.getsize("compressed_reads.tgz")/1000000000)+"GB")
| 35.907895 | 118 | 0.629901 |
11dce67a3e4c4459fb478df3826b5f61db5fbe5f | 777 | py | Python | Cms/Dtcms/apps/areas/views.py | Highsir/cms | 2d820212227ad2760cd762873365c0df0604c730 | [
"MIT"
] | null | null | null | Cms/Dtcms/apps/areas/views.py | Highsir/cms | 2d820212227ad2760cd762873365c0df0604c730 | [
"MIT"
] | null | null | null | Cms/Dtcms/apps/areas/views.py | Highsir/cms | 2d820212227ad2760cd762873365c0df0604c730 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from rest_framework.viewsets import ReadOnlyModelViewSet, ModelViewSet
from areas.models import Area
from areas.serializers import AreaSerializer, SubAreaSerializer
class AreasViewSet(ModelViewSet):
    """
    Administrative-division (area) information.
    """
    pagination_class = None # area data is not paginated
    queryset = Area.objects.all()
    def get_queryset(self):
        """
        Provide the queryset: top-level provinces for list requests,
        all areas otherwise.
        """
        if self.action == 'list':
            # When listing areas, return only province-level rows
            # (those with no parent).
            return Area.objects.filter(parent=None)
        else:
            return AreasViewSet.queryset
    def get_serializer_class(self):
        """
        Provide the serializer: the flat AreaSerializer for lists, the
        SubAreaSerializer (with children) for detail views.
        """
        if self.action == 'list':
            return AreaSerializer
        else:
            return SubAreaSerializer
| 22.2 | 70 | 0.6139 |
11dd785ec5b8ae06d2da8209269f7b856ed4f908 | 173 | py | Python | 01-logica-de-programacao-e-algoritmos/Aula 03/aula03-exemplo03.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | 01-logica-de-programacao-e-algoritmos/Aula 03/aula03-exemplo03.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | 01-logica-de-programacao-e-algoritmos/Aula 03/aula03-exemplo03.py | rafaelbarretomg/Uninter | 1f84b0103263177122663e991db3a8aeb106a959 | [
"MIT"
] | null | null | null | # par ou impar ( condicional simples)
# Read an integer and report its parity (even/odd) -- simple conditionals.
valor = int(input('Digite um valor inteiro: '))
if valor % 2 == 0:
    print('O numero é par!')
else:
    # For integers, valor % 2 is always 0 or 1, so this branch is exactly
    # the original `if (x % 2 == 1)` case.
    print('O numero é impar')
11ded52efac2b1e7adb5a0379b064cebcf41d701 | 900 | py | Python | zeta_python_sdk/oracle_utils.py | prettyirrelevant/zeta-python-sdk | 536967259c89d380b8853b1cfd0621c50143b8b9 | [
"Apache-2.0"
] | 2 | 2022-03-02T04:05:07.000Z | 2022-03-10T11:49:37.000Z | zeta_python_sdk/oracle_utils.py | prettyirrelevant/zeta-python-sdk | 536967259c89d380b8853b1cfd0621c50143b8b9 | [
"Apache-2.0"
] | null | null | null | zeta_python_sdk/oracle_utils.py | prettyirrelevant/zeta-python-sdk | 536967259c89d380b8853b1cfd0621c50143b8b9 | [
"Apache-2.0"
] | null | null | null | import math
from .exceptions import OutOfBoundsException
def ERR_BUFFER_OUT_OF_BOUNDS():
    # Factory (not raiser) for the project's buffer-bounds exception;
    # callers are expected to `raise ERR_BUFFER_OUT_OF_BOUNDS()`.
    return OutOfBoundsException()
def ERR_INVALID_ARG_TYPE(name: str, expected: str, actual):
    """Build (not raise) an Exception describing an argument of the wrong type."""
    message = f'The "{name}" argument must be of type {expected}. Received {actual}'
    return Exception(message)
def ERR_OUT_OF_RANGE(string: str, range: str, received: int):
    """Build (not raise) an Exception describing an out-of-range value."""
    message = f'The value of "{string}" is out of range. It must be of {range}. Received {received}'
    return Exception(message)
def validate_number(value, name):
    """Raise unless *value* is exactly of type int (bool/float/str all fail)."""
    # Deliberate strict type check, not isinstance: subclasses like bool
    # are rejected, matching the original `type(value) != int` behavior.
    if type(value) is not int:
        raise ERR_INVALID_ARG_TYPE(name, "int", value)
def bounds_error(value: int, length: int):
    # Raise the appropriate error for a bad buffer offset:
    # - non-integer offset -> type error (via validate_number) or range error
    # - integer offset with remaining length -> buffer out-of-bounds
    # - otherwise -> generic out-of-range error
    if math.floor(value) != value:
        validate_number(value, "offset")
        raise ERR_OUT_OF_RANGE("offset", "an integer", value)
    if length > 0:
        raise ERR_BUFFER_OUT_OF_BOUNDS()
    raise ERR_OUT_OF_RANGE("offset", f">= 0 and <= {length}", value)
| 26.470588 | 94 | 0.684444 |
11deda09dc4cd77f3a703e78c0ad5fb515e8de96 | 3,507 | py | Python | CSR/utility.py | MoreNiceJay/CAmanager_web | 29c6e35b9b1b9e8d851b2825df18e34699f6c5d2 | [
"bzip2-1.0.6"
] | null | null | null | CSR/utility.py | MoreNiceJay/CAmanager_web | 29c6e35b9b1b9e8d851b2825df18e34699f6c5d2 | [
"bzip2-1.0.6"
] | 3 | 2020-02-11T23:59:34.000Z | 2021-06-10T21:19:16.000Z | CSR/utility.py | MoreNiceJay/CAmanager_web | 29c6e35b9b1b9e8d851b2825df18e34699f6c5d2 | [
"bzip2-1.0.6"
] | null | null | null | from django.shortcuts import render
import sys, json, random, hashlib, calendar,time, datetime, os, random
import ast
from cryptography.fernet import Fernet
from django.shortcuts import redirect
from django.http import Http404, HttpResponse
import json
from cryptography.hazmat.primitives.serialization import Encoding, PrivateFormat, NoEncryption,load_pem_private_key
from cryptography import x509
from cryptography.x509.oid import NameOID
from cryptography.hazmat.primitives import serialization,hashes
from cryptography.x509 import load_pem_x509_csr
from cryptography.hazmat.primitives.asymmetric import rsa, ec
from cryptography.hazmat.backends import default_backend
def generate_private_key(algorithm):
    """Create a new private key for the named algorithm.

    Supported values: "RSA_2048", "RSA_4096", "ECDSA_P256", "ECDSA_P384".

    Raises:
        ValueError: for any other algorithm name. (The original code raised
        the undefined name `AlgorithmMismatchError`, which itself crashed
        with a NameError, so no caller could ever have caught it.)
    """
    if algorithm == "RSA_2048":
        return generate_RSA_private_key(2048)
    if algorithm == "RSA_4096":
        return generate_RSA_private_key(4096)
    if algorithm == "ECDSA_P256":
        return generate_ECP256_private_key()
    if algorithm == "ECDSA_P384":
        return generate_ECP384_private_key()
    raise ValueError("Unsupported key algorithm: {!r}".format(algorithm))
def generate_RSA_private_key(KEY_SIZE):
    # Generate an RSA private key of KEY_SIZE bits with the standard
    # public exponent 65537.
    private_key = rsa.generate_private_key(
        public_exponent=65537,
        key_size=KEY_SIZE,
        backend=default_backend()
    )
    return private_key
def generate_ECP384_private_key():
    # Generate a NIST P-384 elliptic-curve private key.
    private_key = ec.generate_private_key(
        ec.SECP384R1(), default_backend()
    )
    return private_key
def generate_ECP256_private_key():
    """Generate a NIST P-256 elliptic-curve private key.

    Bug fix: the original used ec.SECP384R1() here, so every "ECDSA_P256"
    request silently produced a P-384 key instead of a P-256 key.
    """
    private_key = ec.generate_private_key(
        ec.SECP256R1(), default_backend()
    )
    return private_key
def generate_pub_key(private_key):
    """Return the public half of *private_key*."""
    return private_key.public_key()
def encode_private_key_pem_format(private_key):
    # Serialize a private key to unencrypted PEM bytes in the
    # TraditionalOpenSSL (PKCS#1-style) format.
    pem = private_key.private_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PrivateFormat.TraditionalOpenSSL,
        encryption_algorithm=serialization.NoEncryption()
    )
    return pem
def encode_public_key_pem_format(public_key):
    # Serialize a public key to PEM bytes in SubjectPublicKeyInfo format.
    pem = public_key.public_bytes(
        encoding=serialization.Encoding.PEM,
        format=serialization.PublicFormat.SubjectPublicKeyInfo,
    )
    return pem
def encode_in_Base64(key_in_pem_format):
    # NOTE(review): despite the name this performs Fernet *encryption*, not
    # plain Base64 encoding. It reads the symmetric key from "base64.key"
    # in the current working directory and returns the token as a str.
    with open("base64.key", "rb") as f:
        key = f.read()
    f = Fernet(key)
    token = f.encrypt(key_in_pem_format)
    return token.decode()
def decode_Base64(encrypted_key_token):
    # Inverse of encode_in_Base64: Fernet-decrypts a token (str) using the
    # symmetric key stored in "base64.key", returning the original bytes.
    with open("base64.key", "rb") as f:
        key = f.read()
    f = Fernet(key)
    return f.decrypt(encrypted_key_token.encode())
def generate_CSR(country,state,locality,organization,common_name,domain,private_key):
    # Build and SHA-256-sign an X.509 certificate signing request whose
    # subject is assembled from the given name fields; `domain` is added as
    # a SubjectAlternativeName DNS entry.
    csr = x509.CertificateSigningRequestBuilder().subject_name(x509.Name([
        x509.NameAttribute(NameOID.COUNTRY_NAME, country),
        x509.NameAttribute(NameOID.STATE_OR_PROVINCE_NAME, state),
        x509.NameAttribute(NameOID.LOCALITY_NAME, locality),
        x509.NameAttribute(NameOID.ORGANIZATION_NAME, organization),
        x509.NameAttribute(NameOID.COMMON_NAME, common_name),
    ])).add_extension(
        x509.SubjectAlternativeName([
            # Describe what sites we want this certificate for.
            x509.DNSName(domain),
        ]),
        critical=False,
        # Sign the CSR with our private key.
    ).sign(private_key, hashes.SHA256(), default_backend())
    return csr
def encode_CSR_in_pem_format(temp_csr):
    """Serialize a CSR object to its PEM text representation."""
    pem_bytes = temp_csr.public_bytes(serialization.Encoding.PEM)
    return pem_bytes.decode()
11df93a40b853400f38b4c489077ebc7674cd549 | 51,584 | py | Python | uctp_ufabc/src/uctp.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | null | null | null | uctp_ufabc/src/uctp.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | 6 | 2018-10-30T00:37:20.000Z | 2019-07-23T00:23:18.000Z | uctp_ufabc/src/uctp.py | luizfmgarcia/uctp_ufabc | 2342f5431e258a4feffdf4e7931344a9d03a8f9c | [
"MIT"
] | 1 | 2019-06-06T00:54:13.000Z | 2019-06-06T00:54:13.000Z | # UCTP Main Methods
import objects
import ioData
import random
# Set '1' to allow, during the run, the print on terminal of some steps
printSteps = 0
#==============================================================================================================
# Create the first generation of solutions
def start(solutionsNoPop, subjList, profList, init):
    """Fill `solutionsNoPop` with `init` fully random candidate solutions."""
    if(printSteps == 1): print("Creating first generation...", end='')
    for _ in range(init): solutionsNoPop.addCand(newCandRand(subjList, profList))
    if(printSteps == 1): print("Created first generation!")
#-------------------------------------------------------
# Create new Candidate Full-Random
def newCandRand(subjList, profList):
    """Build a Candidate that assigns a uniformly random professor to every subject."""
    candidate = objects.Candidate()
    # Follow the subjects in 'subjList', in order, and for each one, choose a professor randomly
    for sub in subjList: candidate.addRelation(sub, profList[random.randrange(len(profList))])
    return candidate
#==============================================================================================================
# Extracts info about what Subj appears in which Prof PrefList
def extractSubjIsPref(subjList, profList):
    """Build a professors-by-subjects preference matrix.

    Returns a len(profList) x len(subjList) list of lists where entry
    [p][s] is 2 if subject s appears in professor p's preference list for
    s's own quadrimester, 1 if it appears only for a different
    quadrimester, and 0 otherwise.

    NOTE(review): assumes getPrefSubjLists() yields 4 lists -- one per
    quadrimester (indices 0..2) plus a quadri-independent list (index 3) --
    confirm against the Professor class.
    """
    # Lists for each Prof, where it is '1' if Subj in respective index is on Prof List of Pref but not same Quadri
    # '2' if same quadri
    subjIsPrefList = [[0 for _ in range(len(subjList))] for _ in range(len(profList))]
    # Counting the occurrences, filling the vectors
    for pIndex in range(len(profList)):
        # Getting data of current Prof
        prefSubjLists = [i for i in profList[pIndex].getPrefSubjLists()]
        # All Relations of one Prof
        for sIndex in range(len(subjList)):
            # Getting data of current Subj
            sName = subjList[sIndex].getName()
            sQuadri = subjList[sIndex].getQuadri()
            # For each quadri
            for i in range(3):
                # Finding the Subject 'sName' in "pPrefSubjQXList+pPrefSubjLimList" list
                sumList = prefSubjLists[i] + prefSubjLists[3]
                # Checking if the List is not empty
                if(len(sumList) > 0):
                    try: index_value = sumList.index(sName)
                    except ValueError: index_value = -1
                    # If the Subj name appears in the list
                    if(index_value != -1):
                        # If current Subject in analysis is on current Quadri
                        if(str(i+1) in sQuadri):
                            # Informing that the Subj appears on respective Prof-QuadriPrefList
                            subjIsPrefList[pIndex][sIndex] = 2
                        # Informing that the Subj appears on other Prof-QuadriPrefList that is not same Quadri
                        else:
                            # Granting that do not decrease a value 2 already set
                            if(subjIsPrefList[pIndex][sIndex] == 0): subjIsPrefList[pIndex][sIndex] = 1
    return subjIsPrefList
#==============================================================================================================
# Separation of solutions into 2 populations
def twoPop(solutionsNoPop, infPool, feaPool, profList, subjList, weightsList, numInfWeights):
    """Split every candidate in `solutionsNoPop` into `feaPool`/`infPool`.

    Both destination pools are cleared before classification, and
    `solutionsNoPop` is emptied at the end.
    """
    # Granting that the Lists will be empty to receive new Solutions
    infPool.resetCandList()
    feaPool.resetCandList()
    for cand in solutionsNoPop.getCandList():
        # Classification by checking feasibility
        pop = checkFeasibility(cand, profList, subjList, weightsList, numInfWeights)
        if(pop == "feasible"): feaPool.addCand(cand)
        elif(pop == "infeasible"): infPool.addCand(cand)
    # Granting that the List will be empty to next operations
    solutionsNoPop.resetCandList()
    if(printSteps == 1): print("Checked Feasibility (new Candidates)/", end='')
#==============================================================================================================
# Detect the violation of a Restriction into a candidate
def checkFeasibility(candidate, profList, subjList, weightsList, numInfWeights):
    """Classify `candidate`, returning "feasible" or "infeasible".

    Side effect: an infeasible candidate gets its (negative) fitness stored
    immediately, so calcFit() does not need to recompute it.
    """
    # As part of the Candidate's Prof-Subj relations (with both Feasible and the Infeasible) will be traversed to check they Feasibility here,
    # instead of re-pass an entire Infeasible Candidate again in the 'calc_fitInfeas', the calculation of its Fitness will already be done
    # only one time here. Only the Feasible ones will have to pass through 'calc_fitFeas' later.
    fit = -1
    fit = calc_fitInfeas(candidate, profList, subjList, weightsList[:numInfWeights])
    if(fit < 0):
        candidate.setFitness(fit)
        return "infeasible"
    return "feasible"
#==============================================================================================================
# Calculate the Fitness of the candidate
def calcFit(infeasibles, feasibles, profList, subjList, weightsList, numInfWeights, subjIsPrefList):
    """Compute and store fitness for every candidate whose fitness is still 0.0.

    Infeasible candidates use the first `numInfWeights` weights via
    calc_fitInfeas(); feasible candidates use the remaining weights via
    calc_fitFeas() (defined elsewhere in this module).
    """
    # All Infeasible Candidates - is here this code only for the representation of the default/original algorithm`s work
    # The Inf. Fitness calc was already done in 'checkFeasibility()' method
    # Check if the Infeasible pop. is empty
    if(len(infeasibles.getCandList()) != 0):
        for cand in infeasibles.getCandList():
            if(cand.getFitness() == 0.0):
                # Setting the Fitness with the return of calc_fitInfeas() method
                cand.setFitness(calc_fitInfeas(cand, profList, subjList, weightsList[:numInfWeights]))
    if(printSteps == 1): print("Fitness of all Inf./", end='')
    # All Feasible Candidates
    # Check if the Feasible pop. is empty
    if(len(feasibles.getCandList()) != 0):
        for cand in feasibles.getCandList():
            if(cand.getFitness() == 0.0):
                # Setting the Fitness with the return of calc_fitFeas() method
                cand.setFitness(calc_fitFeas(cand, profList, subjList, weightsList[numInfWeights:], subjIsPrefList))
    if(printSteps == 1): print("Fitness of all Feas./", end='')
#==============================================================================================================
# Calculate Fitness of Infeasible Candidates
def calc_fitInfeas(candidate, profList, subjList, weightsList):
    """Return the infeasibility fitness of `candidate`.

    Returns 1.0 when no hard constraint is violated (candidate is feasible),
    otherwise a negative weighted average of the three violation measures
    i1, i2, i3. Side effect: stores the raw violation data on the candidate
    via setInfVariables().
    """
    # Getting information about the Candidate
    prof_relationsList = calc_i1(candidate, profList, subjList)
    i2_conflictsList, i3_conflictsList = calc_i2_i3(prof_relationsList, subjList)
    # Setting found variables
    candidate.setInfVariables(prof_relationsList, i2_conflictsList, i3_conflictsList)
    # Checking if occurred violations of restrictions on the Candidate
    # If there are violated restrictions, this Candidate is Infeasible and then will calculate and return a negative Fitness,
    # if not, is Feasible, will return 1.0 as Fitness
    if(prof_relationsList.count([]) != 0 or i2_conflictsList.count([]) != len(i2_conflictsList) or i3_conflictsList.count([]) != len(i3_conflictsList)):
        # Calculating main variables
        i1 = float(prof_relationsList.count([]) / (len(profList) - 1.0))
        i2 = float(sum([len(i) for i in i2_conflictsList]) / len(subjList))
        i3 = float(sum([len(i) for i in i3_conflictsList]) / len(subjList))
        i = [i1, i2, i3]
        # Final Infeasible Function Fitness Calc
        Fi = -1.0 * sum([i[j] * weightsList[j] for j in range(len(i))]) / sum([w for w in weightsList])
        # Returning the calculated result
        return Fi
    # If all Relations Prof-Subj in this Candidate passed through the restrictions)
    return 1.0
#-------------------------------------------------------
# i1: penalty to how many Professors does not have at least one relation with a Subject
def calc_i1(candidate, profList, subjList):
    """Group the candidate's relations by professor.

    Returns a list parallel to profList: entry p holds the subjList indices of
    the subjects related to professor p. An empty entry means that professor
    does not appear in the candidate at all (a restriction violation).
    """
    grouped = [[] for _ in profList]
    # Walk every (subject, professor) relation of the candidate
    for subject, professor in candidate.getRelationsList():
        grouped[profList.index(professor)].append(subjList.index(subject))
    return grouped
#-------------------------------------------------------
# i2: penalty to how many Subjects, related to the same Professor, are teach in the same day, hour and quadri
# i3: penalty to how many Subjects, related to the same Professor, are teach in the same day and quadri but in different campus
def calc_i2_i3(prof_relationsList, subjList):
    """Detect schedule conflicts among the subjects related to each professor.

    i2: pairs of subjects of the same professor taught on the same day, hour,
        quadri and period (biweekly 'QUINZENAL I' vs 'QUINZENAL II' alternate
        weeks and therefore do NOT clash).
    i3: pairs of subjects of the same professor taught on the same day and
        quadri but on different campuses.

    Returns two lists parallel to prof_relationsList with the deduplicated
    subject indices involved in i2/i3 conflicts per professor.
    """
    # Both members of every conflicting pair are appended, so a subject can be
    # repeated across different pairs; duplicates are removed at the end.
    i2_conflictsList = [[] for _ in range(len(prof_relationsList))]
    i3_conflictsList = [[] for _ in range(len(prof_relationsList))]
    # Search, one professor at a time, for conflicts between its related subjects.
    # FIX: enumerate() instead of prof_relationsList.index(list_subj) -- .index()
    # returns the FIRST matching list, so two professors with identical relation
    # lists would both be attributed to the first one.
    for profIndex, list_subj in enumerate(prof_relationsList):
        # A conflict requires at least two relations for the professor
        if(len(list_subj) > 1):
            # Data of every subject related to the current professor
            timetableList_List = [subjList[i].getTimeTableList() for i in list_subj]
            quadri_List = [subjList[i].getQuadri() for i in list_subj]
            campus_List = [subjList[i].getCampus() for i in list_subj]
            period_List = [subjList[i].getPeriod() for i in list_subj]
            # Compare each subject (i) with every following subject (k)
            for i, timeTable in enumerate(timetableList_List):
                # All [day/hour/frequency] entries of the timetable of subject (i)
                i_day = [entry[0] for entry in timeTable]
                i_hour = [entry[1] for entry in timeTable]
                i_frequency = [entry[2] for entry in timeTable]
                k = i + 1
                for nextK in timetableList_List[k:]:
                    # Subjects can only clash when they run on the same quadri
                    if(quadri_List[i] == quadri_List[k]):
                        # Flags so the same pair is recorded at most once
                        verified_i2, verified_i3 = False, False
                        inext_day = [entry[0] for entry in nextK]
                        inext_hour = [entry[1] for entry in nextK]
                        inext_frequency = [entry[2] for entry in nextK]
                        # One-to-one timetable comparison between subjects i and k
                        for a in i_day:
                            for b in inext_day:
                                if(a == b):
                                    # i3: same day and quadri but different campus
                                    if(campus_List[i] != campus_List[k]):
                                        if(verified_i3 == False):
                                            i3_conflictsList[profIndex].append(list_subj[i])
                                            i3_conflictsList[profIndex].append(list_subj[k])
                                            verified_i3 = True
                                    # i2: additionally same period and same hour
                                    # NOTE: i_day.index(a) finds the FIRST entry of that day;
                                    # assumes at most one timetable entry per day per subject
                                    if(period_List[i] == period_List[k] and i_hour[i_day.index(a)] == inext_hour[inext_day.index(b)]):
                                        freq_i = i_frequency[i_day.index(a)]
                                        freq_k = inext_frequency[inext_day.index(b)]
                                        # FIX: 'QUINZENAL I' is a substring of 'QUINZENAL II', so the
                                        # original substring tests wrongly flagged a QUINZENAL I vs
                                        # QUINZENAL II pair (which alternate weeks) as a conflict.
                                        is_q2_i, is_q2_k = 'QUINZENAL II' in freq_i, 'QUINZENAL II' in freq_k
                                        is_q1_i = 'QUINZENAL I' in freq_i and not is_q2_i
                                        is_q1_k = 'QUINZENAL I' in freq_k and not is_q2_k
                                        clash = ('SEMANAL' in freq_i or 'SEMANAL' in freq_k
                                                 or (is_q1_i and is_q1_k) or (is_q2_i and is_q2_k))
                                        if(clash and verified_i2 == False):
                                            i2_conflictsList[profIndex].append(list_subj[i])
                                            i2_conflictsList[profIndex].append(list_subj[k])
                                            verified_i2 = True
                    # Going to the next Subject (k+1) to compare with the same Subject (i)
                    k = k + 1
    # Removing duplicated subject indices per professor
    final_i2 = [[] for _ in range(len(prof_relationsList))]
    final_i3 = [[] for _ in range(len(prof_relationsList))]
    for p in range(len(prof_relationsList)):
        for s in i2_conflictsList[p]:
            if(final_i2[p].count(s) == 0): final_i2[p].append(s)
        # FIX: the original tested final_i3.count(j) (the OUTER list of lists),
        # which is always 0 for a subject index, so i3 duplicates were never removed
        for s in i3_conflictsList[p]:
            if(final_i3[p].count(s) == 0): final_i3[p].append(s)
    return final_i2, final_i3
#==============================================================================================================
# Calculate Fitness of Feasible Candidates
def calc_fitFeas(candidate, profList, subjList, weightsList, subjIsPrefList):
    """Compute the fitness of a feasible candidate.

    Evaluates the seven "quality amplifier" terms f1..f7, stores the detailed
    per-professor data back on the candidate, and returns the weighted,
    normalized sum of the terms.
    """
    prof_relationsList, _, _, _, _, _ = candidate.getFeaVariables()
    # Collect every quality measurement over the candidate's relations
    sum_chargesRelative, difChargeList = calc_f1(subjList, profList, prof_relationsList)
    sum_Satisfaction, numSubjPrefList = calc_f2(subjList, profList, prof_relationsList, subjIsPrefList)
    sum_quadSabbNotPref, quadSabbNotPrefList = calc_f3(subjList, profList, prof_relationsList)
    sum_periodPref, periodPrefList = calc_f4(subjList, profList, prof_relationsList)
    sum_campusPref, campPrefList = calc_f5(subjList, profList, prof_relationsList)
    sum_relationsRelative, _ = calc_f6(subjList, profList, prof_relationsList)
    sum_qualityRelative, _ = calc_f7(subjList, profList, prof_relationsList, subjIsPrefList)
    # Persist the detailed per-professor data back on the candidate
    candidate.setFeaVariables(prof_relationsList, numSubjPrefList, periodPrefList, quadSabbNotPrefList, campPrefList, difChargeList)
    nProfs = float(len(profList))
    nSubjs = float(len(subjList))
    # The seven normalized quality terms (f1 and f6 reward LOW discrepancy)
    terms = [
        1.0 - sum_chargesRelative / nProfs,    # f1: charge balance
        sum_Satisfaction / nProfs,             # f2: preference satisfaction
        sum_quadSabbNotPref / nSubjs,          # f3: sabbath-quadri compliance
        sum_periodPref / nSubjs,               # f4: period preference
        sum_campusPref / nSubjs,               # f5: campus preference
        1.0 - sum_relationsRelative / nProfs,  # f6: relation-count balance
        sum_qualityRelative / nProfs,          # f7: relation quality
    ]
    # Weighted average of the terms
    return sum(t * w for t, w in zip(terms, weightsList)) / sum(weightsList)
#-------------------------------------------------------
# f1: how balanced is the distribution of Subjects, considering the "Charge" of each Professor and its Subj related
def calc_f1(subjList, profList, prof_relationsList):
    """f1: how balanced the subject distribution is, comparing each professor's
    requested charge with the total charge of the subjects related to him.

    Returns (sum of the relative charge discrepancies, the per-professor signed
    difference between requested and effective charge).
    """
    # Requested charge of each professor
    requested = [prof.getCharge() for prof in profList]
    # Effective charge: total charge of all subjects related to the professor
    effective = [sum(subjList[s].getCharge() for s in rel) for rel in prof_relationsList]
    # Signed difference between requested and effective charge
    difChargeList = [req - eff for req, eff in zip(requested, effective)]
    # Relative discrepancy per professor, capped at 1.0
    discrepancies = [min(abs(dif) / float(req), 1.0) for dif, req in zip(difChargeList, requested)]
    return sum(discrepancies), difChargeList
#-------------------------------------------------------
# f2: how many and which Subjects are the professors preference, considering "prefSubj..." Lists
def calc_f2(subjList, profList, prof_relationsList, subjIsPrefList):
    """f2: professor satisfaction with the related subjects, based on the
    ordered "prefSubj..." preference lists (one per quadri plus a limited list).

    Returns a tuple:
        sum_Satisfaction -- sum over all professors of the normalized satisfaction
        numSubjPrefList  -- per professor, how many related subjects appear on
                            his preference lists
    NOTE: 'subjIsPrefList' is kept for signature compatibility but is unused here.
    """
    # qX_relations[q][p] is a 0/1 list parallel to professor p's preference list
    # for quadri q+1 (his quadri list concatenated with his "limited" list):
    # 1 marks a preferred subject actually related to him in the candidate
    qX_relations = [[[] for _ in range(len(profList))] for _ in range(3)]
    # List with the number of subjects that are on respective Prof's List of Preferences
    numSubjPrefList = [0 for _ in range(len(profList))]
    # Counting the occurrences, filling the vectors
    # FIX: enumerate() instead of prof_relationsList.index(relations) -- .index()
    # returns the first matching list, giving the wrong professor index when two
    # professors have identical relation lists
    for pIndex, relations in enumerate(prof_relationsList):
        # Preference lists of the current professor: [q1, q2, q3, limited]
        prefSubjLists = [i for i in profList[pIndex].getPrefSubjLists()]
        # One slot per entry of "quadri list + limited list", for each quadri
        for i in range(3):
            qX_relations[i][pIndex] = [0 for _ in range(len(prefSubjLists[i]) + len(prefSubjLists[3]))]
        # Scan all relations of this professor
        for sIndex in relations:
            sName = subjList[sIndex].getName()
            sQuadri = subjList[sIndex].getQuadri()
            for i in range(3):
                # Look only at the preference list of the subject's own quadri
                if(str(i+1) in sQuadri):
                    sumList = prefSubjLists[i] + prefSubjLists[3]
                    # Checking if the list is not empty
                    if(len(sumList) > 0):
                        try: index_value = sumList.index(sName)
                        except ValueError: index_value = -1
                        # If the Subj name appears in the list
                        if(index_value != -1):
                            # Mark the preferred subject at its preference position
                            qX_relations[i][pIndex][index_value] = 1
                            numSubjPrefList[pIndex] = numSubjPrefList[pIndex] + 1
    # Satisfaction per quadri per professor, weighted by preference order
    # (position 0 of the preference list weighs the most)
    finalQX = [[0.0 for _ in range(len(profList))] for _ in range(3)]
    for i in range(3):
        # FIX: enumerate() instead of qX_relations[i].index(...) for the same
        # first-match reason as above
        for prof_index, list_choice_relation in enumerate(qX_relations[i]):
            len_current_list = len(list_choice_relation)
            total_weight = 0
            # An empty preference list counts as full satisfaction
            if(len_current_list == 0): finalQX[i][prof_index] = 1.0
            else:
                # FIX: enumerate() instead of list_choice_relation.index(h) --
                # .index() returns the FIRST occurrence of the value (0 or 1),
                # so every repeated flag reused the same position, corrupting
                # both the weights and the normalizer
                for pref_index, h in enumerate(list_choice_relation):
                    # Total weight of the list, used to normalize (+1: first index is 0)
                    total_weight = total_weight + pref_index + 1
                    # Related subjects add a weight that decreases with list position
                    if(h == 1):
                        finalQX[i][prof_index] = finalQX[i][prof_index] + (len_current_list - pref_index + 1)
                # Normalize after all weights of this professor were summed
                finalQX[i][prof_index] = float(finalQX[i][prof_index] / total_weight)
    # Final satisfaction of each professor: average of the three quadri values
    final_Satisf = [float((finalQX[0][i] + finalQX[1][i] + finalQX[2][i]) / 3.0) for i in range(len(finalQX[0]))]
    # Global satisfaction: sum over all professors
    sum_Satisfaction = sum(final_Satisf)
    return sum_Satisfaction, numSubjPrefList
#-------------------------------------------------------
# f3: how many Subjects are teach in a "Quadri" that is not the same of Professors 'quadriSabbath'
def calc_f3(subjList, profList, prof_relationsList):
    """f3: count, per professor, the related subjects whose quadri differs from
    the professor's sabbatical quadri ('NENHUM' professors accept any quadri).

    Returns (total count over all professors, per-professor subject-index lists).
    """
    quadSabbNotPrefList = [[] for _ in profList]
    for idx, related in enumerate(prof_relationsList):
        sabbath = profList[idx].getQuadriSabbath()
        # 'NENHUM' means the professor did not reserve any quadri
        no_restriction = 'NENHUM' in sabbath
        for s in related:
            if no_restriction or subjList[s].getQuadri() != sabbath:
                quadSabbNotPrefList[idx].append(s)
    total = sum(len(lst) for lst in quadSabbNotPrefList)
    return total, quadSabbNotPrefList
#-------------------------------------------------------
# f4: how many Subjects are teach in the same "Period" of the Professor preference "pPeriod"
def calc_f4(subjList, profList, prof_relationsList):
    """f4: count, per professor, the related subjects taught in the professor's
    preferred period (a 'NEGOCIAVEL' professor accepts any period).

    Returns (total count over all professors, per-professor subject-index lists).
    """
    periodPrefList = [[] for _ in profList]
    for idx, related in enumerate(prof_relationsList):
        wanted = profList[idx].getPeriod()
        # 'NEGOCI...' marks a professor with no period restriction
        flexible = 'NEGOCI' in wanted
        for s in related:
            if flexible or subjList[s].getPeriod() == wanted:
                periodPrefList[idx].append(s)
    total = sum(len(lst) for lst in periodPrefList)
    return total, periodPrefList
#-------------------------------------------------------
# f5: how many Subjects are teach in the same "Campus" of the Professor preference "prefCampus"
def calc_f5(subjList, profList, prof_relationsList):
    """f5: count, per professor, the related subjects taught on the professor's
    preferred campus.

    Returns (total count over all professors, per-professor subject-index lists).
    """
    campPrefList = [[] for _ in profList]
    for idx, related in enumerate(prof_relationsList):
        wanted = profList[idx].getPrefCampus()
        # Keep only the relations whose subject runs on the preferred campus
        campPrefList[idx] = [s for s in related if subjList[s].getCampus() == wanted]
    total = sum(len(lst) for lst in campPrefList)
    return total, campPrefList
#-------------------------------------------------------
# f6: average of relations between profs
def calc_f6(subjList, profList, prof_relationsList):
    """f6: how balanced the NUMBER of relations is across professors, relative
    to the ideal average of len(subjList)/len(profList) subjects per professor.

    Returns (sum of the capped relative deviations, the per-professor signed
    deviation from the ideal).
    """
    # Ideal number of subjects per professor
    ideal = len(subjList) / float(len(profList))
    # Signed deviation of each professor's relation count from the ideal
    difNumRel = [len(rel) - ideal for rel in prof_relationsList]
    # Relative deviation, capped at 1.0
    capped = [min(abs(d) / ideal, 1.0) for d in difNumRel]
    return sum(capped), difNumRel
#-------------------------------------------------------
# f7: quality of relations (subj appears in some list of pref or/and same quadri)
def calc_f7(subjList, profList, prof_relationsList, subjIsPrefList):
    """f7: average relation quality per professor.

    subjIsPrefList[p][s] holds the quality of relating subject s to professor p
    (2 is the maximum: same quadri AND on the preference list), so each
    professor's total is normalized by 2 * number of relations.
    Returns (sum of per-professor relative qualities, the per-professor list).
    NOTE: 'subjList' and 'profList' are kept for signature consistency only.
    """
    qualityRelative = []
    for p, related in enumerate(prof_relationsList):
        total = sum(subjIsPrefList[p][s] for s in related)
        qualityRelative.append(total / (2.0 * len(related)))
    return sum(qualityRelative), qualityRelative
#==============================================================================================================
# Generate new solutions from the current Infeasible population
def offspringI(solutionsNoPop, solutionsI, profList, subjList, subjIsPrefList, mutWithRand):
    """Generate offspring from the infeasible population: each infeasible
    candidate contributes one mutated child, appended to 'solutionsNoPop'."""
    parents = solutionsI.getCandList()
    # An empty infeasible population produces no children
    if len(parents) != 0:
        for parent in parents:
            # The mutation tries to repair one violated restriction of the parent
            child = mutationI(parent, profList, subjList, subjIsPrefList, mutWithRand)
            solutionsNoPop.addCand(child)
    if(printSteps == 1): print("Inf. Offspring/", end='')
#==============================================================================================================
# Generate new solutions from the current Feasible population
def offspringF(solutionsNoPop, solutionsF, profList, subjList, subjIsPrefList, maxNumCand_perPop, pctParentsCross, reposCross, twoPointsCross, mutWithRand):
    """Generate offspring from the feasible population into 'solutionsNoPop'.

    A percentage ('pctParentsCross') of the feasible candidates is selected by
    roulette wheel to become parents and produce children via crossover; every
    candidate NOT selected as a parent produces one child via mutation instead.
    Does nothing (besides the step print) when the feasible population is empty.
    """
    # Check if the Feasible pop. is empty
    if(len(solutionsF.getCandList()) != 0):
        # 'objectiveNum': number of solutions to become parents - based on 'pctParentsCross'
        objectiveNum = int(pctParentsCross * len(solutionsF.getCandList()) / 100)
        # Turning 'objectiveNum' to Even if it is Odd -> summing +1 to it only if the new 'objectiveNum' is not bigger then len(solutionsF)
        # (crossover consumes parents two at a time, so the count must be even)
        if(objectiveNum % 2 != 0):
            if((objectiveNum + 1) <= len(solutionsF.getCandList())): objectiveNum = objectiveNum + 1
            else: objectiveNum = objectiveNum - 1
        # Granting that are solutions enough to became fathers (more than or equal 2)
        if(objectiveNum < 2):
            # If have at most 1 solution (insufficient to make any crossover) - then all solutions will generate a child through a mutation
            for cand in solutionsF.getCandList(): solutionsNoPop.addCand(mutationF(cand, profList, subjList, subjIsPrefList,mutWithRand))
        # If we have at least 2 solutions
        else:
            # Roulette Wheel to choose solutions to become Parents
            fitnessList = [cand.getFitness() for cand in solutionsF.getCandList()]
            parentsSolFeas, notParents_objectsList, _ = rouletteWheel(solutionsF.getCandList(), fitnessList, objectiveNum, reposCross)
            # Solutions 'children' created by crossover
            childSolFeas = []
            # Make a Crossover (create two new candidates) for each pair of parents candidates randomly choose
            # Granting the number of children is equal of parents
            while(len(childSolFeas) != objectiveNum):
                # If there are only 2 parents, make a crossover between them
                if(len(parentsSolFeas) <= 2): parent1, parent2 = 0, 1
                # If there are more then 2, choosing the parents Randomly
                else:
                    parent1, parent2 = random.randrange(len(parentsSolFeas)), random.randrange(len(parentsSolFeas))
                    # Granting the second parent is not the same of first one
                    while(parent1 == parent2): parent2 = random.randrange(len(parentsSolFeas))
                # Making the Crossover with the selected parents
                newCand1, newCand2 = crossover(parentsSolFeas[parent1], parentsSolFeas[parent2], twoPointsCross)
                # Removing used parents to make a new selection of Parents
                # (the parent2 OBJECT is saved first because removing parent1 shifts the indices)
                parent2 = parentsSolFeas[parent2]
                parentsSolFeas.remove(parentsSolFeas[parent1])
                parentsSolFeas.remove(parent2)
                # adding the new candidates generated to childSolFeas
                childSolFeas.append(newCand1)
                childSolFeas.append(newCand2)
            # Adding the child generated by crossover to 'solutionsNoPop'
            for cand in childSolFeas:
                solutionsNoPop.addCand(cand)
            # Make Mutation with all the candidates that were not chosen to be Parents right before
            for cand in notParents_objectsList:
                # Making a not random mutation
                newCand = mutationF(cand, profList, subjList, subjIsPrefList,mutWithRand)
                # Adding the child not generated by crossover to 'solutionsNoPop'
                solutionsNoPop.addCand(newCand)
    if(printSteps == 1): print("Feas. Offspring/", end='')
#==============================================================================================================
# Make a mutation into a infeasible candidate
def mutationI(candidate, profList, subjList, subjIsPrefList, mutWithRand=1):
    """Mutate an infeasible candidate, trying to repair one violated restriction.

    mutWithRand selects the strategy pool: 0 = deterministic repairs only,
    1 = deterministic repairs plus a fully random change, 2 = random change only.
    Returns the new Candidate produced by the chosen mutation.
    NOTE(review): when the drawn 'problemType' has nothing to repair the loop
    redraws; with mutWithRand == 0 and no remaining problems this could spin
    forever -- presumably infeasible candidates always have at least one
    violation (that is why they are infeasible); confirm.
    """
    # Getting data to work with (copy of the relations so the parent stays untouched)
    relations = candidate.getRelationsList()[:]
    prof_relationsList, i2_conflictsList, i3_conflictsList = candidate.getInfVariables()
    # This While ensures that 'problemType' will choose Randomly one 'restriction repair'
    flag_work_done = False
    while(flag_work_done == False):
        # Choosing one type of restriction to repair
        if(mutWithRand == 0): problemType = random.randrange(1,4)
        if(mutWithRand == 1): problemType = random.randrange(0,4)
        if(mutWithRand == 2): problemType = 0
        # (0) No repair -> Random Change
        if(problemType == 0): flag_work_done, newCand = mutationRand(candidate, profList)
        # (1) Prof without relations (with no Subjects) in 'prof_relationsList'
        elif(problemType == 1):
            # Granting that the 'problemType' do not change good relations without restrictions to repair
            if(prof_relationsList.count([]) != 0):
                # Take a relation from an overloaded professor and hand it to an underloaded one
                flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, prof_relationsList)
        else:
            # (2) 2 or more Subjects (related to the same Prof) with same 'quadri', 'day' and 'hour' in 'i2_conflictsList'
            if(problemType == 2): iX_conflictsList = i2_conflictsList
            # (3) 2 or more Subjects (related to the same Prof) with same 'day' and 'quadri' but different 'campus' in 'i3_conflictsList'
            if(problemType == 3): iX_conflictsList = i3_conflictsList
            # Granting that the 'problemType' do not change good relations without restrictions to repair
            if(len(iX_conflictsList) != 0 and iX_conflictsList.count([]) != len(iX_conflictsList)):
                flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, iX_conflictsList)
    return newCand
#==============================================================================================================
# Make a mutation into a feasible candidate
def mutationF(candidate, profList, subjList, subjIsPrefList, mutWithRand=1):
    """Mutate a feasible candidate, trying to improve one quality aspect.

    mutWithRand selects the strategy pool: 0 = targeted improvements only,
    1 = targeted improvements plus a fully random change, 2 = random change only.
    Returns the new Candidate produced by the chosen mutation.
    """
    # Getting data to work with (copy of the relations so the parent stays untouched)
    relations = candidate.getRelationsList()[:]
    prof_relationsList, _, periodPrefList, quadSabbNotPrefList, campPrefList, _ = candidate.getFeaVariables()
    # This While ensures that 'adjustType' will choose Randomly one 'Improvement work'
    flag_work_done = False
    while(flag_work_done == False):
        # Choosing one type of 'Improvement work'
        if(mutWithRand == 0): adjustType = random.randrange(1,6)
        if(mutWithRand == 1): adjustType = random.randrange(0,6)
        if(mutWithRand == 2): adjustType = 0
        # (0) No 'Improvement work' -> Random Change
        if(adjustType == 0): flag_work_done, newCand = mutationRand(candidate, profList)
        # (1) Improving number of Relations
        elif(adjustType == 1): flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, prof_relationsList)
        # (2) Improving number of Subj Preferences
        elif(adjustType == 2):
            # Building a list with relations that is NOT Pref
            notPrefList = [[subjIndex for subjIndex in prof_relationsList[i] if subjIsPrefList[i][subjIndex] == 0] for i in range(len(prof_relationsList))]
            # Granting that the 'adjustType' do not change good relations without Problems to improve
            if(notPrefList.count([]) != len(notPrefList)):
                # FIX: target the non-preferred relations themselves; the original passed
                # 'prof_relationsList' here, so preferred relations could be mutated too
                # (the parallel branch below passes 'notPrefList')
                flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, notPrefList)
        else:
            # (3) Improving number of Periods
            if(adjustType == 3): XPref = periodPrefList
            # (4) Improving number of QuadSabb
            if(adjustType == 4): XPref = quadSabbNotPrefList
            # (5) Improving number of Campus
            if(adjustType == 5): XPref = campPrefList
            if(len(XPref) != 0):
                # Building a list with each professor's relations that are NOT on its XPref list
                # FIX: the original tested '[i].count(subjIndex)' -- a literal one-element list
                # holding the PROFESSOR index -- which compared subject indices against the
                # professor index; the intended membership test is against XPref[i]
                notPrefList = [[subjIndex for subjIndex in prof_relationsList[i] if XPref[i].count(subjIndex) == 0] for i in range(len(prof_relationsList))]
                # NOTE(review): for adjustType 4 'quadSabbNotPrefList' already lists the
                # offending subjects, so taking its complement may be inverted -- confirm intent
                # Granting that the 'adjustType' do not change good relations without Problems to improve
                if(notPrefList.count([]) != len(notPrefList)):
                    flag_work_done, newCand = mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, notPrefList)
    return newCand
#==============================================================================================================
# Make a selection of the solutions from all Infeasible Pop.('infPool' and 'solutionsI')
def selectionI(infPool, solutionsI, maxNumCand_perPop, reposSelInf):
    """Select at most 'maxNumCand_perPop' infeasible candidates from the union
    of 'infPool' and 'solutionsI' (roulette wheel), updating 'solutionsI'."""
    pool = solutionsI.getCandList() + infPool.getCandList()
    # Nothing to do when both populations are empty
    if len(pool) != 0:
        if len(pool) > maxNumCand_perPop:
            # Infeasible fitness lies in [-1, 0]; shift it to [0, 1] so the
            # roulette wheel can use the values as weights
            weights = [1.0 + cand.getFitness() for cand in pool]
            pool, _, _ = rouletteWheel(pool, weights, maxNumCand_perPop, reposSelInf)
        # Updating the (new) 'solutionsI' list for the next generation
        solutionsI.setCandList(pool)
        if(printSteps == 1): print("Inf. Selection/", end='')
#==============================================================================================================
# Make a Selection of the best solutions from Feasible Pop.
def selectionF(feaPool, solutionsF, maxNumCand_perPop, pctElitism, reposSelFea):
    """Select at most 'maxNumCand_perPop' feasible candidates from the union of
    'feaPool' and 'solutionsF', combining elitism with a roulette wheel."""
    pool = solutionsF.getCandList() + feaPool.getCandList()
    # Nothing to do when both populations are empty
    if len(pool) != 0:
        if len(pool) > maxNumCand_perPop:
            # How many candidates survive by pure elitism
            # (rounded down, but at least 1 whenever pctElitism > 0)
            elitismNum = maxNumCand_perPop * pctElitism / 100.0
            elitismNum = 1 if 0.0 < elitismNum < 1.0 else int(elitismNum)
            roulNum = maxNumCand_perPop - elitismNum
            # The elite survives directly; the rest compete on the roulette wheel
            fitnesses = [cand.getFitness() for cand in pool]
            elite, rest, restFit = elitismSelection(pool, fitnesses, elitismNum)
            chosen, _, _ = rouletteWheel(rest, restFit, roulNum, reposSelFea)
            pool = elite + chosen
        # Updating the (new) 'solutionsF' list for the next generation
        solutionsF.setCandList(pool)
        if(printSteps == 1): print("Feas. Selection/", end='')
#==============================================================================================================
# Make a rand mutation into a solution
def mutationRand(candidate, profList):
    """Random mutation: pick one relation of the candidate and swap its
    professor for a different, randomly chosen one.

    Returns (True, the new mutated Candidate).
    NOTE(review): loops forever if profList holds a single professor -- presumably
    never the case in practice; confirm.
    """
    relations = candidate.getRelationsList()[:]
    # Pick the relation to mutate
    target = random.randrange(len(relations))
    subj, oldProf = relations[target]
    # Draw professors until one differs from the current one
    newProf = oldProf
    while newProf == oldProf:
        newProf = profList[random.randrange(len(profList))]
    # Rebuild the relation with the new professor and wrap it in a new Candidate
    relations[target] = [subj, newProf]
    mutated = objects.Candidate()
    mutated.setRelationsList(relations)
    return True, mutated
#==============================================================================================================
# Make some deterministic type of adjustment changing some 'bad' relation
def mutationDeterm(profList, prof_relationsList, relations, subjIsPrefList, problemList):
    """Deterministically-biased mutation: pick one 'bad' relation (drawn from
    'problemList', a per-professor list of problematic subject indices) and
    reassign it to a better-suited professor.

    Professors with more problems are more likely to lose a relation; less
    preferred subjects are more likely to be the one changed; the new professor
    is drawn favoring high preference for the subject and few current relations.
    Returns (True, the new Candidate).
    NOTE(review): 'relations' is indexed directly with a SUBJECT index
    ('relation_will_change_index'), which assumes exactly one relation per
    subject stored in subjList order -- confirm against candidate construction.
    """
    # Choosing a professor to lose a relation
    # Roulette Wheel - more 'bad' relations -> more weight
    weightList = [len(i) for i in problemList]
    problemSubList_selected, _, _ = rouletteWheel(problemList, weightList, objectiveNum=1, repos=0)
    # NOTE(review): .index() returns the first matching sub-list; two professors
    # with identical problem lists would both map to the first one
    profLost_Index = problemList.index(problemSubList_selected[0])
    # Choosing the relation to be modified
    # Roulette Wheel - less preference -> more weight (2 is the maximum preference value)
    lessPrefValue = [2 - subjIsPrefList[profLost_Index][subjIndex] for subjIndex in problemList[profLost_Index]]
    will_change_index, _, _ = rouletteWheel(problemSubList_selected[0], lessPrefValue, objectiveNum=1, repos=0)
    relation_will_change_index = will_change_index[0]
    # Recording original relation that will be modified
    # (despite its name, 'subjList' here is the single SUBJECT of that relation)
    subjList, oldProf = relations[relation_will_change_index]
    # Choosing new Prof to be in the selected relation
    # Granting that the new Prof is different from the old one
    newProf = oldProf
    while(oldProf == newProf):
        # Roulette Wheel - more preference AND less relations -> more weight
        SubjPrefValuesList = [subjIsPref_subList[relation_will_change_index] for subjIsPref_subList in subjIsPrefList]
        # Removing possible Zeros to make the division
        prof_relations_final = [len(i) if len(i) != 0 else 0.5 for i in prof_relationsList]
        # Getting the weights values
        morePrefValueList = [float(SubjPrefValuesList[i] / prof_relations_final[i]) for i in range(len(profList))]
        # If there is only one Prof with value != 0.0
        if(morePrefValueList.count(0.0) == len(morePrefValueList) - 1):
            indexNotZero = [i for i in range(len(profList)) if morePrefValueList[i] != 0.0]
            # If is the same of the old one - random choice (may redraw via the while)
            if(oldProf == profList[indexNotZero[0]]): newProf = profList[random.randrange(len(profList))]
            # If not
            else: newProf = profList[indexNotZero[0]]
        # If there are more then 1 Prof to chose
        else:
            newProf, _, _ = rouletteWheel(profList, morePrefValueList, objectiveNum=1, repos=0)
            newProf = newProf[0]
    # Setting the new relation, creating new Candidate and returning it
    relations[relation_will_change_index]=[subjList, newProf]
    # Setting the flag to finish the while
    flag_work_done = True
    # Generating a new candidate
    newCand = objects.Candidate()
    newCand.setRelationsList(relations)
    return flag_work_done, newCand
#==============================================================================================================
# Make a crossover between two solutions
def crossover(cand1, cand2, twoPointsCross=-1):
    """Recombine two parent Candidates by swapping professors on a segment of relations.

    The crossover segment size is always equal for both parents and never covers
    all relations (that would only copy the parents).

    :param cand1, cand2: parent Candidate objects (their relation lists are copied,
        the parents themselves are not modified)
    :param twoPointsCross: True -> two-point crossover; False -> one-point crossover
        (segment runs from relation 0 up to a random point); -1 -> choose randomly
    :return: two new child Candidate objects
    """
    # The number of changes between parents will always be equal (same crossover segment size), never same size of Num of Parents Relations
    # twoPointsCross = False -> its chosen only one point, will have changes from the 0 relation till the chosed point
    # What is equal '-1' will be a random choice
    if(twoPointsCross == -1): twoPointsCross = random.choice([True, False])
    # Getting copies of all relations from the Candidates to work with
    relations1 = cand1.getRelationsList()[:]
    relations2 = cand2.getRelationsList()[:]
    # OnePoint type:
    if(not twoPointsCross):
        point1 = 0  # Default initial point ('first-half') - if we make changes on 'second-half' the result woud be the same
        point2 = random.randrange(len(relations1))  # Randomly choosing other point that can be equal to 'point1'
        # Granting that not occur only a copy of parents - the chosen point is not the last relation
        while(point2 == len(relations1)-1): point2 = random.randrange(len(relations1))
    # twoPointsCross Type
    else:
        # Generating, randomly two numbers to create a patch - can be a single modification (when p1=p2)
        point1, point2 = random.randrange(len(relations1)), random.randrange(len(relations1))
        # Granting that 'point2' is bigger than 'point1'
        if(point2 < point1):
            p = point1
            point1 = point2
            point2 = p
        # Granting that the crossover do not only copy all relations of one Cand to the another
        while(point2 - point1 == len(relations1) - 1):
            # Generating, randomly two numbers to create a patch - can be a single modification (when p1==p2)
            point1, point2 = random.randrange(len(relations1)), random.randrange(len(relations1))
            # Granting that 'point2' is bigger than 'point1'
            if(point2 < point1):
                p = point1
                point1 = point2
                point2 = p
    # Passing through the relations between Parents making all changes
    while (point1 <= point2):
        # Recording the original relations
        s1, p1 = relations1[point1]
        s2, p2 = relations2[point1]
        # Making the exchange of relations (changing only professors)
        relations1[point1] = s1, p2
        relations2[point1] = s2, p1
        # Next relation
        point1 = point1 + 1
    # Creating and setting the two new Candidates
    newCand1, newCand2 = objects.Candidate(), objects.Candidate()
    newCand1.setRelationsList(relations1)
    newCand2.setRelationsList(relations2)
    # Returning the new Candidates
    return newCand1, newCand2
#==============================================================================================================
# Selection by elitism
def elitismSelection(objectsList, valuesList, objectiveNum):
    """Pick the objectiveNum best-valued objects (elitism).

    Ties are broken in favour of the earliest list position (same rule as
    list.index on the maximum). The input lists are copied, not mutated.

    :return: (selected objects, remaining objects, remaining values)
    """
    remainingObjects = objectsList[:]
    remainingValues = valuesList[:]
    chosen = []
    for _ in range(objectiveNum):
        # Locate the current best value and claim its object.
        bestIndex = remainingValues.index(max(remainingValues))
        chosen.append(remainingObjects.pop(bestIndex))
        remainingValues.pop(bestIndex)
    return chosen, remainingObjects, remainingValues
#==============================================================================================================
# Make selection of objects by Roulette Wheel
def rouletteWheel(objectsList, valuesList, objectiveNum, repos=0):
    """Fitness-proportionate (roulette wheel) selection.

    Selects objectiveNum objects with probability proportional to their value.
    With repos=0 (no reposition) a selected object is removed before the next
    round; with repos truthy the wheel is computed once and duplicates may be
    selected, the chosen objects being removed from the returned lists afterwards.

    :return: (selected objects, remaining objects, remaining values)
    NOTE(review): if a round draws r == 0.0 nothing is selected and the round
    repeats; if valuesList empties while selections remain, the loop cannot
    terminate — callers apparently never request more than available. Verify.
    """
    # objectiveNum: Num of objects will be selected
    # repos: Type of wheel (with reposition)
    # Making a copy of the original lists to work with
    objectsList = objectsList[:]
    valuesList = valuesList[:]
    # List with the selected Objects
    selectedObj = []
    # Flag that allows to make all important calcs at least one time when the Roulette is configured to have Reposition
    reCalc = True
    while(len(selectedObj) < objectiveNum):
        # Allow the Updating of the data for the next Roulette Round without the object that was recent selected on past round
        if(reCalc):
            # When the Roulette process does have reposition of objects, compute the wheel only once
            if(repos): reCalc = False
            # Find the total Value of the Objects
            totalValue = sum([value for value in valuesList])
            # If all values are Zero, fall back to a uniform wheel
            if(totalValue == 0.0):
                valuesList = [1.0 for _ in valuesList]
                totalValue = len(valuesList)
            # Calculate the prob. of a selection for each object
            probObj = [float(value / totalValue) for value in valuesList]
            # Calculate a cumulative prob. for each object
            cumulative = 0.0
            cumulativeProbObj = []
            for q in probObj:
                qNew = q + cumulative
                cumulativeProbObj.append(qNew)
                cumulative = qNew
        # MAIN Roulette Wheel Selection process (one round)
        probPrev = 0.0
        # r is drawn in 0.01 steps over [0.0, 1.0]
        r = float(random.randrange(101) / 100.0)
        #r = float(random.randrange(0, 1, 0.001))
        for i in range(len(cumulativeProbObj)):
            # Select the object whose cumulative-probability interval contains r
            if(probPrev < r and r <= cumulativeProbObj[i]):
                # Adding the selected Object to 'selectedObj'
                selectedObj.append(objectsList[i])
                if(not repos):
                    # Removing the selected object/value from 'valuesList' to do next roulette process
                    valuesList.pop(i)
                    objectsList.pop(i)
                break
            probPrev = cumulativeProbObj[i]
    # Removing from 'objectsList' the selected objects (not removed before because of the reposition)
    # If there are repeated objects, (objectsList + selectedObj) will be larger then original objectsList size
    if(repos):
        for i in selectedObj:
            try:
                index = objectsList.index(i)
                objectsList.pop(index)
                valuesList.pop(index)
            except ValueError: index = -1
    return selectedObj, objectsList, valuesList
#==============================================================================================================
# Detect the stop condition
def stop(asks, curr_Iter, maxNum_Iter, lastMaxFit_Iter, convergDetect, maxFitFea):
    """Decide whether the evolutionary run should end.

    Stops when the iteration budget is exhausted, or when convergDetect
    iterations (if non-zero) have passed without a new best fitness. When
    asks is non-zero the user is consulted via ioData.askStop() instead of
    stopping unconditionally. maxFitFea is accepted for signature
    compatibility but not used here.
    """
    budget_exhausted = curr_Iter == maxNum_Iter
    converged = convergDetect != 0 and curr_Iter - lastMaxFit_Iter == convergDetect
    if budget_exhausted or converged:
        # asks == 0 -> stop automatically; otherwise let the user decide.
        return True if asks == 0 else ioData.askStop()
    # Neither stop condition met: keep iterating.
    return False
#==============================================================================================================
| 53.015416 | 165 | 0.615462 |
11e06d5dd0202783c3e0b55b6bc21794e4419ef3 | 840 | py | Python | tests/ManualLoggerTests.py | velexio/pyLegos | 64d3622f2b6d78a02b171e0438a0224a951d2644 | [
"MIT"
] | null | null | null | tests/ManualLoggerTests.py | velexio/pyLegos | 64d3622f2b6d78a02b171e0438a0224a951d2644 | [
"MIT"
] | 2 | 2016-11-23T00:36:34.000Z | 2016-11-23T00:39:08.000Z | tests/ManualLoggerTests.py | velexio/pyLegos | 64d3622f2b6d78a02b171e0438a0224a951d2644 | [
"MIT"
] | null | null | null |
from pylegos import LogFactory
def consoleOnlyTest():
    """Manual smoke test: emit one message per severity level to the console,
    then to the file logger at /tmp/ManLogTest.log (expected to be inspected
    by hand, not asserted)."""
    logFactory = LogFactory()
    # NOTE(review): logLevel is computed but never passed to getConsoleLogger —
    # presumably the console logger defaults to INFO; confirm against pylegos.
    logLevel = logFactory.LogLevel.INFO
    log = logFactory.getConsoleLogger()
    log.debug('This is a console debug message')
    log.info('This is an console info message')
    log.warn('This is a warning message')
    log.error('This is a error message')
    log.critical('This is a critical message')
    log.info('Logging to a file now')
    # File logger is created at DEBUG so every level below should appear in the file.
    fileLogger = logFactory.getFileLogger('/tmp/ManLogTest.log',logFactory.LogLevel.DEBUG)
    fileLogger.debug('Debug message for file')
    fileLogger.info('Info message for file')
    fileLogger.warn('Warn message for file')
    fileLogger.error('Error message for file')
    fileLogger.critical('Critical message for file')
def main():
    # Entry point: currently runs only the console/file logger smoke test.
    consoleOnlyTest()
if __name__ == '__main__':
    main()
11e388ebd565f092940b5ad2ddba87b868dac5de | 3,171 | py | Python | HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | microsoft/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | [
"Apache-2.0"
] | 1 | 2020-01-16T08:45:59.000Z | 2020-01-16T08:45:59.000Z | HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | LIS/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | [
"Apache-2.0"
] | null | null | null | HyperV/WS2012R2/stress/StorVSCIOZoneTest.py | LIS/FreeBSD-Test-Automation | e96a84054d771ece83908299d37e3c02a19f98b3 | [
"Apache-2.0"
] | 1 | 2021-08-03T00:22:40.000Z | 2021-08-03T00:22:40.000Z | #!/usr/bin/env python
import sys
import os
import time
import test_class
import subprocess
class StorVSCIOZoneTest(test_class.TestClass):
    """Stress test: run IOZone against the SCSI (storvsc) data disk of a FreeBSD VM."""

    def _set_up_vm(self, vm_name, args):
        """Executed first thing after the VM boots: install IOZone and prepare the drive."""
        args['working_dir'] = self._test_param(None)['working_dir']
        test_class._run_on_vm(self, vm_name, "install_iozone", args)
        test_class._run_on_vm(self, vm_name, "format_drive", args)

    def _set_up_host(self, host_name, args):
        """Called BEFORE the VM boots to prepare the host (create/configure VM,
        install host software). Nothing to do for this test."""
        pass

    def format_drive(self, args):
        """(Re)partition /dev/da1 with GPT, create a UFS filesystem on it and
        mount it at the configured working directory (runs inside the VM)."""
        DEFAULT_SCSI_DRIVE = '/dev/da1'
        if os.path.exists(DEFAULT_SCSI_DRIVE + 'p1'):
            # Drive is left over from a previous run: delete the partition first.
            subprocess.call(["gpart", "delete", "-i", "1", DEFAULT_SCSI_DRIVE])
            subprocess.call(["gpart", "destroy", DEFAULT_SCSI_DRIVE])
            # Give the kernel a moment to settle after destroying the scheme.
            time.sleep(2)
        subprocess.call(["gpart", "create", "-s", "GPT", DEFAULT_SCSI_DRIVE])
        subprocess.call(["gpart", "add", "-t", "freebsd-ufs", DEFAULT_SCSI_DRIVE])
        subprocess.call(["newfs", DEFAULT_SCSI_DRIVE + "p1"])
        time.sleep(5)
        subprocess.call(["mount", DEFAULT_SCSI_DRIVE + "p1", args['working_dir']])

    def install_iozone(self, args):
        """Install the iozone package, logging pkg output to install-iozone.log."""
        # 'with' guarantees the log file is closed even if Popen raises.
        with open('install-iozone.log', 'w') as logfile:
            p = subprocess.Popen(["pkg", "install", "-y", "iozone"],
                                 stdout=logfile,
                                 stderr=logfile)
            p.wait()

    def run_iozone(self, args):
        """Run the IOZone auto test (files up to 10 GB) and log to iozone.log.

        Runs from inside the working directory so the I/O hits the separate
        test drive rather than the system disk.
        """
        with open('iozone.log', 'w') as logfile:
            os.chdir(args['working_dir'])
            p = subprocess.Popen(["iozone", "-a", "-z", "-g10g", "-Vshostc"],
                                 stdout=logfile,
                                 stderr=logfile)
            p.wait()

    def _run(self, args):
        """Main test body: launch IOZone on the first VM of the first host."""
        # Get a host (kept for parity with sibling tests; unused here).
        host_one = self._machines[0]['host']
        # Get the VM the workload will run on.
        vm_one = self._machines[0]['vms'][0]['name']
        args['working_dir'] = self._test_param(None)['working_dir']
        test_class._run_on_vm(self, vm_one, "run_iozone", args)

    def _tear_down(self, args):
        """No cleanup required for this test."""
        pass

    def _request_machines(self):
        """Request machines from the pool: len(req) is the number of hosts and
        each element is how many VMs are required on that host — here 1 VM on 1 host."""
        request = {'pool': 'stress',
                   'desc': 'storvsc_IOZone',
                   'req': [1]
                   }
        return request

    def _test_param(self, args):
        """Static test parameters (threading mode, VM snapshot, paths)."""
        param = {
            'multi-threaded': True,
            'snapshot': 'ICABase',
            'remote_path': '/root/',
            'working_dir': '/mnt/test'
        }
        return param
11e3f9c5f47a0f678f4c4be381a8ca3e9eaec6d2 | 16,809 | py | Python | LDDMM_Python/lddmm_python/lib/plotly/colors.py | tt6746690/lddmm-ot | 98e45d44969221b0fc8206560d9b7a655ef7e137 | [
"MIT"
] | 48 | 2017-08-04T03:30:22.000Z | 2022-03-09T03:24:11.000Z | LDDMM_Python/lddmm_python/lib/plotly/colors.py | hushunbo/lddmm-ot | 5af26fe32ae440c598ed403ce2876e98d6e1c692 | [
"MIT"
] | null | null | null | LDDMM_Python/lddmm_python/lib/plotly/colors.py | hushunbo/lddmm-ot | 5af26fe32ae440c598ed403ce2876e98d6e1c692 | [
"MIT"
] | 15 | 2017-09-30T18:55:48.000Z | 2021-04-27T18:27:55.000Z | """
colors
=====
Functions that manipulate colors and arrays of colors
There are three basic types of color types: rgb, hex and tuple:
rgb - An rgb color is a string of the form 'rgb(a,b,c)' where a, b and c are
floats between 0 and 255 inclusive.
hex - A hex color is a string of the form '#xxxxxx' where each x is a
character that belongs to the set [0,1,2,3,4,5,6,7,8,9,a,b,c,d,e,f]. This is
just the list of characters used in the hexadecimal numeric system.
tuple - A tuple color is a 3-tuple of the form (a,b,c) where a, b and c are
floats between 0 and 1 inclusive.
"""
from __future__ import absolute_import
from plotly import exceptions
from numbers import Number
# Default qualitative palette cycled through when traces are drawn without
# an explicit color (rgb strings, components in 0-255).
DEFAULT_PLOTLY_COLORS = ['rgb(31, 119, 180)', 'rgb(255, 127, 14)',
                         'rgb(44, 160, 44)', 'rgb(214, 39, 40)',
                         'rgb(148, 103, 189)', 'rgb(140, 86, 75)',
                         'rgb(227, 119, 194)', 'rgb(127, 127, 127)',
                         'rgb(188, 189, 34)', 'rgb(23, 190, 207)']
PLOTLY_SCALES = {
'Greys': [
[0, 'rgb(0,0,0)'], [1, 'rgb(255,255,255)']
],
'YlGnBu': [
[0, 'rgb(8,29,88)'], [0.125, 'rgb(37,52,148)'],
[0.25, 'rgb(34,94,168)'], [0.375, 'rgb(29,145,192)'],
[0.5, 'rgb(65,182,196)'], [0.625, 'rgb(127,205,187)'],
[0.75, 'rgb(199,233,180)'], [0.875, 'rgb(237,248,217)'],
[1, 'rgb(255,255,217)']
],
'Greens': [
[0, 'rgb(0,68,27)'], [0.125, 'rgb(0,109,44)'],
[0.25, 'rgb(35,139,69)'], [0.375, 'rgb(65,171,93)'],
[0.5, 'rgb(116,196,118)'], [0.625, 'rgb(161,217,155)'],
[0.75, 'rgb(199,233,192)'], [0.875, 'rgb(229,245,224)'],
[1, 'rgb(247,252,245)']
],
'YlOrRd': [
[0, 'rgb(128,0,38)'], [0.125, 'rgb(189,0,38)'],
[0.25, 'rgb(227,26,28)'], [0.375, 'rgb(252,78,42)'],
[0.5, 'rgb(253,141,60)'], [0.625, 'rgb(254,178,76)'],
[0.75, 'rgb(254,217,118)'], [0.875, 'rgb(255,237,160)'],
[1, 'rgb(255,255,204)']
],
'Bluered': [
[0, 'rgb(0,0,255)'], [1, 'rgb(255,0,0)']
],
# modified RdBu based on
# www.sandia.gov/~kmorel/documents/ColorMaps/ColorMapsExpanded.pdf
'RdBu': [
[0, 'rgb(5,10,172)'], [0.35, 'rgb(106,137,247)'],
[0.5, 'rgb(190,190,190)'], [0.6, 'rgb(220,170,132)'],
[0.7, 'rgb(230,145,90)'], [1, 'rgb(178,10,28)']
],
# Scale for non-negative numeric values
'Reds': [
[0, 'rgb(220,220,220)'], [0.2, 'rgb(245,195,157)'],
[0.4, 'rgb(245,160,105)'], [1, 'rgb(178,10,28)']
],
# Scale for non-positive numeric values
'Blues': [
[0, 'rgb(5,10,172)'], [0.35, 'rgb(40,60,190)'],
[0.5, 'rgb(70,100,245)'], [0.6, 'rgb(90,120,245)'],
[0.7, 'rgb(106,137,247)'], [1, 'rgb(220,220,220)']
],
'Picnic': [
[0, 'rgb(0,0,255)'], [0.1, 'rgb(51,153,255)'],
[0.2, 'rgb(102,204,255)'], [0.3, 'rgb(153,204,255)'],
[0.4, 'rgb(204,204,255)'], [0.5, 'rgb(255,255,255)'],
[0.6, 'rgb(255,204,255)'], [0.7, 'rgb(255,153,255)'],
[0.8, 'rgb(255,102,204)'], [0.9, 'rgb(255,102,102)'],
[1, 'rgb(255,0,0)']
],
'Rainbow': [
[0, 'rgb(150,0,90)'], [0.125, 'rgb(0,0,200)'],
[0.25, 'rgb(0,25,255)'], [0.375, 'rgb(0,152,255)'],
[0.5, 'rgb(44,255,150)'], [0.625, 'rgb(151,255,0)'],
[0.75, 'rgb(255,234,0)'], [0.875, 'rgb(255,111,0)'],
[1, 'rgb(255,0,0)']
],
'Portland': [
[0, 'rgb(12,51,131)'], [0.25, 'rgb(10,136,186)'],
[0.5, 'rgb(242,211,56)'], [0.75, 'rgb(242,143,56)'],
[1, 'rgb(217,30,30)']
],
'Jet': [
[0, 'rgb(0,0,131)'], [0.125, 'rgb(0,60,170)'],
[0.375, 'rgb(5,255,255)'], [0.625, 'rgb(255,255,0)'],
[0.875, 'rgb(250,0,0)'], [1, 'rgb(128,0,0)']
],
'Hot': [
[0, 'rgb(0,0,0)'], [0.3, 'rgb(230,0,0)'],
[0.6, 'rgb(255,210,0)'], [1, 'rgb(255,255,255)']
],
'Blackbody': [
[0, 'rgb(0,0,0)'], [0.2, 'rgb(230,0,0)'],
[0.4, 'rgb(230,210,0)'], [0.7, 'rgb(255,255,255)'],
[1, 'rgb(160,200,255)']
],
'Earth': [
[0, 'rgb(0,0,130)'], [0.1, 'rgb(0,180,180)'],
[0.2, 'rgb(40,210,40)'], [0.4, 'rgb(230,230,50)'],
[0.6, 'rgb(120,70,20)'], [1, 'rgb(255,255,255)']
],
'Electric': [
[0, 'rgb(0,0,0)'], [0.15, 'rgb(30,0,100)'],
[0.4, 'rgb(120,0,100)'], [0.6, 'rgb(160,90,0)'],
[0.8, 'rgb(230,200,0)'], [1, 'rgb(255,250,220)']
],
'Viridis': [
[0, '#440154'], [0.06274509803921569, '#48186a'],
[0.12549019607843137, '#472d7b'], [0.18823529411764706, '#424086'],
[0.25098039215686274, '#3b528b'], [0.3137254901960784, '#33638d'],
[0.3764705882352941, '#2c728e'], [0.4392156862745098, '#26828e'],
[0.5019607843137255, '#21918c'], [0.5647058823529412, '#1fa088'],
[0.6274509803921569, '#28ae80'], [0.6901960784313725, '#3fbc73'],
[0.7529411764705882, '#5ec962'], [0.8156862745098039, '#84d44b'],
[0.8784313725490196, '#addc30'], [0.9411764705882353, '#d8e219'],
[1, '#fde725']
]
}
def color_parser(colors, function):
    """Apply *function* to a single color or to every color in an iterable.

    A plain string, or a tuple whose first element is numeric, is treated as
    one color and passed straight to *function*. Any other tuple maps
    element-wise to a tuple; any other iterable maps element-wise to a list.
    All items of an iterable must share the same color representation
    (rgb string, hex string or tuple).
    """
    # Single color: rgb/hex string, or a numeric 3-tuple.
    single_tuple = isinstance(colors, tuple) and isinstance(colors[0], Number)
    if isinstance(colors, str) or single_tuple:
        return function(colors)
    if not hasattr(colors, '__iter__'):
        # Mirrors the original implicit fall-through for non-iterables.
        return None
    mapped = (function(item) for item in colors)
    return tuple(mapped) if isinstance(colors, tuple) else list(mapped)
def validate_colors(colors):
    """
    Validates color(s) and raises an error for invalid colors.

    Accepts a Plotly scale name, an rgb/hex string, a numeric tuple, or a
    list/dict of those. rgb components must not exceed 255.0 and normalized
    tuple components must not exceed 1.0. Returns the input unchanged when
    valid (a recognised scale name short-circuits and returns None).
    """
    colors_list = []
    if isinstance(colors, str):
        if colors in PLOTLY_SCALES:
            # A named scale is valid by definition; nothing further to check.
            return
        elif 'rgb' in colors or '#' in colors:
            colors_list = [colors]
        else:
            raise exceptions.PlotlyError(
                "If your colors variable is a string, it must be a "
                "Plotly scale, an rgb color or a hex color."
            )
    elif isinstance(colors, tuple):
        # A tuple of numbers is one color; a tuple of colors becomes a list.
        if isinstance(colors[0], Number):
            colors_list = [colors]
        else:
            colors_list = list(colors)
    if isinstance(colors, dict):
        colors_list.extend(colors.values())
    elif isinstance(colors, list):
        colors_list = colors
    # Validate each collected color.
    for each_color in colors_list:
        if 'rgb' in each_color:
            # Components of 'rgb(r, g, b)' strings are capped at 255.
            for value in color_parser(each_color, unlabel_rgb):
                if value > 255.0:
                    raise exceptions.PlotlyError(
                        "Whoops! The elements in your rgb colors "
                        "tuples cannot exceed 255.0."
                    )
        elif '#' in each_color:
            # Hex colors only need to parse; hex_to_rgb raises on bad input.
            color_parser(each_color, hex_to_rgb)
        elif isinstance(each_color, tuple):
            # Normalized tuples must stay within [0, 1].
            for value in each_color:
                if value > 1.0:
                    raise exceptions.PlotlyError(
                        "Whoops! The elements in your colors tuples "
                        "cannot exceed 1.0."
                    )
    return colors
def convert_colors_to_same_type(colors, colortype='rgb'):
    """
    Converts color(s) to the specified color type.

    Takes a single color or an iterable of colors and returns a list of the
    color(s) all converted to 'rgb' strings or numeric tuples. If colors is a
    Plotly scale name, the corresponding colorscale is returned directly and
    colortype is not applied.

    :param colors: scale name, rgb/hex string, tuple, or list/tuple of those
    :param (str) colortype: 'rgb' or 'tuple'
    :raises: PlotlyError for unrecognised strings or colortype values
    """
    colors_list = []
    if isinstance(colors, str):
        if colors in PLOTLY_SCALES:
            # Scale name: hand back the [value, color] pairs untouched.
            return PLOTLY_SCALES[colors]
        elif 'rgb' in colors or '#' in colors:
            colors_list = [colors]
        else:
            raise exceptions.PlotlyError(
                "If your colors variable is a string, it must be a Plotly "
                "scale, an rgb color or a hex color.")
    elif isinstance(colors, tuple):
        # A tuple of numbers is one color; a tuple of colors becomes a list.
        if isinstance(colors[0], Number):
            colors_list = [colors]
        else:
            colors_list = list(colors)
    elif isinstance(colors, list):
        colors_list = colors
    # convert all colors to rgb strings in place
    # (colors already in 'rgb(...)' form are left as-is by this pass)
    for j, each_color in enumerate(colors_list):
        if '#' in each_color:
            # hex -> (r, g, b) -> 'rgb(r, g, b)'
            each_color = color_parser(
                each_color, hex_to_rgb
            )
            each_color = color_parser(
                each_color, label_rgb
            )
            colors_list[j] = each_color
        elif isinstance(each_color, tuple):
            # normalized tuple -> 0-255 tuple -> 'rgb(r, g, b)'
            each_color = color_parser(
                each_color, convert_to_RGB_255
            )
            each_color = color_parser(
                each_color, label_rgb
            )
            colors_list[j] = each_color
    if colortype == 'rgb':
        return colors_list
    elif colortype == 'tuple':
        # Undo the rgb labelling: 'rgb(r, g, b)' -> normalized (r, g, b) tuple.
        for j, each_color in enumerate(colors_list):
            each_color = color_parser(
                each_color, unlabel_rgb
            )
            each_color = color_parser(
                each_color, unconvert_from_RGB_255
            )
            colors_list[j] = each_color
        return colors_list
    else:
        raise exceptions.PlotlyError("You must select either rgb or tuple "
                                     "for your colortype variable.")
def convert_dict_colors_to_same_type(colors, colortype='rgb'):
    """
    Converts the values of a color dict to the specified color type, IN PLACE.

    Each value may be a hex string, a normalized tuple, or an rgb string
    (the latter is left untouched during the rgb pass). The mutated dict
    is also returned for convenience.

    :param (dict) colors: mapping of keys to colors
    :param (str) colortype: 'rgb' or 'tuple'
    :raises: PlotlyError for an unrecognised colortype
    """
    # First pass: normalize every value to an 'rgb(r, g, b)' string.
    for key in colors:
        if '#' in colors[key]:
            colors[key] = color_parser(
                colors[key], hex_to_rgb
            )
            colors[key] = color_parser(
                colors[key], label_rgb
            )
        elif isinstance(colors[key], tuple):
            colors[key] = color_parser(
                colors[key], convert_to_RGB_255
            )
            colors[key] = color_parser(
                colors[key], label_rgb
            )
    if colortype == 'rgb':
        return colors
    elif colortype == 'tuple':
        # Second pass: convert the rgb strings back to normalized tuples.
        for key in colors:
            colors[key] = color_parser(
                colors[key], unlabel_rgb
            )
            colors[key] = color_parser(
                colors[key], unconvert_from_RGB_255
            )
        return colors
    else:
        raise exceptions.PlotlyError("You must select either rgb or tuple "
                                     "for your colortype variable.")
def make_colorscale(colors, scale=None):
    """
    Makes a colorscale from a list of colors and a scale.

    Constructs a list of [scale_value, color] pairs in sequential order.
    If 'scale' is omitted the colors are spaced linearly over [0, 1];
    otherwise 'scale' must be the same length as colors, start at 0, end
    at 1, and be strictly increasing.

    For the form of the output, see
    https://plot.ly/python/reference/#mesh3d-colorscale
    """
    # validate minimum colors length of 2
    if len(colors) < 2:
        raise exceptions.PlotlyError("You must input a list of colors that "
                                     "has at least two colors.")
    if not scale:
        # Evenly space the colors over [0, 1].
        scale_incr = 1./(len(colors) - 1)
        return [[i * scale_incr, color] for i, color in enumerate(colors)]
    # validate scale
    if len(colors) != len(scale):
        raise exceptions.PlotlyError("The length of colors and scale "
                                     "must be the same.")
    if (scale[0] != 0) or (scale[-1] != 1):
        raise exceptions.PlotlyError(
            "The first and last number in scale must be 0.0 and 1.0 "
            "respectively."
        )
    # scale must be strictly increasing between its endpoints
    for j in range(1, len(scale)):
        if scale[j] <= scale[j-1]:
            raise exceptions.PlotlyError(
                "'scale' must be a list that contains an increasing "
                "sequence of numbers where the first and last number are"
                "0.0 and 1.0 respectively."
            )
    return [list(tup) for tup in zip(scale, colors)]
def find_intermediate_color(lowcolor, highcolor, intermed):
    """Linearly interpolate between two color tuples.

    Each of the three channels is lowcolor + intermed * (highcolor - lowcolor),
    so intermed=0 yields lowcolor and intermed=1 yields highcolor. The tuple
    elements are expected to lie between 0 and 1.
    """
    channels = []
    for k in range(3):
        delta = float(highcolor[k] - lowcolor[k])
        channels.append(lowcolor[k] + intermed * delta)
    return tuple(channels)
def unconvert_from_RGB_255(colors):
    """Normalize an RGB triplet from the 0-255 range into the 0-1 range.

    Accepts a tuple whose first three elements lie in [0, 255] and returns
    a new tuple with each of those elements divided by 255.
    """
    red, green, blue = colors[0], colors[1], colors[2]
    return (red / 255.0, green / 255.0, blue / 255.0)
def convert_to_RGB_255(colors):
    """Scale a 0-1 normalized triplet up to the 0-255 range."""
    scaled = [component * 255.0 for component in (colors[0], colors[1], colors[2])]
    return tuple(scaled)
def n_colors(lowcolor, highcolor, n_colors):
    """Return n_colors tuples linearly interpolated from lowcolor to highcolor.

    The endpoints are included: the first tuple equals lowcolor and the last
    equals highcolor (up to float arithmetic), with the rest evenly spaced
    along the straight line between them in RGB space.
    """
    steps = n_colors - 1
    # Per-channel increment between consecutive colors.
    increments = tuple(float(highcolor[k] - lowcolor[k]) / steps for k in range(3))
    return [
        (lowcolor[0] + idx * increments[0],
         lowcolor[1] + idx * increments[1],
         lowcolor[2] + idx * increments[2])
        for idx in range(n_colors)
    ]
def label_rgb(colors):
    """Format a triplet (a, b, c) as the rgb color string 'rgb(a, b, c)'."""
    red, green, blue = colors[0], colors[1], colors[2]
    return ('rgb(%s, %s, %s)' % (red, green, blue))
def unlabel_rgb(colors):
    """
    Takes an rgb color string 'rgb(a, b, c)' and returns the tuple (a, b, c).

    Digits and decimal points are collected into numbers; a comma terminates
    the number being collected; every other character is ignored. (The
    original implementation built an intermediate comma-separated string and
    re-parsed it, and its docstring claimed list support that the code never
    had — this version parses in one pass and documents the real contract.)

    :param (str) colors: an 'rgb(a, b, c)' color string
    :rtype (tuple): the three components as floats
    """
    numbers = []
    current = ''
    for char in colors:
        if char.isdigit() or char == '.':
            current += char
        elif char == ',':
            # A comma ends the number collected so far (ignore empty runs).
            if current:
                numbers.append(float(current))
            current = ''
    if current:
        # The final component has no trailing comma.
        numbers.append(float(current))
    return (numbers[0], numbers[1], numbers[2])
def hex_to_rgb(value):
    """
    Calculates rgb values from a hex color code.

    Splits the hex digits (after any leading '#') into three equal-width
    sections and parses each as a base-16 integer, so both '#rrggbb' and
    shorter forms like '#rgb' are handled.

    :param (string) value: Hex color string
    :rtype (tuple): tuple of rgb integer values
    """
    digits = value.lstrip('#')
    section = len(digits) // 3
    channels = []
    for start in range(0, len(digits), section):
        channels.append(int(digits[start:start + section], 16))
    return tuple(channels)
def colorscale_to_colors(colorscale):
    """
    Extracts the colors from a colorscale as a list.

    :param (list) colorscale: list of [scale_value, color] pairs
    :rtype (list): the colors, in scale order
    """
    # Each entry is a [scale, color] pair; keep only the color part.
    return [entry[1] for entry in colorscale]
| 32.638835 | 77 | 0.55268 |
11e3feaa8eddda799c32e0dc2f9c36ee4b41ba9c | 420 | py | Python | nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | [
"MIT"
] | 1 | 2022-01-26T12:52:33.000Z | 2022-01-26T12:52:33.000Z | nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | [
"MIT"
] | null | null | null | nonebot/consts.py | he0119/nonebot2 | bd7ee0a1bafc0ea7a7501ba37541349d4a81b73e | [
"MIT"
] | null | null | null | # used by Matcher
RECEIVE_KEY = "_receive_{id}"
LAST_RECEIVE_KEY = "_last_receive"
ARG_KEY = "{key}"
REJECT_TARGET = "_current_target"
REJECT_CACHE_TARGET = "_next_target"
# used by Rule
PREFIX_KEY = "_prefix"
CMD_KEY = "command"
RAW_CMD_KEY = "raw_command"
CMD_ARG_KEY = "command_arg"
SHELL_ARGS = "_args"
SHELL_ARGV = "_argv"
REGEX_MATCHED = "_matched"
REGEX_GROUP = "_matched_groups"
REGEX_DICT = "_matched_dict"
| 20 | 36 | 0.757143 |
11e42e8d7b995de0658689f4a01d37ca6d28aa0b | 2,124 | py | Python | todo/views.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | todo/views.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | todo/views.py | haidoro/TODO_lesson | fa0b92eb5d6f05ee15900dcc407e1ae3451fee5b | [
"CECILL-B"
] | null | null | null | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.db import IntegrityError
from django.contrib.auth import authenticate, login, logout
from .models import TodoModel
from django.views.generic import ListView, DetailView, CreateView, DeleteView, UpdateView
from django.urls import reverse_lazy
class TodoList(ListView):
    # Landing page: lists every TodoModel row (default 'object_list' context).
    template_name = 'index.html'
    model = TodoModel
class TodoDetail(DetailView):
    # Detail page for a single TodoModel row, looked up by pk from the URL.
    template_name = 'detail.html'
    model = TodoModel
def signupview(request):
    """Register a new user.

    On POST, creates a Django auth user from the submitted credentials;
    a duplicate username is reported back on the signup page.
    NOTE(review): on successful creation the view simply re-renders the
    signup form with no confirmation or redirect — confirm this is intended.
    """
    if request.method == 'POST':
        username_data = request.POST['username_data']
        password_data = request.POST['password_data']
        try:
            user = User.objects.create_user(username_data, '', password_data)
        except IntegrityError:
            # Username already taken (unique-constraint violation).
            return render(request, 'signup.html', {'error': 'このユーザーは既に登録されています'})
        else:
            return render(request, 'signup.html', {})
    # GET: just show the empty signup form.
    return render(request, 'signup.html', {})
def loginview(request):
    """Log a user in.

    On POST, authenticates the submitted credentials: success opens a session
    and redirects to the todo list; failure redirects back to the login page.
    GET just renders the login form.
    """
    if request.method == 'POST':
        username_data = request.POST['username_data']
        password_data = request.POST['password_data']
        user = authenticate(request, username=username_data,
                            password=password_data)
        if user is not None:
            login(request, user)
            return redirect('list')
        else:
            return redirect('login')
    return render(request, 'login.html')
class TodoCreate(CreateView):
    # Form view for creating a todo item; redirects to the list on success.
    template_name = 'create.html'
    model = TodoModel
    fields = ('title', 'memo', 'auther', 'priority',
              'progress', 'duedate', 'complete')
    success_url = reverse_lazy('list')
class TodoDelete(DeleteView):
    # Confirmation view for deleting a todo item; redirects to the list on success.
    template_name = 'delete.html'
    model = TodoModel
    success_url = reverse_lazy('list')
class TodoUpdate(UpdateView):
    # Form view for editing an existing todo item; same fields as TodoCreate.
    template_name = 'update.html'
    model = TodoModel
    fields = ('title', 'memo', 'auther', 'priority',
              'progress', 'duedate', 'complete')
    success_url = reverse_lazy('list')
def logoutview(request):
    # Close the session and send the user back to the login page.
    logout(request)
    return redirect('login')
| 29.5 | 89 | 0.664313 |
11e827caf9a2f6b79a2d0287af4086e1ef14f2b8 | 269 | py | Python | bindings/kepler.gl-jupyter/keplergl/__init__.py | sw1227/kepler.gl | 14c35fc048a745faab0c6770cab7a4625ccedda3 | [
"MIT"
] | 4,297 | 2019-05-04T01:29:14.000Z | 2022-03-31T19:28:10.000Z | bindings/kepler.gl-jupyter/keplergl/__init__.py | sw1227/kepler.gl | 14c35fc048a745faab0c6770cab7a4625ccedda3 | [
"MIT"
] | 968 | 2019-05-05T16:13:03.000Z | 2022-03-30T13:11:31.000Z | bindings/kepler.gl-jupyter/keplergl/__init__.py | sw1227/kepler.gl | 14c35fc048a745faab0c6770cab7a4625ccedda3 | [
"MIT"
] | 1,082 | 2019-05-04T15:55:24.000Z | 2022-03-30T16:27:53.000Z | from ._version import version_info, __version__
from .keplergl import *
def _jupyter_nbextension_paths():
return [{
'section': 'notebook',
'src': 'static',
'dest': 'keplergl-jupyter',
'require': 'keplergl-jupyter/extension'
}]
| 22.416667 | 47 | 0.624535 |
11ed16385a989b7c743480e1ee477feb796f62cc | 9,845 | py | Python | iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 29 | 2020-12-26T07:22:19.000Z | 2022-03-07T13:40:09.000Z | iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 150 | 2020-11-09T15:03:27.000Z | 2022-03-07T15:36:07.000Z | iaso/tests/api/test_token.py | ekhalilbsq/iaso | e6400c52aeb4f67ce1ca83b03efa3cb11ef235ee | [
"MIT"
] | 4 | 2020-11-09T10:38:13.000Z | 2021-10-04T09:42:47.000Z | from django.test import tag
from django.core.files import File
from unittest import mock
from iaso import models as m
from iaso.test import APITestCase
class TokenAPITestCase(APITestCase):
    @classmethod
    def setUpTestData(cls):
        """Build the shared fixture: one account, one authenticated project,
        two forms (form_2 attached to an org unit type with one instance)."""
        data_source = m.DataSource.objects.create(name="counsil")
        version = m.SourceVersion.objects.create(data_source=data_source, number=1)
        star_wars = m.Account.objects.create(name="Star Wars", default_version=version)
        # User whose credentials the token tests authenticate with.
        cls.yoda = cls.create_user_with_profile(username="yoda", account=star_wars)
        cls.yoda.set_password("IMomLove")
        cls.yoda.save()
        cls.jedi_council = m.OrgUnitType.objects.create(name="Jedi Council", short_name="Cnc")
        cls.jedi_council_corruscant = m.OrgUnit.objects.create(name="Corruscant Jedi Council")
        # needs_authentication=True is what the token tests exercise.
        cls.project = m.Project.objects.create(
            name="Hydroponic gardens",
            app_id="stars.empire.agriculture.hydroponics",
            account=star_wars,
            needs_authentication=True,
        )
        cls.form_1 = m.Form.objects.create(name="Hydroponics study", period_type=m.MONTH, single_per_period=True)
        cls.form_2 = m.Form.objects.create(
            name="Hydroponic public survey",
            form_id="sample2",
            device_field="deviceid",
            location_field="geoloc",
            period_type="QUARTER",
            single_per_period=True,
        )
        # Fake XLSForm file so a form version can exist without touching storage.
        form_2_file_mock = mock.MagicMock(spec=File)
        form_2_file_mock.name = "test.xml"
        cls.form_2.form_versions.create(file=form_2_file_mock, version_id="2020022401")
        cls.form_2.org_unit_types.add(cls.jedi_council)
        cls.create_form_instance(form=cls.form_2, period="202001", org_unit=cls.jedi_council_corruscant)
        cls.form_2.save()
        cls.project.unit_types.add(cls.jedi_council)
        cls.project.forms.add(cls.form_1)
        cls.project.forms.add(cls.form_2)
        cls.project.save()
    def authenticate_using_token(self):
        """Obtain a JWT for yoda and install it as the test client's Bearer
        credential; returns the token endpoint's response payload
        (contains both 'access' and 'refresh' tokens)."""
        response = self.client.post(f"/api/token/", data={"username": "yoda", "password": "IMomLove"}, format="json")
        self.assertJSONResponse(response, 200)
        response_data = response.json()
        access_token = response_data.get("access")
        self.client.credentials(HTTP_AUTHORIZATION=f"Bearer {access_token}")
        return response_data
    def test_acquire_token_and_authenticate(self):
        """Test token authentication"""
        self.authenticate_using_token()
        # An authenticated request must see the project's forms.
        response = self.client.get("/api/forms/?app_id=stars.empire.agriculture.hydroponics")
        self.assertJSONResponse(response, 200)
        response_data = response.json()
        form_ids = [f["id"] for f in response_data["forms"]]
        self.assertTrue(self.form_2.id in form_ids)
    def test_acquire_token_and_post_instance(self):
        """Test upload to a project that requires authentication"""
        # Unauthenticated case is already tested in test_api
        self.authenticate_using_token()
        uuid = "4b7c3954-f69a-4b99-83b1-df73957b32E1"
        # Mobile-app style payload: a list with one instance descriptor.
        instance_body = [
            {
                "id": uuid,
                "latitude": 4.4,
                "created_at": 1565258153704,
                "updated_at": 1565258153704,
                "orgUnitId": self.jedi_council_corruscant.id,
                "formId": self.form_2.id,
                "longitude": 4.4,
                "accuracy": 10,
                "altitude": 100,
                "file": "\/storage\/emulated\/0\/odk\/instances\/RDC Collecte Data DPS_2_2019-08-08_11-54-46\/RDC Collecte Data DPS_2_2019-08-08_11-54-46.xml",
                "name": "the name",
            }
        ]
        response = self.client.post(
            "/api/instances/?app_id=stars.empire.agriculture.hydroponics", data=instance_body, format="json"
        )
        self.assertEqual(response.status_code, 200)
        # The authenticated upload must actually create the instance.
        self.assertTrue(m.Instance.objects.filter(uuid=uuid).first() is not None)
    def test_unauthenticated_post_instance(self):
        """Test unauthenticated upload to a project that requires authentication"""
        # Unauthenticated case is already tested in test_api
        uuid = "4b7c3954-f69a-4b99-83b1-df73957b32E2"
        # Same payload shape as the authenticated test, different uuid.
        instance_body = [
            {
                "id": uuid,
                "latitude": 4.4,
                "created_at": 1565258153704,
                "updated_at": 1565258153704,
                "orgUnitId": self.jedi_council_corruscant.id,
                "formId": self.form_2.id,
                "longitude": 4.4,
                "accuracy": 10,
                "altitude": 100,
                "file": "\/storage\/emulated\/0\/odk\/instances\/RDC Collecte Data DPS_2_2019-08-08_11-54-46\/RDC Collecte Data DPS_2_2019-08-08_11-54-46.xml",
                "name": "the name",
            }
        ]
        response = self.client.post(
            "/api/instances/?app_id=stars.empire.agriculture.hydroponics", data=instance_body, format="json"
        )
        self.assertEqual(response.status_code, 200)
        self.assertIsNone(m.Instance.objects.filter(uuid=uuid).first())
        # The result is that the instance is not created, even though the api sent back a 200
        # this is normal: we want the api to accept all creations requests to be able to debug on the server
        # and not have data stuck on a mobile phone.
        # An APIImport record with has_problem set to True should be created
        self.assertAPIImport(
            "instance",
            request_body=instance_body,
            has_problems=True,
            exception_contains_string="Could not find project for user",
        )
def test_refresh(self):
"""Test refreshing authentication token"""
# Unauthenticated case is already tested in test_api
response_data = self.authenticate_using_token()
refresh_token = response_data.get("refresh")
response = self.client.post(f"/api/token/refresh/", data={"refresh": refresh_token}, format="json")
self.assertJSONResponse(response, 200)
response_data = response.json()
access_token_2 = response_data.get("access")
self.client.credentials(HTTP_AUTHORIZATION=f"Bearer {access_token_2}")
# test an endpoint that requires authentication
response = self.client.get("/api/orgunits/?app_id=stars.empire.agriculture.hydroponics")
self.assertJSONResponse(response, 200)
def test_no_token(self):
"""Test invalid authentication tokens"""
# Unauthenticated case is already tested in test_api
self.client.credentials(HTTP_AUTHORIZATION=f"Bearer ")
# test an endpoint that requires authentication
response = self.client.get("/api/groups/?app_id=stars.empire.agriculture.hydroponics")
self.assertJSONResponse(response, 403)
self.client.credentials(HTTP_AUTHORIZATION=f"Bearer WRONG")
# test an endpoint that requires authentication
response = self.client.get("/api/groups/?app_id=stars.empire.agriculture.hydroponics")
self.assertJSONResponse(response, 403)
    def test_acquire_token_and_post_org_unit(self):
        """Test upload to a project that requires authentication"""
        # Unauthenticated case is already tested in test_api
        self.authenticate_using_token()
        uuid = "r5dx2671-bb59-4fb2-a4a0-4af80573e2de"
        name = "Kashyyyk Wookies Council"
        # Payload mirroring what the mobile app sends when creating an org unit.
        unit_body = [
            {
                "id": uuid,
                "latitude": 0,
                "created_at": 1565194077692,
                "updated_at": 1565194077693,
                "org_unit_type_id": self.jedi_council.id,
                "parent_id": None,
                "longitude": 0,
                "accuracy": 0,
                "altitude": 0,
                "time": 0,
                "name": name,
            }
        ]
        response = self.client.post(
            "/api/orgunits/?app_id=stars.empire.agriculture.hydroponics", data=unit_body, format="json"
        )
        self.assertEqual(response.status_code, 200)
        # Being authenticated, the org unit must be created and the import
        # recorded without problems (auth header captured as well).
        self.assertTrue(m.OrgUnit.objects.filter(uuid=uuid).first() is not None)
        self.assertAPIImport("orgUnit", request_body=unit_body, has_problems=False, check_auth_header=True)
    def test_unauthenticated_post_org_unit(self):
        """Test upload to a project that requires authentication without token"""
        # Unauthenticated case is already tested in test_api
        uuid = "s5dx2671-ac59-4fb2-a4a0-4af80573e2de"
        name = "Antar 4 Council"
        # Same payload shape as the authenticated test, different uuid/name.
        unit_body = [
            {
                "id": uuid,
                "latitude": 0,
                "created_at": 1565194077692,
                "updated_at": 1565194077693,
                "org_unit_type_id": self.jedi_council.id,
                "parent_id": None,
                "longitude": 0,
                "accuracy": 0,
                "altitude": 0,
                "time": 0,
                "name": name,
            }
        ]
        response = self.client.post(
            "/api/orgunits/?app_id=stars.empire.agriculture.hydroponics", data=unit_body, format="json"
        )
        self.assertEqual(response.status_code, 200)
        self.assertIsNone(m.OrgUnit.objects.filter(uuid=uuid).first())
        # The result is that the org unit is not created, even though the api sent back a 200
        # this is normal: we want the api to accept all creations requests to be able to debug on the server
        # and not have data stuck on a mobile phone.
        # An APIImport record with has_problem set to True should be created
        self.assertAPIImport(
            "orgUnit",
            request_body=unit_body,
            has_problems=True,
            exception_contains_string="Could not find project for user",
        )
| 40.020325 | 159 | 0.622854 |
11f08a8bd257b57737ab450a04da370a5b819540 | 302 | py | Python | core/shortname.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | core/shortname.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | core/shortname.py | huioo/tornadoWeb | 001efbae9815b30d8a0c0b4ba8819cc711b99dc4 | [
"Apache-2.0"
] | null | null | null | import world
import api.captcha.captcha_phone
from api.token.jwt_token import JWTToken
"""
Django 的 shortcuts.py
"""
# Module-level singletons wired together at import time ("shortcuts",
# in the spirit of Django's shortcuts.py mentioned in the docstring above).
world_instance = world.World.instance()
redis_server = world_instance.redis  # shared Redis connection owned by the World singleton
captcha_manager = api.captcha.captcha_phone.CaptchaPhone(redis_server)  # phone-captcha helper backed by Redis
jwt_cli = JWTToken()  # JWT encode/decode client
import django.db | 25.166667 | 70 | 0.817881 |
11f229b9297d3ad1a65bef9c394df841a9ccc992 | 6,552 | py | Python | interpro.py | TAMU-CPT/blast-db-download | 53261f08d1f9193c4f538fa90983a465502190a9 | [
"BSD-3-Clause"
] | null | null | null | interpro.py | TAMU-CPT/blast-db-download | 53261f08d1f9193c4f538fa90983a465502190a9 | [
"BSD-3-Clause"
] | 3 | 2017-09-15T18:58:21.000Z | 2020-03-24T19:11:16.000Z | interpro.py | TAMU-CPT/blast-db-download | 53261f08d1f9193c4f538fa90983a465502190a9 | [
"BSD-3-Clause"
] | null | null | null | #!/usr/bin/env python
import os
import sys
import time
import datetime
import logging
import subprocess
logging.basicConfig(level=logging.INFO)
log = logging.getLogger('dl')  # module-wide logger
NOW = datetime.datetime.now()  # timestamp of this run
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))  # location of this script
DOWNLOAD_ROOT = os.getcwd()  # downloads land in the directory the script is invoked from
VERSION = '5.22-61.0'  # InterProScan release to fetch
PANTHER_VERSION = '11.1'  # Panther data release to fetch
class Timer:
    """Context manager measuring the wall-clock duration of its `with` body.

    After the block exits, `start`, `end` and `interval` (seconds) are
    available on the instance; the interval is recorded even when the body
    raises, since __exit__ runs before the exception propagates.
    """
    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc_info):
        finished_at = time.time()
        self.end = finished_at
        self.interval = finished_at - self.start
class XUnitReportBuilder(object):
    """Accumulates test-step results and renders them as an xUnit XML report.

    Bug fix: error() and failure() previously bumped only 'total', so the
    report header always claimed errors="0" failures="0" regardless of what
    was recorded. They now also increment their own counters.
    """

    XUNIT_TPL = """<?xml version="1.0" encoding="UTF-8"?>
<testsuite name="{suite_name}" tests="{total}" errors="{errors}" failures="{failures}" skip="{skips}">
{test_cases}
</testsuite>
"""

    TESTCASE_TPL = """    <testcase classname="{classname}" name="{name}" {time}>
{error}
    </testcase>"""

    ERROR_TPL = """        <error type="{test_name}" message="{errorMessage}">{errorDetails}
        </error>"""

    def __init__(self, suite_name):
        # Counters feeding the <testsuite> header attributes.
        self.xunit_data = {
            'total': 0, 'errors': 0, 'failures': 0, 'skips': 0
        }
        self.test_cases = []
        self.suite_name = suite_name

    def ok(self, classname, test_name, time=0):
        """Record a passing step."""
        # getLogger('dl') returns the same singleton as the module-level
        # logger, keeping this class usable in isolation.
        logging.getLogger('dl').info("OK: [%s] %s", classname, test_name)
        self.xunit_data['total'] += 1
        self.__add_test(test_name, classname, errors="", time=time)

    def error(self, classname, test_name, errorMessage, errorDetails="", time=0):
        """Record an errored step (counts toward 'errors')."""
        logging.getLogger('dl').info("ERROR: [%s] %s", classname, test_name)
        self.xunit_data['total'] += 1
        self.xunit_data['errors'] += 1  # bug fix: was never counted
        self.__add_test(test_name, classname, errors=self.ERROR_TPL.format(
            errorMessage=errorMessage, errorDetails=errorDetails, test_name=test_name), time=time)

    def failure(self, classname, test_name, errorMessage, errorDetails="", time=0):
        """Record a failed step (counts toward 'failures')."""
        logging.getLogger('dl').info("FAIL: [%s] %s", classname, test_name)
        self.xunit_data['total'] += 1
        self.xunit_data['failures'] += 1  # bug fix: was never counted
        self.__add_test(test_name, classname, errors=self.ERROR_TPL.format(
            errorMessage=errorMessage, errorDetails=errorDetails, test_name=test_name), time=time)

    def skip(self, classname, test_name, time=0):
        """Record a skipped step (output already present)."""
        logging.getLogger('dl').info("SKIP: [%s] %s", classname, test_name)
        self.xunit_data['skips'] += 1
        self.xunit_data['total'] += 1
        self.__add_test(test_name, classname, errors="    <skipped />", time=time)

    def __add_test(self, name, classname, errors, time=0):
        # Render one <testcase> entry; `errors` is either "" (pass), a
        # pre-rendered <error> element, or a <skipped /> marker.
        t = 'time="%s"' % time
        self.test_cases.append(
            self.TESTCASE_TPL.format(name=name, error=errors, classname=classname, time=t))

    def serialize(self):
        """Render the accumulated results as an xUnit XML document string."""
        self.xunit_data['test_cases'] = '\n'.join(self.test_cases)
        self.xunit_data['suite_name'] = self.suite_name
        return self.XUNIT_TPL.format(**self.xunit_data)
xunit = XUnitReportBuilder('interpro_installer')
def timedCommand(classname, testname, errormessage, test_file, command, shell=False, cwd=None):
    """Run *command*, timing it, and record the outcome in the global xunit report.

    The step is skipped entirely when *test_file* already exists, which makes
    re-runs after a partial failure idempotent.  On a non-zero exit status the
    step is recorded as a failure and an exception is raised so the caller
    aborts the remaining steps.

    :param classname: xunit classname grouping for this step
    :param testname: xunit test name for this step
    :param errormessage: message recorded on failure
    :param test_file: path whose existence marks the step as already done
    :param command: command as a list of arguments (joined when shell=True)
    :param shell: run through the shell (command list is joined into a string)
    :param cwd: working directory; defaults to DOWNLOAD_ROOT
    """
    if os.path.exists(test_file):
        # Output already present from a previous run: nothing to do.
        xunit.skip(classname, testname)
        return
    if not cwd:
        cwd = DOWNLOAD_ROOT
    try:
        with Timer() as t:
            # Call sites always pass a list; shell commands are joined here
            # so the call sites stay uniform.
            log.info('cd %s && ' % cwd + ' '.join(command))
            if shell:
                command = ' '.join(command)
            subprocess.check_call(command, shell=shell, cwd=cwd)
        xunit.ok(classname, testname, time=t.interval)
    except subprocess.CalledProcessError as cpe:
        # Timer.__exit__ has already run, so t.interval is valid here.
        xunit.failure(classname, testname, errormessage, errorDetails=str(cpe), time=t.interval)
        raise Exception("Cannot continue")  # typo fixed: was "continute"
def interpro():
    """Download, verify and install InterProScan plus the Panther data files.

    Every step goes through timedCommand(), which skips a step when its
    expected output already exists, so the whole function is safe to re-run
    after a partial failure.
    NOTE(review): phobius.tgz and signalp.tgz are expected to already exist
    one directory above the extraction dir (licensed downloads) -- confirm.
    """
    classname = 'interpro'
    extracted_dir = os.path.join(DOWNLOAD_ROOT, 'interproscan-' + VERSION)
    data_dir = os.path.join(extracted_dir, 'data')
    tarball = 'interproscan-%s-64-bit.tar.gz' % VERSION
    panther_tarball = 'panther-data-%s.tar.gz' % PANTHER_VERSION
    panther_tarball_md5 = panther_tarball + '.md5'
    base_data_url = 'ftp://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/data/'
    # wget
    md5sum = tarball + '.md5'
    base_url = 'ftp://ftp.ebi.ac.uk/pub/software/unix/iprscan/5/%s/' % VERSION
    # Fetch the main tarball and its checksum file.
    timedCommand(classname, 'download.tarball', 'Download failed', tarball, [
        'wget',
        base_url + tarball,
        '-O', tarball,
    ])
    timedCommand(classname, 'download.md5sum', 'Download failed', md5sum, [
        'wget',
        base_url + md5sum,
        '-O', md5sum,
    ])
    # Verify and unpack; interproscan.sh doubles as the "already done" marker.
    timedCommand(classname, 'contents.verify', 'MD5SUM failed to validate', os.path.join(extracted_dir, 'interproscan.sh'), [
        'md5sum', '-c', md5sum
    ])
    timedCommand(classname, 'contents.extract', 'Failed to extract', os.path.join(extracted_dir, 'interproscan.sh'), [
        'tar', 'xvfz', tarball
    ])
    # Unpack the separately-licensed phobius / signalp add-ons into the tree.
    timedCommand(classname, 'setup.phobius', 'Failed to install phobius', os.path.join(extracted_dir, 'bin', 'phobius', '1.01', 'phobius.pl'), [
        'tar', 'xvfz', os.path.join(os.path.pardir, 'phobius.tgz')
    ], cwd=extracted_dir)
    timedCommand(classname, 'setup.signalp', 'Failed to install signalp', os.path.join(extracted_dir, 'bin', 'signalp', '4.1', 'signalp'), [
        'tar', 'xvfz', os.path.join(os.path.pardir, 'signalp.tgz')
    ], cwd=extracted_dir)
    # Panther data: download, checksum-verify and extract inside data/.
    timedCommand(classname, 'panther.download_tarball', 'Download failed', os.path.join(extracted_dir, 'data', panther_tarball), [
        'wget',
        base_data_url + panther_tarball,
        '-O', os.path.join(extracted_dir, 'data', panther_tarball),
    ])
    timedCommand(classname, 'panther.download_md5sum', 'Download failed', os.path.join(extracted_dir, 'data', panther_tarball_md5), [
        'wget',
        base_data_url + panther_tarball_md5,
        '-O', os.path.join(extracted_dir, 'data', panther_tarball_md5),
    ])
    timedCommand(classname, 'panther.verify', 'MD5SUM failed to validate', os.path.join(extracted_dir, 'data', 'panther'), [
        'md5sum', '-c', panther_tarball_md5
    ], cwd=data_dir)
    timedCommand(classname, 'panther.extract', 'Failed to extract', os.path.join(extracted_dir, 'data', 'panther'), [
        'tar', 'xvfz', panther_tarball
    ], cwd=data_dir)
if __name__ == '__main__':
    try:
        interpro()
    except Exception:
        # timedCommand raises on the first failing step; the failure is
        # already recorded in the xunit report, so just fall through.
        pass
    finally:
        # Write out the report (destination path is the first CLI argument)
        with open(sys.argv[1], 'w') as handle:
            handle.write(xunit.serialize())
| 36.808989 | 144 | 0.6308 |
11f2ee6d545351fbf6460813569b0d154e97b751 | 2,572 | py | Python | modules/stat/agd_stat.py | epfl-dcsl/persona-orig | d94a8b60f07622bb61736127ff328329c7b131a9 | [
"Apache-2.0"
] | null | null | null | modules/stat/agd_stat.py | epfl-dcsl/persona-orig | d94a8b60f07622bb61736127ff328329c7b131a9 | [
"Apache-2.0"
] | null | null | null | modules/stat/agd_stat.py | epfl-dcsl/persona-orig | d94a8b60f07622bb61736127ff328329c7b131a9 | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 École Polytechnique Fédérale de Lausanne. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from tensorflow.python.ops import data_flow_ops, string_ops
from ..common.service import Service
from common.parse import numeric_min_checker, path_exists_checker
import tensorflow as tf
persona_ops = tf.contrib.persona.persona_ops()
from tensorflow.contrib.persona import queues, pipeline
class StatService(Service):
    """Persona service that prints summary statistics for an AGD dataset."""

    def get_shortname(self):
        """Short identifier used to select this service."""
        return "stat"

    def output_dtypes(self):
        # This service produces no output tensors.
        return []

    def output_shapes(self):
        # No output tensors, hence no shapes.
        return []

    def extract_run_args(self, args):
        """Yield the chunk file paths recorded in the dataset manifest."""
        return (record["path"] for record in args.dataset["records"])

    def add_graph_args(self, parser):
        # No service-specific command-line arguments.
        pass

    def make_graph(self, in_queue, args):
        """ Make the graph for this service. Returns two
        things: a list of tensors which the runtime will
        evaluate, and a list of run-once ops"""
        manifest = args.dataset
        records = manifest['records']
        # Chunk size is taken from the first record; total reads is summed
        # over every chunk in the manifest.
        chunk_size = records[0]['last'] - records[0]['first']
        total_reads = sum(chunk['last'] - chunk['first'] for chunk in records)
        print("Total Reads: {}".format(total_reads))
        print("AGD Chunk size: {}".format(chunk_size))
        print("Columns present: {}".format(manifest['columns']))
        if 'sort' in manifest:
            print("Sort order: {}".format(manifest['sort']))
        else:
            print("Sort order unspecified")
        if 'results' in manifest['columns']:
            print("Aligned to reference: {}".format(manifest['reference']))
        run_once = [tf.constant(0)]
        return [], run_once
| 35.232877 | 83 | 0.651633 |
11f3026c5b723ebaca4c3ade5e133a02d8fccef0 | 6,423 | py | Python | Developing.../main01.py | MuhikaThomas/Pro-forma | da97d9a6581f4dfbd06fe4a0db1128ebb7472d81 | [
"MIT"
] | null | null | null | Developing.../main01.py | MuhikaThomas/Pro-forma | da97d9a6581f4dfbd06fe4a0db1128ebb7472d81 | [
"MIT"
] | null | null | null | Developing.../main01.py | MuhikaThomas/Pro-forma | da97d9a6581f4dfbd06fe4a0db1128ebb7472d81 | [
"MIT"
] | null | null | null | import kivy
from kivy.app import App
from kivy.uix.tabbedpanel import TabbedPanelHeader
from kivy.uix.tabbedpanel import TabbedPanel
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.gridlayout import GridLayout
from kivy.uix.textinput import TextInput
from kivy.uix.slider import Slider
from kivy.uix.button import Button
from kivy.uix.label import Label
from kivy.lang import Builder
Builder.load_string("""
""")
class Proforma(App):
    """Kivy application showing a tabbed real-estate pro-forma entry form.

    The original build() was one ~110-line method; it is decomposed here into
    table-driven private helpers, one per UI section, producing the exact same
    widget tree in the same order.
    """

    # Row definitions for the data-entry sections, kept as class constants so
    # the widget-building helpers stay table-driven.
    PROPERTY_FIELDS = ('Property Name', 'Property Address', 'Town/City',
                       'Asking Price', 'Total Units', 'Square Footage')
    UNIT_MIX_HEADERS = ('# of Units', 'Unit Type', 'SquareFeet', 'Monthly Rent')
    EXPENSE_FIELDS = ('Accounting', 'Advertising', 'Bank Charges', 'Electricity',
                      'Gas', 'Security', 'All insurance', 'Permits and fees',
                      'Maintenance', 'Trash Pick-up', 'All other')

    def build(self):
        """Assemble and return the root widget tree (header row + tab panel)."""
        root = GridLayout(rows=2)
        root.add_widget(self._build_header())
        body = GridLayout(cols=1, size_hint_x=1)
        body.add_widget(self._build_tab_panel())
        root.add_widget(body)
        return root

    def _build_header(self):
        """Top bar: back button, app title, drop-down button."""
        header = GridLayout(cols=3, rows=1)
        header.size_hint = (1, 0.1)
        header.add_widget(Button())  # back button (no action wired yet)
        title = Label(text='Pro-Forma App', font_size='20sp', pos=(0, 300),
                      size_hint_y=None, size_hint_x=None, width=200,
                      halign='right', valign='middle')
        title.size_hint = (None, 0.1)
        header.add_widget(title)
        header.add_widget(Button())  # drop-down button (no action wired yet)
        return header

    def _build_tab_panel(self):
        """Tab panel with the Pro-Forma form plus three placeholder tabs."""
        panel = TabbedPanel()
        panel.default_tab_text = "Login Tab"
        proforma_tab = TabbedPanelHeader(text='Pro-Forma')
        proforma_tab.content = self._build_proforma_tab()
        panel.add_widget(proforma_tab)
        panel.add_widget(TabbedPanelHeader(text='Info. Tab'))
        panel.add_widget(TabbedPanelHeader(text='Due Deligence'))  # sic: existing label kept
        panel.add_widget(TabbedPanelHeader(text='Saved Reports'))
        return panel

    def _build_proforma_tab(self):
        """Main column for the Pro-Forma tab: each section under its heading."""
        main = GridLayout(cols=1, spacing=10)
        main.add_widget(Label(text='Property Information', size_hint_y=None, height=50))
        main.add_widget(self._build_property_form())
        main.add_widget(Label(text='Unit Mix', size_hint_x=None, width=200,
                              size_hint_y=None, height=50))
        main.add_widget(self._build_unit_mix())
        main.add_widget(Label(text='Expenses', size_hint_x=None, width=200,
                              size_hint_y=None, height=50))
        main.add_widget(self._build_expenses())
        main.add_widget(Label(text='Accounts'))
        return main

    def _build_property_form(self):
        """Two-column label/input grid for the property information fields."""
        form = GridLayout(cols=2, rows=6, pos_hint={'center_x': .5, 'center_y': .5},
                          row_force_default=True, row_default_height=40)
        for field in self.PROPERTY_FIELDS:
            form.add_widget(Label(text=field, size_hint_x=None, width=200,
                                  size_hint_y=None, height=50, font_size='20sp'))
            form.add_widget(TextInput(text='input', font_size=15,
                                      halign='left', valign='middle'))
        return form

    def _build_unit_mix(self):
        """Four-column grid: header row followed by one row of inputs."""
        mix = GridLayout(cols=4, pos_hint={'center_x': .5, 'center_y': .5},
                         row_force_default=True, row_default_height=40)
        for header in self.UNIT_MIX_HEADERS:
            mix.add_widget(Label(text=header))
        for _ in self.UNIT_MIX_HEADERS:
            mix.add_widget(TextInput(text='Input', font_size=15))
        return mix

    def _build_expenses(self):
        """Two-column label/input grid for every expense category."""
        expenses = GridLayout(cols=2)
        for field in self.EXPENSE_FIELDS:
            expenses.add_widget(Label(text=field))
            expenses.add_widget(TextInput(text='Input', font_size=15))
        return expenses
if __name__ == '__main__':
    # Launch the Kivy application when run as a script.
    Proforma().run()
| 47.932836 | 156 | 0.717422 |
11f30bdb0ea58245a57190b0de64ce5ae30b036d | 1,943 | py | Python | day8/day8.py | jwhitex/AdventOfCode2018 | e552185f7d6413ccdad824911c66a6590e8de9bb | [
"MIT"
] | null | null | null | day8/day8.py | jwhitex/AdventOfCode2018 | e552185f7d6413ccdad824911c66a6590e8de9bb | [
"MIT"
] | null | null | null | day8/day8.py | jwhitex/AdventOfCode2018 | e552185f7d6413ccdad824911c66a6590e8de9bb | [
"MIT"
] | null | null | null | import itertools
from io import StringIO
from queue import LifoQueue
# Worked example from the AoC 2018 day 8 puzzle statement (answer: 138 / 66).
inputs = "2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2"
#data = [int(v) for v in StringIO(inputs).read().split(' ')]
# Real puzzle input is read from disk as a flat list of ints.
data = [int(v) for v in open("day8.input").read().split(' ')]
def parse_packet(idata, lifoq_children, tc_metadata):
    """Recursively walk the AoC day-8 license tree and sum all metadata entries.

    :param idata: iterator over the flat list of header/metadata ints
    :param lifoq_children: LIFO queue of pending child counts; the current
        level's child count is popped from it (defaults to 1 for the root)
    :param tc_metadata: running metadata total carried through the recursion
    :return: total of every metadata entry in the subtree (part 1 answer)
    """
    if not lifoq_children.empty():
        # Bug fix: the original read from a module-level queue `q` instead of
        # this parameter, which only worked when callers happened to pass
        # that exact global object.
        c_childn_level = lifoq_children.get()
    else:
        c_childn_level = 1
    for _ in range(c_childn_level):
        c_childn = next(idata, None)
        if c_childn is None:
            break  # iterator exhausted
        # A node header is always (child count, metadata count).
        c_metad = next(idata)
        if c_childn > 0:
            lifoq_children.put(c_childn)
            tc_metadata += parse_packet(idata, lifoq_children, 0)
        for _ in range(c_metad):
            tc_metadata += next(idata)
    return tc_metadata
# idata = iter(data)
# q = LifoQueue()
# tc_metadata = parse_packet(idata, q, 0)
# print(tc_metadata)
# pt2
def parse_packet_pt2(idata, lifoq_children):
    """Return the list of node *values* for the current level (AoC part 2).

    A node with no children has value = sum of its metadata entries.
    Otherwise each metadata entry is a 1-based index into its children and
    the node value is the sum of the referenced child values; indices of 0
    or out of range are skipped.

    :param idata: iterator over the flat list of header/metadata ints
    :param lifoq_children: LIFO queue of pending child counts (1 for root)
    :return: list of values, one per node parsed at this level
    """
    level_values = []
    if not lifoq_children.empty():
        # Bug fix: was reading the module-level queue `q` instead of the
        # parameter (see parse_packet above).
        c_childn_level = lifoq_children.get()
    else:
        c_childn_level = 1
    for _ in range(c_childn_level):
        child_values = []
        tc_metadata = 0
        c_childn = next(idata, None)
        if c_childn is None:
            break  # iterator exhausted
        c_metad = next(idata)
        if c_childn > 0:
            lifoq_children.put(c_childn)
            # Values of this node's direct children, in order.
            child_values = parse_packet_pt2(idata, lifoq_children)
        for _ in range(c_metad):
            md = next(idata)
            if c_childn == 0:
                tc_metadata += md
            else:
                if md == 0:
                    continue  # index 0 never refers to a child
                if len(child_values) >= md:
                    tc_metadata += child_values[md - 1]
        level_values.append(tc_metadata)
    return level_values
# Part 2 driver: the root's value is the first (and only) entry of the
# top-level value list.
idata = iter(data)
q = LifoQueue()
tc_metadata = parse_packet_pt2(idata, q)[0]
print(tc_metadata)
| 30.359375 | 66 | 0.5965 |
11f3952caf0eac585e166a957bfe31975eafdc39 | 2,971 | py | Python | dataset_utils/roi.py | kocurvik/retinanet_traffic_3D | 592ceac767750c65bb3d6678b36e6880a7bb0403 | [
"Apache-2.0"
] | 12 | 2021-04-06T00:50:41.000Z | 2022-03-23T03:27:02.000Z | dataset_utils/roi.py | kocurvik/retinanet_traffic_3D | 592ceac767750c65bb3d6678b36e6880a7bb0403 | [
"Apache-2.0"
] | 7 | 2021-07-13T12:47:41.000Z | 2022-03-05T15:08:51.000Z | dataset_utils/roi.py | kocurvik/retinanet_traffic_3D | 592ceac767750c65bb3d6678b36e6880a7bb0403 | [
"Apache-2.0"
] | 4 | 2021-07-15T12:22:06.000Z | 2022-03-01T03:12:36.000Z | import json
import os
import cv2
import numpy as np
from dataset_utils.geometry import computeCameraCalibration
def line_to_point(p1, p2, p3):
    """Perpendicular distance from point(s) *p3* to the line through *p1* and *p2*.

    All arguments are broadcastable numpy arrays whose last axis (axis=2)
    holds 2-D coordinates: |cross(p2-p1, p3-p1)| / |p2-p1| is the standard
    point-to-line distance, evaluated per pixel.
    """
    direction = p2 - p1
    offset = p3 - p1
    return np.abs(np.cross(direction, offset, axis=2)) / np.linalg.norm(direction, axis=2)
def get_pts(vid_dir, json_path):
    """Accumulate optical-flow votes for pixels moving toward the first vanishing point.

    Loads the camera calibration from *json_path*, then runs dense Farneback
    optical flow over `video.avi` in *vid_dir* (downscaled to 640x360),
    accumulating a vote map of pixels whose flow vector points at vp0.
    Debug windows are shown each frame; press ESC to stop early.

    Bug fixes vs. the original:
      * the loop condition was `while (cap.isOpened(), cnt < 10000):` -- a
        2-tuple, which is always truthy -- and `cnt` was never incremented,
        so the loop could only end via ESC (or crash at end of video);
      * `cap.read()` is now checked so end-of-stream breaks cleanly.
    """
    video_path = os.path.join(vid_dir, 'video.avi')
    with open(json_path, 'r+') as file:
        structure = json.load(file)
    camera_calibration = structure['camera_calibration']
    vp0, vp1, vp2, _, _, _ = computeCameraCalibration(camera_calibration["vp1"],
                                                      camera_calibration["vp2"],
                                                      camera_calibration["pp"])
    # De-homogenize the vanishing points.
    vp0 = vp0[:-1] / vp0[-1]
    vp1 = vp1[:-1] / vp1[-1]
    vp2 = vp2[:-1] / vp2[-1]

    cap = cv2.VideoCapture(video_path)
    ret, frame = cap.read()
    frame = cv2.resize(frame, (640, 360))
    vp0 = vp0 / 3  # calibration is for the full-size frame; we work at 1/3 scale
    prvs = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    accumulator = np.zeros([360, 640])
    # Per-pixel (x, y) coordinate grid, shaped (360, 640, 2) for line_to_point.
    y, x = np.mgrid[0:360, 0:640]
    yx = np.stack([x, y], axis=2)

    cnt = 0
    while cap.isOpened() and cnt < 10000:
        ret, frame2 = cap.read()
        if not ret:
            break  # end of stream
        cnt += 1
        frame2 = cv2.resize(frame2, (640, 360))
        next = cv2.cvtColor(frame2, cv2.COLOR_BGR2GRAY)
        flow = cv2.calcOpticalFlowFarneback(prvs, next, None, 0.5, 3, 15, 3, 7, 1.5, 0)
        # Distance of each pixel's flow line to vp0, vectorized per pixel.
        d = line_to_point(yx, yx + flow, vp0)
        accepted = np.zeros_like(d)
        accepted[d < 3] = 1              # flow line passes close to vp0
        n = np.linalg.norm(flow, axis=2)
        accepted[n < 1] = 0              # ignore (near-)static pixels
        accepted[flow[:, :, 1] < 0] = 0  # keep only downward motion
        accumulator = accumulator + accepted

        cv2.imshow('accepted', accepted)
        cv2.imshow('frame', frame2)
        final = np.zeros_like(accumulator)
        final[accumulator > 0.01 * np.max(accumulator)] = 1
        cv2.imshow('accumulator', accumulator / np.max(accumulator))
        cv2.imshow('norm', n / np.max(n))
        cv2.imshow('final', final)
        k = cv2.waitKey(30) & 0xff
        if k == 27:  # ESC
            break
        prvs = next

    cv2.destroyAllWindows()
    cap.release()
if __name__ == '__main__':
    # Hard-coded BrnoCompSpeed session used for manual experimentation.
    vid_dir = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/dataset/session5_left'
    result_path = 'D:/Skola/PhD/data/2016-ITS-BrnoCompSpeed/results/session5_left/system_SochorCVIU_Edgelets_BBScale_Reg.json'
    get_pts(vid_dir, result_path)
| 32.648352 | 126 | 0.582969 |
11f661d7ecc4156688dc11d7e9f3988ffd85ee03 | 1,292 | py | Python | src/ansible_remote_checks/modules/check_process.py | davidvoit/ansible_remote_checks | 491f31855c96297e5466b238e648fa57c1e646d0 | [
"MIT"
] | null | null | null | src/ansible_remote_checks/modules/check_process.py | davidvoit/ansible_remote_checks | 491f31855c96297e5466b238e648fa57c1e646d0 | [
"MIT"
] | null | null | null | src/ansible_remote_checks/modules/check_process.py | davidvoit/ansible_remote_checks | 491f31855c96297e5466b238e648fa57c1e646d0 | [
"MIT"
] | 1 | 2019-08-20T13:19:16.000Z | 2019-08-20T13:19:16.000Z | #!/usr/bin/python2
import re
import subprocess
from ansible.module_utils.basic import AnsibleModule
def get_procs(process_regex, cmdline_regex):
    """Return processes whose name / command line match the given regexes.

    Runs `ps -hax -o "comm pid args"` and keeps every output line whose
    command name matches *process_regex* and whose argument string matches
    *cmdline_regex*; an empty/None pattern matches everything.

    :return: dict with a single "processes" key holding the matching lines
    """
    cmd = ["ps", "-hax", "-o", "comm pid args"]
    process = subprocess.Popen(cmd, stdout=subprocess.PIPE)
    output, error = process.communicate()
    # Python 3 returns bytes: decode *before* splitting.  The original split
    # first and decoded the (then unused) whole buffer afterwards, leaving
    # `lines` as bytes and making re.findall(str_pattern, bytes_line) raise
    # TypeError on Python 3.
    try:
        output = output.decode()
    except (UnicodeDecodeError, AttributeError):
        pass
    processes = []
    for line in output.splitlines():
        fields = line.split()
        if not fields:
            continue  # defensively skip blank lines
        process_name = fields[0]
        cmdline = ' '.join(fields[2:])
        if (not process_regex or re.findall(process_regex, process_name)) and \
                (not cmdline_regex or re.findall(cmdline_regex, cmdline)):
            processes.append(line)
    return {
        "processes": processes
    }
def main():
    # Entry point for the Ansible module: declare the argument spec, collect
    # the matching processes and hand them back as the module result.
    module_args= dict(
        process = dict(type='str', required=True),
        cmdline = dict(type='str')
    )
    module = AnsibleModule(
        argument_spec=module_args,
        supports_check_mode=True
    )
    process_regex = module.params['process']
    cmdline_regex = module.params['cmdline']
    procs = get_procs(process_regex, cmdline_regex)
    try:
        # Surface unexpected errors as a proper module failure rather than
        # a raw traceback.
        result = dict(procs = procs)
    except Exception as ex:
        module.fail_json(msg=str(ex))
    module.exit_json(**result)
if __name__ == '__main__':
    main()
| 23.490909 | 127 | 0.69195 |
11f7ea214def9b4195dd57f26ec40b4d4be26bb2 | 972 | py | Python | RESSPyLab/modified_cholesky.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 7 | 2019-10-15T09:16:41.000Z | 2021-09-24T11:28:45.000Z | RESSPyLab/modified_cholesky.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 3 | 2020-10-22T14:27:22.000Z | 2021-11-15T17:46:49.000Z | RESSPyLab/modified_cholesky.py | ioannis-vm/RESSPyLab | 306fc24d5f8ece8f2f2de274b56b80ba2019f605 | [
"MIT"
] | 6 | 2019-07-22T05:47:10.000Z | 2021-10-24T02:06:26.000Z | """@package modified_cholesky
Function to perform the modified Cholesky decomposition.
"""
import numpy as np
import numpy.linalg as la
def modified_cholesky(a):
    """ Returns the matrix A if A is positive definite, or returns a modified A that is positive definite.

    :param np.array a: (n, n) The symmetric matrix, A.
    :return list: [np.array (n, n), float] Positive definite matrix, and the factor required to do so.

    A scaled identity (tau * I) is added to A, with tau doubled on each
    failed Cholesky attempt, until the factorization succeeds.
    See Bierlaire (2015) Alg. 11.7, pg. 278.

    Bug fix: the iteration counter was never incremented, so the
    `maximum_iterations` cap was dead code and the loop could spin forever
    on pathological input (e.g. a matrix containing NaNs).
    """
    iteration = 0
    maximum_iterations = 10
    identity = np.identity(len(a))
    a_mod = a * 1.0  # work on a copy so the caller's matrix is untouched
    identity_factor = 0.
    successful = False
    while not successful and iteration < maximum_iterations:
        try:
            la.cholesky(a_mod)
            successful = True
        except la.LinAlgError:
            # Double the shift (seeded from the Frobenius norm) and retry.
            identity_factor = np.max([2 * identity_factor, 0.5 * la.norm(a, 'fro')])
            a_mod = a + identity_factor * identity
        iteration += 1  # bug fix: counter previously never advanced
    return [a_mod, identity_factor]
| 31.354839 | 106 | 0.653292 |
11f88b21b7c293777ac5db6fccf25f3653b3095f | 1,528 | py | Python | docker_parser.py | hodolizer/HB_LittleBot | 4750c7c8e5bda22fcd5f48ea9248d919b7ca7fb2 | [
"MIT"
] | null | null | null | docker_parser.py | hodolizer/HB_LittleBot | 4750c7c8e5bda22fcd5f48ea9248d919b7ca7fb2 | [
"MIT"
] | null | null | null | docker_parser.py | hodolizer/HB_LittleBot | 4750c7c8e5bda22fcd5f48ea9248d919b7ca7fb2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Python Slack Bot docker parser class for use with the HB Bot
"""
import os
import re
DOCKER_SUPPORTED = ["image", "container", "help"]  # docker actions the bot accepts
SUBCOMMAND_SUPPORTED = ["ls", ]  # kept for optional strict sub-command checking


def docker_usage_message():
    """Return the help text listing the supported docker commands."""
    # Bug fix: the two implicitly-concatenated literals were missing a
    # separating space, producing "...command.I understand...".
    return ("I'm sorry. I don't understand your docker command. "
            "I understand docker [%s] if you would like to try one of those." % "|".join(DOCKER_SUPPORTED))


def parse_command(incoming_text):
    """
    incoming_text: A text string to parse for docker commands
    returns: a fully validated docker command, or the usage message when the
             text does not contain a supported docker action
    """
    docker_action = ''
    parse1 = re.compile(r"(?<=\bdocker\s)(\w+)")
    match_obj = parse1.search(incoming_text)
    if match_obj:
        docker_action = match_obj.group()
        print("Got docker action %s" % (docker_action,))
    if docker_action and docker_action in DOCKER_SUPPORTED:
        # The remainder of the command line is passed through unchanged;
        # re-enable the SUBCOMMAND_SUPPORTED whitelist here if stricter
        # validation is ever needed.
        # Bug fix: this print used Python 2 statement syntax, which is a
        # SyntaxError under Python 3 (the rest of the file uses print()).
        print("returning docker %s%s" % (docker_action, incoming_text[match_obj.end():]))
        return "docker %s%s" % (docker_action, incoming_text[match_obj.end():])
    return docker_usage_message()
| 35.534884 | 102 | 0.650524 |
11f9627891295b2fef341d114f820b8acfae0f4d | 1,713 | py | Python | estudo/bingo/bingo.py | PedroMoreira87/python | 7f8ed2d17ba12a8089618477b2738e3b1c809e74 | [
"MIT"
] | null | null | null | estudo/bingo/bingo.py | PedroMoreira87/python | 7f8ed2d17ba12a8089618477b2738e3b1c809e74 | [
"MIT"
] | null | null | null | estudo/bingo/bingo.py | PedroMoreira87/python | 7f8ed2d17ba12a8089618477b2738e3b1c809e74 | [
"MIT"
] | null | null | null | # Entregar arquivo com o código da função teste_cartela
#
# Verificador de cartela de bingo
#
# CRIAR UMA FUNÇÃO DO TIPO:
#
# def teste_cartela(numeros_bilhete,numeros_sorteados): #numeros_bilhete e numeros_sorteados tipo lista com valores inteiros
#
# ...
#
# return([bingo,n_acertos,p_acertos,[numeros_acertados],[numeros_faltantes]]) #retorno tipo lista
#
# ps: a função deve suportar qualquer tamanho das listas
#
# exemplo1:
#
# bilhete=[1,2,3,4,6]
#
# sorteados=[1,2,3,4,5,6,7,8,9,10]
#
# x=teste_cartela(bilhete,sorteados)
#
# print(x)
#
# [true,5,100.0,[1,2,3,4,6],[]]
#
# print(x[1])
#
# 5
#
# exemplo2:
# bilhete=[1,4,7,13,20,22]
#
# sorteados=[11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30]
#
# x=teste_cartela(bilhete,sorteados)
#
# print(x)
#
# [False,3,50.0,[13,20,22],[1,4,7]]
#
# print(x[3])
#
# [13,20,22]
# Sample fixtures mirroring the two worked examples in the spec comment above.
bilhete1 = [1, 2, 3, 4, 6]
sorteados1 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
bilhete2 = [1, 4, 7, 13, 20, 22]
sorteados2 = [11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30]
def teste_cartela(numeros_bilhete, numeros_sorteados):
bingo = False
n_acertos = 0
numeros_acertados = []
for element in numeros_sorteados: # outro modo de fazer list(set(sorteados).intersection(bilhete))
if element in numeros_bilhete:
numeros_acertados.append(element)
n_acertos += 1
numeros_faltantes = list(set(numeros_bilhete) - set(numeros_sorteados))
if numeros_bilhete == numeros_acertados:
bingo = True
p_acertos = len(numeros_acertados) * 100 / len(numeros_bilhete)
return [bingo, n_acertos, p_acertos, numeros_acertados, numeros_faltantes]
print(teste_cartela(bilhete1, sorteados1))
| 23.148649 | 124 | 0.669002 |
11fbeaa0cdadcae10084a5b3b7d7792a3d86cf42 | 103 | py | Python | scripts/pymarkovchain_dynamic/__init__.py | jfahrg/augentbot | 2f26f9287928bb405696366c60f1193b6f34ab4a | [
"CC-BY-2.0",
"MIT"
] | 3 | 2017-10-16T14:05:47.000Z | 2017-10-23T07:18:46.000Z | scripts/pymarkovchain_dynamic/__init__.py | jfahrg/augentbot | 2f26f9287928bb405696366c60f1193b6f34ab4a | [
"CC-BY-2.0",
"MIT"
] | 1 | 2017-10-24T18:11:14.000Z | 2017-10-24T18:11:14.000Z | scripts/pymarkovchain_dynamic/__init__.py | jfde/augentbot | 2f26f9287928bb405696366c60f1193b6f34ab4a | [
"CC-BY-2.0",
"MIT"
] | null | null | null | from pymarkovchain_dynamic.MarkovChain import *
from pymarkovchain_dynamic.DynamicMarkovChain import *
| 34.333333 | 54 | 0.883495 |
11fc76302eb18d7762bad32d8a7fb8d4acc13c44 | 3,033 | py | Python | word_breakdown.py | imjeffhi4/word-breakdown | 7edf823fbc49ac56a5dc356067938d3828edc014 | [
"MIT"
] | null | null | null | word_breakdown.py | imjeffhi4/word-breakdown | 7edf823fbc49ac56a5dc356067938d3828edc014 | [
"MIT"
] | null | null | null | word_breakdown.py | imjeffhi4/word-breakdown | 7edf823fbc49ac56a5dc356067938d3828edc014 | [
"MIT"
] | null | null | null | from transformers import GPTNeoForCausalLM, GPT2Tokenizer
from fastapi import FastAPI
import re
import json
from pydantic import BaseModel
from typing import Optional
import torch
app = FastAPI()
# Prefer GPU when available; the model and tokenized inputs are moved here.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Local fine-tuned GPT-Neo checkpoint (tokenizer and weights live together).
morph_path = './Model'
morph_tokenizer = GPT2Tokenizer.from_pretrained(morph_path)
# Delimiter tokens the model was trained with; downstream regexes in
# to_dict/parse_morphemes/get_etymology slice the output on these markers.
special_tokens = {'bos_token': '<|startoftext|>', 'pad_token': '<PAD>', 'additional_special_tokens':['<DEF>', '<SYLLABLES>', '<NULL>', '<ETY>', '<MORPH>']}
morph_tokenizer.add_special_tokens(special_tokens)
morph_model = GPTNeoForCausalLM.from_pretrained(morph_path).to(device)
class UserInput(BaseModel):
    """Request body for the breakdown endpoint: a word plus an optional definition."""
    word: str
    definition: Optional[str] = None
# returning WikiMorph output
@app.post('/')
async def main(x: UserInput):
    """POST endpoint: run the morpheme model on the submitted word."""
    return get_morpheme_output(x.word, x.definition)
def get_etymology(ety_txt):
    """Yield one dict per <ETY> segment with the etymology compound and its meaning.

    Segments marked <NULL> yield None for both fields.
    """
    for segment in re.findall('<ETY>.+?(?=<ETY>|$)', ety_txt):
        raw_compound = re.findall("<ETY>(.+?)(?=<DEF>|$)", segment)[0].strip()
        if "<NULL>" in raw_compound:
            yield {"Etymology Compound": None, "Compound Meaning": None}
        else:
            yield {
                "Etymology Compound": re.findall("<ETY>(.+?)(?=<DEF>)", segment)[0].strip(),
                "Compound Meaning": re.findall("<DEF>(.+)", segment)[0].strip(),
            }
def parse_morphemes(morph_txt):
    """Yield one dict per <MORPH> segment: the affix, its definition, and its etymology."""
    segments = re.findall('<MORPH>.+?(?=<MORPH>|$)', morph_txt)
    for segment in segments:
        affix = re.findall("<MORPH>(.+?)(?=<DEF>)", segment)[0].strip()
        meaning = re.findall("<DEF>(.+?)(?=<ETY>)", segment)[0].strip()
        ety_txt = re.findall("(<ETY>.+?)$", segment)[0].strip()
        yield {
            "Morpheme": affix,
            "Definition": meaning,
            "Etymology Compounds": list(get_etymology(ety_txt)),
        }
def to_dict(generated_txt):
    """Returns a dictionary containing desired items"""
    # Slice the generated text between the model's special markers:
    # <|startoftext|> word w o r d <DEF> ... <SYLLABLES> ... <MORPH> ... <|endoftext|>
    # "Word" drops the spaces of the character-spaced form ("w o r d" -> "word").
    return {
        "Word": re.findall('<\|startoftext\|> (.+?)(?= \w )', generated_txt)[0].strip().replace(' ', ''),
        "Definition": re.findall("<DEF>(.+?)(?=<SYLLABLES>)", generated_txt)[0].strip(),
        "Syllables": re.findall("<SYLLABLES> (.+?)(?=<MORPH>)", generated_txt)[0].strip().split(),
        "Morphemes": list(parse_morphemes(re.findall("(<MORPH>.+?)(?=<\|endoftext\|>)", generated_txt)[0].strip()))
    }
def get_morpheme_output(word, definition):
    """Calls the GPT-based model to generated morphemes"""
    # Space out the characters ("word" -> "w o r d"); the prompt contains both
    # the plain word and this character-spaced form.
    split_word = ' '.join(word)
    if definition:
        # A supplied definition is included up to the <SYLLABLES> marker.
        word_def = f'<|startoftext|> {word} {split_word} <DEF> {definition} <SYLLABLES>'
    else:
        # Otherwise let the model generate the definition itself.
        word_def = f'<|startoftext|> {word} {split_word} <DEF> '
    tokenized_string = morph_tokenizer.encode(word_def, return_tensors='pt').to(device)
    # max_length bounds the full structured output (definition + syllables + morphemes).
    output = morph_model.generate(tokenized_string, max_length=400)
    generated_txt = morph_tokenizer.decode(output[0])
    return to_dict(generated_txt)
| 40.986486 | 155 | 0.636334 |
11fe5c633fd36a2c77c71b22b430bb0c40ce5ec0 | 504 | py | Python | mini_book/_build/jupyter_execute/docs/enrollment.py | rebeccajohnson88/qss20 | f936e77660e551bb10a82abb96a36369ccbf3d18 | [
"CC0-1.0"
] | 1 | 2021-04-01T18:42:36.000Z | 2021-04-01T18:42:36.000Z | mini_book/_build/jupyter_execute/docs/enrollment.py | rebeccajohnson88/qss20 | f936e77660e551bb10a82abb96a36369ccbf3d18 | [
"CC0-1.0"
] | 1 | 2021-02-14T22:36:59.000Z | 2021-02-24T23:33:24.000Z | mini_book/_build/jupyter_execute/docs/enrollment.py | rebeccajohnson88/qss20 | f936e77660e551bb10a82abb96a36369ccbf3d18 | [
"CC0-1.0"
] | null | null | null | (enrollment)=
# Enrollment and Waitlist
For Dartmouth students, you can track the enrollment status using the ORC timetable.
The course is capped at 20 students to facilitate small group collaboration for the final data science project. Please fill out this Google form (while logged in via your Dartmouth email) if you are interested in joining the waiting list for spots freeing up! [Waitlist form](https://docs.google.com/forms/d/e/1FAIpQLScxTwR9A8gZ1_uvlEzCtsVFaoQnXmaYQq3kNdfG5Tv3ECUrcA/viewform) | 72 | 376 | 0.821429 |
11ffbc12ee29d6ded59501a82368db14e943d2d0 | 1,683 | py | Python | decode.py | imandr/image_encode | 9828d5dc570fc0feb729b365b13ab50cfdb8c85e | [
"BSD-3-Clause"
] | null | null | null | decode.py | imandr/image_encode | 9828d5dc570fc0feb729b365b13ab50cfdb8c85e | [
"BSD-3-Clause"
] | null | null | null | decode.py | imandr/image_encode | 9828d5dc570fc0feb729b365b13ab50cfdb8c85e | [
"BSD-3-Clause"
] | null | null | null | import sys, getopt
from zlib import adler32
from PIL import Image
from rnd import Generator, sample
Usage = """
python decode.py <password> <input image file> <output file>
"""
def bits(text):
out = []
for c in text:
n = ord(c)
for _ in range(8):
out.append(n&1)
n >>= 1
return out
def frombin(bits):
n = 0
k = 1
for b in bits:
if b:
n |= k
k <<= 1
return n
def read_bits(image, inx):
out = []
pixels = image.load()
for i in inx:
row = i//dimx
col = i%dimx
rgb = pixels[col, row]
r,g,b = rgb
parity = (r^(g^b))&1
out.append(parity)
return out
def bitstotext(bits):
text = []
for i in range(0, len(bits), 8):
cbits = bits[i:i+8]
#print("cbits:", cbits)
n = 0
k = 1
for b in cbits:
if b: n |= k
k <<= 1
#print("cbits:", cbits, " n:", n)
text.append(n)
return bytes(text)
password, image_file, out_file = sys.argv[1:]
password = password.encode("utf-8")
image = Image.open(image_file)
dimx, dimy = image.size
npixels = dimx*dimy
gen = Generator(password)
pixel_list = list(range(npixels))
length_inx = sample(gen, pixel_list, 32)
for inx in length_inx:
pixel_list.remove(inx)
length_bits = read_bits(image, length_inx)
length = frombin(length_bits)
#print ("length:", length_bits, length)
text_inx = sample(gen, pixel_list, length*8)
#print("text_inx:", text_inx[:20])
text_bits = read_bits(image, text_inx)
#print("bits:", text_bits[:20])
open(out_file, "wb").write(bitstotext(text_bits))
| 19.569767 | 60 | 0.565657 |
f504c2cb47e19abd70638d4564e9477e15e1315f | 379 | py | Python | member/views.py | comcidis/comcidis-portal | 40eb6d37874f60eac123a15a03661bd48cecd382 | [
"MIT"
] | null | null | null | member/views.py | comcidis/comcidis-portal | 40eb6d37874f60eac123a15a03661bd48cecd382 | [
"MIT"
] | null | null | null | member/views.py | comcidis/comcidis-portal | 40eb6d37874f60eac123a15a03661bd48cecd382 | [
"MIT"
] | null | null | null | from django.shortcuts import render
from .models import Member
def index(request):
    """Render the team page, splitting members into advisors and non-advisors."""
    context = {
        'mobile_title_page': 'Equipe',
        'advisors': Member.objects.filter(advisor=True),
        'members': Member.objects.filter(advisor=False),
    }
    return render(request, 'member/index.html', context)
| 29.153846 | 56 | 0.683377 |
f506803cc0725d8f77786e4264a390f804bf912b | 447 | py | Python | ping_pong.py | kpbochenek/codewarz | 20f600623bddd269fb845d06b1826c9e50b49594 | [
"Apache-2.0"
] | null | null | null | ping_pong.py | kpbochenek/codewarz | 20f600623bddd269fb845d06b1826c9e50b49594 | [
"Apache-2.0"
] | null | null | null | ping_pong.py | kpbochenek/codewarz | 20f600623bddd269fb845d06b1826c9e50b49594 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
import sys
import requests
def _with_scheme(addr):
    """Prepend http:// when the CLI argument omitted the scheme."""
    return addr if addr.startswith('http') else 'http://' + addr

# CLI: ping_pong.py <ping_host> <pong_host> <start_word>
ping = _with_scheme(sys.argv[1])
pong = _with_scheme(sys.argv[2])
word = sys.argv[3]
# Bounce the word between the two servers until one stops "serving".
while True:
    answer = requests.post(ping, data={'food': word}).text
    if 'serving' not in answer:
        print(answer, end='')
        break
    word = answer.split()[2]
    ping, pong = pong, ping
| 17.88 | 48 | 0.592841 |
f506a97a368ef7e32d2a9750ae1f1a3c19762e70 | 437 | py | Python | fenixstroy/shop/forms.py | wiky-avis/fenixstroy_shop | 9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c | [
"MIT"
] | null | null | null | fenixstroy/shop/forms.py | wiky-avis/fenixstroy_shop | 9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c | [
"MIT"
] | 3 | 2021-09-22T18:44:30.000Z | 2022-03-12T00:58:02.000Z | fenixstroy/shop/forms.py | wiky-avis/fenixstroy_shop | 9e5ed0425e8fc5bcd77b7a0a640484a87c2f888c | [
"MIT"
] | null | null | null | from django import forms
from .models import Comment, Rating, RatingStar
class RatingForm(forms.ModelForm):
    """Form for submitting a star rating, rendered as radio buttons."""
    # empty_label=None hides the default "---------" choice so a star must be picked.
    star = forms.ModelChoiceField(
        queryset=RatingStar.objects.all(),
        widget=forms.RadioSelect(),
        empty_label=None
    )
    class Meta:
        model = Rating
        fields = ('star',)
class CommentForm(forms.ModelForm):
    """Form for posting a comment: author name and comment text."""
    class Meta:
        model = Comment
        fields = ['author', 'text']
| 19.863636 | 47 | 0.631579 |
f50709f23a7db10987ca6be48b2058d6a849444a | 527 | py | Python | lumicks/pylake/tests/test_import_time.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 8 | 2019-02-18T07:56:39.000Z | 2022-03-19T01:14:48.000Z | lumicks/pylake/tests/test_import_time.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 42 | 2018-11-30T14:40:35.000Z | 2022-03-29T11:43:45.000Z | lumicks/pylake/tests/test_import_time.py | lumicks/pylake | b5875d156d6416793a371198f3f2590fca2be4cd | [
"Apache-2.0"
] | 4 | 2019-01-09T13:45:53.000Z | 2021-07-06T14:06:52.000Z | from textwrap import dedent
import numpy as np
import subprocess
import sys
import pytest
@pytest.mark.slow
def test_disabling_capturing(report_line):
    """Measure and report how long `import lumicks.pylake` takes in a fresh interpreter."""
    # NOTE(review): the name mentions "capturing" but the body measures import
    # time — presumably a copy-paste leftover; confirm before renaming.
    repeats = 3
    code = dedent("""\
    import time
    tic = time.time()
    import lumicks.pylake
    print(time.time() - tic)
    """)
    # Each run uses a separate interpreter so the module is imported cold every time.
    times = [float(subprocess.check_output([f'{sys.executable}', '-c', code])) for i in range(repeats)]
    report_line(f"Module import time: {np.mean(times):.2f} +- {np.std(times):.2f} seconds (N={repeats})")
| 23.954545 | 105 | 0.643264 |
ee921704bb61e5ef659b3c250a5774e67e1fc9fd | 3,433 | py | Python | lib/aquilon/consistency/checks/branch.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 7 | 2015-07-31T05:57:30.000Z | 2021-09-07T15:18:56.000Z | lib/aquilon/consistency/checks/branch.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 115 | 2015-03-03T13:11:46.000Z | 2021-09-20T12:42:24.000Z | lib/aquilon/consistency/checks/branch.py | ned21/aquilon | 6562ea0f224cda33b72a6f7664f48d65f96bd41a | [
"Apache-2.0"
] | 13 | 2015-03-03T11:17:59.000Z | 2021-09-09T09:16:41.000Z | #!/usr/bin/env python
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2013,2014,2017 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from aquilon.consistency.checker import ConsistencyChecker
from aquilon.aqdb.model.branch import Branch
from aquilon.worker.processes import run_git
from aquilon.worker.dbwrappers.branch import merge_into_trash
class BranchChecker(ConsistencyChecker):
    """
    Branch Consistency Checker
    This module performs validation that is common for all branches (both
    domains and sandboxes) in template-king.
    """
    def check(self, repair=False):
        """Cross-check branches between the database and the template-king repo.

        :param repair: when True, delete git branches that have no matching
            database record (merging them into the trash branch first when one
            is configured); when False, only report inconsistencies.
        """
        # Find all of the branches that are listed in the database
        db_branches = {}
        for branch in self.session.query(Branch):
            db_branches[branch.name] = branch
        # Find all of the branches that are in the template king, this
        # includes both domains and sandbox's
        kingdir = self.config.get("broker", "kingdir")
        out = run_git(['for-each-ref', '--format=%(refname:short)',
                       'refs/heads'], path=kingdir, loglevel=logging.DEBUG)
        git_branches = set(out.splitlines())
        # The trash branch is special: it intentionally has no DB record, so
        # exclude it before comparing the two sets.
        if self.config.has_option("broker", "trash_branch"):
            git_branches.remove(self.config.get("broker", "trash_branch"))
        # Branches in the database and not in the template-king
        for branch in set(db_branches.keys()).difference(git_branches):
            self.failure(branch, format(db_branches[branch]),
                         "found in the database but not in template-king")
            # No repair mode. We consider AQDB more canonical than
            # template-king, so we should not delete the DB object, and we don't
            # have any information how to restore the branch in template-king.
        # Branches in the template-king and not in the database
        for branch in git_branches.difference(db_branches.keys()):
            if repair:
                self.logger.info("Deleting branch %s", branch)
                merge_msg = []
                merge_msg.append("Delete orphaned branch %s" % branch)
                merge_msg.append("")
                merge_msg.append("The consistency checker found this branch to be ")
                merge_msg.append("orphaned.")
                # Preserve the branch contents in the trash branch (when
                # configured) before deleting it from template-king.
                if self.config.has_option("broker", "trash_branch"):
                    merge_into_trash(self.config, self.logger, branch,
                                     "\n".join(merge_msg),
                                     loglevel=logging.DEBUG)
                run_git(['branch', '-D', branch], path=kingdir,
                        loglevel=logging.DEBUG)
            else:
                self.failure(branch, "Branch %s" % branch,
                             "found in template-king but not in the database")
| 42.9125 | 84 | 0.637635 |
ee92be80023074621572bda99d5be62e1b63d427 | 1,418 | py | Python | server.py | aoii103/magicworld | cad0df6aa872cd5dcd4142f83ea9fde821652551 | [
"MIT"
] | 7 | 2018-02-05T03:14:08.000Z | 2019-07-28T18:49:41.000Z | server.py | aoii103/magicworld | cad0df6aa872cd5dcd4142f83ea9fde821652551 | [
"MIT"
] | null | null | null | server.py | aoii103/magicworld | cad0df6aa872cd5dcd4142f83ea9fde821652551 | [
"MIT"
] | 3 | 2019-05-21T08:58:32.000Z | 2019-12-26T17:03:07.000Z | import json
import os
from extra import MainStart
import threading
import moment
from jinja2 import Environment, PackageLoader
from sanic import Sanic, response
from sanic.log import logger
from termcolor import colored
from conf import config
from spider import bot
# Jinja2 environment for templates bundled next to this module.
env = Environment(loader=PackageLoader(__name__, './template'))
app = Sanic(__name__)
# NOTE(review): the first argument is the URI to serve under — 'static_path'
# looks like a placeholder; presumably '/static' or similar was intended.
app.static('static_path',config.static)
@app.route('/')
async def handle_request(request):
    """Redirect '/' to the newest scraped document id, or serve an empty page.

    Document ids are the names of the sub-directories of ./img; the
    lexicographically largest one is treated as the most recent.
    """
    # BUG FIX: the id-lookup code was dead (it sat after an unconditional
    # return) and the redirect `return` was at module level, which is a
    # SyntaxError. Moved the logic back inside the handler.
    # os.walk yields (dirpath, dirnames, filenames); [0][1] is the list of
    # immediate sub-directories of ./img.
    idList = list(os.walk("./img"))[0][1]
    if idList:
        logger.info(colored(f'{max(idList)}', 'red'))
        return response.redirect(f"/{max(idList)}")
    # Nothing scraped yet: fall back to an empty page instead of calling
    # max() on an empty sequence.
    return response.text('')
@app.route('/<docid>')
async def handle_request(request, docid):
    """Render the document whose data.json lives under the static directory.

    Any read/parse/render failure is logged and answered with an empty 404.
    """
    # NOTE(review): shares its name with the '/' handler; newer Sanic versions
    # reject duplicate route handler names — consider renaming.
    datapath = f"{config.static}/{docid}/data.json"
    logger.info(colored(f'load {datapath}', 'yellow'))
    if os.path.exists(datapath):
        try:
            with open(datapath, "r") as f:
                payload = json.loads(f.read())
            return response.html(env.get_template('index.html').render(data=payload))
        except Exception as e:
            logger.error(e)
    return response.html('', status=404)
def run_bot():
    """Create a fresh spider instance and start scraping."""
    bot().start()
if __name__ == '__main__':
    # Run the scraper on a background thread while the web app serves requests.
    scraper_thread = threading.Thread(target=MainStart, args=(run_bot, None, config.delay))
    scraper_thread.start()
    app.run(host=config.host, port=config.port)
| 26.754717 | 81 | 0.648096 |
ee969271d5aeb101a427f273a5ac443c35b1fd94 | 1,891 | py | Python | build/lib/pubsubsql/net/testheader.py | pubsubsql/python | a62f76490222380375d20399dbe3812ff3451815 | [
"Apache-2.0"
] | 1 | 2016-03-17T15:16:16.000Z | 2016-03-17T15:16:16.000Z | src/pubsubsql/net/testheader.py | pubsubsql/pss-py | a62f76490222380375d20399dbe3812ff3451815 | [
"Apache-2.0"
] | null | null | null | src/pubsubsql/net/testheader.py | pubsubsql/pss-py | a62f76490222380375d20399dbe3812ff3451815 | [
"Apache-2.0"
] | 1 | 2015-04-24T10:24:42.000Z | 2015-04-24T10:24:42.000Z | #! /usr/bin/env python
"""
Copyright (C) 2014 CompleteDB LLC.
This program is free software: you can redistribute it and/or modify
it under the terms of the Apache License Version 2.0 http://www.apache.org/licenses.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
"""
import unittest
from header import Header as NetHeader
class TestHeader(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def testGetBytes(self):
header1 = NetHeader(32567, 9875235)
header2 = NetHeader(0, 0)
#
buffer_bytes = header1.getBytes()
header2.getBytes()[:] = buffer_bytes
header2.unpackBuffer()
#
self.assertEqual(header1.getMessageSizeB(),
header2.getMessageSizeB(),
"MessageSize do not match")
#
self.assertEqual(header1.getRequestId(),
header2.getRequestId(),
"RequestId do not match")
def testSetData(self):
header1 = NetHeader()
header2 = NetHeader()
#
header1.setData(32567, 9875235)
buffer_bytes = bytearray(100)
buffer_bytes[:] = header1.getBytes()
#
header2.getBytes()[:] = buffer_bytes
header2.unpackBuffer()
#
self.assertEqual(header1.getMessageSizeB(),
header2.getMessageSizeB(),
"MessageSize do not match")
#
self.assertEqual(header1.getRequestId(),
header2.getRequestId(),
"RequestId do not match")
if __name__ == "__main__":
unittest.main()
| 30.5 | 85 | 0.565838 |
ee97351f4698d9e63dc5fa142d72abe0e05ecfef | 5,189 | py | Python | app/giturl_class/routes.py | KnowledgeCaptureAndDiscovery/somef-web | 220ced6a3cb778dd2ba5e2da7c440a6e33447f67 | [
"Apache-2.0"
] | 1 | 2020-04-12T17:03:37.000Z | 2020-04-12T17:03:37.000Z | app/giturl_class/routes.py | KnowledgeCaptureAndDiscovery/somef-web | 220ced6a3cb778dd2ba5e2da7c440a6e33447f67 | [
"Apache-2.0"
] | null | null | null | app/giturl_class/routes.py | KnowledgeCaptureAndDiscovery/somef-web | 220ced6a3cb778dd2ba5e2da7c440a6e33447f67 | [
"Apache-2.0"
] | 1 | 2021-09-21T20:11:50.000Z | 2021-09-21T20:11:50.000Z |
from flask import render_template, flash, send_from_directory, send_file
from app.giturl_class.url_form import UrlForm
from app.giturl_class.download_form import DownloadButton
from app.giturl_class import bp
import json
import os
USE_TEST_FILE = False
if(os.getenv('SM2KG_TEST_MODE') == 'TRUE'):
USE_TEST_FILE = True
print('SM2KG in Test Mode')
else:
from somef import cli
#from somef import cli
dirname = os.path.dirname(__file__)
#class names in json to be added in the header section. Can either add classes, or rearrange the order of these to change the display
headerClassNames = [
"topics",
"languages",
"license",
"forks_url",
"readme_url"
]
#class names in json to be added to body of metadata section, similar to headerClassNames
bodyClassNames = [
"citation",
"installation",
"invocation",
"description"
]
#this is a defaultHeaderClassBreak value, could change it for different formatting
headerClassBreak = int(len(headerClassNames)/2)
#helper function to display array of data in string format
def arrayToStr(arrayToCon):
returnStr = arrayToCon[0]
for i in range(1, len(arrayToCon)):
returnStr = returnStr + " " + arrayToCon[i]
return returnStr
#checks if string starts with https:
def isURL(urlString):
if(not(type(urlString is str))):
return False
elif(len(urlString) < 6):
return False
print(urlString[0:6])
return urlString[0:6] == "https:"
def getAvgOfArr(numArray):
avg = 0
numOfNums = 0
for i in numArray:
avg += i
numOfNums += 1
return avg/numOfNums
@bp.route('/index', methods = ['GET', 'POST'])
def urlPage():
urlForm = UrlForm()
downloadForm = DownloadButton()
citation_list = []
installation_list = []
invocation_list = []
description_list = []
description_conf_list = None
showDownload = None
git_name = None
git_owner = None
headerClassesToPass = []
bodyClassesToPass = []
if downloadForm.submit_download.data:
output_file = os.path.join(dirname, '../data/output.txt')
return send_file("../data/output.json", as_attachment=True)
#flash("Download")
if urlForm.validate_on_submit() and urlForm.submit.data:
#page only shows json data if showDownload is True. Sets to true if json file generated or test file env var set(would be nice to be able to set test file)
showDownload = True
try:
cli.run_cli(urlForm.giturl.data, .8, 'data/output.json')
except:
print("Error generating json")
flash("There must be a problem with your link")
showDownload = False
inputFile = 'data/output.json'
if(USE_TEST_FILE):
inputFile = 'data/test.json'
showDownload = True
with open(inputFile) as json_file:
data = json.load(json_file)
storedData = data
for i in bodyClassNames:
classData = []
for j in data[i]:
j["confidencAvg"] = getAvgOfArr(j["confidence"])
classData.append(j)
tempDict = {"className" : i,
"metadata" : classData }
bodyClassesToPass.append(tempDict)
print(bodyClassesToPass)
for i in headerClassNames:
#if excerpt is a list, converts it to string for display
if(type(data[i]["excerpt"]) is list):
data[i]["excerpt"] = arrayToStr(data[i]["excerpt"])
#if excerpt is url, makes into link
tempDict = {"className" : i,
"excerpt" : data[i]["excerpt"],
"confidence" : data[i]["confidence"],
"isURL" : isURL(data[i]["excerpt"]) }
headerClassesToPass.append(tempDict)
#two headerClasses that take special formating, could be passed in as special params but eh, ima lazy boy
git_name = data["name"]["excerpt"]
git_owner = data["owner"]["excerpt"]
return render_template('giturl_class/giturl.html',
form = urlForm,
downloadForm = downloadForm,
showDownload = showDownload,
citation = citation_list,
installation = installation_list,
invocation = invocation_list,
description = description_list,
headerClasses = headerClassesToPass,
headerClassBreak = headerClassBreak,
bodyClasses = bodyClassesToPass,
git_name = git_name,
git_owner = git_owner)
@bp.route('/about', methods = ['GET'])
def aboutPage():
return render_template('aboutpage/aboutpage.html')
@bp.route('/help', methods = ['GET'])
def helpPage():
return render_template('helppage/helppage.html')
| 31.448485 | 163 | 0.582386 |
ee98e5cd0e12c0ac4700f16fd1175dbaba124f1a | 681 | py | Python | type.py | pfeak/pymarkdown | e136c361c935785267535734394c579d8c7002f5 | [
"MIT"
] | null | null | null | type.py | pfeak/pymarkdown | e136c361c935785267535734394c579d8c7002f5 | [
"MIT"
] | 1 | 2020-09-17T07:46:58.000Z | 2020-09-17T07:46:58.000Z | type.py | pfeak/pymarkdown | e136c361c935785267535734394c579d8c7002f5 | [
"MIT"
] | null | null | null | import platform
from enum import Enum, unique
@unique
class Type(Enum):
    """Markdown syntax fragments used when emitting markdown text.

    Members whose value contains REPLACE_* are templates to be filled in
    by the caller.
    """
    # Separation
    SEP = '\n'
    # Place holder
    NULL = ""
    SPACE = " "
    # Markdown single symbol
    H1 = "#"
    H2 = "##"
    H3 = "###"
    H4 = "####"
    H5 = "#####"
    H6 = "######"
    QUOTE = ">"
    UNORDERED_LIST = "+"
    # Markdown double symbol
    BOLD = "**"
    STRIKETHROUGH = "~~"
    # Special symbol
    SPLIT_LINE = "---"
    # BUG FIX: IMAGE was '' which duplicated NULL's value; under @unique that
    # raises ValueError at import time. Restored the markdown image template,
    # matching the URL template below.
    IMAGE = '![REPLACE_TITLE](REPLACE_URL)'
    URL = '[REPLACE_TITLE](REPLACE_URL)'
    ORDERED_LIST = "REPLACE_NUMBER."
    CODE = '```REPLACE_CODE'
    # Table
    TABLE = ":-:"
    TABLE_SPLIT = "|"
| 17.025 | 55 | 0.50514 |
ee9a90e09df8676533abaa0b7de5176954a8137e | 3,542 | py | Python | server/server/apps/course/views.py | tjsga/study-bank | f4cb17bc642d2fd28affde89d2af6a8ecd2286f2 | [
"MIT"
] | null | null | null | server/server/apps/course/views.py | tjsga/study-bank | f4cb17bc642d2fd28affde89d2af6a8ecd2286f2 | [
"MIT"
] | null | null | null | server/server/apps/course/views.py | tjsga/study-bank | f4cb17bc642d2fd28affde89d2af6a8ecd2286f2 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.core.exceptions import PermissionDenied
from django.http import Http404
from .models import Course
from ..mod.models import Moderator
from ..files.models import File
from ..decorators import login
# Create your views here.
@login
def index(request):
courses = Course.objects.all()
return render(request, 'class/index.html', {'classes': courses})
@login
def show(request, course_url):
course = get_object_or_404(Course, url=course_url)
is_mod = False
try:
mod = Moderator.objects.get(username=request.session['user'])
except Moderator.DoesNotExist:
is_mod = False
return render(request, 'class/show.html', {'course': course, 'is_mod': is_mod})
if mod.admin:
is_mod = True
elif course in mod.classes.all():
is_mod = True
return render(request, 'class/show.html', {'course': course, 'is_mod': is_mod})
@login
def approve(request, course_url, doc_id):
course = get_object_or_404(Course, url=course_url)
try:
mod = Moderator.objects.get(username=request.session['user'])
except Moderator.DoesNotExist:
raise PermissionDenied
if mod.admin or (course in mod.classes.all()):
try:
doc = course.unapproved_files.get(id=doc_id)
except File.DoesNotExist:
try:
doc = course.files.get(id=doc_id)
except File.DoesNotExist:
raise Http404("Error: Document Not Related to this Course")
raise Http404("Error: Document Already Approved")
course.unapproved_files.remove(doc)
course.files.add(doc)
return render(request, 'class/approve.html', {'doc': doc, 'course': course})
else:
raise PermissionDenied
@login
def remove(request, course_url, doc_id):
course = get_object_or_404(Course, url=course_url)
try:
mod = Moderator.objects.get(username=request.session['user'])
except Moderator.DoesNotExist:
raise PermissionDenied
if mod.admin or (course in mod.classes.all()):
try:
doc = course.files.get(id=doc_id)
except File.DoesNotExist:
try:
doc = course.unapproved_files.get(id=doc_id)
except File.DoesNotExist:
raise Http404("Error: Document Not Related to this Course")
course.unapproved_files.remove(doc)
course.rejected_files.add(doc)
return render(request, 'class/remove.html', {'doc': doc, 'course': course})
course.files.remove(doc)
course.rejected_files.add(doc)
return render(request, 'class/remove.html', {'doc': doc, 'course': course})
else:
raise PermissionDenied
@login
def undelete(request, course_url, doc_id):
course = get_object_or_404(Course, url=course_url)
try:
mod = Moderator.objects.get(username=request.session['user'])
except Moderator.DoesNotExist:
raise PermissionDenied
if mod.admin or (course in mod.classes.all()):
try:
doc = course.rejected_files.get(id=doc_id)
except File.DoesNotExist:
raise Http404("Error: Document Not Related to this Course")
course.rejected_files.remove(doc)
course.files.add(doc)
return render(request, 'class/undelete.html', {'doc': doc, 'course': course})
else:
raise PermissionDenied | 31.625 | 87 | 0.634952 |
ee9c2a66660c6fef43012d8c38ea0b3de96ca075 | 1,134 | py | Python | animepicker/apps/picker/migrations/0001_initial.py | Onosume/anime-picker | 635f260ebb3b63b50e3b461b78d4c3295b4ff703 | [
"MIT"
] | null | null | null | animepicker/apps/picker/migrations/0001_initial.py | Onosume/anime-picker | 635f260ebb3b63b50e3b461b78d4c3295b4ff703 | [
"MIT"
] | null | null | null | animepicker/apps/picker/migrations/0001_initial.py | Onosume/anime-picker | 635f260ebb3b63b50e3b461b78d4c3295b4ff703 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-01-26 14:26
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Anime',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('anime_title', models.TextField(blank=True)),
('alt_title', models.TextField(blank=True, null=True)),
('anime_type', models.CharField(choices=[('TV', 'TV'), ('Movie', 'Movie'), ('OVA', 'OVA'), ('ONA', 'ONA'), ('Special', 'Special')], default='TV', max_length=10)),
('anime_genre', models.TextField(blank=True)),
('current_episodes', models.IntegerField(default=0)),
('max_episodes', models.IntegerField(default=0)),
('release_season', models.CharField(max_length=15)),
('studio_name', models.TextField(blank=True)),
],
),
]
| 36.580645 | 178 | 0.574956 |
ee9c514425fe52fb6f66f62ee9d6108d08382363 | 5,332 | py | Python | solutions/solution_14.py | claudiobierig/adventofcode19 | 40dabd7c780ab1cd8bad4292550cd9dd1d178365 | [
"MIT"
] | null | null | null | solutions/solution_14.py | claudiobierig/adventofcode19 | 40dabd7c780ab1cd8bad4292550cd9dd1d178365 | [
"MIT"
] | null | null | null | solutions/solution_14.py | claudiobierig/adventofcode19 | 40dabd7c780ab1cd8bad4292550cd9dd1d178365 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import math
def read_input(path):
    """Parse a reaction file into {product: [batch_size, [[amount, ingredient], ...]]}.

    Each line has the form "7 A, 1 B => 1 C", meaning one batch of 1 C
    consumes 7 A and 1 B.

    :param path: path to the reaction list file
    :return: dict mapping product name to [batch amount, ingredient list]
    """
    result = {}
    # Iterate the file directly (no readlines) and avoid shadowing the
    # builtin name `file` that the original used.
    with open(path) as fh:
        for line in fh:
            line = line.strip()
            if not line:
                # Tolerate blank/trailing empty lines instead of crashing.
                continue
            lhs, rhs = (part.strip() for part in line.split('=>'))
            goal_amount, goal_resource = rhs.split()
            ingredients = []
            for chunk in lhs.split(','):
                amount, name = chunk.split()
                ingredients.append([int(amount), name])
            result[goal_resource] = [int(goal_amount), ingredients]
    return result
def get_amount(resource, amount_needed, reactions, leftovers):
    """Return the ingredient amounts required to produce `amount_needed` of `resource`.

    Consumes any surplus recorded in `leftovers` first and updates it in
    place with the surplus of the new batches.
    """
    batch_size, ingredients = reactions[resource]
    available = leftovers.get(resource, 0)
    batches = math.ceil((amount_needed - available) / batch_size)
    leftovers[resource] = available + batches * batch_size - amount_needed
    return [[batches * amount, name] for amount, name in ingredients]
def need_reaction(required_resources):
    """Return True while any pending ``[amount, resource]`` pair still needs
    a reaction, i.e. is not raw ORE."""
    return any(name != "ORE" for _, name in required_resources)
def reduce_leftovers(leftovers, reactions):
    """Repeatedly convert surplus products back into their ingredients.

    Whenever the leftover amount of a product reaches the batch size of the
    reaction producing it, whole batches of that reaction are "undone": the
    surplus shrinks and the corresponding ingredient amounts (ORE included)
    are credited back.  This repeats until no further reduction is possible.
    ``leftovers`` is modified in place; ORE itself is never decomposed.
    """
    changed = True
    while changed:
        changed = False
        credits = {}
        for resource, surplus in leftovers.items():
            if resource == "ORE":
                continue
            batch_size, ingredients = reactions[resource]
            if surplus >= batch_size:
                batches = surplus // batch_size
                changed = True
                leftovers[resource] = surplus - batches * batch_size
                for amount, name in ingredients:
                    credits[name] = credits.get(name, 0) + batches * amount
        # Apply the credited ingredients after the scan so the dict is not
        # resized while being iterated.
        for name, amount in credits.items():
            leftovers[name] = leftovers.get(name, 0) + amount
if __name__ == "__main__":
    # Part 1: expand 1 FUEL down to raw ORE requirements.
    # (renamed from `input`, which shadowed the builtin)
    reactions = read_input("input/14.txt")
    leftovers = {}
    required_resources = get_amount("FUEL", 1, reactions, leftovers)
    while need_reaction(required_resources):
        # Find the first requirement that is not raw ORE and expand it.
        i = 0
        while required_resources[i][1] == "ORE":
            i += 1
        required_resources += get_amount(required_resources[i][1],
                                         required_resources[i][0],
                                         reactions, leftovers)
        required_resources.pop(i)
    required_ore = sum(r[0] for r in required_resources)
    print("Solution1")
    print(required_ore)

    # Part 2: how much FUEL can 1 trillion ORE produce?  Greedily produce
    # `without_problems` fuel at the naive cost, then reclaim ORE from the
    # accumulated leftovers and repeat until no whole fuel fits anymore.
    max_ore = 1000000000000
    without_problems = int(max_ore / required_ore)
    leftovers2 = {k: without_problems * v for k, v in leftovers.items()}
    ore = required_ore * without_problems
    fuel = without_problems
    reduce_leftovers(leftovers2, reactions)
    ore -= leftovers2.get("ORE", 0)
    leftovers2["ORE"] = 0
    while without_problems > 0:
        without_problems = int((max_ore - ore) / required_ore)
        for key, value in leftovers.items():
            leftovers2[key] = leftovers2.get(key, 0) + value * without_problems
        ore += required_ore * without_problems
        fuel += without_problems
        reduce_leftovers(leftovers2, reactions)
        ore -= leftovers2.get("ORE", 0)
        leftovers2["ORE"] = 0
    # Check whether the remaining leftovers plus budget cover one more FUEL.
    leftovers2["FUEL"] = 1
    reduce_leftovers(leftovers2, reactions)
    ore -= leftovers2.get("ORE", 0)
    if ore <= max_ore:
        fuel += 1
    print("Solution 2")
    print(fuel)
| 45.186441 | 693 | 0.564891 |
ee9daa8c3f24ee0e5956c82c505b318b5493b1d6 | 471 | py | Python | src/actions/action_sleep.py | JohnVillalovos/webhook-proxy | fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3 | [
"MIT"
] | null | null | null | src/actions/action_sleep.py | JohnVillalovos/webhook-proxy | fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3 | [
"MIT"
] | null | null | null | src/actions/action_sleep.py | JohnVillalovos/webhook-proxy | fbb2df31b10a0c3ffb9572a0abde4df7e1ad2ef3 | [
"MIT"
] | null | null | null | import time
from actions import Action, action
@action("sleep")
class SleepAction(Action):
def __init__(
self, seconds, output="Waiting {{ seconds }} seconds before continuing ..."
):
self.seconds = seconds
self.output_format = output
def _run(self):
seconds = float(self._render_with_template(str(self.seconds)))
print(self._render_with_template(self.output_format, seconds=seconds))
time.sleep(seconds)
| 23.55 | 83 | 0.673036 |
ee9fab028e33102060e656a46df7bd6afed90358 | 1,262 | py | Python | a1d05eba1/special_fields/choice_filter.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | null | null | null | a1d05eba1/special_fields/choice_filter.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | 28 | 2020-06-23T19:00:58.000Z | 2021-03-26T22:13:07.000Z | a1d05eba1/special_fields/choice_filter.py | dorey/a1d05eba1 | eb6f66a946f3c417ab6bf9047ba9715be071967c | [
"0BSD"
] | null | null | null | from ..utils.kfrozendict import kfrozendict
from ..utils.kfrozendict import kassertfrozen
class ChoiceFilter:
    """Wraps a row's ``choice_filter`` expression for schema versions 1 and 2.

    Schema '2' stores the expression as a mapping with a ``raw`` key; schema
    '1' stores the bare string, which is wrapped into the same shape here.
    """

    ROW_KEYS = {
        '1': ['choice_filter'],
        '2': ['choice_filter'],
    }
    EXPORT_KEY = 'choice_filter'

    @classmethod
    def in_row(kls, row, schema):
        # Presence check only; the schema argument is not consulted here.
        return 'choice_filter' in row

    @classmethod
    def pull_from_row(kls, row, content):
        # Generator yielding at most one ChoiceFilter built from the row.
        version = content.schema_version
        if version == '2':
            data = row.get('choice_filter')
            if data:
                assert 'raw' in data
                yield kls(content=content, val=data)
        elif version == '1':
            yield kls(content=content, val={'raw': row['choice_filter']})

    def __init__(self, content, val):
        self.content = content
        self.key = 'choice_filter'
        self.val = val
        self._string = val.get('raw')

    def dict_key_vals_old(self, renames=None):
        # Schema-1 export: the bare expression string.
        yield ('choice_filter', self._string,)

    @kassertfrozen
    def dict_key_vals_new(self, renames=None):
        # Schema-2 export: a frozen mapping with the raw expression.
        return ('choice_filter', kfrozendict(raw=self.val.get('raw')))
ee9ff38e8ac3eaab8a58f8de6b4ed70735c17d0f | 3,878 | py | Python | hamster_control_test_version.py | iamnotmarcel/HamsterModell | ce8391e8e120e2cf957f9d49e812be3c4f757f75 | [
"MIT"
] | null | null | null | hamster_control_test_version.py | iamnotmarcel/HamsterModell | ce8391e8e120e2cf957f9d49e812be3c4f757f75 | [
"MIT"
] | 1 | 2022-03-26T17:27:30.000Z | 2022-03-26T17:27:30.000Z | hamster_control_test_version.py | iamnotmarcel/HamsterModell | ce8391e8e120e2cf957f9d49e812be3c4f757f75 | [
"MIT"
] | null | null | null | '''
Author: Marcel Miljak
Klasse: 5aHEL - HTL Anichstraße
Diplomarbeit: Entwicklung eines Hamster Roboters
Jahrgang: 2021/22
'''
import time
from time import sleep
import RPi.GPIO as GPIO
DIR_2 = 18 # Direction-Pin vom 2ten Modul
DIR_1 = 24 # Direction-pin vom 1sten Modul
STEP_1 = 25 # Step-Pin vom 1sten Modul
STEP_2 = 23 # Step-Pin vom 2ten Modul
CW = 1 # Clockwise Rotation
CCW = 0 # Counterclockwise Rotation
SENS_TRIG = 6 # Trigger-Pin HC-SR04
SENS_ECHO = 5 # Echo-Pin HC-SR04
whole_cycle = 300 # ganze Umdrehung (360 / 7.5) was aber foisch is
cycle_left = 548 # Viertel Umdrehung
delay = 0.005
def setup():
    """Configure BCM pin numbering and the direction of every pin used.

    All motor and trigger pins become outputs; the ultrasonic echo pin is
    the only input.
    """
    GPIO.setwarnings(False)
    GPIO.setmode(GPIO.BCM)
    for out_pin in (DIR_2, STEP_1, STEP_2, DIR_1, SENS_TRIG):
        GPIO.setup(out_pin, GPIO.OUT)
    GPIO.setup(SENS_ECHO, GPIO.IN)
def vor():
    """Drive the hamster forward in three bursts of 100 step pulses.

    Before each burst the ultrasonic sensor is polled; if an obstacle is
    closer than 20 cm, the motors stop, the robot turns left once, and the
    forward motion is aborted.
    """
    setup()
    GPIO.output(DIR_1, CW)
    GPIO.output(DIR_2, CW)
    print("Vorwärts...")
    for _ in range(3):
        dist = vornFrei()
        if dist < 20.0:
            # Obstacle ahead: stop, dodge left, abort the remaining bursts.
            print("Achtung - Hinderniss voraus!")
            stop()
            time.sleep(delay)
            linksUm()
            time.sleep(delay)
            break
        for _ in range(100):
            GPIO.output(STEP_1, GPIO.HIGH)
            GPIO.output(STEP_2, GPIO.HIGH)
            sleep(delay)
            GPIO.output(STEP_1, GPIO.LOW)
            GPIO.output(STEP_2, GPIO.LOW)
            sleep(delay)
def linksUm():
    """Rotate the hamster roughly 90 degrees to the left.

    The wheels run in opposite directions; the motors receive 298
    anti-phase step pulses at the configured delay.
    """
    setup()
    GPIO.output(DIR_1, CW)
    GPIO.output(DIR_2, CCW)
    print("Ausrichtung nach links...")
    for _ in range(298):
        for level_1, level_2 in ((GPIO.HIGH, GPIO.LOW), (GPIO.LOW, GPIO.HIGH)):
            GPIO.output(STEP_1, level_1)
            GPIO.output(STEP_2, level_2)
            sleep(delay)
def rechtsUm():
    """Turn roughly 90 degrees to the right by performing three left turns.

    Originally written as a sanity check that right alignment works, too.
    """
    setup()
    print("Ausrichtung nach rechts...")
    for _ in range(3):
        linksUm()
    GPIO.cleanup()
def vornFrei():
    '''
    Measure the distance to the nearest obstacle in front of the hamster
    with the HC-SR04 ultrasonic sensor and return it in centimetres.

    NOTE(review): despite the name ("front clear") this returns a float
    distance rather than a boolean; callers compare it to a threshold.
    NOTE(review): the first busy-wait loop has no timeout and can hang if
    the echo pin never goes high - confirm the wiring guarantees a pulse.
    '''
    setup()
    # 10 microsecond trigger pulse starts one measurement cycle.
    GPIO.output(SENS_TRIG,1)
    time.sleep(0.00001)
    GPIO.output(SENS_TRIG,0)
    # Wait for the echo pulse to begin.
    while GPIO.input(SENS_ECHO) == 0:
        pass
    start = time.time()
    timer = 0
    # Wait for the echo pulse to end, capped at 13 polls of 0.1 ms so a
    # stuck-high echo line cannot block forever.
    while (GPIO.input(SENS_ECHO) == 1 and timer <= 12):
        timer +=1
        time.sleep(0.0001)
    stop = time.time()
    # Pulse width times the speed of sound (34300 cm/s), halved because the
    # sound travels to the obstacle and back.
    return (stop-start) * 34300 / 2
def stop():
    """Halt the hamster by driving every motor pin (direction and step) low.

    Used when the distance sensor reports an obstacle ahead.
    """
    setup()
    print("Stop des Hamsters...")
    for pin in (DIR_1, DIR_2, STEP_1, STEP_2):
        GPIO.output(pin, GPIO.LOW)
'''
def kornDa():
liefert true, wenn sich auf dem Feld, auf der der
Hamster steht, sich mindestens ein Korn befindet.
setup()
print("Check ob Korn auf Feld vorhanden...")
korn_indicator = GPIO.input(SENS_Korn)
if korn_indicator == 0:
print("Es befindet sich ein Korn auf dem Feld")
return True
else:
return False
'''
def nimm():
    '''
    Pick up one grain from the tile the hamster is currently standing on.
    (Not implemented yet.)
    '''
    pass
def gib():
    '''
    Drop one grain from the hamster's mouth onto the tile it is
    currently standing on. (Not implemented yet.)
    '''
    pass
def maulLeer():
    '''
    Return True when the hamster has no grains in its mouth.
    (Not implemented yet.)
    '''
    pass