id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
11373 | <gh_stars>0
########################################################################
# import default libraries
########################################################################
import os
import csv
import sys
import gc
########################################################################
########################################################################
# import additional libraries
########################################################################
import numpy as np
import scipy.stats
import torch
import torch.nn as nn
# from import
from tqdm import tqdm
from sklearn import metrics
try:
from sklearn.externals import joblib
except:
import joblib
# original lib
import common as com
from pytorch_model import AutoEncoder
########################################################################
########################################################################
# load parameter.yaml
########################################################################
# Load the tool configuration (parameter.yaml) once at import time via the
# shared `common` helper; every section below reads its settings from `param`.
param = com.yaml_load()
#######################################################################
########################################################################
# output csv file
########################################################################
def save_csv(save_file_path,
             save_data):
    """Write `save_data` (an iterable of rows) to `save_file_path` as CSV.

    Rows are terminated with '\n' regardless of platform.
    """
    with open(save_file_path, "w", newline="") as csv_file:
        csv.writer(csv_file, lineterminator='\n').writerows(save_data)
########################################################################
########################################################################
# main 01_test.py
########################################################################
# NOTE(review): indentation has been stripped from this whole script (the
# bodies of every `if`/`for`/`try` below sit at column 0), so it cannot run
# as-is; the original nesting must be restored from the control-flow keywords.
# Purpose (from the visible calls): load a trained AutoEncoder per machine
# type, score every test file with MSE reconstruction error, threshold the
# scores against a fitted gamma distribution, and write anomaly-score /
# decision CSVs (plus AUC/pAUC stats in development mode).
if __name__ == "__main__":
####################################################################
# set device
####################################################################
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
print("device : {}".format(device))
####################################################################
# check mode
# "development": mode == True
# "evaluation": mode == False
mode = com.command_line_chk()
if mode is None:
sys.exit(-1)
# make output result directory
os.makedirs(param["result_directory"], exist_ok=True)
# load base directory
dirs = com.select_dirs(param=param, mode=mode)
# initialize lines in csv for AUC and pAUC
csv_lines = []
if mode:
performance_over_all = []
# loop of the base directory
for idx, target_dir in enumerate(dirs):
print("\n===========================")
print("[{idx}/{total}] {target_dir}".format(target_dir=target_dir, idx=idx+1, total=len(dirs)))
machine_type = os.path.split(target_dir)[1]
print("============== MODEL LOAD ==============")
# load model file
model_file = "{model}/model_{machine_type}.hdf5".format(model=param["model_directory"],
machine_type=machine_type)
if not os.path.exists(model_file):
com.logger.error("{} model not found ".format(machine_type))
sys.exit(-1)
input_channel = param["feature"]["n_mels"] * param["feature"]["n_frames"]
model = AutoEncoder(input_channel).to(device)
# NOTE(review): eval() is called before load_state_dict(); harmless for a
# plain autoencoder, but conventionally eval() comes after loading weights.
model.eval()
if device.type == "cuda":
model.load_state_dict(torch.load(model_file))
elif device.type == "cpu":
model.load_state_dict(torch.load(model_file, map_location=torch.device("cpu")))
# load anomaly score distribution for determining threshold
score_distr_file_path = "{model}/score_distr_{machine_type}.pkl".format(model=param["model_directory"],
machine_type=machine_type)
shape_hat, loc_hat, scale_hat = joblib.load(score_distr_file_path)
# determine threshold for decision
decision_threshold = scipy.stats.gamma.ppf(q=param["decision_threshold"], a=shape_hat, loc=loc_hat, scale=scale_hat)
if mode:
# results for each machine type
csv_lines.append([machine_type])
csv_lines.append(["section", "domain", "AUC", "pAUC", "precision", "recall", "F1 score"])
performance = []
dir_names = ["source_test", "target_test"]
for dir_name in dir_names:
#list machine id
section_names = com.get_section_names(target_dir, dir_name=dir_name)
for section_name in section_names:
# load test file
files, y_true = com.file_list_generator(target_dir=target_dir,
section_name=section_name,
dir_name=dir_name,
mode=mode)
# setup anomaly score file path
anomaly_score_csv = "{result}/anomaly_score_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"],
machine_type=machine_type,
section_name=section_name,
dir_name=dir_name)
anomaly_score_list = []
# setup decision result file path
decision_result_csv = "{result}/decision_result_{machine_type}_{section_name}_{dir_name}.csv".format(result=param["result_directory"],
machine_type=machine_type,
section_name=section_name,
dir_name=dir_name)
decision_result_list = []
print("\n============== BEGIN TEST FOR A SECTION ==============")
y_pred = [0. for k in files]
# NOTE(review): the bare `except:` below only logs and falls through; if the
# very first file is broken, `data` is still undefined and the torch.tensor
# line raises NameError (otherwise it silently reuses the previous file's data).
for file_idx, file_path in tqdm(enumerate(files), total=len(files)):
try:
data = com.file_to_vectors(file_path,
n_mels=param["feature"]["n_mels"],
n_frames=param["feature"]["n_frames"],
n_fft=param["feature"]["n_fft"],
hop_length=param["feature"]["hop_length"],
power=param["feature"]["power"])
except:
com.logger.error("File broken!!: {}".format(file_path))
data = torch.tensor(data, dtype=torch.float32).to(device)
reconst = model(data)
# anomaly score = mean squared reconstruction error of the autoencoder
mseloss = nn.functional.mse_loss(data.detach(), reconst.detach())
y_pred[file_idx] = mseloss.item()
# store anomaly scores
anomaly_score_list.append([os.path.basename(file_path), y_pred[file_idx]])
# store decision results
if y_pred[file_idx] > decision_threshold:
decision_result_list.append([os.path.basename(file_path), 1])
else:
decision_result_list.append([os.path.basename(file_path), 0])
# output anomaly scores
save_csv(save_file_path=anomaly_score_csv, save_data=anomaly_score_list)
com.logger.info("anomaly score result -> {}".format(anomaly_score_csv))
# output decision results
save_csv(save_file_path=decision_result_csv, save_data=decision_result_list)
com.logger.info("decision result -> {}".format(decision_result_csv))
if mode:
# append AUC and pAUC to lists
auc = metrics.roc_auc_score(y_true, y_pred)
p_auc = metrics.roc_auc_score(y_true, y_pred, max_fpr=param["max_fpr"])
tn, fp, fn, tp = metrics.confusion_matrix(y_true, [1 if x > decision_threshold else 0 for x in y_pred]).ravel()
# epsilon guards below avoid division by zero when a class is empty
prec = tp / np.maximum(tp + fp, sys.float_info.epsilon)
recall = tp / np.maximum(tp + fn, sys.float_info.epsilon)
f1 = 2.0 * prec * recall / np.maximum(prec + recall, sys.float_info.epsilon)
csv_lines.append([section_name.split("_", 1)[1], dir_name.split("_", 1)[0], auc, p_auc, prec, recall, f1])
performance.append([auc, p_auc, prec, recall, f1])
performance_over_all.append([auc, p_auc, prec, recall, f1])
com.logger.info("AUC : {}".format(auc))
com.logger.info("pAUC : {}".format(p_auc))
com.logger.info("precision : {}".format(prec))
com.logger.info("recall : {}".format(recall))
com.logger.info("F1 score : {}".format(f1))
print("\n============ END OF TEST FOR A SECTION ============")
if mode:
# calculate averages for AUCs and pAUCs
amean_performance = np.mean(np.array(performance, dtype=float), axis=0)
csv_lines.append(["arithmetic mean", ""] + list(amean_performance))
hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance, dtype=float), sys.float_info.epsilon), axis=0)
csv_lines.append(["harmonic mean", ""] + list(hmean_performance))
csv_lines.append([])
# free the per-machine tensors/model before the next machine type is loaded
del data
del model
if mode:
csv_lines.append(["", "", "AUC", "pAUC", "precision", "recall", "F1 score"])
# calculate averages for AUCs and pAUCs
amean_performance = np.mean(np.array(performance_over_all, dtype=float), axis=0)
csv_lines.append(["arithmetic mean over all machine types, sections, and domains", ""] + list(amean_performance))
hmean_performance = scipy.stats.hmean(np.maximum(np.array(performance_over_all, dtype=float), sys.float_info.epsilon), axis=0)
csv_lines.append(["harmonic mean over all machine types, sections, and domains", ""] + list(hmean_performance))
csv_lines.append([])
# output results
result_path = "{result}/{file_name}".format(result=param["result_directory"], file_name=param["result_file"])
com.logger.info("results -> {}".format(result_path))
save_csv(save_file_path=result_path, save_data=csv_lines)
| StarcoderdataPython |
87869 | <reponame>paul90317/minecraft-craft
import json
import os

# Location of the Minecraft launcher's account store.
path = os.path.join(r"mcpath", "launcher_accounts.json")

# Load the store, rename every account's profile, and write the file back.
with open(path, 'r') as f:
    data = json.load(f)

for acc in data['accounts']:
    data['accounts'][acc]['minecraftProfile']['name'] = 'yourname'

with open(path, 'w') as f:
    json.dump(data, f)
3298823 | from distutils.util import strtobool
from random import shuffle, randint
from math import floor
from enum import Enum
from time import time
# When True, print_hand() shows the two-character repr of each card and
# flags duplicate cards within a hand.
DEBUG = False
class Card:
    """A single playing card holding a numeric value and a suit name."""

    def __init__(self, value, suit):
        self.value = value
        self.suit = suit
        # Display names looked up from the module-level tables.
        self.vname = value_names[value]
        self.sname = suit_names[suit]

    def __str__(self):
        """Pretty form, e.g. '♥Ace♥'."""
        return f'{self.sname}{self.vname}{self.sname}'

    def __repr__(self):
        """Compact two-character debug form; '__' marks an empty slot."""
        if self.value == 0:
            return '__'
        if 0 < self.value < 10:
            return f'{self.value}{self.suit[0].lower()}'
        if self.value >= 10:
            return f'{self.vname[0]}{self.suit[0].lower()}'

    def __eq__(self, other_card):
        """Two cards are equal when both value and suit match."""
        return (isinstance(other_card, self.__class__)
                and self.value == other_card.value
                and self.suit == other_card.suit)

    def __ne__(self, other_card):
        """Inverse of __eq__."""
        return not self == other_card

    def __hash__(self):
        """Hash on (value, suit) so equal cards collide, enabling set/dict use."""
        return hash((self.value, self.suit))

    def __int__(self):
        """A card's integer form is its value."""
        return self.value
# NOTE(review): indentation is stripped in this block; structure must be
# restored from the keywords. Deck depends on the module globals `decks_`
# (number of 52-card decks) and `drawcards` (a shared dict reused for every
# hand), both defined near the bottom of the file.
class Deck:
"""A class containing all of the cards that can be drawn as part of a hand"""
def __init__(self, shuffle_cards=True):
self.cards = []
self.shuffle_cards = shuffle_cards
self.create()
def create(self):
"""Generate all of the cards"""
# Build `decks_` copies of the 52-card deck (values 2..14, four suits).
for _ in range(decks_):
for val in (2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14):
for suit in ("Hearts", "Spades", "Clubs", "Diamonds"):
self.cards.append(Card(val, suit))
if self.shuffle_cards:
shuffle(self.cards)
def draw(self, c):
"""Generate a hand of c unique cards"""
# NOTE(review): this fills and returns the MODULE-LEVEL `drawcards` dict
# (keys 0..c-1), so every hand returned by draw() aliases the same dict;
# callers must consume a hand before drawing the next one.
# `x` doubles as a retry counter and (inside the fallback) a loop variable.
card, x = 0, 0
while card < c:
if self.cards[-1] not in drawcards.values():
drawcards[card] = self.cards.pop()
card += 1
else:
# Fallback path when the top card is already in the hand; the 520
# limits are presumably 10 decks * 52 cards — TODO confirm intent.
if (len(self.cards) <= 520 and len(drawcards) <= 520) or x == 10**5:
s = set(list(drawcards.values()) + self.cards)
if s:
for x in self.cards:
if x not in drawcards.values():
drawcards[card] = x
break
if drawcards[card] in self.cards:
self.cards.remove(drawcards[card])
card += 1
else:
# No unused card remains: fill the slot with an "empty" card.
drawcards[card] = Card(0, 0)
card += 1
else:
# Swap a random card to the top and retry.
i = randint(0, (len(self.cards) - 1))
self.cards[i], self.cards[-1] = self.cards[-1], self.cards[i]
x += 1
return drawcards
# Each hand category gets a 1000-wide score band; per-card contributions
# (see HandTypeEvaluation) are added on top of these base values, so any
# hand of a stronger category always outranks any hand of a weaker one.
class BaseStrength(Enum):
"""The minimum strength value for each type of hand"""
ROYAL_FLUSH = 10000
STRAIGHT_FLUSH = 9000
QUADS = 8000
FULL_HOUSE = 7000
FLUSH = 6000
STRAIGHT = 5000
SET = 4000
TWO_PAIR = 3000
PAIR = 2000
HIGH_CARD = 1000
# NOTE(review): indentation is stripped in this block; structure must be
# restored from the keywords. Results are communicated through the mutable
# class attribute `HandTypeEvaluation.strength`, which each classifier
# overwrites as a side effect — callers read it after evalhand() returns.
class HandTypeEvaluation:
"""Contains functions that determine the name of and assign strength values to each hand."""
def __init__(self):
HandTypeEvaluation.strength = 0
@classmethod
def h_card(cls, values):
"""Returns the name of a high-card hand (string) given a list of the hand's card values.
Also changes hand strength accordingly."""
# Kickers are weighted by decreasing powers of ten so ordering is lexicographic.
cls.strength = BaseStrength.HIGH_CARD.value + 60*values[0] + 6*values[1] + .6*values[2] + .06*values[3]\
+ .006*values[4]
return f'High-Card {value_names[values[0]]}'
@classmethod
def num_pair(cls, values):
"""Returns the name of a one-pair or two-pair hand (string) given a list of the hand's card values.
Returns False if one-pair or two-pair is not present within the hand. Also changes hand strength accordingly."""
pairs = list(dict.fromkeys([val for val in values if values.count(val) == 2]))
if not pairs:
return False
if len(pairs) == 1:
vp = values.copy()
for _ in range(2):
vp.remove(pairs[0])
cls.strength = BaseStrength.PAIR.value + 60*pairs[0] + 6*vp[0] + .6*vp[1] + .06*vp[2]
return f'Pair of {value_names_plural[pairs[0]]}s'
if len(pairs) >= 2:
vps = values.copy()
pairs = sorted(pairs, reverse=True)
for _ in range(2):
vps.remove(pairs[0]); vps.remove(pairs[1])
cls.strength = BaseStrength.TWO_PAIR.value + 60*pairs[0] + 6*pairs[1] + .6*vps[0]
return f'{value_names_plural[pairs[0]]}s and {value_names_plural[pairs[1]]}s'
@classmethod
def trip(cls, values):
"""Returns the name of a three-of-a-kind hand (string) given a list of the hand's card values.
Returns False if a set is not present within the hand. Also changes hand strength accordingly."""
trips = [val for val in values if values.count(val) == 3]
if not trips:
return False
else:
trips = max(trips)
vs = values.copy()
for _ in range(3):
vs.remove(trips)
cls.strength = BaseStrength.SET.value + 60*trips + 6*vs[0] + .6*vs[1]
return f'Set of {value_names_plural[trips]}s'
@classmethod
def straight(cls, vset, get_vals=False):
"""Returns the name of a straight hand (string) given a set of the hand's card values.
Returns False if a straight is not present within the hand. Also changes hand strength accordingly.
If get_vals is true, straight() does not change strength and returns the values present in a straight."""
# Scans ranks high-to-low, with a leading 14 so the wheel (A-2-3-4-5) counts.
count = 0
if not get_vals:
straight = False
for rank in reversed([14, *range(2, 15)]):
if rank in vset:
count += 1
min_c = rank
if count == 5:
if min_c != 14:
max_c = min_c + 4
else:
min_c, max_c = 1, 5
cls.strength = BaseStrength.STRAIGHT.value + 70*max_c
straight = f'Straight from {value_names[min_c]} to {value_names[max_c]}'
break
else: count = 0
return straight
if get_vals:
sset = set()
for rank in reversed([14, *range(2, 15)]):
if rank in vset:
count += 1
sset.add(rank)
if count == 5:
return sset
else:
count = 0
sset = set()
# Only reachable if get_vals is requested for a hand with no straight.
raise Exception('No SSET')
@classmethod
def flush(cls, suits, all_cards):
"""Returns the name of a flush hand (string) given a list of the hand's card suits and a list of all the cards
in the hand. Returns False if a flush is not present within the hand. Also changes hand strength accordingly."""
flushes = [suit for suit in suits if suits.count(suit) >= 5]
# flushes_vals is only defined when a flush exists; the early return below
# keeps the later use safe.
if flushes:
flushes_vals = sorted([card.value for card in all_cards if card.suit == flushes[0]], reverse=True)
if not flushes:
return False
else:
cls.strength = BaseStrength.FLUSH.value + 60*flushes_vals[0] + 6*flushes_vals[1] + .6*flushes_vals[2] + \
.06*flushes_vals[3] + .006*flushes_vals[4]
flush = f'{value_names[max(flushes_vals)]}-High Flush of {flushes[0]}'
return flush
@classmethod
def full_house(cls, values):
"""Returns the name of a filled up (string) hand given a list of the hand's card values.
Returns False if a full house is not present within the hand. Also changes hand strength accordingly."""
trips = list(dict.fromkeys(sorted([val for val in values if values.count(val) == 3], reverse=True)))
pairs = sorted([val for val in values if values.count(val) == 2], reverse=True)
if not trips or (len(trips) == 1 and not pairs):
return False
if pairs:
cls.strength = BaseStrength.FULL_HOUSE.value + 60*trips[0] + 6*pairs[0]
fh = f'{value_names_plural[trips[0]]}s full of {value_names_plural[pairs[0]]}s'
# NOTE(review): the two branches below are byte-for-byte identical, so the
# `trips[1] > pairs[0]` comparison has no effect — likely leftover logic;
# verify the intended tie-break between a second trip and the best pair.
if len(trips) > 1:
if pairs:
if trips[1] > pairs[0]:
cls.strength = BaseStrength.FULL_HOUSE.value + 60*trips[0] + 6*trips[1]
fh = f'{value_names_plural[trips[0]]}s full of {value_names_plural[trips[1]]}s'
else:
cls.strength = BaseStrength.FULL_HOUSE.value + 60*trips[0] + 6*trips[1]
fh = f'{value_names_plural[trips[0]]}s full of {value_names_plural[trips[1]]}s'
return fh
@classmethod
def quads(cls, values):
"""Returns the name of a four-of-a-kind hand (string) given a list of the hand's card values.
Returns False if quads are not present within the hand. Also changes hand strength accordingly."""
quads = [val for val in values if values.count(val) >= 4]
if not quads:
return False
else:
quads = max(quads)
vq = values.copy()
for _ in range(4):
vq.remove(quads)
cls.strength = BaseStrength.QUADS.value + 60*quads + 6*vq[0]
return f'Quad {value_names_plural[quads]}s'
@classmethod
def straight_flush(cls, suits, all_cards):
"""Returns the name of a straight or royal flush hand (string) given a list of the hand's card suits,
a set of the hand's card values, and a list of all the cards in the hand. Returns False if a straight or royal
flush is not present within the hand. Also changes hand strength accordingly."""
straight_: str = None
flushes = [suit for suit in suits if suits.count(suit) >= 5]
if flushes:
flushes_vals = sorted([card.value for card in all_cards if card.suit == flushes[0]], reverse=True)
if HandTypeEvaluation.straight(flushes_vals):
straight_vals = HandTypeEvaluation.straight(flushes_vals, True)
if {14, 10, 11, 12, 13} <= straight_vals: straight_ = "Royal"
elif {14, 2, 3, 4, 5} <= straight_vals: straight_ = "Wheel"
else: straight_ = "Normal"
# NOTE(review): the "Normal" strength uses max(flushes_vals) — the highest
# card of the whole flush — while the name uses max(straight_vals); these
# can differ (e.g. a stray higher flush card), so verify which is intended.
if straight_ == "Normal":
cls.strength = BaseStrength.STRAIGHT_FLUSH.value + 70*max(flushes_vals)
sf = f'{value_names[max(straight_vals)]}-High Straight Flush of {flushes[0]}'
elif straight_ == "Wheel":
cls.strength = BaseStrength.STRAIGHT_FLUSH.value
sf = f'Five-High Straight Flush of {flushes[0]}'
elif straight_ == "Royal":
cls.strength = BaseStrength.ROYAL_FLUSH.value
sf = f'Royal Flush of {flushes[0]}'
else:
return False
return sf
@staticmethod
def evalhand(values, suits, vset, all_cards):
"""Returns the exact type of hand (string) that is present given a list of values and suits within the hand,
a set of values within the hand, and a list of all the cards in the hand"""
# Try categories strongest-first; the first truthy result wins and has
# already set cls.strength as a side effect.
x = HandTypeEvaluation.straight_flush(suits, all_cards)
if not x: x = HandTypeEvaluation.quads(values)
if not x: x = HandTypeEvaluation.full_house(values)
if not x: x = HandTypeEvaluation.flush(suits, all_cards)
if not x: x = HandTypeEvaluation.straight(vset)
if not x: x = HandTypeEvaluation.trip(values)
if not x: x = HandTypeEvaluation.num_pair(values)
if not x: x = HandTypeEvaluation.h_card(values)
return x
def determine(hand):
    """Split a hand into (values sorted descending, value set, suits, cards).

    `hand` is indexed by position 0..len-1 rather than iterated directly
    because Deck.draw() returns a dict keyed by integers (the `drawcards`
    global), and direct iteration would yield the keys, not the cards.
    """
    values, vset, suits, all_cards = [], set(), [], []
    for idx in range(len(hand)):
        card = hand[idx]
        values.append(int(card))
        vset.add(int(card))
        suits.append(card.suit)
        all_cards.append(card)
    return sorted(values, reverse=True), vset, suits, all_cards
# Message/Text Functions
def ss():
    """Print the current hand strength (scaled down by 10^4) when advanced
    stats are enabled; otherwise just end the current output line."""
    if not show_strength_:
        print()
        return
    print('[{:6.6f}]'.format(HandTypeEvaluation.strength / 10**4))
def hnumber(max_v, msg):
    """Prompt for the number of hands (int) to generate.

    Accepts 'max' (and common abbreviations) as shorthand for `max_v`;
    re-prompts until the input is valid.
    """
    while True:
        answer = input(msg)
        if answer.lower() in ('m', 'max', 'mx', 'maximum'):
            return max_v
        try:
            count = int(answer)
        except ValueError:
            print('Please either enter a positive integer or \'max\'.')
            continue
        if 0 < count <= max_v:
            return count
        print(f'Please enter an integer between 1 and {max_v}.')
def decks(msg):
    """Prompt until the user enters a positive integer number of decks."""
    while True:
        try:
            count = int(input(msg))
        except ValueError:
            print('Please enter a positive integer.')
            continue
        if count > 0:
            return count
        print('Please enter a positive integer.')
def cph(msg):
    """Prompt for cards per hand (int), restricted to the range 5..52."""
    while True:
        try:
            count = int(input(msg))
        except ValueError:
            print('Please enter a positive integer between 5 and 52.')
            continue
        if 5 <= count <= 52:
            return count
        print('Please enter a positive integer between 5 and 52.')
def show_strength(msg):
    """Prompt yes/no for advanced stats; returns 0 or 1 (via strtobool)."""
    while True:
        try:
            choice = strtobool(input(msg))
        except ValueError:
            print('Please indicate whether you\'d like to see advanced stats')
            continue
        # strtobool only ever yields 0 or 1, so this check always passes;
        # kept to mirror the original control flow.
        if choice in (0, 1):
            return choice
        print('Please indicate whether you\'d like to see advanced stats')
def get_inputs():
    """Collect all interactive settings.

    Returns (number of decks, cards per hand, number of players,
    advanced-stats flag).
    """
    n_decks = decks('How many decks are there? ')
    cards_each = cph('How many cards per hand? ')
    # A player needs `cards_each` cards, so this caps the player count.
    max_players = floor((n_decks * 52) / cards_each)
    players = hnumber(max_players, f'How many players are there (max {max_players})? ')
    stats = show_strength("Would you like to see advanced stats? ")
    return n_decks, cards_each, players, stats
def print_hand(user_hand, h_inc):
    """Pretty-print player h_inc's hand.

    In DEBUG mode, prints the compact repr of each card and flags any
    duplicate cards drawn into the same hand.
    """
    print(f"\nPlayer {h_inc + 1}'s hand:")
    print("| ", end="")
    if not DEBUG:
        for key in user_hand:
            print(user_hand[key], end=" | ")
        return
    for key in user_hand:
        print(repr(user_hand[key]), end=" | ")
    if sorted(list(set(user_hand))) != sorted(user_hand):
        print("DUPLICATE", end="")
# NOTE(review): indentation is stripped in this block. Reads the module
# globals h_strength, show_strength_, hnumber, ho_names, hand_occurrence and
# the deck timing globals set at the bottom of the file.
def post_draw():
"""Displays various stats if advanced stats are on
and displays the strongest and weakest hand if advanced stats are off"""
# Rank players by hand strength, strongest first.
hss = sorted(h_strength.items(), key=lambda k: k[1], reverse=True)
if not show_strength_:
print(f'\n\n\nPlayer {hss[0][0] + 1} has the strongest hand!')
print(f'Player {hss[hnumber-1][0]+1} has the weakest hand :(')
else:
stats_start_time = time()
print(f'\n\n\nPlayer {hss[0][0] + 1} has the strongest hand! [' + '{:6.6f}]'.format(hss[0][1]/10**4, 6))
print(f'Player {hss[hnumber-1][0]+1} has the weakest hand :( [' + '{:6.6f}]'.format(hss[hnumber-1][1]/10**4))
strength_avg = sum([hss[x][1] for x in range(len(hss))])/len(hss)
print(f'Average Hand: {ho_names[floor(strength_avg/1000-1)]} [' + '{:6.6f}]'.format(strength_avg/10**4, 6))
print('\n\n\n\n\nHand Occurrence:\n')
for x in range(10):
print(ho_names[x]+': ', hand_occurrence[x], f'({round(100*hand_occurrence[x]/len(hss), 2)}%)')
print('\n\n\n\n\nFull Player Ranking:\n')
for x in range(len(hss)):
print(f'{x+1:0{len(str(len(hss)))}}.', f'Player {hss[x][0]+1:0{len(str(len(hss)))}}', '[{:6.6f}]'.format(hss[x][1]/10**4, 6))
# NOTE(review): stats_start_time is only assigned inside the `else` branch
# above, so all the timing lines below must belong to that branch in the
# original indentation — confirm when restoring structure.
t_time = time()-deck_start_time
d_time = deck_end_time-deck_start_time
h_time = stats_start_time-deck_end_time
s_time = time()-stats_start_time
print('\n\n\nPerformance:\n')
print('Complete Execution Time:', "~%ss" % (round(t_time, 2)))
print('Deck Build Time:', '~%ss' % (round(d_time, 2)),
f'({int(round(100*d_time/t_time, 0))}%)')
print('Hand Build Time:', '~%ss' % (round(h_time, 2)),
f'({int(round(100*h_time/t_time, 0))}%)')
print('Stats Calculation Time:', '~%ss' % (round(s_time, 2)),
f'({int(round(100*s_time/t_time, 0))}%)')
# Draw a hand for every player, classify it, tally the category counts,
# record each player's strength, then print the summary via post_draw().
def showdown_poker(): # Main Function
for h_inc in range(hnumber):
user_hand = deck.draw(cards_per_hand)
print_hand(user_hand, h_inc)
values, vset, suits, all_cards = determine(user_hand)
exact_hand = HandTypeEvaluation.evalhand(values, suits, vset, all_cards)
print('\n'+exact_hand, end=" "); ss()
# Map the strength band (1000-wide per category) back to a category index.
hand_occurrence[floor(HandTypeEvaluation.strength/1000-1)] += 1
h_strength[h_inc] = HandTypeEvaluation.strength
post_draw()
# Category names indexed by strength band (matches BaseStrength order, weakest first).
ho_names = ('High Card', 'Pair', 'Two-Pair', 'Three of a Kind', 'Straight', 'Flush', 'Full House',
'Four of a Kind', 'Straight Flush', 'Royal Flush')
# Per-category tally, filled by showdown_poker().
hand_occurrence = {0: 0, 1: 0, 2: 0, 3: 0, 4: 0, 5: 0, 6: 0, 7: 0, 8: 0, 9: 0}
# Display names; both 1 and 14 map to Ace (wheel vs. high straights); 0 marks an empty card.
value_names = {1: 'Ace', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', 6: 'Six', 7: 'Seven', 8: 'Eight', 9: 'Nine',
10: 'Ten', 11: 'Jack', 12: 'Queen', 13: 'King', 14: 'Ace', 0: 'EMPTY'}
# NOTE: callers append a trailing 's' to these, so 'Sixe' is intentional
# ('Sixe' + 's' -> 'Sixes'); do not "fix" it to 'Six'.
value_names_plural = {1: 'Ace', 2: 'Two', 3: 'Three', 4: 'Four', 5: 'Five', 6: 'Sixe', 7: 'Seven', 8: 'Eight',
9: 'Nine', 10: 'Ten', 11: 'Jack', 12: 'Queen', 13: 'King', 14: 'Ace'}
suit_names = {"Hearts": '♥', "Spades": '♠', "Clubs": '♣', "Diamonds": '♦', 0: ''}
# Shared hand buffer (aliased by Deck.draw) and per-player strength record.
drawcards, h_strength = {}, {}
# NOTE(review): this rebinds the name `hnumber` from the prompt function to
# its integer result, shadowing the function for the rest of the run.
decks_, cards_per_hand, hnumber, show_strength_ = get_inputs()
deck_start_time = time()
deck = Deck()
deck_end_time = time()
if __name__ == '__main__':
showdown_poker()
| StarcoderdataPython |
class Transaction():
    """Object that is used to store a transaction."""

    def __init__(self, txid, fee, weight, parents, ancesCnt=-1):
        """Store one mempool transaction.

        Args:
            txid (str): hash of a transaction
            fee (int): miners fee (i.e, the fee that a miner gets for including
                this transaction in their block)
            weight (int): size of this transaction
            parents (str): ';'-separated txids this transaction depends on;
                empty/None means no dependencies
            ancesCnt (int, optional): number of ancestors for this transaction.
                Defaults to -1 (unknown).
        """
        self.txid = txid
        self.fee = int(fee)
        self.weight = int(weight)
        self.parents = parents.split(';') if parents else []
        self.ancestorCnt = ancesCnt

    def print(self):
        """Dump the transaction's fields to stdout for debugging."""
        print("id: {}".format(self.txid))
        print("fee: {}".format(self.fee))
        print("weight: {}".format(self.weight))
        print("parents: {}\n".format(self.parents))

    def cntParent(self):
        """Return the number of direct parent transactions."""
        return len(self.parents)
30892 | import pytest
from checkout_sdk.events.events import RetrieveEventsRequest
from checkout_sdk.events.events_client import EventsClient
# Pytest fixture: one EventsClient per test class, wired to the mocked SDK
# configuration and API client (fixtures presumably provided by conftest —
# not visible in this file).
@pytest.fixture(scope='class')
def client(mock_sdk_configuration, mock_api_client):
return EventsClient(api_client=mock_api_client, configuration=mock_sdk_configuration)
# NOTE(review): indentation is stripped in this block. Each test patches the
# underlying ApiClient HTTP verb to return a sentinel string and asserts the
# EventsClient method passes it straight through unchanged.
class TestEventsClient:
def test_retrieve_all_event_types(self, mocker, client: EventsClient):
mocker.patch('checkout_sdk.api_client.ApiClient.get', return_value='response')
assert client.retrieve_all_event_types() == 'response'
def test_retrieve_events(self, mocker, client: EventsClient):
mocker.patch('checkout_sdk.api_client.ApiClient.get', return_value='response')
assert client.retrieve_events(RetrieveEventsRequest()) == 'response'
def test_retrieve_event(self, mocker, client: EventsClient):
mocker.patch('checkout_sdk.api_client.ApiClient.get', return_value='response')
assert client.retrieve_event('event_id') == 'response'
def test_retrieve_event_notification(self, mocker, client: EventsClient):
mocker.patch('checkout_sdk.api_client.ApiClient.get', return_value='response')
assert client.retrieve_event_notification('event_id', 'notification_id') == 'response'
# The retry endpoints use POST rather than GET.
def test_retry_webhook(self, mocker, client: EventsClient):
mocker.patch('checkout_sdk.api_client.ApiClient.post', return_value='response')
assert client.retry_webhook('event_id', 'webhook_id') == 'response'
def test_retry_all_webhooks(self, mocker, client: EventsClient):
mocker.patch('checkout_sdk.api_client.ApiClient.post', return_value='response')
assert client.retry_all_webhooks('event_id') == 'response'
| StarcoderdataPython |
122347 | <reponame>random-weights/Tensorflow-Project-Template
import json
from bunch import Bunch
import os
def write_to_json(exp_name, epochs, iter_per_epoch, batch_size, learning_rate):
    """Build the experiment config and persist it as JSON.

    Makes sense to store each config file inside the experiments/exp_name dir.
    That way all the data regarding an experiment is in one directory.
    This config file will be generated for each instance of trainer obj.

    Returns the config dict that was written (previously nothing was
    returned and — BUG FIX — nothing was ever written: the original built
    `edict` and then discarded it).
    """
    edict = {
        "exp_name": exp_name,
        "epochs": epochs,
        "iter_per_epoch": iter_per_epoch,
        "batch_size": batch_size,
        "learning_rate": learning_rate}
    config_dir = os.path.join("experiments", exp_name)
    os.makedirs(config_dir, exist_ok=True)
    with open(os.path.join(config_dir, "config.json"), "w") as f:
        json.dump(edict, f, indent=4)
    return edict
def create_dirs(dirs):
    """Create every directory in `dirs` that does not already exist.

    dirs - a list of directories to create if these directories are not found
    :param dirs:
    :return exit_code: 0:success -1:failed
    """
    try:
        for dir_ in dirs:
            # exist_ok makes the existence check atomic (no check-then-create race).
            os.makedirs(dir_, exist_ok=True)
        return 0
    except Exception as err:
        print("Creating directories error: {0}".format(err))
        # BUG FIX: the docstring promises a -1 return code, but the original
        # called exit(-1), killing the whole process on any failure. Return
        # the documented code and let the caller decide how to react.
        return -1
# Import the compiled extension module if present; fall back gracefully so
# the package still imports (with _levenshtein = None) when the extension
# was not built for this platform.
try:
from . import _levenshtein
from ._levenshtein import *
except ImportError:
_levenshtein = None
else:
# Only adopt the extension's docstring when the import actually succeeded.
__doc__ = _levenshtein.__doc__
__version__ = "0.13.1"
__author__ = "<NAME>"
| StarcoderdataPython |
3229793 | <gh_stars>1-10
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import DingzCoordinator, DingzEntity
from .api import State
from .const import DOMAIN
# Module-level logger, named after this integration module.
logger = logging.getLogger(__name__)
# NOTE(review): indentation is stripped in this block. Registers the dingz
# sensor entities for one config entry: brightness and temperature always,
# motion only when the device has a PIR that is enabled, and the input
# sensor only when the device reports an input state.
# NOTE(review): returning True from a platform's async_setup_entry is
# unusual in Home Assistant — confirm it is expected by this HA version.
async def async_setup_entry(hass, entry, async_add_entities):
c: DingzCoordinator = hass.data[DOMAIN][entry.entry_id]
state: State = c.data
entities = [Brightness(c), Temperature(c)]
if c.device.has_pir:
pir_config = await c.session.pir_config()
if pir_config.enabled:
entities.append(Motion(c))
if state.sensors.input_state is not None:
entities.append(Input(c))
async_add_entities(entities)
return True
# Illuminance sensor backed by the dingz brightness reading (lux).
# Name/unique_id are derived from the DingzEntity base (defined elsewhere).
class Brightness(DingzEntity):
@property
def name(self):
return f"{super().name} Brightness"
@property
def unique_id(self):
return f"{super().unique_id}-brightness"
@property
def state(self):
return self._dingz_state.sensors.brightness
@property
def extra_state_attributes(self):
sensors = self._dingz_state.sensors
# Expose the raw light state (e.g. day/night) alongside the lux value.
return {"light_state": sensors.light_state}
@property
def unit_of_measurement(self):
return "lx"
@property
def device_class(self):
return "illuminance"
# Binary motion sensor backed by the dingz PIR "person present" flag.
class Motion(DingzEntity, BinarySensorEntity):
@property
def name(self):
return f"{super().name} Motion"
@property
def unique_id(self):
return f"{super().unique_id}-motion"
@property
def is_on(self):
# person_present is truthy when the PIR currently detects someone.
return bool(self._dingz_state.sensors.person_present)
@property
def extra_state_attributes(self):
sensors = self._dingz_state.sensors
return {
"light_off_timer": sensors.light_off_timer,
"suspend_timer": sensors.suspend_timer,
}
@property
def device_class(self):
return "motion"
# Temperature sensor reporting the compensated room temperature; the raw
# internal sensor readings (CPU / puck / FET) are exposed as attributes.
class Temperature(DingzEntity):
@property
def name(self):
return f"{super().name} Temperature"
@property
def unique_id(self):
return f"{super().unique_id}-temperature"
@property
def state(self):
return self._dingz_state.sensors.room_temperature
@property
def extra_state_attributes(self):
sensors = self._dingz_state.sensors
return {
"cpu": sensors.cpu_temperature,
"puck": sensors.puck_temperature,
"fet": sensors.fet_temperature,
}
@property
def unit_of_measurement(self):
return "°C"
@property
def device_class(self):
return "temperature"
# Binary sensor mirroring the dingz hardware input terminal's state.
class Input(DingzEntity, BinarySensorEntity):
@property
def name(self):
return f"{super().name} Input"
@property
def unique_id(self):
return f"{super().unique_id}-input"
@property
def is_on(self):
return bool(self._dingz_state.sensors.input_state)
@property
def device_class(self):
return "power"
| StarcoderdataPython |
1705545 | #!/usr/bin/python
import numpy as np
import wxmplot.interactive as wi
# Demo: plot two curves in two separate wxmplot windows, then shift the
# second window so it does not fully overlap the first.
x = np.arange(0.0,10.0,0.1)
y = np.sin(2*x)/(x+2)
win1 = wi.plot(x, y, title='Window 1', xlabel='X (mm)', win=1)
win2 = wi.plot(x, np.cos(x-4), title='Window 2', xlabel='X (mm)', win=2)
# Offset window 2 by 80% of window 1's width, plus a small vertical nudge.
pos = win2.GetPosition()
siz = win1.GetSize()
win2.SetPosition((pos[0]+int(siz[0]*0.8), pos[1]+10))
| StarcoderdataPython |
1723873 | <reponame>asb/opentitan
# Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
"""
Generate Rust constants from validated register JSON tree
"""
import io
import logging as log
import sys
import textwrap
import warnings
from typing import Optional, Set, TextIO
from .field import Field
from .ip_block import IpBlock
from .params import LocalParam
from .register import Register
from .multi_register import MultiRegister
from .signal import Signal
from .window import Window
# Write `msg` verbatim to the given output stream (thin helper used by all
# the gen_const_* emitters below).
def genout(outfile: TextIO, msg: str) -> None:
outfile.write(msg)
def to_snake_case(s: str) -> str:
    """Insert '_' before every non-initial uppercase letter.

    Note: the case of the letters themselves is NOT changed
    (e.g. 'FooBar' -> 'Foo_Bar').
    """
    out = []
    for idx, char in enumerate(s):
        if idx and char.isupper():
            out.append('_')
        out.append(char)
    return ''.join(out)
def as_define(s: str) -> str:
    """Upper-case `s` and replace every non-alphanumeric char with '_'
    to form a valid constant identifier."""
    s = s.upper()
    return ''.join(ch if ch.isalnum() else '_' for ch in s)
def first_line(s: str) -> str:
    """Returns the first line of a multi-line string"""
    lines = s.splitlines()
    return lines[0]
def format_comment(s: str) -> str:
    """Formats a string as '//' comment lines wrapped to an 80-character width.

    Returns the wrapped comment including a trailing newline.
    """
    wrapped = textwrap.wrap(s,
                            width=77,
                            initial_indent='// ',
                            subsequent_indent='// ')
    return '\n'.join(wrapped) + '\n'
def data_type(name: str, val: int, as_hex: bool) -> str:
    """Pick the Rust type for a constant from its name, value and format.

    Offsets and base addresses become usize; values wider than 32 bits abort;
    negative decimal values become i32; everything else is u32.
    """
    if name.endswith(("_OFFSET", "_BASE_ADDR")):
        return "usize"
    if val.bit_length() > 32:
        log.error(name + " value exceeds 32 bit " + str(val))
        sys.exit(1)
    return "i32" if (not as_hex and val < 0) else "u32"
def gen_const(outstr: TextIO,
              name: str,
              suffix: str,
              val: int,
              existing_defines: Set[str],
              as_hex: bool = False) -> str:
    """Emit one ``pub const NAME[_SUFFIX]: TYPE = VALUE;`` line.

    The line (including trailing newline) is written to *outstr* and also
    returned.  The fully-qualified name is recorded in *existing_defines*;
    emitting the same name twice is a fatal error.

    Example:
        name='A_NAME', suffix='', val=10
        -> "pub const A_NAME: u32 = 10;\\n"
    """
    full_name = name if not suffix.strip() else name + '_' + suffix
    if full_name in existing_defines:
        log.error("Duplicate pub const for " + full_name)
        sys.exit(1)
    # Type selection (same policy as data_type above): offsets/addresses
    # are usize, >32-bit values are fatal, negative decimals are i32.
    if full_name.endswith("_OFFSET") or full_name.endswith("_BASE_ADDR"):
        rust_type = "usize"
    else:
        if val.bit_length() > 32:
            log.error(full_name + " value exceeds 32 bit " + str(val))
            sys.exit(1)
        rust_type = "i32" if (not as_hex and val < 0) else "u32"
    val_str = hex(val) if as_hex else str(val)
    output = ('pub const ' + full_name + ': ' + rust_type +
              ' = ' + val_str + ';' + '\n')
    existing_defines.add(full_name)
    outstr.write(output)
    return output
def gen_const_register(outstr: TextIO,
                       reg: Register,
                       comp: str,
                       width: int,
                       rnames: Set[str],
                       existing_defines: Set[str]) -> None:
    """Emit the REG_OFFSET, per-field BIT/MASK/OFFSET and enum VALUE_*
    constants for a single register of component *comp*.

    *width* is the register width in bits; a field spanning the whole
    register gets no MASK/OFFSET pair.
    NOTE(review): *rnames* is accepted but never used here.
    """
    rname = reg.name
    offset = reg.offset
    # Register header comment (first line of its description) plus offset.
    genout(outstr, format_comment(first_line(reg.desc)))
    defname = as_define(comp + '_' + rname)
    gen_const(outstr, defname, 'REG_OFFSET', offset, existing_defines, True)
    for field in reg.fields:
        dname = defname + '_' + as_define(field.name)
        field_width = field.bits.width()
        if field_width == 1:
            # single bit
            gen_const(outstr, dname, 'BIT', field.bits.lsb, existing_defines)
        else:
            # multiple bits (unless it is the whole register)
            if field_width != width:
                # Mask is the field's bitmask shifted down to bit 0.
                mask = field.bits.bitmask() >> field.bits.lsb
                gen_const(outstr, dname, 'MASK', mask, existing_defines, True)
                gen_const(outstr, dname, 'OFFSET', field.bits.lsb, existing_defines)
        if field.enum is not None:
            # One VALUE_* constant per named enum value of the field.
            for enum in field.enum:
                ename = as_define(enum.name)
                gen_const(
                    outstr,
                    defname + '_' + as_define(field.name),
                    'VALUE_' + ename,
                    enum.value,
                    existing_defines,
                    True)
    genout(outstr, '\n')
    return
def gen_const_window(outstr: TextIO,
                     win: Window,
                     comp: str,
                     regwidth: int,
                     rnames: Set[str],
                     existing_defines: Set[str]) -> None:
    """Emit REG_OFFSET / SIZE_WORDS / SIZE_BYTES (and an optional MASK)
    constants for a memory window of component *comp*.

    NOTE(review): *rnames* is accepted but never used here.
    """
    offset = win.offset
    genout(outstr, format_comment('Memory area: ' + first_line(win.desc)))
    defname = as_define(comp + '_' + win.name)
    gen_const(outstr, defname, 'REG_OFFSET', offset, existing_defines, True)
    items = win.items
    gen_const(outstr, defname, 'SIZE_WORDS', items, existing_defines)
    # Byte size: one regwidth-bit word per item.
    items = items * (regwidth // 8)
    gen_const(outstr, defname, 'SIZE_BYTES', items, existing_defines)
    wid = win.validbits
    if (wid != regwidth):
        # Only part of each word is valid; emit a mask of the valid bits.
        mask = (1 << wid) - 1
        gen_const(outstr, defname, 'MASK', mask, existing_defines, True)
def gen_rust_module_param(outstr: TextIO,
                          param: LocalParam,
                          module_name: str,
                          existing_defines: Set[str]) -> None:
    """Emit a MODULE_PARAM_* constant for one local parameter.

    Only integer parameters are supported; any other type produces a
    warning and is skipped.
    """
    # Presently there is only one type (int), however if the new types are
    # added, they potentially need to be handled differently.
    known_types = ["int"]
    if param.param_type not in known_types:
        warnings.warn("Cannot generate a module define of type {}"
                      .format(param.param_type))
        return
    if param.desc is not None:
        genout(outstr, format_comment(first_line(param.desc)))
    # Heuristic: if the name already has underscores, it's already snake_case,
    # otherwise, assume StudlyCaps and convert it to snake_case.
    param_name = param.name if '_' in param.name else to_snake_case(param.name)
    define_name = as_define(module_name + '_PARAM_' + param_name)
    if param.param_type == "int":
        gen_const(outstr, define_name, '', int(param.value), existing_defines)
    genout(outstr, '\n')
def gen_const_module_params(outstr: TextIO,
                            module_data: IpBlock,
                            module_name: str,
                            register_width: int,
                            existing_defines: Set[str]) -> None:
    """Emit constants for every localparam of the block, plus a synthetic
    <MODULE>_PARAM_REG_WIDTH constant holding *register_width*."""
    for param in module_data.params.get_localparams():
        gen_rust_module_param(outstr, param, module_name, existing_defines)
    genout(outstr, format_comment(first_line("Register width")))
    define_name = as_define(module_name + '_PARAM_REG_WIDTH')
    gen_const(outstr, define_name, '', register_width, existing_defines)
    genout(outstr, '\n')
def gen_multireg_field_defines(outstr: TextIO,
                               regname: str,
                               field: Field,
                               subreg_num: int,
                               regwidth: int,
                               existing_defines: Set[str]) -> None:
    """Emit the packing constants shared by all subregisters of a
    homogeneous multiregister: the field width, how many fields fit in one
    physical register, and the number of expanded subregisters."""
    field_width = field.bits.width()
    # Number of copies of the field packed into one regwidth-bit register.
    fields_per_reg = regwidth // field_width
    suffix = as_define(field.name + "_FIELD_WIDTH")
    gen_const(outstr, regname, suffix, field_width, existing_defines)
    suffix = as_define(field.name + "_FIELDS_PER_REG")
    gen_const(outstr, regname, suffix, fields_per_reg, existing_defines)
    gen_const(outstr, regname, "MULTIREG_COUNT", subreg_num, existing_defines)
    genout(outstr, '\n')
def gen_const_multireg(outstr: TextIO,
                       multireg: MultiRegister,
                       component: str,
                       regwidth: int,
                       rnames: Set[str],
                       existing_defines: Set[str]) -> None:
    """Emit constants for a multiregister.

    For a homogeneous multireg (exactly one field) the shared packing
    constants are emitted first; non-homogeneous multiregs only get a
    warning.  Every expanded subregister then gets its own register
    constants via gen_const_register.
    """
    comment = multireg.reg.desc + " (common parameters)"
    genout(outstr, format_comment(first_line(comment)))
    if len(multireg.reg.fields) == 1:
        regname = as_define(component + '_' + multireg.reg.name)
        gen_multireg_field_defines(outstr, regname, multireg.reg.fields[0],
                                   len(multireg.regs), regwidth, existing_defines)
    else:
        # logging's warn() is a deprecated alias for warning().
        log.warning("Non-homogeneous multireg " + multireg.reg.name +
                    " skip multireg specific data generation.")
    for subreg in multireg.regs:
        gen_const_register(outstr, subreg, component, regwidth, rnames,
                           existing_defines)
def gen_interrupt_field(outstr: TextIO,
                        interrupt: Signal,
                        component: str,
                        regwidth: int,
                        existing_defines: Set[str]) -> None:
    """Emit BIT (single-bit) or MASK/OFFSET (multi-bit) constants for one
    interrupt signal, named <COMPONENT>_INTR_COMMON_<NAME>."""
    fieldlsb = interrupt.bits.lsb
    iname = interrupt.name
    defname = as_define(component + '_INTR_COMMON_' + iname)
    if interrupt.bits.width() == 1:
        # single bit
        gen_const(outstr, defname, 'BIT', fieldlsb, existing_defines)
    else:
        # multiple bits (unless it is the whole register)
        if interrupt.bits.width() != regwidth:
            # Bug fix: the mask must be the field's bitmask shifted down to
            # bit 0 (as gen_const_register does), not the MSB *index*
            # shifted down, which yields 0 for any field whose msb < lsb
            # shift amount.
            mask = interrupt.bits.bitmask() >> fieldlsb
            gen_const(outstr, defname, 'MASK', mask, existing_defines, True)
            gen_const(outstr, defname, 'OFFSET', fieldlsb, existing_defines)
def gen_const_interrupts(outstr: TextIO,
                         block: IpBlock,
                         component: str,
                         regwidth: int,
                         existing_defines: Set[str]) -> None:
    """Emit the shared INTR_COMMON_* constants for all of the block's
    interrupts (skipped entirely for blocks with no_auto_intr set)."""
    # If no_auto_intr is true, then we do not generate common defines,
    # because the bit offsets for a particular interrupt may differ between
    # the interrupt enable/state/test registers.
    if block.no_auto_intr:
        return
    genout(outstr, format_comment(first_line("Common Interrupt Offsets")))
    for intr in block.interrupts:
        gen_interrupt_field(outstr, intr, component, regwidth, existing_defines)
    genout(outstr, '\n')
def gen_rust(block: IpBlock,
             outfile: TextIO,
             src_lic: Optional[str],
             src_copy: str) -> int:
    """Generate the complete Rust constants file for *block* into *outfile*.

    Emits module parameters, common interrupt offsets, then constants for
    every register / multiregister / window entry.  The body is buffered in
    a StringIO so the header (provenance, copyright, license) can be
    written first.  Returns 0 on success.
    """
    rnames = block.get_rnames()
    outstr = io.StringIO()
    # This tracks the defines that have been generated so far, so we
    # can error if we attempt to duplicate a definition
    existing_defines = set()  # type: Set[str]
    gen_const_module_params(outstr, block, block.name, block.regwidth,
                            existing_defines)
    gen_const_interrupts(outstr, block, block.name, block.regwidth,
                         existing_defines)
    for rb in block.reg_blocks.values():
        for x in rb.entries:
            if isinstance(x, Register):
                gen_const_register(outstr, x, block.name, block.regwidth, rnames,
                                   existing_defines)
                continue
            if isinstance(x, MultiRegister):
                gen_const_multireg(outstr, x, block.name, block.regwidth, rnames,
                                   existing_defines)
                continue
            if isinstance(x, Window):
                gen_const_window(outstr, x, block.name, block.regwidth,
                                 rnames, existing_defines)
                continue
    generated = outstr.getvalue()
    outstr.close()
    # Header: provenance line plus any copyright/license text carried over
    # from the source description.
    genout(outfile, '// Generated register constants for ' + block.name + '\n\n')
    if src_copy != '':
        genout(outfile, '// Copyright information found in source file:\n')
        genout(outfile, '// ' + src_copy + '\n\n')
    if src_lic is not None:
        genout(outfile, '// Licensing information found in source file:\n')
        for line in src_lic.splitlines():
            genout(outfile, '// ' + line + '\n')
        genout(outfile, '\n')
    genout(outfile, generated)
    genout(outfile, '// End generated register constants for ' + block.name)
    return 0
def test_gen_const() -> None:
    """Self-test for gen_const: check the exact emitted constant text.

    The previous expectations were stale: they lacked the ': <type> ='
    portion gen_const always emits, and assumed C-style backslash line
    wrapping, but gen_const emits exactly one line regardless of name
    length.
    """
    outstr = io.StringIO()
    basic_oneline = 'pub const MACRO_NAME: u32 = 10;\n'
    assert (gen_const(outstr, 'MACRO', 'NAME', 10, set()) == basic_oneline)
    long_macro_name = 'A_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_VERY_LONG_MACRO_NAME'
    # Output is never wrapped, even for very long names.
    oneline = 'pub const ' + long_macro_name + ': u32 = 1000000000;\n'
    assert (gen_const(outstr, long_macro_name, '', 1000000000, set()) == oneline)
| StarcoderdataPython |
1766999 | # coding: utf-8
import pytz
from dateutil.relativedelta import relativedelta
from calendar import monthrange
from .tools import yearrange
from .dow import DATEUTIL_DOWS,DOWS
class BaseSnap(object):
    """Abstract base for date "snapping" strategies.

    Holds the target timezone and provides localization helpers; the
    snap_to_* hooks must be implemented by concrete subclasses.
    """

    def __init__(self, timezone):
        self.timezone = timezone

    def _localized_datetime(self, datetime):
        """Return *datetime* converted into this snapper's timezone."""
        shifted = datetime.astimezone(self.timezone)
        return self.timezone.normalize(shifted)

    def _to_utc(self, datetime):
        """Return *datetime* converted back to UTC."""
        return pytz.utc.normalize(datetime.astimezone(pytz.utc))

    def snap_to_dow(self, datetime, dows):
        """Snap the date to the nearest day of week in *dows*."""
        raise NotImplementedError("Snapping class must implement snap_to_dow")

    def snap_to_month_day(self, datetime, day):
        """Snap the date to day-of-month *day*, if possible."""
        raise NotImplementedError("Snapping class must implement snap_to_month_day")

    def snap_to_month_weekday(self, datetime, dows, ordinal):
        """Snap to a month weekday (e.g. the Xth friday) from *dows*.

        *ordinal* is either an integer 1-5 or the string 'last'.
        """
        raise NotImplementedError("Snapping class must implement snap_to_month_day")
class SnapLogical(BaseSnap):
    """
    Implements 'logical' (at least, for our needs) snapping.

    Day-of-week snaps move forward to the next matching weekday (and may
    therefore overflow into the next month); month-day snaps stay within
    the current month and never overflow.  All arithmetic happens on a
    timezone-localized copy of the input, and the result is converted
    back to UTC, so DST transitions are handled by relativedelta.
    """
    def snap_to_dow(self,datetime,dow):
        """Snap forward to the next occurrence of *dow* (ISO8601, 1=Mon..7=Sun)."""
        #Dow should be in ISO8601 formart
        localized = self._localized_datetime(datetime)
        localized_weekday = localized.isoweekday()
        distance = dow - localized_weekday
        if distance < 0:
            # Target weekday already passed this week; wrap to next week.
            #Since its negative dont subtract
            distance = 7 + distance
        # NOTE(review): 'distance is not None' can never be False here.
        if distance != 0 and distance is not None:
            #relativedelta provides us better accuracy
            localized = localized + relativedelta(days=+distance)
        return self._to_utc(localized)
    def snap_to_month_day(self,datetime,day):
        """Snap to day-of-month *day* within the current month (no overflow)."""
        localized = self._localized_datetime(datetime)
        month = monthrange(localized.year,localized.month)
        #month[1] = days in month
        #month[0] = weekday of first day of the month
        if day < 1 or day > month[1]:
            # NOTE(review): the reported range starts at month[0], which is
            # a weekday index, while the accepted range actually starts at
            # 1 - confirm the intended message.
            raise ValueError("Month day %s falls outside of range available for %s,%s (%s-%s)"
                %(day,localized.month,localized.year,month[0],month[1])
            )
        distance = day - localized.day
        #13 21 = -8, need to snap down 8 to the 13th
        #21 13 = 8, need to snap up 8 to 21
        #relativedelta provides us better accuracy
        if distance != 0:
            localized = localized + relativedelta(days=distance)
        return self._to_utc(localized)
    def snap_to_weekday_ordinal(self,datetime,dow,ordinal):
        """Snap to the *ordinal*-th occurrence of weekday *dow* this month.

        *ordinal* is an integer 1-5 or the string 'last'.
        NOTE(review): BaseSnap declares this hook as snap_to_month_weekday;
        the differing name here means the base-class hook is never
        overridden - confirm which name is canonical.
        """
        localized = self._localized_datetime(datetime)
        if ordinal == 'last':
            if dow not in DATEUTIL_DOWS:
                raise ValueError("Invalid day of week %s, must be 1-7 (mon-sun)"%dow)
            #We need to ensure that this is the 1st of the month
            #Calling .replace here is not enough as it will not account for DST
            #Remember we are using LOCALIZED times here
            negative_delta = relativedelta(days=-localized.day+1)
            localized = localized + negative_delta
            #We're now at the first
            localized = localized + relativedelta(months=+1)
            #We're now at the first of the next month
            #If the first day of the month is the day we want, we need 2 weeks back
            if localized.isoweekday() == dow:
                last_delta = relativedelta(weekday=DATEUTIL_DOWS[dow](-2))
            else:
                last_delta = relativedelta(weekday=DATEUTIL_DOWS[dow](-1))
            #This will find the previous occurence of the dow
            #Since we're at the first of the next month,
            #this will be the last occurence in the previous month
            localized = localized + last_delta
            return self._to_utc(localized)
        else:
            try:
                ordinal = int(ordinal)
            except ValueError:
                raise ValueError("Ordinal must either be an integer 1-5 or 'last'")
            if ordinal < 1 or ordinal > 5:
                raise ValueError("Ordinal must either be an integer 1-5 or 'last'")
            #We need to ensure that this is the 1st of the month
            #Calling .replace here is not enough as it will not account for DST
            #Remember we are using LOCALIZED times here
            negative_delta = relativedelta(days=-localized.day+1)
            localized = localized + negative_delta
            localized_dow = localized.isoweekday()
            if localized_dow > dow:
                # First occurrence of dow is next week; advance to Monday.
                #Set to monday
                days = 8 - localized_dow
                localized = localized + relativedelta(days=+days)
                localized_dow = 1
            # Whole weeks for the ordinal, plus the remaining weekday gap.
            modifier = (ordinal - 1) * 7 + (dow - localized_dow)
            if modifier > 0:
                localized = localized + relativedelta(days=modifier)
            return self._to_utc(localized)
    def snap_to_year_day(self,datetime,yearday):
        """Snap to day-of-year *yearday* within the current year."""
        localized = self._localized_datetime(datetime)
        year = yearrange(localized.year)
        if yearday < year[0] or yearday > year[1]:
            raise ValueError("Year day %s falls outside of range available for %s (%s-%s)"
                %(yearday,localized.year,year[0],year[1])
            )
        localized_yearday = localized.timetuple().tm_yday
        distance = yearday - localized_yearday
        # 200 201 = -1 needs to go down 1 to the 200th
        # 201 200 = 1 needs to go up 1 to the 201st
        if distance != 0:
            localized = localized + relativedelta(days=distance)
        return self._to_utc(localized)
| StarcoderdataPython |
116368 | <reponame>opennode/waldur-rijkscloud<filename>src/waldur_rijkscloud/views.py
from __future__ import unicode_literals
from waldur_core.structure import views as structure_views
from . import filters, executors, models, serializers
class ServiceViewSet(structure_views.BaseServiceViewSet):
    """ViewSet exposing RijkscloudService objects."""
    queryset = models.RijkscloudService.objects.all()
    serializer_class = serializers.ServiceSerializer
class ServiceProjectLinkViewSet(structure_views.BaseServiceProjectLinkViewSet):
    """ViewSet exposing links between Rijkscloud services and projects."""
    queryset = models.RijkscloudServiceProjectLink.objects.all()
    serializer_class = serializers.ServiceProjectLinkSerializer
    filter_class = filters.ServiceProjectLinkFilter
class FlavorViewSet(structure_views.BaseServicePropertyViewSet):
    """Read-only property ViewSet for flavors, ordered by cores then RAM."""
    queryset = models.Flavor.objects.all().order_by('cores', 'ram')
    serializer_class = serializers.FlavorSerializer
    lookup_field = 'uuid'
    filter_class = filters.FlavorFilter
class VolumeViewSet(structure_views.ImportableResourceViewSet):
    """ViewSet for volumes: create/pull/delete via executors; update disabled."""
    queryset = models.Volume.objects.all()
    serializer_class = serializers.VolumeSerializer
    filter_class = filters.VolumeFilter
    # Lifecycle operations are delegated to backend executors.
    create_executor = executors.VolumeCreateExecutor
    pull_executor = executors.VolumePullExecutor
    delete_executor = executors.VolumeDeleteExecutor
    disabled_actions = ['update', 'partial_update']
    # Hooks for importing pre-existing backend volumes.
    importable_resources_backend_method = 'get_volumes_for_import'
    importable_resources_serializer_class = serializers.VolumeImportableSerializer
    import_resource_serializer_class = serializers.VolumeImportSerializer
class InstanceViewSet(structure_views.ImportableResourceViewSet):
    """ViewSet for instances: create/pull/delete via executors; update disabled."""
    queryset = models.Instance.objects.all()
    serializer_class = serializers.InstanceSerializer
    filter_class = filters.InstanceFilter
    # Lifecycle operations are delegated to backend executors.
    pull_executor = executors.InstancePullExecutor
    create_executor = executors.InstanceCreateExecutor
    delete_executor = executors.InstanceDeleteExecutor
    disabled_actions = ['update', 'partial_update']
    # Hooks for importing pre-existing backend instances.
    importable_resources_backend_method = 'get_instances_for_import'
    importable_resources_serializer_class = serializers.InstanceImportableSerializer
    import_resource_serializer_class = serializers.InstanceImportSerializer
class NetworkViewSet(structure_views.BaseServicePropertyViewSet):
    """Read-only property ViewSet for networks."""
    queryset = models.Network.objects.all().order_by('settings')
    serializer_class = serializers.NetworkSerializer
    lookup_field = 'uuid'
    filter_class = filters.NetworkFilter
class SubNetViewSet(structure_views.BaseServicePropertyViewSet):
    """Read-only property ViewSet for subnets."""
    queryset = models.SubNet.objects.all().order_by('settings')
    serializer_class = serializers.SubNetSerializer
    lookup_field = 'uuid'
    filter_class = filters.SubNetFilter
class InternalIPViewSet(structure_views.BaseServicePropertyViewSet):
    """Read-only property ViewSet for internal IP addresses."""
    queryset = models.InternalIP.objects.all().order_by('settings', 'address')
    serializer_class = serializers.InternalIPSerializer
    lookup_field = 'uuid'
    filter_class = filters.InternalIPFilter
class FloatingIPViewSet(structure_views.BaseServicePropertyViewSet):
    """Read-only property ViewSet for floating IP addresses."""
    queryset = models.FloatingIP.objects.all().order_by('settings', 'address')
    serializer_class = serializers.FloatingIPSerializer
    lookup_field = 'uuid'
    filter_class = filters.FloatingIPFilter
| StarcoderdataPython |
1785350 | #!/usr/bin/env python
# Python 2.7.14
import argparse
import os
import pandas
import numpy
import matplotlib.pyplot
import matplotlib.dates
import datetime
# Output directories (relative to the working directory) for saved figures
# and summary tables.
fig_dir = 'fig'
table_dir = 'table'
class Pointing:
    """Quick-look reduction and plotting of a pointing-observation CSV.

    Python 2 code.  Reads the CSV with pandas, splits it into azimuth and
    elevation scans, selects the receiver array with the best S/N, and can
    plot the result or append a summary row to a table file.  Assumes the
    CSV provides columns such as AZEL, offset, hpbw, ARRAY, DATE_OBS,
    IntegINT1..3, rmsIntegInt1..3, qlookAutoDaz/Del, manualDaz/Del and
    weather columns (seen read below).
    """
    def __init__(self, data_path):
        # File stem is reused for log messages and the saved figure name.
        self.file_base, _ = os.path.splitext(os.path.basename(data_path))
        self.data_raw = pandas.read_csv(data_path)
        self.frequency = ''
        self.use_array = ''
    def read_data(self):
        """Split raw rows into AZ/EL scans; exit if either scan is empty."""
        data = self.data_raw.copy()
        if not data.offset.dtype == 'float64':
            # Column came in as strings because of 'ERR' rows; drop and cast.
            data = data[data.offset != 'ERR']
            data.offset = data.offset.astype('float64')
            data.hpbw = data.hpbw.astype('float64')
        self.az = data[data.AZEL == 'AZ']
        self.el = data[data.AZEL == 'EL']
        if (len(self.az) == 0) or (len(self.el) == 0):
            print 'data are insufficient: {}'.format(self.file_base)
            quit()
        self.get_frequency(data)
    def get_frequency(self, data):
        """Infer the band from the pos1 column; anything but 43GHz exits."""
        pos1 = float(data.pos1.iloc[0])
        # NOTE(review): pos1 ranges below map onto frequency labels;
        # confirm the mapping against the instrument documentation.
        if 34. < pos1 < 36.:
            f = '22GHz'
        elif 19. < pos1 < 21.:
            f = '43GHz'
        elif 9. < pos1 < 11.:
            f = '86GHz'
        else:
            f = '?'
        if f != '43GHz':
            print 'skip: frequency is {}'.format(f)
            quit()
        self.frequency = f
    def add_params(self):
        """Add cumulative-offset (offset2) and S/N columns to both scans."""
        # dd = pointing correction relative to the final value of the scan;
        # offset2 = measured offset re-referenced to that final correction.
        self.az['daz_all'] = self.az['qlookAutoDaz'] + self.az['manualDaz']
        self.az['dd'] = self.az.daz_all - self.az.daz_all.iloc[-1]
        self.az['offset2'] = self.az.offset + self.az.dd
        self.el['del_all'] = self.el['qlookAutoDel'] + self.el['manualDel']
        self.el['dd'] = self.el.del_all - self.el.del_all.iloc[-1]
        self.el['offset2'] = self.el.offset + self.el.dd
        # S/N per integration window (1 = pos1, 2 = center, 3 = pos3).
        self.az['SN1'] = self.az.IntegINT1 / self.az.rmsIntegInt1
        self.el['SN1'] = self.el.IntegINT1 / self.el.rmsIntegInt1
        self.az['SN2'] = self.az.IntegINT2 / self.az.rmsIntegInt2
        self.el['SN2'] = self.el.IntegINT2 / self.el.rmsIntegInt2
        self.az['SN3'] = self.az.IntegINT3 / self.az.rmsIntegInt3
        self.el['SN3'] = self.el.IntegINT3 / self.el.rmsIntegInt3
    def select_array(self):
        """Pick the array with the highest summed center S/N; keep its rows."""
        array_list = list(self.az.ARRAY.drop_duplicates())
        sn_sum = []
        for a in array_list:
            az_t = self.az[self.az.ARRAY == a]
            el_t = self.el[self.el.ARRAY == a]
            # Keep only the latest row per observation timestamp.
            az_t = az_t.drop_duplicates(['DATE_OBS'], keep='last')
            el_t = el_t.drop_duplicates(['DATE_OBS'], keep='last')
            sn_sum.append(az_t.SN2.sum() + el_t.SN2.sum())
        self.use_array = array_list[numpy.argmax(sn_sum)]
        self.az2 = self.az[self.az.ARRAY == self.use_array]
        self.el2 = self.el[self.el.ARRAY == self.use_array]
        self.az2 = self.az2.drop_duplicates(['DATE_OBS'], keep='last')
        self.el2 = self.el2.drop_duplicates(['DATE_OBS'], keep='last')
        if (len(self.az2) == 0) or (len(self.el2) == 0):
            print 'data are insufficient (2): {}'.format(self.file_base)
            quit()
    def calculate_offset_hpbw(self, scan):
        """Return (offset_mean, offset_std, hpbw_mean, hpbw_std) for 'az'/'el'."""
        scan_data = eval('self.{}2'.format(scan))
        offset_mean = scan_data.offset2.mean()
        hpbw_mean = scan_data.hpbw.mean()
        if len(scan_data) == 1:
            # std of a single sample would be NaN; report 0 instead.
            offset_std = 0.
            hpbw_std = 0.
        else:
            offset_std = scan_data.offset2.std()
            hpbw_std = scan_data.hpbw.std()
        return offset_mean, offset_std, hpbw_mean, hpbw_std
    def output_table(self):
        """Append one CSV summary row (offsets, HPBW, weather) per run."""
        pd = {}
        offset_mean_az, offset_std_az, hpbw_mean_az, hpbw_std_az = self.calculate_offset_hpbw('az')
        offset_mean_el, offset_std_el, hpbw_mean_el, hpbw_std_el = self.calculate_offset_hpbw('el')
        pd['offset_mean_az'] = offset_mean_az
        pd['offset_std_az'] = offset_std_az
        pd['hpbw_mean_az'] = hpbw_mean_az
        pd['hpbw_std_az'] = hpbw_std_az
        pd['offset_mean_el'] = offset_mean_el
        pd['offset_std_el'] = offset_std_el
        pd['hpbw_mean_el'] = hpbw_mean_el
        pd['hpbw_std_el'] = hpbw_std_el
        data = pandas.concat([self.az2, self.el2])
        # Pointing direction, final corrections, S/N and weather summary.
        pd['az'] = data.AZreal.mean()
        pd['el'] = data.ELreal.mean()
        pd['daz'] = self.az2.daz_all.iloc[-1]
        pd['del'] = self.el2.del_all.iloc[-1]
        pd['sn'] = numpy.mean([self.az2.SN2.iloc[-1], self.el2.SN2.iloc[-1]])
        pd['temp'] = data.Temp.mean()
        pd['ap'] = data.AirPress.mean()
        pd['wv'] = data.WaterVapor.mean()
        pd['ws'] = data.wind_sp.mean()
        pd['ws_std'] = data.wind_sp.std()
        pd['wd'] = data.wind_dir.mean()
        pd['wd_std'] = data.wind_dir.std()
        table_path = '{}/{}_params.txt'.format(table_dir, self.frequency)
        fmt = '{offset_mean_az},{offset_std_az},{hpbw_mean_az},{hpbw_std_az},'
        fmt += '{offset_mean_el},{offset_std_el},{hpbw_mean_el},{hpbw_std_el},'
        fmt += '{az},{el},{daz},{del},{sn},'
        fmt += '{temp},{ap},{wv},{ws},{ws_std},{wd},{wd_std}'
        # Header line is the format string with the braces stripped.
        header = fmt.replace('{', '').replace('}', '')
        if not os.path.exists(table_dir):
            os.mkdir(table_dir)
        if not os.path.exists(table_path):
            with open(table_path, 'w') as f:
                f.write(header + '\n')
        with open(table_path, 'a') as f:
            f.write(fmt.format(**pd) + '\n')
    def plot_data(self):
        """Build the multi-panel summary figure (offsets, HPBW, S/N, wind)."""
        matplotlib.rcParams['lines.linewidth'] = 1
        matplotlib.rcParams['lines.marker'] = 'o'
        matplotlib.rcParams['lines.markersize'] = 3
        matplotlib.rcParams['font.family'] = 'Times New Roman'
        matplotlib.rcParams['font.size'] = 12
        matplotlib.rcParams['axes.grid'] = True
        matplotlib.rcParams['grid.linestyle'] = ':'
        matplotlib.rcParams['mathtext.fontset'] = 'cm'
        fig = matplotlib.pyplot.figure(figsize=(10, 10))
        # Top 2x2 grid: offsets (row 1) and HPBW (row 2), AZ left / EL right.
        ax1 = fig.add_subplot(321)
        ax2 = fig.add_subplot(322)
        ax3 = fig.add_subplot(323)
        ax4 = fig.add_subplot(324)
        az_tmp = self.az2
        el_tmp = self.el2
        ax1.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.offset2)
        ax1.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.offset, ls='--')
        ax1.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.dd)
        ax2.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.offset2)
        ax2.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.offset, ls='--')
        ax2.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.dd)
        ax3.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.hpbw)
        ax4.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.hpbw)
        # S/N panels for the three integration windows.
        ax5 = fig.add_subplot(6, 2, 9)
        ax6 = fig.add_subplot(6, 2, 10)
        ax5.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.SN2)
        ax5.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.SN1)
        ax5.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.SN3)
        ax6.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.SN2, label='center')
        ax6.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.SN1, label='pos1')
        ax6.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.SN3, label='pos3')
        ax6.legend(loc='lower right', ncol=3, fontsize=8)
        # HPBW axis limits per band (arcsec).
        if self.frequency == '22GHz':
            min_hpbw = 50
            max_hpbw = 100
        elif self.frequency == '43GHz':
            min_hpbw = 20
            max_hpbw = 60
        elif self.frequency == '86GHz':
            min_hpbw = 10
            max_hpbw = 30
        else:
            min_hpbw = 10
            max_hpbw = 100
        ax1.set_title('Azimuth scan', y=1.35)
        ax1.set_ylabel("offset ($''$)")
        ax1.set_ylim(-15, 15)
        ax2.set_title('Elevation scan', y=1.35)
        ax2.set_ylim(-15, 15)
        ax3.set_ylabel("HPBW ($''$)")
        ax3.set_ylim(min_hpbw, max_hpbw)
        ax4.set_ylim(min_hpbw, max_hpbw)
        ax5.set_ylabel('S/N')
        object_name = az_tmp.OBJECT.iloc[0]
        ax5.text(0, 0.8, object_name, transform=ax5.transAxes)
        # Annotate the peak antenna temperature and mark its data point.
        peak_az_id = az_tmp['peakTa*2'].idxmax()
        peak_el_id = el_tmp['peakTa*2'].idxmax()
        peak_az = az_tmp['peakTa*2'].loc[peak_az_id]
        peak_el = el_tmp['peakTa*2'].loc[peak_el_id]
        peak = max([peak_az, peak_el])
        ax5.text(0, 0.65, 'max $T_\mathrm{A}^*$:' + '{:.1f} K'.format(peak), transform=ax5.transAxes)
        if numpy.argmax([peak_az, peak_el]) == 0:
            ax5.plot(pandas.to_datetime(az_tmp.DATE_OBS.loc[peak_az_id]), az_tmp.SN2.loc[peak_az_id], c='r')
        elif numpy.argmax([peak_az, peak_el]) == 1:
            ax6.plot(pandas.to_datetime(el_tmp.DATE_OBS.loc[peak_el_id]), el_tmp.SN2.loc[peak_el_id], c='r')
        # Wind-speed panels.
        ax7 = fig.add_subplot(6, 2, 11)
        ax7.plot(pandas.to_datetime(az_tmp.DATE_OBS), az_tmp.wind_sp)
        ax7.set_ylabel('wind speed (km s$^{-1}$)')
        ax7.set_ylim(0, 10)
        ax8 = fig.add_subplot(6, 2, 12)
        ax8.plot(pandas.to_datetime(el_tmp.DATE_OBS), el_tmp.wind_sp)
        ax8.set_ylim(0, 10)
        day = self.az.DATE_OBS.iloc[0].split()[0]
        fig.suptitle(day)
        # Pick x-axis tick spacing from the scan duration.
        dt_az = (pandas.to_datetime(az_tmp.DATE_OBS.iloc[-1]) - pandas.to_datetime(az_tmp.DATE_OBS.iloc[0])).seconds
        dt_el = (pandas.to_datetime(el_tmp.DATE_OBS.iloc[-1]) - pandas.to_datetime(el_tmp.DATE_OBS.iloc[0])).seconds
        if (dt_az == 0) or (dt_el == 0):
            # Single-point scan: pad the limits by a minute on each side.
            xlocater = matplotlib.dates.MinuteLocator(interval=1)
            date_fmt = '%H:%M:%S'
            xlim1_az = pandas.to_datetime(az_tmp.DATE_OBS.iloc[0]) - datetime.timedelta(minutes=1)
            xlim2_az = pandas.to_datetime(az_tmp.DATE_OBS.iloc[-1]) + datetime.timedelta(minutes=1)
            xlim1_el = pandas.to_datetime(el_tmp.DATE_OBS.iloc[0]) - datetime.timedelta(minutes=1)
            xlim2_el = pandas.to_datetime(el_tmp.DATE_OBS.iloc[-1]) + datetime.timedelta(minutes=1)
            for i in [1, 3, 5, 7]:
                eval('ax{}.set_xlim(xlim1_az, xlim2_az)'.format(i))
                eval('ax{}.set_xlim(xlim1_el, xlim2_el)'.format(i+1))
        elif dt_az > 1800:
            xlocater = matplotlib.dates.MinuteLocator(interval=10)
            date_fmt = '%H:%M'
        elif dt_az > 900:
            xlocater = matplotlib.dates.MinuteLocator(interval=5)
            date_fmt = '%H:%M'
        elif dt_az > 120:
            xlocater = matplotlib.dates.MinuteLocator(interval=2)
            date_fmt = '%H:%M'
        else:
            xlocater = matplotlib.dates.MinuteLocator(interval=2)
            date_fmt = '%H:%M:%S'
        for i in range(1, 9):
            eval('ax{}.xaxis.set_major_locator(xlocater)'.format(i))
            eval('ax{}.xaxis.set_major_formatter(matplotlib.dates.DateFormatter(date_fmt))'.format(i))
        fig.text(0.05, 0.98, 'Frequency: {}'.format(self.frequency))
        fig.text(0.05, 0.96, 'Array: {}'.format(self.use_array))
        fig.text(0.8, 0.98, '(dAZ, dEL) = ({:+.2f}, {:+.2f})'.format(self.az2.daz_all.iloc[-1], self.el2.del_all.iloc[-1]))
        # Mark every mid-scan correction change with a vertical line + label.
        ddaz2, time_ddaz2 = self.get_dd('az')
        ddel2, time_ddel2 = self.get_dd('el')
        text_yoffset_az = ax1.get_ylim()[1]
        for t in range(len(ddaz2)):
            ax1.text(pandas.Timestamp(time_ddaz2.iloc[t]), text_yoffset_az, '{:+.1f}'.format(ddaz2[t]), horizontalalignment='center', fontsize=9)
            ax1.axvline(pandas.Timestamp(time_ddaz2.iloc[t]), c='k', marker='')
        text_yoffset_el = ax2.get_ylim()[1]
        for t in range(len(ddel2)):
            ax2.text(pandas.Timestamp(time_ddel2.iloc[t]), text_yoffset_el, '{:+.1f}'.format(ddel2[t]), horizontalalignment='center', fontsize=9)
            ax2.axvline(pandas.Timestamp(time_ddel2.iloc[t]), c='k', marker='')
        # Summary tables drawn above the scan panels.
        ax9 = fig.add_axes([0.12, 0.89, 0.35, 0.07])
        ax10 = fig.add_axes([0.54, 0.89, 0.35, 0.07])
        self.plot_table('az', ax9)
        self.plot_table('el', ax10)
        self.fig = fig
    def save_figure(self):
        """Save the figure under fig/<frequency>/<file_base>.png."""
        if not os.path.exists(fig_dir):
            os.mkdir(fig_dir)
        if not os.path.exists(os.path.join(fig_dir, self.frequency)):
            os.mkdir(os.path.join(fig_dir, self.frequency))
        print 'saved: {}'.format(self.file_base)
        save_file = '{}/{}/{}.png'.format(fig_dir, self.frequency, self.file_base)
        self.fig.savefig(save_file)
        self.fig.clf()
    @staticmethod
    def show_figure():
        """Show the current figure with pyplot (blocking)."""
        matplotlib.pyplot.show()
    def show_figure_gui(self):
        """Show this instance's figure via its own canvas."""
        self.fig.show()
    def get_dd(self, scan):
        """Return (nonzero correction steps, their timestamps) for a scan."""
        scan_data = eval('self.{}2'.format(scan))
        # Step-to-step difference of the cumulative correction; the first
        # sample has no predecessor, so prepend 0.
        dd = numpy.array(scan_data['d{}_all'.format(scan)])[1:] \
            - numpy.array(scan_data['d{}_all'.format(scan)])[:-1]
        dd = numpy.insert(dd, 0, 0.)
        dd2 = dd[numpy.where(dd != 0.)[0]]
        time_dd2 = scan_data['DATE_OBS'].iloc[numpy.where(dd != 0.)[0]]
        return dd2, time_dd2
    def plot_table(self, scan, ax):
        """Draw a small mean/std table of offset and HPBW onto axes *ax*."""
        offset_mean, offset_std, hpbw_mean, hpbw_std = self.calculate_offset_hpbw(scan)
        ax.patch.set_alpha(0)
        ax.axis('off')
        # Hand-drawn table grid in axes coordinates.
        ax.axhline(0.1, c='k', marker='', linewidth=0.4)
        ax.axhline(0.4, c='k', marker='', linewidth=0.4)
        ax.axhline(0.7, c='k', marker='', linewidth=0.4)
        ax.axvline(0.2, ymin=0.1, c='k', marker='', linewidth=0.4)
        ax.axvline(0.5, ymin=0.1, c='k', marker='', linewidth=0.4)
        ax.text(0.05, 0.45, 'offset')
        ax.text(0.05, 0.15, 'HPBW')
        ax.text(0.28, 0.75, 'Average')
        ax.text(0.55, 0.75, 'Standard Deviation')
        ax.text(0.3, 0.45, "{:+.1f}$''$".format(offset_mean))
        ax.text(0.7, 0.45, "{:.1f}$''$".format(offset_std))
        ax.text(0.3, 0.15, "{:+.1f}$''$".format(hpbw_mean))
        ax.text(0.7, 0.15, "{:.1f}$''$".format(hpbw_std))
if __name__ == '__main__':
    # CLI entry point: quick-look a single pointing CSV and show the figure.
    parser = argparse.ArgumentParser()
    parser.add_argument('file_path')
    args = parser.parse_args()
    qlp = Pointing(args.file_path)
    qlp.read_data()
    qlp.add_params()
    qlp.select_array()
    # Optional steps kept disabled: table output and figure saving.
    # qlp.output_table()
    qlp.plot_data()
    # qlp.save_figure()
    qlp.show_figure()
| StarcoderdataPython |
189850 | <filename>setup.py
import setuptools
# read the contents of the README file so PyPI shows the full description
from os import path
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
setuptools.setup(
    name='metcli',
    version='0.1.1',
    author='<NAME>',
    description='A command line interface for Met Éireann.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3.8',
        'Topic :: Utilities'
    ],
    keywords='weather utility cli',
    url='https://github.com/ozzywalsh/metcli',
    license='MIT',
    packages=['metcli'],
    install_requires=[
        'requests',
        'termcolor'
    ],
    # NOTE(review): setup_requires/tests_require with pytest-runner are
    # deprecated in modern setuptools; consider moving test dependencies
    # to an extras_require entry instead.
    setup_requires=['pytest-runner'],
    tests_require=['pytest'],
    # Installs the 'metcli' console command pointing at command_line.main.
    entry_points={
        'console_scripts': ['metcli=metcli.command_line:main']
    },
    include_package_data=True,
    zip_safe=False
)
| StarcoderdataPython |
1714454 | <filename>chat/face_detect.py
from __future__ import print_function
from google.cloud import vision

# Demo: run Cloud Vision face detection on two images stored in a public
# Cloud Storage bucket and print surprise likelihood + bounding boxes.
uri_base = 'gs://cloud-vision-codelab'
pics = ('face_surprise.jpg', 'face_no_surprise.png')
client = vision.ImageAnnotatorClient()
image = vision.Image()
for pic in pics:
    # Re-point the (reused) Image proto at the next picture.
    image.source.image_uri = '%s/%s' % (uri_base, pic)
    response = client.face_detection(image=image)
    print('=' * 30)
    print('File:', pic)
    for face in response.face_annotations:
        # surprise_likelihood is an enum value; wrap it to get its name.
        likelihood = vision.Likelihood(face.surprise_likelihood)
        vertices = ['(%s,%s)' % (v.x, v.y) for v in face.bounding_poly.vertices]
        print('Face surprised:', likelihood.name)
        print('Face bounds:', ",".join(vertices))
| StarcoderdataPython |
3374448 | #!/usr/bin/python
#### Copyright (c) 2015, swpm, <NAME>
#### All rights reserved.
#### See the accompanying LICENSE file for terms of use
from SwpmGraph import GraphWorldLoader
import kivy
kivy.require("1.9.1")
from kivy.app import App
from kivy.uix.scatterlayout import ScatterLayout
from kivy.uix.relativelayout import RelativeLayout
from kivy.input.shape import ShapeRect
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.scrollview import ScrollView
from kivy.uix.widget import Widget
from kivy.graphics import Color, Ellipse, Line
from kivy.input.motionevent import MotionEvent
def GetGraph():
    """Load the world map and return (cities, lines).

    *cities* is the loader's city list; *lines* is a list of
    [[x1, x2], [y1, y2]] segments, one per link endpoint (each link
    contributes a segment from each of its two cities, as before).

    Rewritten to remove the duplicated lCity/rCity branch bodies, replace
    the O(n) linear scans per link with a dict lookup, and drop the unused
    x/y locals the old version built and never returned.
    """
    loader = GraphWorldLoader()
    world = loader.load_map()
    map_data = world.cities
    # Index cities by idx once instead of scanning the list for every link.
    cities_by_idx = {city.idx: city for city in map_data}
    lines = []
    for city in map_data:
        for link in city.links:
            # The link endpoint that is not this city.
            other_idx = link.rCity if link.lCity == city.idx else link.lCity
            other = cities_by_idx.get(other_idx)
            if other is None:
                # No matching city: skip, exactly as the old scans did.
                continue
            lines.append([[city.location[0], other.location[0]],
                          [city.location[1], other.location[1]]])
    return map_data, lines
class MyWidget(ScatterLayout):
    """Scatter view that draws the city graph and zooms on the mouse wheel."""

    def __init__(self, **kwargs):
        super(MyWidget, self).__init__(**kwargs)
        self.update_canvas()
        # Pan/zoom only; rotating the map is not useful here.
        self.do_rotation = False

    def update_canvas(self, **kwargs):
        """Redraw the graph: green link lines first, yellow city markers on top."""
        self.canvas.clear()
        with self.canvas:
            # Load the map once; the previous code called GetGraph() twice,
            # re-running the loader for no benefit.
            map_data, line_data = GetGraph()
            for line in line_data:
                Color(0, 1, 0)
                # line is [[x1, x2], [y1, y2]]; -3382 shifts map y-coords
                # into view (same offset as the city markers below).
                Line(points=[int(line[0][0]), int(line[1][0]) - 3382,
                             int(line[0][1]), int(line[1][1]) - 3382], width=1)
            for city in map_data:
                Color(1, 1, 0)
                Line(circle=(city.location[0], city.location[1] - 3382, 1), width=1)
        self.scale = 1.0

    def on_touch_down(self, touch):
        """Zoom in/out on scroll wheel, then defer to the parent handler."""
        if touch.is_mouse_scrolling:
            if touch.button == 'scrolldown':
                self.scale += 0.05
            else:
                self.scale -= 0.05
        touch.push()
        touch.apply_transform_2d(self.to_local)
        # NOTE(review): super(ScatterLayout, self) skips ScatterLayout's own
        # on_touch_down (Scatter's handler runs instead) - confirm this is
        # intentional; kept as-is to preserve behavior.
        ret = super(ScatterLayout, self).on_touch_down(touch)
        touch.pop()
        return ret
class MyApp(App):
    """Application root: a RelativeLayout hosting the graph widget."""
    def build(self):
        # Kivy calls this once at startup to build the widget tree.
        base_view = RelativeLayout()
        graph_widget = MyWidget()
        base_view.add_widget(graph_widget)
        return base_view
if __name__ == "__main__":
    # Start the Kivy event loop.
    MyApp().run()
| StarcoderdataPython |
170245 | <filename>source/dicom/test/run_tests.py
# run_tests.py
"""Call all the unit test files in the test directory starting with 'test'"""
# Copyright (c) 2008-2012 <NAME>
# This file is part of pydicom, released under a modified MIT license.
# See the file license.txt included with this distribution, also
# available at http://pydicom.googlecode.com
import os
import os.path
import sys
import unittest
# Get the directory test_dir where the test scripts are, using setuptools
# package metadata so it also works for an installed pydicom distribution.
from pkg_resources import Requirement, resource_filename
test_dir = resource_filename(Requirement.parse("pydicom"), "dicom/test")
class MyTestLoader(object):
    """Test loader used by Distribute's ``python setup.py test`` hook.

    Discovers every ``test*.py`` module in the pydicom test directory and
    builds a single unittest suite from them.
    """
    def loadTestsFromNames(self, *args):
        # Simplest to change to directory where test_xxx.py files are.
        # The cwd is restored in a finally block so an import/load failure
        # does not leave the process in the test directory.
        save_dir = os.getcwd()
        if test_dir:
            os.chdir(test_dir)
        try:
            filenames = os.listdir(".")
            module_names = [f[:-3] for f in filenames
                            if f.startswith("test") and f.endswith(".py")]
            # Load all the tests into one suite
            suite = unittest.TestSuite()
            for module_name in module_names:
                module_dotted_name = "dicom.test." + module_name
                test = unittest.defaultTestLoader.loadTestsFromName(
                    module_dotted_name)
                suite.addTest(test)
        finally:
            os.chdir(save_dir)
        return suite
if __name__ == "__main__":
    # Get the tests -- in format used by Distribute library
    # to run under 'python setup.py test'
    suite = MyTestLoader().loadTestsFromNames()
    # Run the tests; "-v" / "--verbose" on the command line bumps unittest
    # verbosity from 1 to 2.
    verbosity = 1
    args = sys.argv
    if len(args) > 1 and (args[1] == "-v" or args[1] == "--verbose"):
        verbosity = 2
    runner = unittest.TextTestRunner(verbosity=verbosity)
    # Switch directories to test DICOM files, used by many of the tests
    save_dir = os.getcwd()
    testfiles_dir = resource_filename(Requirement.parse("pydicom"),
                                      "dicom/testfiles")
    os.chdir(testfiles_dir)
    runner.run(suite)
    os.chdir(save_dir)
| StarcoderdataPython |
4820215 | """ Utility for cating files. """
from fishnet.cmds.base import UnixCommand
from fishnet.config import config
def cat_local_op(channel, shell_glob):
    """Perform a cat on the local machine.

    Expands *shell_glob* locally, cats every matching file and streams each
    output line back over *channel*; stderr lines are prefixed with "[ERR]".
    """
    import glob
    import subprocess
    filenames = list(glob.glob(shell_glob))
    # universal_newlines=True decodes the pipes to str: under Python 3 the
    # original sent repr'd bytes like "b'line'" instead of the line text.
    p = subprocess.Popen(
        ["/bin/cat"] + filenames,
        stdout=subprocess.PIPE, stderr=subprocess.PIPE,
        universal_newlines=True)
    stdout, stderr = p.communicate()
    for line in stdout.splitlines():
        channel.send("%s" % (line,))
    for line in stderr.splitlines():
        channel.send("[ERR] %s" % (line,))
class Cat(UnixCommand):
    """Cat files.

    Remote-execution command: forwards ``shell_glob`` to ``cat_local_op``
    on each target machine via the fishnet execnet machinery.
    """
    args = ('shell_glob',)
    local_op = staticmethod(cat_local_op)
# Dispatch the command across the configured execnet group when this module
# is executed as a fishnet command.
Cat.try_main(__name__, config.execnet_group)
| StarcoderdataPython |
1777509 | <filename>src/plotFunctions.py
import matplotlib.pyplot as plt
import constants
import numpy as np
from statistics import mean
from scipy.signal import savgol_filter
def plotNodeCharacteristics(nodeList):
    """Grouped bar chart of each node's BK, FK (scaled by 1e10) and m0."""
    fk_vals = [node.FK / pow(10, 10) for node in nodeList]
    bk_vals = [node.BK / pow(10, 10) for node in nodeList]
    mk_vals = [node.m0 for node in nodeList]
    index = np.array(range(constants.NODES))
    fig = plt.figure()
    plt.title("Node Characteristics")
    plt.bar(index - 0.2, bk_vals, width = 0.2, color = 'b', align = 'center', label = "BKs")
    plt.bar(index, fk_vals, width = 0.2, color = 'r', align = 'center', label = "FKs")
    plt.bar(index + 0.2, mk_vals, width = 0.2, color = 'g', align = 'center', label = "mks")
    plt.xticks(range(constants.NODES))
    plt.xlabel("Nodes")
    plt.ylabel("Red and Blue bars in e10")
    plt.legend()
    plt.show()
    return fig
def plotUsersOnNode(array):
    """Line plot of the user count on every node across SLA iterations."""
    fig = plt.figure()
    plt.title("Number of users on each node over time")
    plt.xlabel("SLA Iterations")
    plt.ylabel("Number of User")
    for node_idx, series in enumerate(array):
        plt.plot(range(len(series)), series, label = str(node_idx))
    plt.legend()
    plt.show()
    return fig
def plotUserProbabilities(array):
    """Line plot of user 0's per-node choice probabilities over time.

    *array* is iteration-major; it is transposed so each row becomes one
    node's probability series.
    """
    per_node = array.transpose()
    fig = plt.figure()
    plt.title("The probability of choosing each node for user 0")
    for node_idx, series in enumerate(per_node):
        plt.plot(range(len(series)), series, label = str(node_idx))
    plt.legend()
    plt.xlabel("SLA Iterations")
    plt.ylabel("Probability")  # fixed user-facing typo: was "Porbability"
    plt.show()
    return fig
def plotUserReward(array):
    """Plot the reward trajectory of user 0 over SLA iterations."""
    fig = plt.figure()
    plt.plot(range(len(array)), array)
    plt.title("Reward over time for user 0")
    plt.xlabel("SLA Iterations")
    plt.ylabel("Normalized Reward")
    plt.show()
    return fig
def plotNodeRewards(array):
    """Line plot of the reward produced by each node over time."""
    per_node = array.transpose()
    fig = plt.figure()
    plt.title("The reward given by each node")
    for node_idx, series in enumerate(per_node):
        plt.plot(range(len(series)), series, label = str(node_idx))
    plt.legend()
    plt.xlabel("SLA Iterations")
    plt.ylabel("Normalized Reward")
    plt.show()
    return fig
def plotAvgProbabilites(array):
    """Line plot of the average node-choice probability per node over time."""
    fig = plt.figure()
    plt.title("Average probability of node choosing")
    plt.xlabel("SLA Iterations")
    plt.ylabel("Avg Probability")
    for node_idx, series in enumerate(array):
        plt.plot(range(len(series)), series, label = str(node_idx))
    plt.legend()
    plt.show()
    return fig
def plotNodeRewardCharacteristic(array, element, trans = False):
    """Line plot of one reward component (*element*) per node.

    Pass ``trans=True`` when *array* is already node-major and must not be
    transposed.
    """
    per_node = array if trans else array.transpose()
    fig = plt.figure()
    plt.title("The " + element + " of each node")
    for node_idx, series in enumerate(per_node):
        plt.plot(range(len(series)), series, label = str(node_idx))
    plt.legend()
    plt.xlabel("SLA Iterations")
    plt.ylabel(element)
    plt.show()
    return fig
def plotBarCharacteristics(array, element):
    """Bar chart of each node's mean *element* value.

    *array* is iteration-major; columns (one per node) are averaged.
    """
    per_node = array.transpose()
    fig = plt.figure()
    plt.title("The average " + element + " of each node")  # fixed typo: was "avarage"
    averages = [mean(per_node[i]) for i in range(constants.NODES)]
    plt.bar(range(constants.NODES), averages)
    plt.xticks(range(constants.NODES))
    plt.xlabel("Nodes")
    plt.ylabel(element)
    plt.show()
    return fig
def plotAvgReward(array):
    """Plot the average user reward across SLA iterations."""
    fig = plt.figure()
    plt.title("Average Reward for users")
    plt.xlabel("SLA Iterations")
    plt.ylabel("Avg Reward")
    plt.plot(range(len(array)), array)
    plt.show()
    return fig
def pltStep2dlist(list2d, element):
    """Step plot of *element* for every node across timeslots."""
    fig = plt.figure()
    plt.title(element + " of nodes over time")
    plt.xlabel("Timeslot")
    plt.ylabel(element)
    for node_idx, series in enumerate(list2d):
        plt.step(range(len(series)), series, label = str(node_idx))
    plt.legend()
    plt.show()
    return fig
def plotReps(list2d):
    """Line plot of each node's reputation trajectory over timeslots."""
    fig = plt.figure()
    plt.title("Reputation of nodes over time")
    plt.xlabel("Timeslot")
    plt.ylabel("Reputation")
    for node_idx, series in enumerate(list2d):
        plt.plot(range(len(series)), series, label = str(node_idx))
    plt.legend()
    plt.show()
    return fig
def plotMonteCarlo(toPlot, i):
    """Plot the Monte-Carlo-smoothed average reward after *i* executions."""
    fig = plt.figure()
    plt.title("Average Reward using Monte Carlo Smoothing after " + str(i) + " executions")
    plt.xlabel("SLA Iterations")
    plt.ylabel("Avg Reward")
    plt.plot(range(len(toPlot)), toPlot)
    plt.show()
    return fig
def plotMonteCarloTotal(toPlot, i):
    """Plot the Monte-Carlo-smoothed total reward after *i* timeslots."""
    fig = plt.figure()
    plt.title("Total Reward using Monte Carlo Smoothing after " + str(i) + " timeslots")
    plt.xlabel("SLA Iterations")
    plt.ylabel("Total Reward")
    plt.plot(range(len(toPlot)), toPlot)
    plt.show()
    return fig
def barAvgRep(list2d):
    """Bar chart of every node's mean reputation."""
    fig = plt.figure()
    plt.title("Average Reputation of Nodes")
    plt.xlabel("Node")
    plt.xticks(range(constants.NODES))
    plt.ylabel("Reputation")
    averages = [mean(series) for series in list2d]
    plt.bar(range(constants.NODES), averages)
    plt.show()
    return fig
def plotAggrRewards(array):
    """Plot total reward per timeslot with a Savitzky-Golay smoothed overlay.

    NOTE(review): unlike the other helpers this one never calls plt.show();
    confirm whether that is intentional.
    """
    fig = plt.figure()
    plt.title("Total Reward given over Time")
    plt.xlabel("Timeslot")
    plt.ylabel("Reward")
    smoothed = savgol_filter(array, 51, 3)
    plt.plot(range(len(array)), array)
    plt.plot(range(len(array)), smoothed, color = 'red')
    return fig
134439 | from unittest import TestCase
from haleasy import HALEasy
import responses
class TestHaleasyHaltalk(TestCase):
    """Integration-style tests for HALEasy against a mocked haltalk server.

    All HTTP traffic is intercepted by the ``responses`` library; the two
    class attributes below are canned HAL+JSON bodies for the server root
    and for the user resource created by signup.
    """
    # Canned HAL root document: advertises the ht:* link relations via a
    # "ht" curie template.
    haltalk_root = '''{
    "_links": {
        "self": {
            "href":"/"
        },
        "curies": [
            {
                "name": "ht",
                "href": "http://haltalk.herokuapp.com/rels/{rel}",
                "templated": true
            }
        ],
        "ht:users": {
            "href":"/users"
        },
        "ht:signup": {
            "href":"/signup"
        },
        "ht:me": {
            "href": "/users/{name}",
            "templated":true
        },
        "ht:latest-posts": {
            "href":"/posts/latest"
        }
    },
    "welcome": "Welcome to a haltalk server.",
    "hint_1": "You need an account to post stuff..",
    "hint_2": "Create one by POSTing via the ht:signup link..",
    "hint_3": "Click the orange buttons on the right to make POST requests..",
    "hint_4": "Click the green button to follow a link with a GET request..",
    "hint_5": "Click the book icon to read docs for the link relation."
    }'''
    # Canned HAL document for GET /users/aaa (returned after signup).
    haltalk_get_user_aaa = '''{
    "_links": {
        "self": {
            "href": "/users/aaa"
        },
        "curies": [
            {
                "name": "ht",
                "href": "http://haltalk.herokuapp.com/rels/{rel}",
                "templated": true
            },
            {
                "name": "bla",
                "href": "http://haltalk.herokuapp.com/rels/{rel}",
                "templated": true
            }
        ],
        "ht:posts": {
            "href": "/users/aaa/posts"
        }
    },
    "username": "aaa",
    "bio": null,
    "real_name": null
    }'''
    def setUp(self):
        # Register the mocked endpoints: root (GET), signup (POST -> 201
        # with Location header), and the created user resource (GET).
        responses.reset()
        responses.add(responses.GET, 'http://haltalk.herokuapp.com.test_domain/',
                      body=self.haltalk_root, status=200,
                      content_type='application/json')
        responses.add(responses.POST, 'http://haltalk.herokuapp.com.test_domain/signup',
                      body='', status=201,
                      adding_headers={'Location': 'http://haltalk.herokuapp.com.test_domain/users/aaa'},
                      content_type='application/json')
        responses.add(responses.GET, 'http://haltalk.herokuapp.com.test_domain/users/aaa',
                      body=self.haltalk_get_user_aaa, status=200,
                      content_type='application/json')
    @responses.activate
    def test_haltalk_root(self):
        # Links are reachable by their full (uncuried) rel names.
        h = HALEasy('http://haltalk.herokuapp.com.test_domain')
        self.assertEqual(h.link(rel=u'self')['href'], u'/')
        self.assertEqual(h.link(rel=u'http://haltalk.herokuapp.com/rels/users')['href'], u'/users')
        self.assertEqual(h.link(rel=u'http://haltalk.herokuapp.com/rels/me')['href'], u'/users/{name}')
        self.assertEqual(h.link(rel=u'http://haltalk.herokuapp.com/rels/me')['templated'], True)
    @responses.activate
    def test_haltalk_root_with_curies(self):
        # The same links must also resolve through the "ht" curie prefix.
        h = HALEasy('http://haltalk.herokuapp.com.test_domain')
        self.assertEqual(h.link(rel=u'self')['href'], u'/')
        self.assertEqual(h.link(rel=u'ht:users')['href'], u'/users')
        self.assertEqual(h.link(rel=u'ht:me')['href'], u'/users/{name}')
        self.assertEqual(h.link(rel=u'ht:me')['templated'], True)
    @responses.activate
    def test_haltalk_create_user(self):
        # POSTing the signup link follows the 201 Location to the new user.
        h = HALEasy('http://haltalk.herokuapp.com.test_domain')
        user = h.link(rel='ht:signup').follow(method='POST', data={'username': 'aaa', 'password': '<PASSWORD>'})
        self.assertEqual(user['username'], 'aaa')
    @responses.activate
    def test_haltalk_get_me_aaa(self):
        # Templated ht:me link is expanded with name=aaa before following.
        h = HALEasy('http://haltalk.herokuapp.com.test_domain')
        user = h.link(rel='ht:me').follow(name='aaa')
        self.assertEqual(user['username'], 'aaa')
3370936 | <reponame>Mr-TelegramBot/python-tdlib
from ..factory import Type
class richTextUrl(Type):
    """TDLib ``richTextUrl`` object: a rich-text span that links to a URL."""
    text = None # type: "RichText"
    url = None # type: "string"
93373 | <reponame>image72/browserscope
#!/usr/bin/python2.4
#
# Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the 'License')
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an 'AS IS' BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Benchmark Tests Definitions."""
import logging
from categories import test_set_base
from categories import all_test_sets
class SummaryTest(test_set_base.TestBase):
  """One pseudo-test per category; min/max of 0 marks it as score-only."""
  def __init__(self, category, category_name, summary_doc):
    test_set_base.TestBase.__init__(
        self,
        key=category,
        name=category_name,
        url=None,
        doc=summary_doc,
        min_value=0,
        max_value=0)
# Build one SummaryTest entry for every visible test set/category.
_TESTS = []
for test_set in all_test_sets.GetVisibleTestSets():
  _TESTS.append(SummaryTest(
      test_set.category, test_set.category_name, test_set.summary_doc))
class SummaryTestSet(test_set_base.TestSet):
  def GetTestScoreAndDisplayValue(self, test_key, raw_scores):
    """Get a normalized score (0 to 100) and a value to output to the display.
    Args:
      test_key: a key for a test_set test.
      raw_scores: a dict of raw_scores indexed by test keys.
    Returns:
      score, display_value
      # score is from 0 to 100.
      # display_value is the text for the cell.
    """
    score = raw_scores[test_key]
    return score, score
  def GetRowScoreAndDisplayValue(self, results):
    """Get the overall score for this row of results data.
    Args:
      results: A dictionary that looks like:
      {
        'testkey1': {'score': 1-10, 'median': median, 'display': 'celltext'},
        'testkey2': {'score': 1-10, 'median': median, 'display': 'celltext'},
        etc...
      }
    Returns:
      A tuple of (score, display)
      Where score is a value between 1-100.
      And display is the text for the cell.
    """
    # Lazy %-args keep the dict from being rendered unless INFO is enabled.
    logging.info('summary getrowscore results: %s', results)
    # 'in' instead of dict.has_key(): equivalent in Python 2 and still
    # valid in Python 3, where has_key() was removed.
    if 'score' not in results or results['score']['median'] is None:
      score = 0
    else:
      score = results['score']['median']
    return score, score
# Singleton test set exported to the rest of the app ('summary' category).
TEST_SET = SummaryTestSet(
    category='summary',
    category_name='Summary',
    summary_doc='Summary of all the test categories.',
    tests=_TESTS,
    test_page=''
)
20942 | <reponame>willogy-team/insights--tensorflow<gh_stars>0
import os
import argparse
import numpy as np
import tensorflow as tf
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing.image import load_img, img_to_array
import matplotlib.pyplot as plt
from visualizations.manual_plot_by_matplotlib import plot_filters_of_a_layer
from visualizations.manual_plot_by_matplotlib import plot_feature_maps_of_a_layer, plot_feature_maps_of_multiple_layers
from visualizations.automatic_plot_by_tf_keras_vis import plot_activation_maximization_of_a_layer
from visualizations.automatic_plot_by_tf_keras_vis import plot_vanilla_saliency_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_smoothgrad_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_gradcam_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_gradcam_plusplus_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_scorecam_of_a_model
from visualizations.automatic_plot_by_tf_keras_vis import plot_faster_scorecam_of_a_model
# Command-line interface: dataset location and checkpoint directory.
ap = argparse.ArgumentParser()
ap.add_argument("-trd", "--train_dir", required=True, help="Path to dataset train directory")
ap.add_argument("-mdp", "--model_path", required=True, help="Path to the folder for saving checkpoints")
args = vars(ap.parse_args())
def create_model():
    """Build and compile the 3-class CNN classifier.

    Architecture: three stacked Conv2D layers (7x7, 5x5, 3x3 kernels,
    8 filters each) flattened into a 128-unit Dense layer and a 3-way
    softmax head. Input is (128, 128, 3) images scaled to [0, 1].
    """
    model = tf.keras.Sequential([
        tf.keras.layers.Conv2D(8, 7, activation='relu'),
        tf.keras.layers.Conv2D(8, 5, activation='relu'),
        tf.keras.layers.Conv2D(8, 3, activation='relu'),
        # The original passed input_shape=(32, 32, 3) here; Keras ignores
        # input_shape on a non-first layer and it contradicted the real
        # 128x128 input declared in model.build() below -- removed.
        tf.keras.layers.Flatten(),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dense(3, activation='softmax')
    ])
    input_shape = (None, 128, 128, 3)
    model.build(input_shape)
    model.compile(loss='categorical_crossentropy', optimizer=Adam(learning_rate=1e-4), metrics=['accuracy'])
    return model
# Rebuild the architecture and load the trained weights from the checkpoint.
model = create_model()
checkpoint_path = os.path.join(args["model_path"], 'models')
model.load_weights(checkpoint_path)
# Plot min-max normalized filter weights for every convolutional layer.
for idx, layer in enumerate(model.layers):
    print('[*] layer: ', layer)
    if 'conv' not in layer.name:
        print('No')
        continue
    filters_weights, biases_weights = layer.get_weights()
    print('[**] id: {}, layer.name: {}, filters_weights.shape: {}, biases_weights.shape: {}'.format(idx, layer.name, filters_weights.shape, biases_weights.shape))
    print('[**] layer.output.shape: {}'.format(layer.output.shape))
    filters_max, filters_min = filters_weights.max(), filters_weights.min()
    filters_weights = (filters_weights - filters_min)/(filters_max - filters_min)
    print('[**] filters_weights: ', filters_weights)
    plot_filters_of_a_layer(filters_weights, 3)
# === Output feature maps from a single layer ===
# A PIL object
img = load_img(os.path.join(args["train_dir"], 'n02085620-Chihuahua', 'n02085620_1558.jpg'), target_size=(128, 128))
# Convert to numpy array, add a batch axis, and rescale to [0, 1] as the
# model was trained on.
img = img_to_array(img)
img = np.expand_dims(img, axis=0)
# img = model.preprocess_input(img)
img = img/255
model_1 = Model(inputs=model.inputs, outputs=model.layers[0].output)
feature_maps_1 = model_1.predict(img)
print('[*] feature_maps_1.shape: ', feature_maps_1.shape)
plot_feature_maps_of_a_layer(feature_maps_1)
# === Output feature maps from multiple layers ===
# A multi-output model exposing the first three (conv) layers at once.
list_of_outputs = [model.layers[idx].output for idx in range(3)]
model_2 = Model(inputs=model.inputs, outputs=list_of_outputs)
model_2.summary()
feature_maps_2 = model_2.predict(img)
for feature_map in feature_maps_2:
    print('[*] feature_map.shape: ', feature_map.shape)
plot_feature_maps_of_multiple_layers(feature_maps_2)
# === Output activation maximization from a single layer ===
plot_activation_maximization_of_a_layer(model, 2)
# === GradCam++ from a single layer ===
# plot_gradcam_plusplus_of_a_layer(model, 2)
# === Attentions ===
# One sample image per class; X is the normalized batch fed to the
# attention/saliency visualizers below.
image_titles = ['Chihuahua', 'Japanese_spaniel', 'Maltese_dog']
img1 = load_img(os.path.join(args["train_dir"], 'n02085620-Chihuahua', 'n02085620_1558.jpg'), target_size=(128, 128))
img2 = load_img(os.path.join(args["train_dir"], 'n02085782-Japanese_spaniel', 'n02085782_2874.jpg'), target_size=(128, 128))
img3 = load_img(os.path.join(args["train_dir"], 'n02085936-Maltese_dog', 'n02085936_4245.jpg'), target_size=(128, 128))
img1 = np.asarray(img1)
img2 = np.asarray(img2)
img3 = np.asarray(img3)
images = np.asarray([img1, img2, img3])
X = images/255
## Vanilla saliency
print('[*] Vanilla saliency')
plot_vanilla_saliency_of_a_model(model, X, image_titles)
## SmoothGrad
print('[*] SmoothGrad')
plot_smoothgrad_of_a_model(model, X, image_titles)
## GradCAM
print('[*] GradCAM')
plot_gradcam_of_a_model(model, X, image_titles, images)
## GradCAM++
print('[*] GradCAM++')
plot_gradcam_plusplus_of_a_model(model, X, image_titles, images)
## ScoreCAM
print('[*] ScoreCam')
plot_scorecam_of_a_model(model, X, image_titles, images)
## Faster-ScoreCAM
print('[*] Faster-ScoreCAM')
plot_faster_scorecam_of_a_model(model, X, image_titles, images)
3205202 | <filename>dilated_encoder.py
import tensorflow as tf
import numpy as np
from layers import *
from BN_layers import *
class Dilated_Block(object):
    """One residual block of the dilated (WaveNet-style) encoder.

    Applies a dilated convolution with a GLU-style gate, a 1x1 "dense"
    projection added back to the input (residual), and optionally a 1x1
    skip projection.
    """
    def __init__(self, prefix, is_training, filter_width, conv_in_channels, conv_out_channels, skip_channels, dilation, clust_size = None, use_skip = True):
        # use_dense/use_dropout are currently fixed flags; glu selects the
        # gated activation over plain ReLU.
        self.use_dense = True
        self.use_dropout = False
        self.use_skip = use_skip
        self.glu = True
        self.clust_size = clust_size
        self.x_filter = BN_Conv("%s_x_filter" %(prefix), is_training, filter_width, conv_in_channels, conv_out_channels, dilation = dilation)
        if self.glu:
            self.x_gate = BN_Conv("%s_x_gate" %(prefix), is_training, filter_width, conv_in_channels, conv_out_channels, dilation = dilation)
        self.dense = BN_Conv_1x1("%s_dense" %(prefix), is_training, conv_out_channels, conv_out_channels)
        if self.use_skip:
            self.skip = BN_Conv_1x1("%s_skip" %(prefix), is_training, conv_out_channels, skip_channels)
    def activated_on(self, x):
        """Return (residual_output, skip_output); skip is None if disabled."""
        x_filter = self.x_filter.activated_on(x)
        if self.glu:
            x_gate = self.x_gate.activated_on(x)
        if self.glu:
            # Gated linear unit: filter path modulated by sigmoid gate.
            out = x_filter * tf.sigmoid(x_gate)
        else:
            out = tf.nn.relu(x_filter)
        dense = self.dense.activated_on(out)
        if self.use_skip:
            skip = self.skip.activated_on(out)
        else:
            skip = None
        # Residual connection: input added to the dense projection.
        return x + dense, skip
class Dilated_Encoder(object):
    """Stack of Dilated_Blocks forming a WaveNet-style sequence encoder.

    Built with TensorFlow 1.x graph-mode variable scopes. Input sequences
    of one-hot/discrete features are optionally embedded, given an additive
    learned positional encoding, then passed through dilated residual
    blocks (dilations 1, 3, 9, 27).
    """
    def __init__(self, name, is_training, batch_size, max_seq_len, channels, discrete_dims = 22, embedding_size = 32, do_embed = True, use_skip = False):
        self.batch_size = batch_size
        self.var_scope = name
        self.max_seq_len = max_seq_len
        self.is_training = is_training
        self.positional_encoding = True
        self.embedding_size = embedding_size
        self.discrete_dims = discrete_dims
        self.position_embedding_size = self.discrete_dims
        self.do_embed = do_embed
        self.use_skip = use_skip
        # Residual, dilation and skip paths all share the same width.
        self.residual_channels = channels
        self.dilation_channels = channels
        self.skip_channels = channels
        self.filter_width = 3
        self.dilations = [1, 3, 9, 27]
        self.model_output_dim = self.skip_channels if self.use_skip else self.residual_channels
        self.block_class = Dilated_Block
        self.vars = self.create_variables()
    def create_variables(self):
        """Create all layer objects/variables under this encoder's scope."""
        var = {}
        with tf.variable_scope(self.var_scope):
            with tf.variable_scope("wavenet_encoder"):
                if self.do_embed:
                    initial_channels = self.embedding_size
                    var["seq_embed"] = Conv_1x1("seq_embed", self.discrete_dims, self.embedding_size)
                else:
                    initial_channels = self.discrete_dims
                if self.positional_encoding:
                    # Learned per-position encoding, broadcast over the batch.
                    var["position_encoder"] = tf.get_variable("enc_position_encoder", [1, self.max_seq_len, self.position_embedding_size], tf.float32, tf.random_normal_initializer(0.0, 0.05))
                    var["position_1x1"] = Conv_1x1("pos_embed", self.position_embedding_size, initial_channels)
                var["input_conv"] = BN_Conv("input_conv", self.is_training, 3, initial_channels, self.residual_channels, dilation = 1)
                with tf.variable_scope("dilated_convolutions"):
                    var["dilated_convolutions"] = []
                    for (layer_index, dilation) in enumerate(self.dilations):
                        next_layer = self.block_class("encoding_wavenet_%i" %(layer_index), self.is_training, self.filter_width, self.residual_channels, self.dilation_channels, self.skip_channels, dilation = dilation, use_skip = self.use_skip)
                        var["dilated_convolutions"].append(next_layer)
        return var
    def run_conv(self, batch):
        """Run the encoder; return (summed_skip_or_None, final_residual)."""
        skip_outputs = []
        if self.do_embed:
            embedded_batch = self.vars["seq_embed"].activated_on(batch)
        else:
            embedded_batch = batch
        if self.positional_encoding:
            embedded_batch += self.vars["position_1x1"].activated_on(self.vars["position_encoder"])
        cur_act = self.vars["input_conv"].activated_on(embedded_batch)
        for layer in self.vars["dilated_convolutions"]:
            cur_act, skip = layer.activated_on(cur_act)
            skip_outputs.append(skip)
        if self.use_skip:
            return sum(skip_outputs), cur_act
        else:
            return None, cur_act
    def activated_on(self, batch):
        """Return the encoder output: summed skips if enabled, else residual."""
        if self.use_skip:
            net_out, _ = self.run_conv(batch)
        else:
            _, net_out = self.run_conv(batch)
        return net_out
| StarcoderdataPython |
185068 | # Generated by Django 2.1.3 on 2018-12-04 13:36
from django.db import migrations, models
class Migration(migrations.Migration):
    """Adds the many-to-many ``follows`` relation to the List model."""
    dependencies = [
        ('openbook_follows', '0007_remove_follow_list'),
        ('openbook_lists', '0002_auto_20181023_1331'),
    ]
    operations = [
        migrations.AddField(
            model_name='list',
            name='follows',
            field=models.ManyToManyField(related_name='lists', to='openbook_follows.Follow'),
        ),
    ]
| StarcoderdataPython |
1707056 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Standard Python imports
#
import sys
import platform
import os
import json
import pprint
import argparse
import datetime
# Use local Python modules if requested
#
if "--local" in sys.argv:
if platform.system() == "Windows":
sys.path.insert(0, "D:\\keep\\packages\\git\\PyMISP")
else:
sys.path.insert(0, "~/keep/packages/git/PyMISP")
# Access to MISP servers
from pymisp import PyMISP
# Private settings for access to the chosen MISP server
from settings import url, key, ssl
# Modules directly associated with this application
#
import caching
import misp
import analysis
import heatmaps
import scorecards
import utility
# A helper for parsing date arguments on the command-line
#
def validate_date(input_string):
    """argparse type hook: parse a YYYY-MM-DD string into a datetime.

    Raises argparse.ArgumentTypeError (so argparse prints a clean usage
    error) when the string is not a valid date.
    """
    try:
        return datetime.datetime.strptime(input_string, "%Y-%m-%d")
    except ValueError:
        raise argparse.ArgumentTypeError(
            "Not a valid date: '{0}'.".format(input_string))
#
# Main program
#
if __name__ == "__main__":
    # Prepare a pretty printer for debug purposes
    # NOTE(review): pp is currently unused in this block -- confirm it is
    # kept deliberately for interactive debugging.
    pp = pprint.PrettyPrinter(indent=4)
    # Configure access to the MISP server
    misp_server = PyMISP(url, key, ssl)
    # Process command-line arguments
    #
    epoch = datetime.datetime.utcfromtimestamp(0)
    parser = argparse.ArgumentParser(description="With no arguments, the cached data will be used to generate " +
                                                 "heatmaps showing threat actors against time, scored by various criteria.")
    parser.add_argument("--nocache", dest="use_cache", action="store_const", const=False, default=True,
                        help="Avoid reading or writing information from or to the cache and query the MISP server directly (which can be slow)")
    parser.add_argument("--dumpcache", dest="dump_cache", action="store_const", const=True, default=False,
                        help="Load the contents of the cache.obj file and pretty-print it to a text file named cache.txt")
    parser.add_argument("--numdays", metavar="DAYS", dest="num_days", type=int, default="0",
                        help="Set the number of days of history for heatmaps")
    parser.add_argument("--binsize", metavar="DAYS", dest="bin_size", type=int, default="0",
                        help="Set the number of days for each bin for heatmaps")
    parser.add_argument("--scorecards", dest="scorecards", action="store_const", const=True, default=False,
                        help="Show scoring for all threat actors")
    parser.add_argument("--startdate", metavar="DATE", dest="start_date", type=validate_date, default=epoch,
                        help="Set the start date for threat actor scorecards, in the format YYYY-MM-DD")
    parser.add_argument("--enddate", metavar="DATE", dest="end_date", type=validate_date, default=epoch,
                        help="Set the end date for threat actor scorecards, in the format YYYY-MM-DD")
    parser.add_argument("--listactors", dest="list_actors", action="store_const", const=True, default=False,
                        help="Produce list of the known threat actors in the data")
    parser.add_argument("--analyse", dest="analyse", action="store_const", const=True, default=False,
                        help="Produce an analysis of structure of the MISP data")
    # Parse command-line arguments and then perform some extra validation:
    # numdays and binsize must be given together, and numdays must be a
    # whole multiple of binsize.
    #
    args = parser.parse_args()
    if args.num_days != 0 and args.bin_size == 0:
        print("When specifying the number of days, the bin size must be specified")
        sys.exit(1)
    if args.num_days == 0 and args.bin_size != 0:
        print("When specifying the bin size, the number of days must be specified")
        sys.exit(1)
    if args.bin_size != 0 and args.num_days % args.bin_size != 0:
        print("The number of days should be a multiple of the bin size to ensure that the")
        print("left hand side of the graph is not misleading")
        sys.exit(1)
    # If requested, pretty print the cache contents into a file
    #
    if args.dump_cache:
        caching.dump_cache()
        sys.exit(0)
    # Obtain the event data, either from the local cache or from the MISP server
    #
    misp_data = misp.get_misp_data(misp_server, args.use_cache)
    total = len(misp_data["events"])
    if total == 0:
        sys.exit("No events returned")
    if args.scorecards:
        # Produce a score table against various criteria for each threat actor and for each ransomware
        #
        if not os.path.exists("scorecards-actors"):
            os.makedirs("scorecards-actors")
        print("Generating Threat Actor scorecards")
        scorecards.generate_threat_actor_scorecards(misp_data, "scorecards-actors", args.start_date, args.end_date)
        if not os.path.exists("scorecards-ransomware"):
            os.makedirs("scorecards-ransomware")
        print("Generating Ransomware scorecards")
        scorecards.generate_ransomware_scorecards(misp_data, "scorecards-ransomware", args.start_date, args.end_date)
    elif args.analyse:
        # Perform some basic analysis on the MISP data, which can be useful
        # for learning what is present in the data
        analysis.analyse(misp_data)
    elif args.list_actors:
        # List the threat actors present in the data
        #
        threat_actors = utility.identify_threat_actors(misp_data, initial={})
        for actor in threat_actors:
            print(actor)
    else:
        # Generate the desired heat maps (custom bin size if given on the
        # command line, otherwise the standard monthly + weekly pair)
        #
        if not os.path.exists("heatmaps"):
            os.makedirs("heatmaps")
        if args.num_days != 0 and args.bin_size != 0:
            print("Generating custom heatmaps")
            heatmaps.generate_heatmaps(misp_data, num_days = args.num_days, bin_size = args.bin_size, bin_name = "custom")
        else:
            print("Generating monthly heatmaps")
            heatmaps.generate_heatmaps(misp_data, num_days = 15 * 30, bin_size = 30, bin_name = "monthly")
            print("Generating weekly heatmaps")
            heatmaps.generate_heatmaps(misp_data, num_days = 3 * 30, bin_size = 7, bin_name = "weekly")
1676134 | <gh_stars>1-10
from flask import Flask
from werkzeug.utils import import_string
from utils import mysql
from flask_cors import *
# import utils.mysql as mysql
# Dotted import paths ("module:attribute") of every blueprint to register
# on the application in creatapp().
blueprints = [
    'src.veiws.classRoom:classRoom',
    'src.veiws.user:user',
    'src.veiws.class:my_class',
    'src.veiws.sc:sc',
    'src.veiws.course:course',
]
# Shared MySQL client instance used across the view modules.
dbclient = mysql.MySqldb()
# __all__ = [dbclient]
def creatapp():
    """Application factory: create the Flask app and register all blueprints.

    NOTE(review): CORS(..., supports_credentials=True) combined with the
    default wildcard origin is a risky combination -- confirm the intended
    allowed origins before deploying.
    """
    app = Flask(__name__)
    CORS(app, supports_credentials=True)
    # NOTE(review): debug mode is hard-coded on; disable in production.
    app.config['DEBUG'] = True
    for bp_name in blueprints:
        bp = import_string(bp_name)
        app.register_blueprint(bp)
    return app
| StarcoderdataPython |
3318323 | <filename>predict.py
import tensorflow as tf
from model.create_model import get_model
from datetime import datetime
import pandas as pd
import numpy as np
import sys
import pickle
import math
from model.data_preprocessor import generate_lstm_data
from helpers.settings import *
from model.config import HISTORY_SIZE, ROW_LENGTH
# Load the trained LSTM model from the saved checkpoint.
new_model = tf.keras.models.load_model('model/saved_model/model-20200429-114857.h5')
dataset_file = 'data/test_data.csv'
stations_file = './data/stations.csv'
stations = pd.read_csv(stations_file, usecols=STATIONS_COLUMNS, sep=';')
# Build the (history, target) windows from the test CSV using the same
# normalization/scaling configuration the model was trained with.
x, y = generate_lstm_data(
    dataset_file,
    history_size=HISTORY_SIZE,
    index_col='timestamp',
    norm_cols=NORM_COLS,
    scale_cols=SCALE_COLS,
    adjust_cols=ADJUST_COLUMNS,
    cat_cols=None,
    extra_columns=EXTRA_COLS
)
# Single batch containing the whole test set; predict in one pass.
val_data_single = tf.data.Dataset.from_tensor_slices((x, y))
val_data_single = val_data_single.batch(len(y))
# NOTE(review): new_prediction is never used -- the loop below assigns
# new_predictions (plural); confirm this placeholder can be removed.
new_prediction = None
for x, y in val_data_single.take(1):
    new_predictions = new_model.predict(x)
# NOTE(review): this redefinition shadows the ADJUST_COLUMNS imported from
# helpers.settings above; confirm the two are meant to stay in sync.
ADJUST_COLUMNS = {
    'lat': {
        'amount': -51.0
    },
    'lng': {
        'amount': -16.0
    }
}
def parse_x(x):
    """Convert a model input tensor back to [time, lat, lng] original units.

    Reverses the lat/lng offset adjustment (indices 1 and 2) and rescales
    the normalized time at index 3 back to minutes-of-day (x 1380).
    NOTE(review): the caller passes the whole batch tensor here rather than
    a single row -- confirm the intended indexing.
    """
    # .numpy() copies the tensor, so the in-place edits below do not
    # mutate the caller's data.
    x = x.numpy()
    x[1] = x[1] - ADJUST_COLUMNS['lat']['amount']
    x[2] = x[2] - ADJUST_COLUMNS['lng']['amount']
    x[3] = x[3] * 1380
    return [x[3], x[1], x[2]]
# Combine each reparsed input row with its target and export to CSV.
# NOTE(review): parse_x(x) uses the loop-carried batch tensor from above,
# not the current prediction `val` -- confirm this is intended.
pred = []
for i, val in enumerate(new_predictions):
    pred.append(parse_x(x) + y[i])
    # Display progess bar
    sys.stdout.write('\r')
    sys.stdout.write("[%-20s] %d%% (%d/%d)" % (
        '=' * int(20 * (i + 1) / len(y)), int(100 * (i + 1) / len(y)), (i + 1),
        len(y)))
    sys.stdout.flush()
exit_file = pd.DataFrame(pred, columns=['time', 'lat', 'lng', 'count'])
exit_file.to_csv('pred.csv')
3273510 | <gh_stars>0
from django.urls import path
from wallet.views import WalletDetail
from wallet.views import WalletList
from wallet.views import WalletUpdate
# Wallet routes: list at the root, then detail and update keyed by pk.
urlpatterns = [
    path("", WalletList.as_view(), name="wallet_list"),
    path("<pk>", WalletDetail.as_view(), name="wallet_detail"),
    path("<pk>/update", WalletUpdate.as_view(), name="wallet_update"),
]
| StarcoderdataPython |
import logging
# Module-level logger shared by every target class in this module.
LOG = logging.getLogger(__name__)
class TargetBase(object):
    """Abstract base for targets.

    Subclasses must provide :attr:`workspace` and may override the
    prepare/commit/cleanup lifecycle hooks, which are no-ops by default.
    """

    TARGET_TYPE = "base"

    def __init__(self, target):
        self._target = target

    def log(self, msg):
        """Emit *msg* at INFO level through the module logger."""
        LOG.info(msg)

    @property
    def workspace(self):
        """Workspace of this target; must be provided by subclasses."""
        raise NotImplementedError()

    def prepare(self):
        """Hook run before work starts (no-op by default)."""
        pass

    def commit(self, summary):
        """Hook run to persist results (no-op by default)."""
        pass

    def cleanup(self):
        """Hook run after work finishes (no-op by default)."""
        pass

    def __str__(self):
        return "%s: (%s)" % (type(self).__name__, self._target)
| StarcoderdataPython |
3373056 | <filename>Cura/Uranium/UM/OutputDevice/OutputDevicePlugin.py
# Copyright (c) 2019 <NAME>.
# Uranium is released under the terms of the LGPLv3 or higher.
from typing import Optional, Callable
from UM.OutputDevice.OutputDeviceManager import ManualDeviceAdditionAttempt
from UM.PluginObject import PluginObject
from UM.Application import Application
## Base class for output device plugins.
#
# This class provides the base for any output device plugin that should be
# registered with the OutputDeviceManager. Each OutputDevicePlugin should
# implement device detection and add/remove devices as needed.
#
# For example, the Removable Device plugin searches for removable devices
# that have been plugged in and creates new OutputDevice objects for each.
# Additionally, whenever a removable device has been removed, it will remove
# the OutputDevice object from the OutputDeviceManager.
#
# \sa OutputDeviceManager
class OutputDevicePlugin(PluginObject):
    def __init__(self) -> None:
        super().__init__()
        # Cached reference to the application's singleton OutputDeviceManager.
        self._output_device_manager = Application.getInstance().getOutputDeviceManager()
    ## Convenience method to get the Application's OutputDeviceManager.
    def getOutputDeviceManager(self):
        return self._output_device_manager
    ## Called by OutputDeviceManager to indicate the plugin should start its device detection.
    # Subclasses must override this.
    def start(self) -> None:
        raise NotImplementedError("Start should be implemented by subclasses")
    ## Called by OutputDeviceManager to indicate the plugin should stop its device detection.
    # Subclasses must override this.
    def stop(self) -> None:
        raise NotImplementedError("Stop should be implemented by subclasses")
    ## Used to check if this adress makes sense to this plugin w.r.t. adding(/removing) a manual device.
    # /return 'No', 'possible', or 'priority' (in the last case this plugin takes precedence, use with care).
    # The default implementation declines all addresses.
    def canAddManualDevice(self, address: str = "") -> ManualDeviceAdditionAttempt:
        return ManualDeviceAdditionAttempt.NO
    ## Add a manual device by the specified address (for example, an IP).
    # The optional callback is a function with signature func(success: bool, address: str) -> None, where
    # - success is a bool that indicates if the manual device's information was successfully retrieved.
    # - address is the address of the manual device.
    def addManualDevice(self, address: str, callback: Optional[Callable[[bool, str], None]] = None) -> None:
        pass
    ## Remove a manual device by either the name and/or the specified address.
    # Since this may be asynchronous, use the 'removeDeviceSignal' when the machine actually has been added.
    def removeManualDevice(self, key: str, address: Optional[str] = None) -> None:
        pass
    ## Starts to discovery network devices that can be handled by this plugin.
    def startDiscovery(self) -> None:
        pass
    ## Refresh the available/discovered printers for an output device that handles network printers.
    def refreshConnections(self) -> None:
        pass
| StarcoderdataPython |
1752737 | from __future__ import print_function
import json
import os
try:
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
Signal = pyqtSignal
except ImportError:
from PySide2.QtWidgets import *
from PySide2.QtGui import *
from PySide2.QtCore import *
import hou
# Factory defaults for Hammer Tools. Treat as read-only: SettingsManager
# copies this dict rather than aliasing it (see resetToFactoryDefaults).
DEFAULT_SETTINGS = {
    'hammer.previous_files.first_start': True,
    'hammer.previous_files.enable': True,
    'hammer.previous_files.startup': True,
    'hammer.previous_files.check_file_existence': True,
    'hammer.previous_files.db_location': '$HOUDINI_USER_PREF_DIR',
    'hammer.previous_files.silent.manual_update': True,
    'hammer.previous_files.silent.disable_sims': False,
    'hammer.open_location.enable': True,
    'hammer.open_location.use_custom_explorer': False,
    'hammer.open_location.custom_explorer_path': '',
    'hammer.audio.play.enable': True,
    'hammer.audio.play.use_external_player': True,
    'hammer.audio.set_scene_audio.enable': True,
    'hammer.set_interpolation.enable': True,
    'hammer.select_parm_value.select.enable': True,
    'hammer.select_parm_value.select_font.enable': True,
    'hammer.shelf.copy_tool.enable': True,
    'hammer.shelf.edit_shelf_tools.enable': True
}


class SettingsManager:
    """JSON-backed settings store with optional transactional editing.

    Settings live in a flat dict keyed by dotted names. setValue() either
    writes directly (State.Direct) or stages values between beginEdit()
    and endEdit() (State.Accumulation). With SaveBehaviour.Immediatly,
    every committed change is persisted to the settings file.
    """
    _instance = None

    class State:
        # Direct: setValue() writes straight into the live settings.
        # Accumulation: setValue() stages values until endEdit() commits.
        Direct = 1
        Accumulation = 2

    class SaveBehaviour:
        # Immediatly: persist to disk on every committed change.
        # OnDemand: persist only on an explicit save().
        Immediatly = 1
        OnDemand = 2

    def __init__(self, settings_file=None):
        """Create a manager bound to *settings_file*.

        :param settings_file: path to an existing settings file, or None to
            use the default Houdini location.
        :raises FileNotFoundError: if an explicit path does not exist.
        """
        # File
        if settings_file is None:
            self.__settings_file = hou.homeHoudiniDirectory() + '/hammer_tools.settings'
        elif os.path.isfile(settings_file):
            self.__settings_file = settings_file
        else:
            raise FileNotFoundError
        # Data
        self.__data = {}
        self.__accumulated_data = {}
        self.__state = SettingsManager.State.Direct
        self.__save_mode = SettingsManager.SaveBehaviour.Immediatly
        # Fill Data: defaults first, then overlay whatever is on disk.
        self.resetToFactoryDefaults()
        self.load()

    def beginEdit(self):
        """Enter accumulation mode: subsequent setValue() calls are staged."""
        self.__state = SettingsManager.State.Accumulation

    def endEdit(self, cancel=False):
        """Leave accumulation mode, committing (or discarding) staged values.

        BUG FIX: the guard previously read `== State.Accumulation` and
        returned immediately, so endEdit() was a no-op exactly when there
        was something to commit. It must bail out only when *not* editing.
        """
        if self.__state != SettingsManager.State.Accumulation:
            return  # not inside beginEdit(); nothing to commit (could raise)
        self.__state = SettingsManager.State.Direct
        if cancel:
            self.__accumulated_data.clear()
            return
        self.__data.update(self.__accumulated_data)
        self.__accumulated_data.clear()
        if self.__save_mode == SettingsManager.SaveBehaviour.Immediatly:
            self.save()

    def value(self, setting_key):
        """Return the current value for *setting_key*.

        :raises ValueError: if the key is unknown.
        """
        if setting_key not in self.__data:
            raise ValueError('Invalid setting key')
        return self.__data[setting_key]

    def setValue(self, setting_key, value, force_direct=False):
        """Set *setting_key* to *value*, honouring the current edit state.

        :param force_direct: bypass accumulation even inside beginEdit().
        """
        if self.__state == SettingsManager.State.Direct or force_direct:
            self.__data[setting_key] = value
            if self.__save_mode == SettingsManager.SaveBehaviour.Immediatly:
                self.save()
        elif self.__state == SettingsManager.State.Accumulation:
            self.__accumulated_data[setting_key] = value

    def save(self, settings_file=None):
        """Serialize the current settings to *settings_file* (default: bound file)."""
        with open(self.__settings_file if settings_file is None else settings_file, 'w') as file:
            json.dump(self.__data, file, indent=4)

    def resetToFactoryDefaults(self):
        """Replace current settings with a copy of the factory defaults.

        BUG FIX: previously bound __data to the DEFAULT_SETTINGS dict itself,
        so any later setValue() silently mutated the module-level defaults
        and made this reset a no-op.
        """
        self.__data = dict(DEFAULT_SETTINGS)

    def load(self, settings_file=None, update=True):
        """Load settings from disk, ignoring missing or malformed files.

        :param update: overlay loaded values onto current data (True) or
            replace the data outright (False).
        """
        try:
            with open(self.__settings_file if settings_file is None else settings_file, 'r') as file:
                try:
                    data = json.load(file)
                    if update:
                        self.__data.update(data)
                    else:
                        self.__data = data
                except ValueError:
                    pass  # corrupt/empty file: keep current values
        except IOError:
            pass  # file missing/unreadable: keep current values

    def isSynced(self):
        """Not yet implemented: whether in-memory data matches the file."""
        raise NotImplementedError

    def sync(self):
        """Not yet implemented: reconcile in-memory data with the file."""
        raise NotImplementedError

    def reset(self):
        """Not yet implemented: discard changes."""
        raise NotImplementedError

    @classmethod
    def instance(cls):
        """Return the shared singleton instance, creating it on first use."""
        if cls._instance is None:
            cls._instance = cls()
        return cls._instance
| StarcoderdataPython |
1609920 | import pytest
from tf_yarn._env import (
gen_pyenv_from_existing_archive,
CONDA_CMD, CONDA_ENV_NAME
)
# Parametrized cases for gen_pyenv_from_existing_archive:
# (archive path, expected dispatch command, expected destination path).
# A .pex archive is executed directly; a .zip is treated as a conda env.
test_data = [
    ("/path/to/myenv.pex",
     "./myenv.pex",
     "myenv.pex"),
    ("/path/to/myenv.zip",
     f"{CONDA_CMD}",
     CONDA_ENV_NAME)
]
@pytest.mark.parametrize(
    "path_to_archive,expected_cmd, expected_dest_path",
    test_data)
def test_gen_pyenvs_from_existing_env(path_to_archive, expected_cmd,
                                      expected_dest_path):
    """gen_pyenv_from_existing_archive maps each archive type to the
    expected dispatch command and destination path."""
    result = gen_pyenv_from_existing_archive(path_to_archive)
    assert result.path_to_archive == path_to_archive
    assert result.dispatch_task_cmd == expected_cmd
    assert result.dest_path == expected_dest_path
def test_gen_pyenvs_from_unknown_format():
    """An unsupported archive extension (.tar.bz2) must raise ValueError."""
    with pytest.raises(ValueError):
        gen_pyenv_from_existing_archive("/path/to/pack.tar.bz2")
| StarcoderdataPython |
134256 | <filename>fem/utilities/command_dispatcher/test.py
import inspect
def check_types(*args):
    """Decorator factory enforcing positional argument types.

    Usage: @check_types(T0, T1, ...) — the wrapped function raises
    TypeError when the runtime type of positional argument i is not a
    subclass of args[i].
    """
    def check_args(func, *args2):
        types = tuple(map(type, args2))
        for i in range(len(types)):
            # BUG FIX: the original used isinstance(types[i], args[i]).
            # types[i] is already a *type object*, so that test was False
            # even for perfectly valid calls (isinstance(int, int) is
            # False) and the decorator rejected everything. issubclass
            # compares the actual type against the declared one.
            if not issubclass(types[i], args[i]):
                raise TypeError("Argument types for %s%s do not match! %s" % (func.__name__, str(args), str(types)))
    def add_args_checking(func):
        def _func(*args2):
            check_args(func, *args2)
            return func(*args2)
        return _func
    return add_args_checking
class Dummy(object):
    """Demo class exercising the check_types decorator."""

    # Declared types: object for self, then int, int, float for a, b, c.
    @check_types(object, int, int, float)
    def func1(self, a, b, c):
        """Print the three checked arguments."""
        print(a, b, c)
print(type((int, int, float))) | StarcoderdataPython |
1786112 | <gh_stars>0
import sys
import io
from mlad.cli import context
from . import mock
def setup_function():
    """pytest per-test hook: initialise the mock context store."""
    mock.setup()
def teardown_function():
    """pytest per-test hook: tear down the mock context store."""
    mock.teardown()
def test_ls():
    """context.ls() lists contexts, marking the active one with '*'.

    FIX: restore sys.stdout in a finally block — previously a failure in
    context.ls() left stdout redirected for every subsequent test.
    """
    origin_stdout = sys.stdout
    mock.add('test1')
    mock.add('test2')
    sys.stdout = buffer = io.StringIO()
    try:
        context.ls()
    finally:
        sys.stdout = origin_stdout
    assert buffer.getvalue() == ' NAME \n* test1\n  test2\n'
def test_blank_ls():
    """With no contexts, ls() prints only the header and warns on stderr.

    FIX: restore sys.stdout/sys.stderr in a finally block and assert after
    restoring — previously a failing assert left both streams redirected
    (and pytest's own failure output swallowed) for the rest of the run.
    """
    origin_stdout = sys.stdout
    origin_stderr = sys.stderr
    sys.stdout = buffer = io.StringIO()
    sys.stderr = buffer2 = io.StringIO()
    try:
        context.ls()
    finally:
        sys.stdout = origin_stdout
        sys.stderr = origin_stderr
    assert buffer.getvalue() == ' NAME\n'
    assert buffer2.getvalue() == 'There are no contexts.\n'
| StarcoderdataPython |
153573 | <filename>pebble/build/c4che/_cache.py
# Machine-generated waf build cache (c4che) for a Pebble SDK watchapp build.
# Regenerated by the build system on every configure — do not edit by hand.
BINDIR = '/usr/local/bin'
BLOCK_MESSAGE_KEYS = []
BUILD_TYPE = 'app'
BUNDLE_NAME = 'pebble.pbw'
DEFINES = ['RELEASE']
LIBDIR = '/usr/local/lib'
LIB_DIR = 'node_modules'
LIB_JSON = []
MESSAGE_KEYS = {}
MESSAGE_KEYS_HEADER = '/mnt/files/scripts/pebble/pebble-navigation/pebble/build/include/message_keys.auto.h'
NODE_PATH = '/home/rhys/.pebble-sdk/SDKs/current/node_modules'
PEBBLE_SDK_COMMON = '/home/rhys/.pebble-sdk/SDKs/current/sdk-core/pebble/common'
PEBBLE_SDK_ROOT = '/home/rhys/.pebble-sdk/SDKs/current/sdk-core/pebble'
PREFIX = '/usr/local'
# Parsed appinfo/package.json for the app being built.
PROJECT_INFO = {'appKeys': {}, u'sdkVersion': u'3', u'displayName': u'geocaching', u'uuid': u'6191ad65-6cb1-404f-bccc-2446654c20ab', u'messageKeys': {}, u'companyName': u'WebMajstr', u'enableMultiJS': True, u'targetPlatforms': [u'aplite', u'basalt', u'chalk'], 'versionLabel': u'2.0', 'longName': u'geocaching', 'shortName': u'geocaching', u'watchapp': {u'watchface': False}, u'resources': {u'media': [{u'menuIcon': True, u'type': u'png', u'name': u'IMAGE_MENU_ICON', u'file': u'images/menu_icon_geocache.png'}, {u'type': u'png', u'name': u'NO_BT', u'file': u'images/no_bt.png'}, {u'type': u'font', u'name': u'FONT_ROBOTO_CONDENSED_30', u'file': u'fonts/Roboto-Condensed.ttf'}, {u'type': u'font', u'name': u'FONT_ROBOTO_BOLD_SUBSET_22', u'file': u'fonts/Roboto-Bold.ttf'}, {u'type': u'font', u'name': u'FONT_ROBOTO_CONDENSED_36', u'file': u'fonts/Roboto-Condensed.ttf'}]}, 'name': u'geocaching'}
REQUESTED_PLATFORMS = [u'aplite', u'basalt', u'chalk']
RESOURCES_JSON = [{u'menuIcon': True, u'type': u'png', u'name': u'IMAGE_MENU_ICON', u'file': u'images/menu_icon_geocache.png'}, {u'type': u'png', u'name': u'NO_BT', u'file': u'images/no_bt.png'}, {u'type': u'font', u'name': u'FONT_ROBOTO_CONDENSED_30', u'file': u'fonts/Roboto-Condensed.ttf'}, {u'type': u'font', u'name': u'FONT_ROBOTO_BOLD_SUBSET_22', u'file': u'fonts/Roboto-Bold.ttf'}, {u'type': u'font', u'name': u'FONT_ROBOTO_CONDENSED_36', u'file': u'fonts/Roboto-Condensed.ttf'}]
SANDBOX = False
SUPPORTED_PLATFORMS = ['chalk', 'basalt', 'diorite', 'aplite', 'emery']
TARGET_PLATFORMS = ['chalk', 'basalt', 'aplite']
TIMESTAMP = 1601736560
USE_GROUPS = True
VERBOSE = 0
WEBPACK = '/home/rhys/.pebble-sdk/SDKs/current/node_modules/.bin/webpack'
1686628 | import json
import operator
import os
import pickle
import subprocess
from django.contrib.auth import logout, authenticate, login
from django.shortcuts import render, render_to_response
from django.utils.datastructures import MultiValueDictKeyError
from django.contrib.auth.models import User
from carnivora.instabot.config import ConfigLoader, Config
from carnivora.instabot.driver import Driver
from carnivora.instabot.log import Log
from carnivora.instabot.statistics import Statistics
from carnivora.instabot.statistics import frequencies
from carnivora.instabot.statistics import timeranges
from tf_imagenet.imagenet import classify_image
from tf_open_nsfw.classify_nsfw import classify_nsfw
from django.contrib.auth.decorators import user_passes_test
def index(request):
    """Render the public landing page (no auth required)."""
    return render_to_response('index.html')
def main_body(request):
    """Render the app body for authenticated users, else the login form."""
    if request.user.is_authenticated:
        return render(request, 'main-body.html')
    else:
        return render(request, 'login.html')
def login_user(request):
    """Log the session out, then authenticate with the supplied credentials.

    NOTE(review): credentials are read from request.GET, so the password
    appears in the URL/query string and server logs — should be POST.
    """
    logout(request)
    username = request.GET['username']
    password = request.GET['password']
    user = authenticate(username=username, password=password)
    if user is not None:
        if user.is_active:
            login(request, user=user)
            return render(request, 'main-body.html')
    return render(request, 'login.html', {'message': 'Login failed. Please try again.'})
def load_registration(request):
    """Render the registration form pre-filled with username/password.

    NOTE(review): the password travels in the query string — should be POST.
    """
    username = request.GET['username']
    password = request.GET['password']
    return render(request, 'register.html', {'username': username, 'password': password})
def logout_user(request):
    """Terminate the session and render the logout page."""
    logout(request)
    return render(request, 'logout.html')
def register_user(request):
    """Create a Django user from GET parameters and log them in.

    NOTE(review): no uniqueness/validation check — create_user raises an
    IntegrityError (HTTP 500) for a duplicate username; credentials are
    again passed via GET.
    """
    logout(request)
    username = request.GET['username']
    email = request.GET['email']
    password = request.GET['password']
    User.objects.create_user(username=username, email=email, password=password)
    user = authenticate(username=username, password=password)
    if user is not None:
        if user.is_active:
            login(request, user=user)
            return render(request, 'main-body.html')
    return render(request, 'register.html', {'message': 'Registration failed. Please try again.'})
def load_button_chain(request):
    """Render the start/stop button bar reflecting the bot's running flag.

    The flag is a pickled bool at <bot_path>/log/<user>/running.pickle;
    a missing file means the bot has never been started.
    """
    if not request.user.is_authenticated:
        return  # NOTE(review): returning None makes Django error; redirect instead?
    username = request.user.username
    log_path = Config.bot_path + "log/" + username
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    running_path = log_path + "/running.pickle"
    try:
        with open(running_path, "rb") as f:
            active = bool(pickle.load(f))
    except FileNotFoundError:
        active = False
    return render(request, 'buttonchain.html', {'active': active})
def run_instabot(request):
    """Set the running flag, start a bot Driver thread, and render the buttons.

    NOTE(review): the Instagram username/password arrive via request.GET
    (query string); the screenshot path is built from the Django username.
    """
    if not request.user.is_authenticated:
        return
    username = request.GET['username']
    password = request.GET['password']
    log_path = Config.bot_path + "log/" + username
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    running_path = log_path + "/running.pickle"
    # Persist the 'running' flag that the driver polls.
    with open(running_path, "wb") as f:
        pickle.dump(True, f)
    screenshot_folder = "static/img/" + request.user.username
    if not os.path.exists(screenshot_folder):
        os.makedirs(screenshot_folder)
    screenshot_path = screenshot_folder + "/screenshot.png"
    driver = Driver(username=username, password=password, screenshot_path=screenshot_path)
    driver.start()
    return render(request, 'buttonchain.html', {'active': True})
def stop_instabot(request):
    """Clear the running flag (the driver polls it and stops) and render buttons."""
    if not request.user.is_authenticated:
        return
    username = request.user.username
    log_path = Config.bot_path + "log/" + username
    if not os.path.exists(log_path):
        os.makedirs(log_path)
    running_path = log_path + "/running.pickle"
    with open(running_path, "wb") as f:
        pickle.dump(False, f)
    return render(request, 'buttonchain.html', {'active': False})
@user_passes_test(lambda u: u.is_superuser)
def update_server(request):
    """Superuser-only: run `git status` and `git pull` and show their output.

    FIX: the original appended the raw Popen.stdout *pipe object* to the
    template context (templates cannot render it usefully) and never read
    or closed the pipe. Read and decode the output before waiting.
    """
    commands = [["git", "status"], ["git", "pull"]]
    output = []
    for command in commands:
        process = subprocess.Popen(command, stdout=subprocess.PIPE)
        # Read before wait() so a full pipe buffer cannot deadlock the child.
        stdout_text = process.stdout.read().decode(errors="replace")
        process.stdout.close()
        output.append(
            (" ".join(command), stdout_text, str(process.wait(timeout=30)))
        )
    return render(request, 'server_update.html', {'output': output})
def table_monitor_update(request):
    """AJAX partial: render the latest *n* log lines matching *search*."""
    if not request.user.is_authenticated:
        return
    # Page size; defaults to 20 when absent or non-numeric.
    try:
        n = int(request.GET['n'])
    except (MultiValueDictKeyError, ValueError):
        n = 20
    try:
        search = request.GET['search']
    except (MultiValueDictKeyError, ValueError):
        search = ''
    username = request.user.username
    log_path = Config.bot_path + "log/" + username
    path = log_path + "/log.pickle"
    lines = Log.get(log_path=path, page_size=n, search=search)
    return render(request, 'table_monitor_update.html', {'lines': lines})
def load_screenshot(request):
    """AJAX partial: render the user's latest bot screenshot.

    The file mtime is appended as a cache-busting query parameter.
    FIX: os.path.getmtime raised FileNotFoundError (HTTP 500) before the
    first screenshot exists; the sibling monitor() view already guards the
    identical call with mtime=0 — do the same here for consistency.
    """
    if not request.user.is_authenticated:
        return
    path = "static/img/" + request.user.username + "/screenshot.png"
    try:
        time = os.path.getmtime(path)
    except FileNotFoundError:
        time = 0  # no screenshot captured yet
    src = path + "?mtime=" + str(time)
    return render(request, 'screenshot.html', {'src': src})
@user_passes_test(lambda u: u.is_superuser)
def submit_to_config(request):
    """Superuser-only: persist a single config key/value and echo it back.

    NOTE(review): mutation via GET; missing parameters fall through to an
    empty settings_update render.
    """
    try:
        config_key = request.GET['config_key']
        config_param = request.GET['config_param']
        ConfigLoader.store(config_key, config_param)
        return render(request, 'settings_update.html', {'config_key': config_key, 'config_param': config_param})
    except MultiValueDictKeyError as e:
        print(e)
        return render(request, 'settings_update.html')
def monitor(request):
    """Render the monitor page: log table plus latest screenshot."""
    if not request.user.is_authenticated:
        return
    try:
        n = int(request.GET['n'])
    except (MultiValueDictKeyError, ValueError):
        n = 20
    try:
        search = request.GET['search']
    except (MultiValueDictKeyError, ValueError):
        search = ''
    # NOTE(review): duplicate authentication check — dead code.
    if not request.user.is_authenticated:
        return
    path = "static/img/" + request.user.username + "/screenshot.png"
    try:
        time = os.path.getmtime(path)
    except FileNotFoundError:
        time = 0  # screenshot not captured yet
    src = path + "?mtime=" + str(time)
    # pages = range(Log.number_of_pages(page_size=page_size))
    username = request.user.username
    log_path = Config.bot_path + "log/" + username
    path = log_path + "/log.pickle"
    lines = Log.get(log_path=path, page_size=n, search=search)
    return render(request, 'monitor.html', {'lines': lines, 'src': src})
def statistics(request):
    """Render the statistics dashboard (hashtags, action counts, timelines,
    dispatch quotas) for the logged-in user.

    GET params: freq (timeline bucket size) and timerange, both optional.
    """
    if not request.user.is_authenticated:
        return
    username = request.user.username
    try:
        freq = request.GET['freq']
    except (MultiValueDictKeyError, ValueError):
        freq = "Calendar day frequency"
    try:
        timerange = request.GET['timerange']
    except (MultiValueDictKeyError, ValueError):
        timerange = None
    hashtag_names, hashtag_scores = Statistics.get_hashtags(username=username, n=40, truncated_name_length=20)
    amount_of_users, amount_of_interactions, amount_of_likes, amount_of_follows, amount_of_comments \
        = Statistics.get_amount_of_actions(username=username)
    amount_of_follows_all_time = Statistics.get_amount_of_followed_accounts(username=username)
    index, likes_data, comments_data, follows_data = Statistics.get_timelines(
        username=username,
        freq=freq,
        timerange=timerange
    )
    # Current progress vs. remaining quota for each action type.
    current_likes, remaining_likes, \
    current_follows, remaining_follows, \
    current_comments, remaining_comments, \
    current_unfollows, remaining_unfollows = Statistics.get_dispatch_statistics(username=username)
    tr = timeranges.keys()
    render_data = {
        'hashtag_names': json.dumps(hashtag_names),
        'hashtag_scores': hashtag_scores,
        'amount_of_users': amount_of_users,
        'amount_of_likes': amount_of_likes,
        'amount_of_comments': amount_of_comments,
        'amount_of_follows': amount_of_follows,
        'amount_of_interactions': amount_of_interactions,
        'amount_of_follows_all_time': amount_of_follows_all_time,
        'index': index,
        'likes_data': likes_data,
        'comments_data': comments_data,
        'follows_data': follows_data,
        'frequencies': frequencies,
        'freq': freq,
        'timerange': timerange,
        'timeranges': tr,
        'current_likes': current_likes,
        'current_follows': current_follows,
        'current_comments': current_comments,
        'current_unfollows': current_unfollows,
        'remaining_likes': remaining_likes,
        'remaining_follows': remaining_follows,
        'remaining_comments': remaining_comments,
        'remaining_unfollows': remaining_unfollows,
    }
    return render(request, 'statistics.html', render_data)
@user_passes_test(lambda u: u.is_superuser)
def submit_nsfw(request):
    """Superuser-only: classify an image URL with the NSFW model and render
    the score (0-100) into a progress bar partial."""
    if not request.user.is_authenticated:
        return render(request, 'nsfw_progress_bar.html', {'nsfw': 0})
    try:
        link = request.GET['nsfw_link']
        sfw, nsfw = classify_nsfw(link)
        return render(request, 'nsfw_progress_bar.html', {'nsfw': int(nsfw * 100)})
    except MultiValueDictKeyError as e:
        print(e)
        return render(request, 'nsfw_progress_bar.html', {'nsfw': 0})
@user_passes_test(lambda u: u.is_superuser)
def submit_to_classification(request):
    """Superuser-only: run ImageNet classification on an image URL and render
    the top-5 predictions."""
    if not request.user.is_authenticated:
        return render(request, 'image_classification.html', {'top_k': []})
    username = request.user.username
    try:
        link = request.GET['link']
        top_k = classify_image(image_url=link, num_predictions=5, username=username)
        return render(request, 'image_classification.html', {'top_k': top_k})
    except MultiValueDictKeyError as e:
        print(e)
        return render(request, 'image_classification.html', {'top_k': []})
@user_passes_test(lambda u: u.is_superuser)
def server(request):
    """Superuser-only: render the server administration page."""
    return render(request, 'server.html')
@user_passes_test(lambda u: u.is_superuser)
def nsfw_check(request):
    """Superuser-only: render the NSFW check form page."""
    return render(request, 'nsfw_check.html')
@user_passes_test(lambda u: u.is_superuser)
def perform_reboot(request):
    """Superuser-only: touch this source file so the dev autoreloader restarts.

    NOTE(review): returns None, which Django treats as an invalid response;
    presumably the client ignores the reply. Consider returning an
    HttpResponse and using subprocess instead of os.system.
    """
    os.system('touch '+__file__)
    return None
@user_passes_test(lambda u: u.is_superuser)
def settings(request):
    """Superuser-only: render the settings page.

    List-valued keys (nsfw_hashtags, comments, topics, smileys) are passed
    separately; everything else is shown as a sorted key/value table.
    """
    config = ConfigLoader.load()
    sorted_config = sorted(config.items(), key=operator.itemgetter(0))
    filtered_config = [(k, v) for k, v in sorted_config if
                       k != "nsfw_hashtags" and
                       k != "comments" and
                       k != "topics" and
                       k != "smileys"]
    return render(request, 'settings.html', {
        'filtered_config': filtered_config,
        'nsfw_hashtags': config['nsfw_hashtags'],
        'comments': config['comments'],
        'topics': config['topics'],
        'smileys': config['smileys'],
    })
| StarcoderdataPython |
# Element-wise product of two small arrays.
# FIX: removed dataset-marker residue fused into the first and last lines,
# and replaced the pointless alias `import numpy as numpy` with the
# conventional `np`.
import numpy as np

a = np.array([1, 2, 3, 4])
b = np.array([10, 20, 30, 40])
c = a * b
print(c)
147807 | <filename>Scripts/random_sampler.py
import os
import itertools
import numpy as np
import random
import math
import shutil
random.seed(100)
np.random.seed(100)
def populate(images, split_index=1):
    """Group image filenames by the subject ID embedded in their names.

    Filenames look like 'UID_<id>_...'; the ID is the token at
    *split_index* when splitting on underscores.

    :return: (groups, counts) — ID -> matching filenames, and ID -> count.
    """
    assert type(images) is list and len(images) > 0, "Check input..."
    groups = {}
    counts = {}
    for name in images:
        uid = name.split("_")[split_index]
        if uid in groups:
            continue  # already collected on a previous filename
        prefix = "UID_{}_".format(uid)
        matches = [candidate for candidate in images if candidate.startswith(prefix)]
        groups[uid] = matches
        counts[uid] = len(matches)
    return groups, counts
def split(hem, all1, n_hem, n_all, ratio=0.3, tolerence=0.02):
    """Randomly split subject IDs into train/val/test until class balance fits.

    Repeatedly shuffles both ID lists in place, takes a 70/15/15 split of
    IDs, and accepts the split once the HEM *image* ratio of every set is
    within `tolerence` of `ratio`.

    :param hem: list of (subject_id, image_count) tuples for the HEM class
    :param all1: list of (subject_id, image_count) tuples for the ALL class
    :param n_hem: number of HEM subject IDs
    :param n_all: number of ALL subject IDs
    :param ratio: target fraction of HEM images in each set
    :param tolerence: accepted deviation from `ratio` (param name kept as-is)
    :return: (train_hem, train_all, val_hem, val_all, test_hem, test_all)

    NOTE(review): may loop forever if no shuffle satisfies the tolerance,
    and the 0.7 train fraction is hard-coded, independent of `ratio`.
    """
    while True:
        random.shuffle(hem)
        random_hem = hem  # shuffle is in place; these are aliases, not copies
        random.shuffle(all1)
        random_all = all1
        # Split subject IDs: 70% train, remainder halved into val/test.
        n_train_hem = int(len(hem) * 0.7)
        n_val_hem = ((n_hem - n_train_hem) // 2)
        n_test_hem = n_hem - n_val_hem - n_train_hem
        train_hem = random_hem[:n_train_hem]
        val_hem = random_hem[n_train_hem:n_train_hem+n_val_hem]
        test_hem = random_hem[-n_test_hem:]
        # train_hem = random_hem[-n_train_hem:]
        # val_hem = random_hem[-(n_train_hem+n_val_hem):-n_train_hem]
        # test_hem = random_hem[:n_test_hem]
        n_train_all = int(len(all1) * 0.7)
        n_val_all = ((n_all - n_train_all) // 2)
        n_test_all = n_all - n_val_all - n_train_all
        train_all = random_all[:n_train_all]
        val_all = random_all[n_train_all:n_train_all+n_val_all]
        test_all = random_all[-n_test_all:]
        # train_all = random_all[-n_train_all:]
        # val_all = random_all[-(n_train_all + n_val_all):-n_train_all]
        # test_all = random_all[:n_test_all]
        # From here on the n_* names are reused for *image* counts, not ID counts.
        n_train_hem = get_sum(train_hem)
        n_val_hem = get_sum(val_hem)
        n_test_hem = get_sum(test_hem)
        n_train_all = get_sum(train_all)
        n_val_all = get_sum(val_all)
        n_test_all = get_sum(test_all)
        n_train = n_train_all + n_train_hem
        n_val = n_val_hem + n_val_all
        n_test = n_test_hem + n_test_all
        train_hem_ratio = n_train_hem / n_train
        train_all_ratio = n_train_all / n_train
        val_hem_ratio = n_val_hem / n_val
        val_all_ratio = n_val_all / n_val
        test_hem_ratio = n_test_hem / n_test
        test_all_ratio = n_test_all / n_test
        print('Train - hem: {} | all: {}'.format(train_hem_ratio, train_all_ratio))
        print('Validation - hem: {} | all: {}'.format(val_hem_ratio, val_all_ratio))
        print('Test - hem: {} | all: {}'.format(test_hem_ratio, test_all_ratio))
        print('-------------------------------')
        if (
            (math.fabs(ratio - train_hem_ratio) <= tolerence) and
            (math.fabs(ratio - val_hem_ratio) <= tolerence) and
            (math.fabs(ratio - test_hem_ratio) <= tolerence)
        ):
            print('Done.')
            break
    return (train_hem, train_all, val_hem, val_all, test_hem, test_all)
def get_sum(samples):
    """Total image count across (subject_id, image_count) tuples."""
    return sum(count for _, count in samples)
def get_dist(hem_list, all_list):
    """Format the HEM/ALL image-count proportions as a display string."""
    n_hem = get_sum(hem_list)
    n_all = get_sum(all_list)
    total = n_hem + n_all
    return 'HEM: {} | ALL: {}'.format(n_hem / total, n_all / total)
if __name__ == "__main__":
    # Change the working dir (hard-coded local Windows path — will only run
    # on the author's machine).
    os.chdir(r'D:\Personal\ISBI\train - Copy')
    folds = ['fold_0', 'fold_1', 'fold_2']
    ALL_IMAGES = [os.listdir(os.path.join(os.getcwd(), fold, 'all')) for fold in folds]
    HEM_IMAGES = [os.listdir(os.path.join(os.getcwd(), fold, 'hem')) for fold in folds]
    ALL = []
    HEM = []
    for _all in ALL_IMAGES:
        ALL += _all
    for hem in HEM_IMAGES:
        HEM += hem
    # NOTE(review): num_hem is computed from ALL and num_all from HEM —
    # these look swapped; verify before trusting the printed distribution.
    num_hem = len(ALL)
    num_all = len(HEM)
    hem_dist = num_hem/(num_hem + num_all)
    all_dist = num_all/(num_hem + num_all)
    print('HEM:', hem_dist, ',', 'ALL:', all_dist)
    hem_dict, count_hem = populate(HEM)
    all_dict, count_all = populate(ALL)
    # Number of distinct subject IDs per class.
    n_hem_g = len(count_hem)
    n_all_g = len(count_all)
    # Sort the distribution in the ascending order of the number of images associated with each ID.
    sorted_hem = sorted(count_hem.items(), key=lambda x: x[1])
    sorted_all = sorted(count_all.items(), key=lambda x: x[1])
    # sorted_hem = [(k, v) for k, v in count_hem.items()]
    # sorted_all = [(k, v) for k, v in count_all.items()]
    (train_hem, train_all, val_hem,
     val_all, test_hem, test_all) = split(sorted_hem, sorted_all, n_hem_g, n_all_g, tolerence=0.02)
    print(train_hem)
    print(train_all)
    print(val_hem)
    print(val_all)
    print(test_hem)
    print(test_all)
    print(get_sum(train_hem))
    print(get_sum(train_all))
    print(get_sum(val_hem))
    print(get_sum(val_all))
    print(get_sum(test_hem))
    print(get_sum(test_all))
    # (A large commented-out block followed here: it copied the files for the
    #  selected UIDs from each fold into ../final_data/{train,val,test}/{all,hem}.
    #  Dead code summarized — restore from version control if needed.)
| StarcoderdataPython |
3382670 | <gh_stars>1-10
import numpy as np
from sklearn.preprocessing import MinMaxScaler
from collections import Counter
from matplotlib import pylab as plt
from tqdm import tqdm
# The dataset was downloaded from:
# http://www.stat.cmu.edu/~larry/all-of-statistics/=Rprograms/a1882_25.dat
# Parse whitespace-separated floats, keep column 2, and restrict to
# values <= 0.2 (per the original analysis).
D = [list(map(float, x.strip().split())) for x in open("a1882_25.dat").readlines()]
D = np.array(D)
D = D[:, 2]
D = D[D <= 0.2]
# D = MinMaxScaler().fit_transform(np.atleast_2d(D).T)[:, 0]
def riesgo(D, m=10):
    """Cross-validation risk estimate for a histogram with *m* bins.

    Implements J(h) = 2/((n-1)h) - (n+1)/((n-1)h) * sum_j p_j^2, where
    p_j are the empirical bin proportions.

    :param D: 1-D numpy array of samples.
    :param m: number of histogram bins.
    """
    n = D.shape[0]
    edges = np.linspace(D.min(), D.max(), m + 1)
    width = edges[1] - edges[0]
    bin_idx = np.searchsorted(edges, D, side='right')
    # Clamp the boundary cases: the minimum lands in bin 1, the maximum in bin m.
    bin_idx[bin_idx == 0] = 1
    bin_idx[bin_idx == m + 1] = m
    proportions_sq = sum((count / n) ** 2 for count in Counter(bin_idx).values())
    return (2 / ((n - 1) * width)) - ((n + 1) * proportions_sq / ((n - 1) * width))
# Sweep the bin count from 2 to 499, print the risk-minimizing m, and save
# the risk curve as hist-riesgo.png.
m = np.arange(2, 500)
r = [riesgo(D, x) for x in m]
print(np.argmin(r))
plt.plot(m, r)
plt.xlabel("Número de bins (m)")
plt.ylabel("Riesgo")
plt.grid()
plt.savefig("hist-riesgo.png")
# (Commented-out variant kept for reference: the same sweep over the
#  sklearn Boston housing targets, saved as riesgo-boston.png.)
# from sklearn import datasets
# X, y = datasets.load_boston(return_X_y=True)
#
# m = np.arange(2, 300)
# r = [riesgo(y, x) for x in m]
# print(np.argmin(r))
# plt.plot(m, r)
# plt.grid()
# plt.xlabel("Número de bins")
# plt.ylabel("Riesgo")
# plt.savefig("riesgo-boston.png")
3245355 | <filename>Codewars/8kyu/calculate-bmi/Python/test.py
# Python - 3.4.3
# Codewars kata test harness: `Test` and the `bmi` solution are injected by
# the Codewars runner — this file is not runnable standalone.
Test.describe('Basic tests')
Test.assert_equals(bmi(50, 1.80), 'Underweight')
Test.assert_equals(bmi(80, 1.80), 'Normal')
Test.assert_equals(bmi(90, 1.80), 'Overweight')
Test.assert_equals(bmi(110, 1.80), 'Obese')
Test.assert_equals(bmi(50, 1.50), 'Normal')
1775249 | <reponame>andyasne/commcare-hq
from datetime import datetime
from django.conf import settings
from django.contrib import messages
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.auth.views import redirect_to_login
from django.core.exceptions import ValidationError
from django.http import HttpResponseRedirect, JsonResponse
from django.shortcuts import render
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import ugettext as _
from django.utils.translation import ugettext_lazy
from django.views.decorators.debug import sensitive_post_parameters
from django.views.decorators.http import require_POST
from corehq.apps.registration.models import AsyncSignupRequest
from corehq.apps.sso.models import IdentityProvider
from dimagi.utils.couch import CriticalSection
from corehq.apps.accounting.decorators import always_allow_project_access
from corehq.apps.analytics.tasks import (
HUBSPOT_EXISTING_USER_INVITE_FORM,
HUBSPOT_NEW_USER_INVITE_FORM,
send_hubspot_form,
track_workflow,
)
from corehq.apps.domain.extension_points import has_custom_clean_password
from corehq.apps.domain.models import Domain
from corehq.apps.hqwebapp.views import BasePageView, logout
from corehq.apps.locations.permissions import location_safe
from corehq.apps.registration.forms import WebUserInvitationForm
from corehq.apps.registration.utils import activate_new_user_via_reg_form
from corehq.apps.users.decorators import require_can_edit_web_users
from corehq.apps.users.forms import DomainRequestForm
from corehq.apps.users.models import CouchUser, DomainRequest, Invitation
from corehq.const import USER_CHANGE_VIA_INVITATION
class UserInvitationView(object):
# todo cleanup this view so it properly inherits from BaseSectionPageView
template = "users/accept_invite.html"
def __call__(self, request, uuid, **kwargs):
# add the correct parameters to this instance
self.request = request
if 'domain' in kwargs:
self.domain = kwargs['domain']
if request.GET.get('switch') == 'true':
logout(request)
return redirect_to_login(request.path)
if request.GET.get('create') == 'true':
logout(request)
return HttpResponseRedirect(request.path)
try:
invitation = Invitation.objects.get(uuid=uuid)
except (Invitation.DoesNotExist, ValidationError):
messages.error(request, _("Sorry, it looks like your invitation has expired. "
"Please check the invitation link you received and try again, or "
"request a project administrator to send you the invitation again."))
return HttpResponseRedirect(reverse("login"))
if invitation.is_accepted:
messages.error(request, _("Sorry, that invitation has already been used up. "
"If you feel this is a mistake please ask the inviter for "
"another invitation."))
return HttpResponseRedirect(reverse("login"))
self.validate_invitation(invitation)
if invitation.is_expired:
return HttpResponseRedirect(reverse("no_permissions"))
# Add zero-width space to username for better line breaking
username = self.request.user.username.replace("@", "​@")
context = {
'formatted_username': username,
'domain': self.domain,
'invite_to': self.domain,
'invite_type': _('Project'),
'hide_password_feedback': has_custom_clean_password(),
}
if request.user.is_authenticated:
context['current_page'] = {'page_name': _('Project Invitation')}
else:
context['current_page'] = {'page_name': _('Project Invitation, Account Required')}
if request.user.is_authenticated:
is_invited_user = request.couch_user.username.lower() == invitation.email.lower()
if self.is_invited(invitation, request.couch_user) and not request.couch_user.is_superuser:
if is_invited_user:
# if this invite was actually for this user, just mark it accepted
messages.info(request, _("You are already a member of {entity}.").format(
entity=self.inviting_entity))
invitation.is_accepted = True
invitation.save()
else:
messages.error(request, _("It looks like you are trying to accept an invitation for "
"{invited} but you are already a member of {entity} with the "
"account {current}. Please sign out to accept this invitation "
"as another user.").format(
entity=self.inviting_entity,
invited=invitation.email,
current=request.couch_user.username))
return HttpResponseRedirect(self.redirect_to_on_success(invitation.email, self.domain))
if not is_invited_user:
messages.error(request, _("The invited user {invited} and your user {current} "
"do not match!").format(invited=invitation.email, current=request.couch_user.username))
if request.method == "POST":
couch_user = CouchUser.from_django_user(request.user, strict=True)
invitation.accept_invitation_and_join_domain(couch_user)
track_workflow(request.couch_user.get_email(),
"Current user accepted a project invitation",
{"Current user accepted a project invitation": "yes"})
send_hubspot_form(HUBSPOT_EXISTING_USER_INVITE_FORM, request)
return HttpResponseRedirect(self.redirect_to_on_success(invitation.email, self.domain))
else:
mobile_user = CouchUser.from_django_user(request.user).is_commcare_user()
context.update({
'mobile_user': mobile_user,
"invited_user": invitation.email if request.couch_user.username != invitation.email else "",
})
return render(request, self.template, context)
else:
idp = None
if settings.ENFORCE_SSO_LOGIN:
idp = IdentityProvider.get_active_identity_provider_by_username(invitation.email)
if request.method == "POST":
form = WebUserInvitationForm(request.POST, is_sso=idp is not None)
if form.is_valid():
# create the new user
invited_by_user = CouchUser.get_by_user_id(invitation.invited_by)
if idp:
signup_request = AsyncSignupRequest.create_from_invitation(invitation)
return HttpResponseRedirect(idp.get_login_url(signup_request.username))
user = activate_new_user_via_reg_form(
form,
created_by=invited_by_user,
created_via=USER_CHANGE_VIA_INVITATION,
domain=invitation.domain
)
user.save()
messages.success(request, _("User account for %s created!") % form.cleaned_data["email"])
invitation.accept_invitation_and_join_domain(user)
messages.success(
self.request,
_('You have been added to the "{}" project space.').format(self.domain)
)
authenticated = authenticate(username=form.cleaned_data["email"],
password=<PASSWORD>.cleaned_data["password"], request=request)
if authenticated is not None and authenticated.is_active:
login(request, authenticated)
track_workflow(request.POST['email'],
"New User Accepted a project invitation",
{"New User Accepted a project invitation": "yes"})
send_hubspot_form(HUBSPOT_NEW_USER_INVITE_FORM, request, user)
return HttpResponseRedirect(self.redirect_to_on_success(invitation.email, invitation.domain))
else:
if (CouchUser.get_by_username(invitation.email)
or User.objects.filter(username__iexact=invitation.email).count() > 0):
login_url = reverse("login")
accept_invitation_url = reverse(
'domain_accept_invitation',
args=[invitation.domain, invitation.uuid]
)
return HttpResponseRedirect(
f"{login_url}"
f"?next={accept_invitation_url}"
f"&username={invitation.email}"
)
form = WebUserInvitationForm(
initial={
'email': invitation.email,
},
is_sso=idp is not None,
)
context.update({
'is_sso': idp is not None,
'idp_name': idp.name if idp else None,
'invited_user': invitation.email,
})
context.update({"form": form})
return render(request, self.template, context)
def validate_invitation(self, invitation):
    # Invitations are domain-scoped: reject any invitation that was not
    # issued for the domain this view is serving.
    assert invitation.domain == self.domain
def is_invited(self, invitation, couch_user):
    # A user counts as already invited/joined if they are a member of the
    # invitation's domain.
    return couch_user.is_member_of(invitation.domain)
@property
def inviting_entity(self):
    # The entity extending the invitation is the project space (domain).
    return self.domain
def redirect_to_on_success(self, email, domain):
    """Pick the landing page after an invitation is accepted.

    Users who still have pending invitations are sent to the domain-selection
    redirect (unless ``no_redirect`` was passed); everyone else lands on the
    homepage of the domain they just joined.
    """
    has_pending = Invitation.by_email(email).count() > 0
    skip_redirect = self.request.GET.get('no_redirect')
    if has_pending and not skip_redirect:
        return reverse("domain_select_redirect")
    return reverse("domain_homepage", args=[domain])
@always_allow_project_access
@location_safe
@sensitive_post_parameters('password')
def accept_invitation(request, domain, uuid):
    # Thin dispatch wrapper: delegates to the class-based invitation view.
    # The import is deferred, presumably to avoid a circular import at module
    # load time — confirm before moving it to the top of the file.
    from corehq.apps.users.views.web import UserInvitationView
    return UserInvitationView()(request, uuid, domain=domain)
@always_allow_project_access
@require_POST
@require_can_edit_web_users
def reinvite_web_user(request, domain):
    # Re-send an existing invitation, refreshing its timestamp so the
    # invitation reflects the new send time.
    uuid = request.POST['uuid']
    try:
        invitation = Invitation.objects.get(uuid=uuid)
    except Invitation.DoesNotExist:
        # Unknown UUID: report a JSON error instead of raising.
        return JsonResponse({'response': _("Error while attempting resend"), 'status': 'error'})

    invitation.invited_on = datetime.utcnow()
    invitation.save()
    invitation.send_activation_email()
    return JsonResponse({'response': _("Invitation resent"), 'status': 'ok'})
@always_allow_project_access
@require_POST
@require_can_edit_web_users
def delete_invitation(request, domain):
    """Delete a pending invitation identified by its UUID.

    Mirrors the error handling of ``reinvite_web_user``: previously an
    unknown UUID raised an unhandled ``Invitation.DoesNotExist`` (a 500);
    now it returns an error payload like its sibling view.
    """
    uuid = request.POST['uuid']
    try:
        invitation = Invitation.objects.get(uuid=uuid)
    except Invitation.DoesNotExist:
        return JsonResponse({'response': _("Error while attempting delete"), 'status': 'error'})
    invitation.delete()
    return JsonResponse({'status': 'ok'})
@method_decorator(always_allow_project_access, name='dispatch')
class DomainRequestView(BasePageView):
    """Public page where a user can request access to a project space."""

    urlname = "domain_request"
    page_title = ugettext_lazy("Request Access")
    template_name = "users/domain_request.html"
    # Bound form from a failed POST, or None (a fresh form is built lazily).
    request_form = None

    @property
    def page_url(self):
        return reverse(self.urlname, args=[self.request.domain])

    @property
    def page_context(self):
        domain_obj = Domain.get_by_name(self.request.domain)
        if self.request_form is None:
            # No bound form (plain GET): prefill from the logged-in user
            # where possible.
            initial = {'domain': domain_obj.name}
            if self.request.user.is_authenticated:
                initial.update({
                    'email': self.request.user.get_username(),
                    'full_name': self.request.user.get_full_name(),
                })
            self.request_form = DomainRequestForm(initial=initial)
        return {
            'domain': domain_obj.name,
            'hr_name': domain_obj.display_name(),
            'request_form': self.request_form,
        }

    def post(self, request, *args, **kwargs):
        self.request_form = DomainRequestForm(request.POST)
        if self.request_form.is_valid():
            data = self.request_form.cleaned_data
            # Lock per-domain so concurrent submissions cannot create
            # duplicate requests for the same email.
            with CriticalSection(["domain_request_%s" % data['domain']]):
                if DomainRequest.by_email(data['domain'], data['email']) is not None:
                    messages.error(request, _("A request is pending for this email. "
                                              "You will receive an email when the request is approved."))
                else:
                    domain_request = DomainRequest(**data)
                    domain_request.send_request_email()
                    domain_request.save()
                    domain_obj = Domain.get_by_name(domain_request.domain)
                    return render(request, "users/confirmation_sent.html", {
                        'hr_name': domain_obj.display_name() if domain_obj else domain_request.domain,
                    })
        # Invalid form or duplicate request: re-render the page (the bound
        # form / error message is surfaced via page_context).
        return self.get(request, *args, **kwargs)
| StarcoderdataPython |
import pglive.examples_pyqt6 as examples
from threading import Thread

from pglive.sources.data_connector import DataConnector
from pglive.sources.live_plot import LiveHBarPlot
from pglive.sources.live_plot_widget import LivePlotWidget

"""
In this example Horizontal Bar plot is displayed.
"""

# Widget + plot setup: a live horizontal bar chart.
win = LivePlotWidget(title="Horizontal Bar Plot @ 100Hz")
plot = LiveHBarPlot(bar_height=1, brush="green", pen="green")
win.addItem(plot)
# The connector feeds generated samples into the plot, keeping 600 points.
data_connector = DataConnector(plot, max_points=600)
win.show()

# Generate data on a background thread, then enter the Qt event loop.
Thread(target=examples.sin_wave_generator, args=(data_connector,)).start()
examples.app.exec()
examples.stop()
| StarcoderdataPython |
1677850 | <gh_stars>1-10
from marl_env.envs.marl_farm import FarmMARL | StarcoderdataPython |
4838352 | """
Django settings for example project.
Generated by Cookiecutter Django Package
For more information on this file, see
https://docs.djangoproject.com/en/1.9/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.9/ref/settings/
"""
import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.9/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = "--------------------------------------------------"
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'parler',
'ordered_model',
# 'ckeditor', # uncomment for ckeditor support
'tinymce', # uncomment for tinymce support
'minicms',
]
MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'example.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [os.path.join(BASE_DIR, 'templates'), ],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'example.wsgi.application'

# Database
# https://docs.djangoproject.com/en/1.9/ref/settings/#databases
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': os.path.join(BASE_DIR, 'db.sqlite3'),
    }
}

# Password validation
# https://docs.djangoproject.com/en/1.9/ref/settings/#auth-password-validators
AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]
# Internationalization
# https://docs.djangoproject.com/en/1.9/topics/i18n/
LANGUAGE_CODE = 'en'

# No-op marker so the language names below can be wrapped for translation
# without importing Django's translation machinery in this settings module.
_ = lambda s: s

LANGUAGES = (
    ('en', _(u'English')),
    ('pl', _(u'Polish')),
)

CMS_LANGUAGES = {
    None: [
        {
            'code': 'en',
            'name': _('English'),
            'public': True,
            'hide_untranslated': True,
        },
        {
            'code': 'pl',
            'name': _('polski'),
            'fallbacks': ['en', ],
            'public': True,
            'redirect_on_fallback': False,
        },
    ],
    # Defaults applied to languages not configured explicitly above.
    'default': {
        'fallbacks': ['en'],
        'redirect_on_fallback': True,
        'public': True,
        'hide_untranslated': False,
    }
}

TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.9/howto/static-files/
STATIC_URL = '/static/'

# Page templates selectable in the CMS admin: (template path, label) pairs.
CMS_TEMPLATES = (('minicms/page_jumbotron.html', 'Jumbotron with 3 secondary columns',),
                 ('minicms/page_sections.html', 'Three fluid sections',),)
CMS_WYSIWYG_EDITOR = 'tinymce'
TINYMCE_DEFAULT_CONFIG = {
    'theme': "advanced", # default value
    'relative_urls': False, # default value
    'width': '100%',
    'height': 500,
    'extended_valid_elements': 'main,section[data-section-id]',
    'content_css': STATIC_URL + 'minicms/css/editor.css'
}
| StarcoderdataPython |
1617052 | <reponame>TheTacoScott/GoAtThrottleUp<filename>ServerRelay/gatu/globals.py
import threading
class camera_data(object):
    """Thread-safe holder for one camera's most recent image payload."""

    def __init__(self, id):
        self.id = id
        self.img_data = ""
        self.img_lock = threading.Lock()
        self.img_updated = -1.0

    def setdata(self, data):
        """Replace the stored image payload, guarded by the lock."""
        with self.img_lock:
            self.img_data = data

    def getdata(self):
        """Return the stored image payload, read under the lock."""
        with self.img_lock:
            return self.img_data
# One shared holder per camera feed (IDs 1-12).
camera01 = camera_data(1)
camera02 = camera_data(2)
camera03 = camera_data(3)
camera04 = camera_data(4)
camera05 = camera_data(5)
camera06 = camera_data(6)
camera07 = camera_data(7)
camera08 = camera_data(8)
camera09 = camera_data(9)
camera10 = camera_data(10)
camera11 = camera_data(11)
camera12 = camera_data(12)

# Shared telemetry snapshots at three rates, each guarded by its own lock.
# The *_updated values start at -1, apparently meaning "never refreshed" —
# confirm against the code that writes them.
low_data_lock = threading.Lock()
low_data = {}
low_data_updated = -1
med_data_lock = threading.Lock()
med_data = {}
med_data_updated = -1
high_data_lock = threading.Lock()
high_data = {}
high_data_updated = -1

# Telemetry keys whose values are strings / booleans; presumably used by
# consumers to coerce raw values — verify against the reader code.
stringTypes = ["tar.name","body.name","v.body","v.encounter.body","o.n.body","o.body","o.n.type","o.type"]
boolTypes = ["p.paused","v.rcsValue","v.sasValue","v.lightValue","v.brakeValue","v.gearValue"]
3228945 | import base64
import os
import math
import time
import requests
import json
from config import config
from api_client.url_helpers.internal_app_url import get_chunk_upload_url
from Logs.log_configuration import configure_logger
from models.api_header_model import RequestHeader
log = configure_logger('default')
def chunk_upload(file_source_path):
    """
    Uploads the file in chunks to the AirWatch server.

    Fixes two chunking bugs from the original implementation:
    * each chunk now reads exactly ``end - start`` bytes — previously
      ``file.read(end)`` read up to ``end`` bytes from the current offset,
      so later chunks were oversized and overlapped;
    * the next chunk begins where the previous one ended — previously
      ``start`` was advanced by ``start + end``, skipping file data.

    :param file_source_path: Path of the file to be uploaded.
    :return: (True, transaction_id) on success, (False, 0) on failure.
    """
    file_size = os.path.getsize(file_source_path)
    api_url = get_chunk_upload_url()
    headers = RequestHeader().header
    with open(file_source_path, 'rb') as file:
        start = 0
        chunk_count = math.ceil(file_size / config.MAX_UPLOAD_BYTE_LENGTH)
        retry_timeout = 0.300  # seconds to pause between chunk posts
        sent_chunk_count = 0
        transaction_id = ''
        log.debug('File Total chunk count:{count} with transaction {id}'.format(count=chunk_count, id=transaction_id))
        while True:
            current_chunk_count = sent_chunk_count + 1
            log.debug('Uploading chunk number: {}'.format(current_chunk_count))
            end = min(file_size, start + config.MAX_UPLOAD_BYTE_LENGTH)
            file.seek(start)
            chunk_data = file.read(end - start)  # read exactly this chunk
            base64_file = base64.b64encode(chunk_data).decode('ascii')
            internal_app_chunk_value = {
                'TransactionId': str(transaction_id),
                'ChunkData': base64_file,
                'ChunkSize': end - start,
                'ChunkSequenceNumber': current_chunk_count,
                'TotalApplicationSize': file_size
            }
            payload = json.dumps(internal_app_chunk_value)
            start = end  # next chunk begins where this one ended
            try:
                response = requests.post(api_url, headers=headers, data=payload)
                if not response.ok:
                    log.error(f'{response.status_code}, {response.reason}')
                    log.debug(f'{response.content}')
                    return False, 0
                response_data = json.loads(response.content)
                if not response_data['UploadSuccess']:
                    return False, 0
                log.debug('{}. chunk sent to server'.format(current_chunk_count))
                sent_chunk_count = current_chunk_count
                # NOTE: the server response spells this key 'TranscationId'.
                transaction_id = response_data['TranscationId']
            except Exception as e:
                log.error('Upload chunk failed with exception: {}'.format(str(e)))
            # Pause between attempts; failed chunks are retried on the next
            # pass because sent_chunk_count was not advanced.
            time.sleep(retry_timeout)
            if sent_chunk_count >= chunk_count:
                return True, transaction_id
| StarcoderdataPython |
3367921 | <filename>src/parser.py
# coding=utf-8
import csv
# Classe que modela o conjunto de treinamento/teste
class Data:
    """One labelled tweet from the training/test set."""

    def __init__(self, tweet, target, stance, opinion_towards, sentiment):
        # 'tweet' is the working copy (preprocessed later);
        # 'original_tweet' keeps the raw text untouched.
        self.tweet = tweet
        self.original_tweet = tweet
        self.target = target
        self.stance = stance
        self.opinionTowards = opinion_towards
        self.sentiment = sentiment
class CsvReader:
    """Reads a stance-detection CSV file into a list of ``Data`` records."""

    def __init__(self, file_name):
        self.file_name = file_name

    def parse(self):
        """Parse the CSV and return its rows (header row excluded) as Data.

        The file is opened with ``newline=''`` as the csv module requires;
        the original ``'rU'`` mode was deprecated and removed in Python 3.11.
        A context manager guarantees the handle is closed.
        """
        tweets = []
        with open(self.file_name, newline='') as file:
            reader = csv.reader(file, delimiter=',')
            for row in reader:
                tweets.append(Data(row[0], row[1], row[2], row[3], row[4]))
        return tweets[1:]
# Manual test hook (currently a no-op).
if __name__ == '__main__':
    pass
4812571 | <filename>tests/v1/python/accounts/test_serializers.py
import responses
from rest_framework import status
from rest_framework.test import APITestCase
from accounts.v1.serializers import LoginRegistrationSerializer, OneSignalSerializer, UserRegistrationSerializer, \
UserEmailUpdateSerializer, UserAppVersionUpdateSerializer
from lib.v1.testutils import CustomTestCase
from tests.v1.python.accounts.test_models import UserFactory
class LoginRegistrationSerializerTest(CustomTestCase, APITestCase):
    """Field and validation tests for ``LoginRegistrationSerializer``."""

    # Each entry: payload, expected (field, messages) error, a readable
    # label, HTTP method and expected status code.
    INVALID_DATA_DICT = [
        {'data': {'email': '<EMAIL>',
                  'facebook_uid': 'test',
                  'first_name': 'test',
                  'last_name': 'user',
                  'gender': 'user',
                  'facebook_access_token': 'test'},
         'error': ('email', ['Email is invalid.']),
         'label': 'Invalid email.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         },
        {'data': {'email': '<EMAIL>',
                  'facebook_uid': 'test',
                  'first_name': 'test',
                  'last_name': 'user',
                  'gender': 'user',
                  'facebook_access_token': 'test',
                  'app_version': '1.0.0'},
         'error': ('app_version', ['App version not valid.']),
         'label': 'Invalid app version format.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         }
    ]

    VALID_DATA_DICT = [
        {'facebook_access_token': 'xxxxxxxxxxx',
         'first_name': 'Jon',
         'last_name': 'Doe',
         'email': '<EMAIL>',
         'gender': 'male',
         'facebook_uid': '1234567890'},
        {'facebook_access_token': 'xxxxxxxxxxx',
         'first_name': '',
         'last_name': '',
         'email': '',
         'gender': '',
         'facebook_uid': '1234567890'},
        {'facebook_access_token': 'xxxxxxxxxxx',
         'first_name': 'Jon',
         'last_name': 'Doe',
         'email': '<EMAIL>',
         'gender': 'male',
         'facebook_uid': '1234567890',
         'app_version': '1.0'},
        {'facebook_access_token': '<PASSWORD>',
         'facebook_uid': '1234567890'}
    ]

    def setUp(self):
        self.required_fields = ['facebook_uid', 'facebook_access_token']
        self.not_required_fields = ['first_name', 'last_name', 'gender', 'email', 'branch_data', 'app_version', 'id']
        self.user = UserFactory.create(email='<EMAIL>')
        # Canned Facebook Graph API responses used by the mocked requests.
        self.fb_long_lived_token = {'access_token': '<PASSWORD>'}
        self.fb_user_info = {'first_name': 'Jon',
                             'last_name': 'Doe',
                             'email': '<EMAIL>',
                             'gender': 'male',
                             'id': '1234567890'}

    def test_fields(self):
        # The serializer must expose exactly the required + optional sets.
        serializer = LoginRegistrationSerializer()
        self.assert_fields_required(True, serializer, self.required_fields)
        self.assert_fields_required(False, serializer, self.not_required_fields)
        self.assertEqual(len(serializer.fields), len(self.required_fields) + len(self.not_required_fields))

    def test_invalid_data(self):
        serializer = LoginRegistrationSerializer
        self.assert_invalid_data(serializer, self.INVALID_DATA_DICT)

    @responses.activate
    def test_valid_data(self):
        serializer = LoginRegistrationSerializer
        # Mock facebook 'get information' request
        responses.add(responses.GET, 'https://graph.facebook.com/v2.5/me',
                      json=self.fb_user_info, status=status.HTTP_200_OK, content_type='application/json')
        # Mock facebook 'get long lived token' request
        responses.add(responses.GET, 'https://graph.facebook.com/v2.5/oauth/access_token',
                      json=self.fb_long_lived_token, status=status.HTTP_200_OK, content_type='application/json')
        self.assert_valid_data(serializer, self.VALID_DATA_DICT)
class OneSignalSerializerTest(CustomTestCase, APITestCase):
    """Field and validation tests for ``OneSignalSerializer``."""

    # No invalid payloads are defined for this serializer.
    INVALID_DATA_DICT = []

    VALID_DATA_DICT = [
        {'one_signal_id': '1234567890'},
        {'one_signal_id': ''}
    ]

    def setUp(self):
        self.required_fields = ['one_signal_id']
        self.not_required_fields = []

    def test_fields(self):
        serializer = OneSignalSerializer()
        self.assert_fields_required(True, serializer, self.required_fields)
        self.assert_fields_required(False, serializer, self.not_required_fields)
        self.assertEqual(len(serializer.fields), len(self.required_fields) + len(self.not_required_fields))

    def test_invalid_data(self):
        serializer = OneSignalSerializer
        self.assert_invalid_data(serializer, self.INVALID_DATA_DICT)

    def test_valid_data(self):
        serializer = OneSignalSerializer
        self.assert_valid_data(serializer, self.VALID_DATA_DICT)
class UserEmailUpdateSerializerTest(CustomTestCase, APITestCase):
    """Field and validation tests for ``UserEmailUpdateSerializer``."""

    INVALID_DATA_DICT = [
        {'data': {'email': '<EMAIL>'},
         'error': ('email', ['Email is invalid.']),
         'label': 'Invalid email.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         },
    ]

    VALID_DATA_DICT = [
        {'email': '<EMAIL>'}
    ]

    def setUp(self):
        self.required_fields = ['email']
        self.not_required_fields = []

    def test_fields(self):
        serializer = UserEmailUpdateSerializer()
        self.assert_fields_required(True, serializer, self.required_fields)
        self.assert_fields_required(False, serializer, self.not_required_fields)
        self.assertEqual(len(serializer.fields), len(self.required_fields) + len(self.not_required_fields))

    def test_invalid_data(self):
        serializer = UserEmailUpdateSerializer
        self.assert_invalid_data(serializer, self.INVALID_DATA_DICT)

    def test_valid_data(self):
        serializer = UserEmailUpdateSerializer
        self.assert_valid_data(serializer, self.VALID_DATA_DICT)
class UserAppVersionUpdateSerializerTest(CustomTestCase, APITestCase):
    """Field and validation tests for ``UserAppVersionUpdateSerializer``."""

    INVALID_DATA_DICT = [
        {'data': {'app_version': '1.0.0.'},
         'error': ('app_version', ['App version not valid.']),
         'label': 'Invalid app version format.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         },
    ]

    VALID_DATA_DICT = [
        {'app_version': '1.0'}
    ]

    def setUp(self):
        self.required_fields = ['app_version']
        self.not_required_fields = []

    def test_fields(self):
        serializer = UserAppVersionUpdateSerializer()
        self.assert_fields_required(True, serializer, self.required_fields)
        self.assert_fields_required(False, serializer, self.not_required_fields)
        self.assertEqual(len(serializer.fields), len(self.required_fields) + len(self.not_required_fields))

    def test_invalid_data(self):
        serializer = UserAppVersionUpdateSerializer
        self.assert_invalid_data(serializer, self.INVALID_DATA_DICT)

    def test_valid_data(self):
        serializer = UserAppVersionUpdateSerializer
        self.assert_valid_data(serializer, self.VALID_DATA_DICT)
class UserRegistrationSerializerTest(CustomTestCase, APITestCase):
    """Field and validation tests for ``UserRegistrationSerializer``."""

    # Each entry: payload, expected (field, messages) error, a readable
    # label, HTTP method and expected status code.
    INVALID_DATA_DICT = [
        {'data': {'email': '<EMAIL>',
                  'name': '<NAME>',
                  'password': '<PASSWORD>',
                  'confirm_password': '<PASSWORD>',
                  'facebook_uid': 'randomfacebookuid'},
         'error': ('email', ['Please use a different email address provider.']),
         'label': 'Invalid email.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         },
        {'data': {'email': '<EMAIL>',
                  'name': '<NAME>',
                  'password': '<PASSWORD>',
                  'confirm_password': '<PASSWORD>',
                  'facebook_uid': 'randomfacebookuid'},
         'error': ('email', ['Enter a valid email address.']),
         'label': 'Bad email format.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         },
        {'data': {'email': '<EMAIL>',
                  'name': '<NAME>',
                  'password': '<PASSWORD>',
                  'confirm_password': '<PASSWORD>',
                  'app_version': '2.0.0',
                  'facebook_uid': 'randomfacebookuid'},
         'error': ('app_version', ['App version not valid.']),
         'label': 'App version not valid.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         },
        {'data': {'email': '<EMAIL>',
                  'name': '<NAME>',
                  'password': '<PASSWORD>',
                  'confirm_password': '<PASSWORD>',
                  'app_version': '2.0',
                  'facebook_uid': 'randomfacebookuid'},
         'error': ('email', ['Email already in use, please use a different email address.']),
         'label': 'User with email already exists.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         },
        {'data': {'email': '<EMAIL>',
                  'name': '<NAME>',
                  'password': '<PASSWORD>',
                  'confirm_password': '<PASSWORD>',
                  'app_version': '2.0',
                  'facebook_uid': 'randomfacebookuid'},
         'error': ('non_field_errors', ['Passwords don\'t match.']),
         'label': 'Password and confirm password don\'t match.',
         'method': 'POST',
         'status': status.HTTP_400_BAD_REQUEST
         }
    ]

    VALID_DATA_DICT = [
        {'email': '<EMAIL>',
         'name': '<NAME>',
         'password': '<PASSWORD>',
         'confirm_password': '<PASSWORD>',
         'facebook_uid': 'randomfacebookuid_1'},
        {'email': '<EMAIL>',
         'name': '<NAME>',
         'app_version': '1.0',
         'password': '<PASSWORD>',
         'confirm_password': 'test',
         'facebook_uid': 'randomfacebookuid_2'},
    ]

    def setUp(self):
        self.required_fields = ['email', 'name', 'password', '<PASSWORD>_password', 'facebook_uid']
        self.not_required_fields = ['app_version', 'branch_data']
        # Pre-existing user used by the "email already exists" case above.
        self.user = UserFactory(email='<EMAIL>')

    def test_fields(self):
        serializer = UserRegistrationSerializer()
        self.assert_fields_required(True, serializer, self.required_fields)
        self.assert_fields_required(False, serializer, self.not_required_fields)
        self.assertEqual(len(serializer.fields), len(self.required_fields) + len(self.not_required_fields))

    def test_invalid_data(self):
        serializer = UserRegistrationSerializer
        self.assert_invalid_data(serializer, self.INVALID_DATA_DICT)

    def test_valid_data(self):
        serializer = UserRegistrationSerializer
        self.assert_valid_data(serializer, self.VALID_DATA_DICT)
| StarcoderdataPython |
3271218 | <reponame>amochtar/adventofcode<gh_stars>1-10
#!/usr/bin/env python
import aoc
@aoc.timing
def part1(inp):
    """Count output digits drawn with a unique segment count (1, 4, 7, 8)."""
    unique_lengths = {2, 3, 4, 7}
    total = 0
    for line in inp.splitlines():
        outputs = line.split(' | ', 1)[1].split(' ')
        total += sum(1 for digit in outputs if len(digit) in unique_lengths)
    return total
@aoc.timing
def part2(inp):
    """Decode each line's four output digits and sum the resulting numbers."""

    def digit_mapping(digits):
        # Deduce which scrambled segment-set stands for each digit 0-9,
        # returning {sorted-segment-string: digit}.
        mapping = {}
        # map unique digits 1, 4, 7, 8 (identified by segment count alone)
        for d in digits:
            if len(d) == 2:
                mapping[1] = set(d)
            elif len(d) == 3:
                mapping[7] = set(d)
            elif len(d) == 4:
                mapping[4] = set(d)
            elif len(d) == 7:
                mapping[8] = set(d)
        assert len(mapping) == 4
        for d in digits:
            # map digits 2, 3 and 5 (all len 5)
            if len(d) == 5:
                # 3 is the only 5-segment digit containing all of 1 (or 7);
                # 2 overlaps 4 in exactly two segments; otherwise it is 5.
                if len(set(d).intersection(mapping[1])) == 2 or len(set(d).intersection(mapping[7])) == 3:
                    mapping[3] = set(d)
                elif len(set(d).intersection(mapping[4])) == 2:
                    mapping[2] = set(d)
                else:
                    mapping[5] = set(d)
            # map digits 0, 6 and 9 (all len 6)
            if len(d) == 6:
                # 6 shares only one segment with 1; 9 contains all of 4;
                # otherwise it is 0.
                if len(set(d).intersection(mapping[1])) == 1 or len(set(d).intersection(mapping[7])) == 2:
                    mapping[6] = set(d)
                elif len(set(d).intersection(mapping[4])) == 4:
                    mapping[9] = set(d)
                else:
                    mapping[0] = set(d)
        assert len(mapping) == 10
        # reverse mapping for easy lookup
        return {''.join(sorted(v)): k for k, v in mapping.items()}

    total_value = 0
    for line in inp.splitlines():
        ins, outs = (part.split(' ') for part in line.split(' | ', 1))
        mapping = digit_mapping(ins+outs)
        # Combine the four decoded output digits into one 4-digit number.
        total_value += sum(10**(3-i)*mapping[''.join(sorted(out))]
                           for i, out in enumerate(outs))
    return total_value
# Local sample run, kept for convenience:
# with open('test.txt', 'r') as f:
#     inp = f.read()
# print("Part 1:", part1(inp))
# print("Part 2:", part2(inp))

with open('input.txt', 'r') as f:
    inp = f.read()
print("Part 1:", part1(inp))
print("Part 2:", part2(inp))
| StarcoderdataPython |
198644 | import six
from pubnub import utils
from pubnub.endpoints.endpoint import Endpoint
from pubnub.enums import HttpMethod, PNOperationType
from pubnub.models.consumer.history import PNHistoryResult
class History(Endpoint):
    """Fluent builder for PubNub's channel-history request (/v2/history)."""

    HISTORY_PATH = "/v2/history/sub-key/%s/channel/%s"
    # Upper bound accepted for the per-request message count.
    MAX_COUNT = 100

    def __init__(self, pubnub):
        Endpoint.__init__(self, pubnub)
        # Builder state; each setter below assigns one field and returns self.
        self._channel = None
        self._start = None
        self._end = None
        self._reverse = None
        self._count = None
        self._include_timetoken = None

    def channel(self, channel):
        self._channel = channel
        return self

    def start(self, start):
        assert isinstance(start, six.integer_types)
        self._start = start
        return self

    def end(self, end):
        assert isinstance(end, six.integer_types)
        self._end = end
        return self

    def reverse(self, reverse):
        assert isinstance(reverse, bool)
        self._reverse = reverse
        return self

    def count(self, count):
        assert isinstance(count, six.integer_types)
        self._count = count
        return self

    def include_timetoken(self, include_timetoken):
        assert isinstance(include_timetoken, bool)
        self._include_timetoken = include_timetoken
        return self

    def custom_params(self):
        # Translate builder state into query-string parameters.
        params = {}

        if self._start is not None:
            params['start'] = str(self._start)

        if self._end is not None:
            params['end'] = str(self._end)

        # Unset or out-of-range counts fall back to the maximum of 100.
        if self._count is not None and 0 < self._count <= History.MAX_COUNT:
            params['count'] = str(self._count)
        else:
            params['count'] = '100'

        if self._reverse is not None:
            params['reverse'] = "true" if self._reverse else "false"

        if self._include_timetoken is not None:
            params['include_token'] = "true" if self._include_timetoken else "false"

        return params

    def build_path(self):
        return History.HISTORY_PATH % (
            self.pubnub.config.subscribe_key,
            utils.url_encode(self._channel)
        )

    def http_method(self):
        return HttpMethod.GET

    def is_auth_required(self):
        return True

    def validate_params(self):
        self.validate_subscribe_key()
        self.validate_channel()

    def create_response(self, envelope):
        return PNHistoryResult.from_json(envelope, self._include_timetoken, self.pubnub.config.cipher_key)

    def request_timeout(self):
        return self.pubnub.config.non_subscribe_request_timeout

    def connect_timeout(self):
        return self.pubnub.config.connect_timeout

    def operation_type(self):
        return PNOperationType.PNHistoryOperation

    def name(self):
        return "History"
| StarcoderdataPython |
1767256 | <reponame>object-oriented-human/competitive
def _digit_sum(value):
    """Sum of the decimal digits of a non-negative integer."""
    return sum(int(ch) for ch in str(value))


# For each input n (until a terminating 0), print the smallest multiplier p
# such that digitsum(n * p) == digitsum(n). The search starts at p = 11: the
# original loop compared against an initial 0, so it never accepted p = 10
# even though multiplying by 10 always preserves the digit sum.
n = int(input())
while n != 0:
    target = _digit_sum(n)
    p = 11
    while _digit_sum(n * p) != target:
        p += 1
    print(p)
    n = int(input())
#!/usr/bin/env python
"""Reverse DNS lookup for a list of IPs separated by a new line
Should work with python 2.7.15 and 3.6.5"""
import argparse
import json
import os
import socket
import sys

PARSER = argparse.ArgumentParser(
    description="Reverse DNS lookup for a list of IPs separated by a new line"
)
PARSER.add_argument(
    "-i",
    "--input",
    required=True,
    help="Input file with a list of IPs separated by a new line",
)
PARSER.add_argument(
    "-o",
    "--output",
    help="Output file with a list of IP:hostname pairs separated by a new line",
)
ARGS = PARSER.parse_args()

if not os.path.isfile(ARGS.input):
    print("{} does not exist".format(ARGS.input))
    sys.exit(1)

# Read inside a context manager so the handle is always closed (the
# original opened the file and never closed it).
with open(ARGS.input, "r") as in_file:
    CONTENTS = in_file.readlines()

IPS_HOSTNAMES = {}
for ip in CONTENTS:
    ip = ip.rstrip()
    if ip:
        try:
            hostname, alias, addresslist = socket.gethostbyaddr(ip)
        except (socket.error, socket.herror, socket.gaierror):
            # Unresolvable addresses map to an empty hostname.
            hostname = ""
        IPS_HOSTNAMES[ip] = hostname
        # Without an output file, stream results to stdout as we go.
        if ARGS.output is None:
            print("{}:{}".format(ip, hostname))

if ARGS.output:
    with open(ARGS.output, "w") as a_file:
        a_file.write(json.dumps(IPS_HOSTNAMES, indent=2))
1635697 | <filename>Lib/test/test_cinder.py<gh_stars>0
# Copyright (c) Facebook, Inc. and its affiliates. (http://www.facebook.com)
import asyncio
import asyncio.tasks
import cinder
import inspect
import sys
import unittest
import weakref
from cinder import (
async_cached_classproperty,
async_cached_property,
cached_classproperty,
cached_property,
strict_module_patch,
StrictModule,
)
from functools import wraps
from textwrap import dedent
from types import CodeType, FunctionType, GeneratorType, ModuleType
from typing import List, Tuple
from test.support import gc_collect, requires_type_collecting, temp_dir, TESTFN, unlink
from test.support.cinder import get_await_stack, verify_stack
from test.support.script_helper import assert_python_ok, make_script
class NoWayError(Exception):
    """Distinct exception type raised deliberately by these tests."""
    pass
class LeakDetector:
    """Records its own finalization by flipping ``finalized[0]`` to True.

    A test holds a one-element list, wraps it in a LeakDetector, drops all
    references, then inspects the list to confirm the object was collected.
    """

    def __init__(self, finalized):
        self.finalized = finalized

    def __del__(self):
        # Mutate the shared cell so the observer can see finalization happened.
        self.finalized[0] = True
def create_strict_module(
    name="foo", filename="foo.py", enable_patching=False, **kwargs
):
    # Build a StrictModule whose namespace holds **kwargs plus the standard
    # __name__/__file__ module attributes.
    kwargs.update(__name__=name, __file__=filename)
    return StrictModule(kwargs, enable_patching)
def strict_module_from_module(mod, enable_patching=False):
    # Snapshot the module's current namespace into a new StrictModule.
    return StrictModule(dict(mod.__dict__), enable_patching)
class CinderTest(unittest.TestCase):
def test_type_cache(self):
class C:
x = 42
a = C()
self.assertEqual(a.x, 42)
sys._clear_type_cache()
C.x = 100
self.assertEqual(a.x, 100)
def test_recompute_func_entry_for_defaults(self):
"""Update function __defaults__ *after* creation
Function entry point should be re-computed
"""
# sanity check
def foofunc(a, b):
return a + b
self.assertEqual(foofunc(1, 2), 3)
# error due to missing positional arguments
with self.assertRaises(TypeError):
foofunc()
foofunc.__defaults__ = (3, 4)
self.assertEqual(foofunc(), 7)
def test_recompute_func_entry_for_kwonly(self):
"""Change function __code__ after creation, adding kwonly args
Function entry point should be re-computed
"""
def f():
return "f"
def kwonly(*, a, b):
return "kwonly"
self.assertEqual(f(), "f")
f.__code__ = kwonly.__code__
with self.assertRaises(TypeError):
f()
with self.assertRaises(TypeError):
f(1, 2)
self.assertEqual(f(a=1, b=2), "kwonly")
@unittest.skip("re-enable once shadowcode is ported")
def test_knob(self):
try:
knobs = cinder.getknobs()
original = knobs["shadowcode"]
cinder.setknobs({"shadowcode": not original})
knobs = cinder.getknobs()
self.assertEqual(knobs["shadowcode"], not original)
finally:
cinder.setknobs({"shadowcode": original})
knobs = cinder.getknobs()
self.assertEqual(knobs["shadowcode"], original)
@requires_type_collecting
def test_global_del_SystemExit(self):
code = """if 1:
import cinder
cinder.setknobs({'skipfinalcleanup': True})
class ClassWithDel:
def __del__(self):
print('__del__ called')
a = ClassWithDel()
a.link = a
raise SystemExit(0)"""
self.addCleanup(unlink, TESTFN)
with open(TESTFN, "w") as script:
script.write(code)
rc, out, err = assert_python_ok(TESTFN)
self.assertEqual(out.strip(), b"")
def test_type_freeze(self):
class C:
pass
cinder.freeze_type(C)
with self.assertRaisesRegex(
TypeError, "type 'C' has been frozen and cannot be modified"
):
C.foo = 42
class D:
x = 42
cinder.freeze_type(D)
with self.assertRaisesRegex(
TypeError, "type 'D' has been frozen and cannot be modified"
):
D.foo = 42
with self.assertRaisesRegex(
TypeError, "type 'D' has been frozen and cannot be modified"
):
del D.foo
def test_type_freeze_bad_arg(self):
with self.assertRaisesRegex(TypeError, "freeze_type requires a type"):
cinder.freeze_type(42)
def test_cached_class_prop(self):
class C:
@cached_classproperty
def f(self):
return 42
self.assertEqual(C.f, 42)
def test_cached_class_prop_subtype(self):
class ST(cached_classproperty):
pass
class C:
@ST
def f(self):
return 42
self.assertEqual(C.f, 42)
def test_cached_class_prop_called_once(self):
class C:
calls = 0
@cached_classproperty
def f(cls):
cls.calls += 1
return 42
self.assertEqual(C.f, 42)
self.assertEqual(C.f, 42)
self.assertEqual(C.calls, 1)
def test_cached_class_prop_descr(self):
"""verifies the descriptor protocol isn't invoked on the cached value"""
class classproperty:
def __get__(self, inst, ctx):
return 42
clsprop = classproperty()
class C:
@cached_classproperty
def f(cls):
return clsprop
self.assertEqual(C.f, clsprop)
self.assertEqual(C.f, clsprop)
    # --- cached_classproperty: caching, descriptor protocol, inheritance ---
    def test_cached_class_prop_descr_raises(self):
        """The cached value is stored as-is: a descriptor returned by the
        property function is not re-invoked on later access, and deleting the
        class attribute releases the cached object (observed via LeakDetector
        finalization)."""
        class classproperty(LeakDetector):
            def __get__(self, inst, ctx):
                raise NoWayError()
        finalized = [False]
        class C:
            @cached_classproperty
            def f(cls):
                return classproperty(finalized)
        x = C.f
        # descriptor is cached in the type...
        self.assertEqual(finalized, [False])
        # and we can still invoke it
        x = C.f
        self.assertEqual(type(C.__dict__["f"]), cached_classproperty)
        del C.f
        del x
        self.assertEqual(finalized, [True])
    def test_cached_class_prop_inst_method(self):
        """verifies the descriptor protocol isn't invoked on the cached value"""
        class C:
            def __init__(self, value):
                self.value = value
            @cached_classproperty
            def f(cls):
                return lambda self: self.value
        # the cached lambda is returned unbound: it acts on the instance
        # passed explicitly (100), not the one it was accessed through (42)
        self.assertEqual(C(42).f(C(100)), 100)
    def test_cached_class_prop_inheritance(self):
        class C:
            @cached_classproperty
            def f(cls):
                return cls.__name__
        class D(C):
            pass
        # first access through C caches "C"; D then observes the cached value
        self.assertEqual(C.f, "C")
        self.assertEqual(D.f, "C")
    def test_cached_class_prop_inheritance_reversed(self):
        class C:
            @cached_classproperty
            def f(cls):
                return cls.__name__
        class D(C):
            pass
        # first access through D caches "D"; C then observes the cached value
        self.assertEqual(D.f, "D")
        self.assertEqual(C.f, "D")
    def test_cached_class_prop_recursion(self):
        depth = 0
        class C:
            @cached_classproperty
            def f(cls):
                nonlocal depth
                depth += 1
                if depth == 2:
                    return 2
                x = C.f
                return 1
        # the innermost (recursive) computation's value is what gets cached
        self.assertEqual(C.f, 2)
    def test_cached_class_prop_inst_method_no_inst(self):
        class C:
            def __init__(self, value):
                self.value = value
            @cached_classproperty
            def f(cls):
                return lambda self: self.value
        # accessed via the class, the cached lambda stays a plain function
        self.assertEqual(type(C.f), FunctionType)
    def test_cached_class_prop_inst(self):
        class C:
            @cached_classproperty
            def f(cls):
                return 42
        # the class-level property is also readable through an instance
        self.assertEqual(C().f, 42)
    def test_cached_class_prop_frozen_type(self):
        class C:
            @cached_classproperty
            def f(cls):
                return 42
        # caching must still work when the type is frozen before first access
        cinder.freeze_type(C)
        self.assertEqual(C.f, 42)
    def test_cached_class_prop_frozen_type_inst(self):
        class C:
            @cached_classproperty
            def f(cls):
                return 42
        cinder.freeze_type(C)
        self.assertEqual(C().f, 42)
    def test_cached_class_prop_setattr_fails(self):
        # caching must not go through type.__setattr__ (which raises here)
        class metatype(type):
            def __setattr__(self, name, value):
                if name == "f":
                    raise NoWayError()
        class C(metaclass=metatype):
            @cached_classproperty
            def f(self):
                return 42
        self.assertEqual(C.f, 42)
    def test_cached_class_prop_doc(self):
        class C:
            @cached_classproperty
            def f(cls):
                "hi"
                return 42
        # __doc__/name/func are exposed on the descriptor itself
        self.assertEqual(C.__dict__["f"].__doc__, "hi")
        self.assertEqual(C.__dict__["f"].name, "f")
        self.assertEqual(type(C.__dict__["f"].func), FunctionType)
    # --- cinder.warn_on_inst_dict / warn-handler callbacks ---
    def test_warn_on_type_dict_non_type(self):
        # only types can be flagged
        with self.assertRaises(TypeError):
            cinder.warn_on_inst_dict(42)
    def test_warn_on_type_dict_no_callback(self):
        # flagging a type without a registered handler is harmless
        class C:
            pass
        cinder.warn_on_inst_dict(C)
        a = C()
        a.foo = 42
        self.assertEqual(a.foo, 42)
    def test_warn_on_type_dict(self):
        class C:
            pass
        msg = type = attr = None
        def cb(*args):
            nonlocal msg, type, attr
            msg = args[0]
            type = args[1]
            attr = args[2]
        cinder.cinder_set_warn_handler(cb)
        try:
            cinder.warn_on_inst_dict(C)
            a = C()
            a.foo = 42
            self.assertEqual(msg, "WARN001: Dictionary created for flagged instance")
            self.assertEqual(type, C)
            self.assertEqual(attr, "foo")
            self.assertEqual(a.foo, 42)
            # only dict *creation* warns: the second attribute set does not
            # re-fire, so the recorded attr stays "foo"
            a.bar = 42
            self.assertEqual(msg, "WARN001: Dictionary created for flagged instance")
            self.assertEqual(type, C)
            self.assertEqual(attr, "foo")
        finally:
            cinder.cinder_set_warn_handler(None)
    def test_warn_on_type_dict_non_split_keys(self):
        class C:
            pass
        msg = type = attr = None
        def cb(*args):
            nonlocal msg, type, attr
            msg = args[0]
            type = args[1]
            attr = args[2]
        cinder.cinder_set_warn_handler(cb)
        try:
            cinder.warn_on_inst_dict(C)
            # give instances divergent attribute sets so the instance dicts
            # stop sharing (split) keys; each fresh dict still warns
            a = C()
            a.foo = 42
            a.bar = 100
            a = C()
            a.baz = 100
            a = C()
            a.quox = 100
            self.assertEqual(msg, "WARN001: Dictionary created for flagged instance")
            self.assertEqual(type, C)
            self.assertEqual(attr, "quox")
            self.assertEqual(a.quox, 100)
        finally:
            cinder.cinder_set_warn_handler(None)
    def test_raise_immutability_warning(self):
        """Immutability warnings are buffered until explicitly flushed to the
        registered handler."""
        warns = None
        def log_warnings(warnings: List[Tuple[int, str, object]]):
            nonlocal warns
            warns = warnings
        cinder.set_immutable_warn_handler(None)
        self.assertEqual(cinder.get_immutable_warn_handler(), None)
        # raised with no handler installed: buffered, not delivered
        cinder.raise_immutable_warning(0, "test", "test1")
        self.assertEqual(warns, None)
        cinder.set_immutable_warn_handler(log_warnings)
        # still buffered even with a handler installed...
        cinder.raise_immutable_warning(0, "test", "test2")
        self.assertEqual(cinder.get_immutable_warn_handler(), log_warnings)
        self.assertEqual(warns, None)
        # ...until flush delivers both, in raise order
        cinder.flush_immutable_warnings()
        self.assertListEqual(
            warns,
            [
                (0, "test", "test1"),
                (0, "test", "test2"),
            ],
        )
        cinder.set_immutable_warn_handler(None)
    # --- cached_property (instance __dict__-backed variant) ---
    def test_cached_property(self):
        class C:
            def __init__(self):
                self.calls = 0
            @cached_property
            def f(self):
                self.calls += 1
                return 42
        a = C()
        self.assertEqual(a.f, 42)
        self.assertEqual(a.calls, 1)
        # second access hits the cache; the getter is not called again
        self.assertEqual(a.f, 42)
        self.assertEqual(a.calls, 1)
    def test_cached_property_subtype(self):
        # subclasses of cached_property keep the caching behavior
        class ST(cached_property):
            pass
        class C:
            def __init__(self):
                self.calls = 0
            @ST
            def f(self):
                self.calls += 1
                return 42
        a = C()
        self.assertEqual(a.f, 42)
        self.assertEqual(a.calls, 1)
        self.assertEqual(a.f, 42)
        self.assertEqual(a.calls, 1)
    def test_cached_property_loop(self):
        # repeated access keeps yielding the same cached object
        val = object()
        class C:
            @cached_property
            def f(self):
                return val
        a = C()
        for i in range(1000):
            x = a.f
            self.assertEqual(x, val)
    def test_cached_property_raises(self):
        # exceptions from the getter propagate; nothing is cached
        class C:
            @cached_property
            def f(self):
                raise NoWayError()
        with self.assertRaises(NoWayError):
            C().f
    def test_cached_property_raising_set(self):
        # an explicit assignment bypasses the (raising) getter entirely
        class C:
            @cached_property
            def f(self):
                raise NoWayError()
        a = C()
        a.f = 42
        self.assertEqual(a.f, 42)
    def test_cached_property_clear(self):
        # clear() drops the cached value so the next access recomputes
        value = 42
        class C:
            @cached_property
            def f(self):
                return value
        a = C()
        self.assertEqual(a.f, 42)
        C.f.clear(a)
        value = 100
        self.assertEqual(a.f, 100)
    def test_cached_property_has_value(self):
        value = 42
        class C:
            @cached_property
            def f(self):
                return value
        a = C()
        self.assertEqual(a.f, 42)
        self.assertEqual(C.f.has_value(a), True)
        C.f.clear(a)
        self.assertEqual(C.f.has_value(a), False)
    def test_cached_property_clear_not_set(self):
        # clearing before the first access is a no-op
        class C:
            @cached_property
            def f(self):
                return 42
        a = C()
        C.f.clear(a)
        self.assertEqual(a.f, 42)
    def test_cached_property_no_dict(self):
        # without an instance __dict__ there is nowhere to cache
        class C:
            __slots__ = ()
            @cached_property
            def f(self):
                return 42
        with self.assertRaises(AttributeError):
            a = C().f
        with self.assertRaises(AttributeError):
            C().f = 42
    def test_cached_property_clear_no_dict(self):
        class C:
            __slots__ = ()
            @cached_property
            def f(self):
                return 42
        with self.assertRaises(AttributeError):
            a = C.f.clear(C())
    def test_cached_property_name(self):
        class C:
            @cached_property
            def f(self):
                return 42
        self.assertEqual(C.f.name, "f")
    def test_cached_property_func(self):
        class C:
            pass
        def f(self):
            return 42
        C.f = cached_property(f)
        # the wrapped function is exposed as .fget
        self.assertEqual(C.f.fget, f)
    def test_cached_property_doc(self):
        class C:
            @cached_property
            def f(self):
                return 42
        self.assertEqual(C.f.__doc__, None)
        class D:
            @cached_property
            def f(self):
                "hi there"
                return 42
        self.assertEqual(D.f.__doc__, "hi there")
        # __doc__ is read through to the underlying function, not copied
        D.f.fget.__doc__ = "updated"
        self.assertEqual(D.f.__doc__, "updated")
def test_cached_property_slot(self):
class C:
__slots__ = ("f", "calls")
def __init__(self):
self.calls = 0
def f(self):
self.calls += 1
return 42
C.f = cached_property(f, C.f)
a = C()
self.assertEqual(a.f, 42)
self.assertEqual(a.calls, 1)
self.assertEqual(a.f, 42)
self.assertEqual(a.calls, 1)
def test_cached_property_clear_slot(self):
value = 42
class C:
__slots__ = "f"
def f(self):
return value
C.f = cached_property(f, C.f)
a = C()
self.assertEqual(a.f, 42)
C.f.clear(a)
value = 100
self.assertEqual(a.f, 100)
def test_cached_property_has_value_slot(self):
value = 42
class C:
__slots__ = "f"
def f(self):
return value
C.f = cached_property(f, C.f)
a = C()
self.assertEqual(a.f, 42)
self.assertEqual(C.f.has_value(a), True)
C.f.clear(a)
self.assertEqual(C.f.has_value(a), False)
value = 100
self.assertEqual(a.f, 100)
self.assertEqual(C.f.has_value(a), True)
def test_cached_property_clear_slot_not_set(self):
class C:
__slots__ = "f"
def f(self):
return 42
C.f = cached_property(f, C.f)
a = C()
C.f.clear(a)
self.assertEqual(a.f, 42)
def test_cached_property_clear_slot_bad_value(self):
value = 42
class C:
__slots__ = "f"
def f(self):
return value
C.f = cached_property(f, C.f)
a = C()
self.assertEqual(a.f, 42)
with self.assertRaisesRegex(
TypeError, "descriptor 'f' for 'C' objects doesn't apply to a 'int' object"
):
C.f.clear(42)
def test_cached_property_slot_set_del(self):
class C:
__slots__ = ("f", "calls")
def __init__(self):
self.calls = 0
def f(self):
self.calls += 1
return 42
C.f = cached_property(f, C.f)
a = C()
a.f = 100
self.assertEqual(a.f, 100)
self.assertEqual(a.calls, 0)
del a.f
with self.assertRaises(AttributeError):
del a.f
self.assertEqual(a.f, 42)
self.assertEqual(a.calls, 1)
def test_cached_property_slot_subtype(self):
class C:
__slots__ = ("f",)
def f(self):
return 42
class my_cached_prop(cached_property):
pass
with self.assertRaises(TypeError):
C.f = my_cached_prop(f, C.f)
def test_cached_property_slot_raises(self):
class C:
__slots__ = ("f",)
def f(self):
raise NoWayError()
C.f = cached_property(f, C.f)
with self.assertRaises(NoWayError):
C().f
def test_cached_property_slot_wrong_type(self):
"""apply a cached property from one type to another"""
class C:
__slots__ = ("abc",)
class D:
pass
D.abc = cached_property(lambda self: 42, C.abc)
a = D()
with self.assertRaises(TypeError):
x = a.abc
def test_cached_property_slot_wrong_type_set(self):
"""apply a cached property from one type to another"""
class C:
__slots__ = ("abc",)
class D:
pass
D.abc = cached_property(lambda self: 42, C.abc)
a = D()
with self.assertRaises(TypeError):
print(a.abc)
def test_cached_property_slot_name(self):
class C:
__slots__ = ("f",)
C.f = cached_property(lambda self: 42, C.f)
self.assertEqual(C.f.name, "f")
def test_cached_property_slot_property(self):
class C:
__slots__ = ("f",)
prev_f = C.f
C.f = cached_property(lambda self: 42, C.f)
self.assertEqual(C.f.slot, prev_f)
def test_cached_property_no_slot_property(self):
class C:
@cached_property
def f(self):
return 42
self.assertEqual(C.f.slot, None)
def test_cached_property_non_descriptor(self):
with self.assertRaises(TypeError):
cached_property(lambda self: 42, 42)
def test_cached_property_incompatible_descriptor(self):
with self.assertRaises(TypeError):
cached_property(lambda self: 42, GeneratorType.gi_frame)
def test_cached_property_readonly_descriptor(self):
with self.assertRaises(TypeError):
cached_property(lambda self: 42, range.start)
    # --- frozen-type warnings and interpreter knobs ---
    def test_warn_on_type(self):
        # modifying a frozen type that was flagged via warn_on_inst_dict
        # reports WARN002 through the handler instead of raising
        class C:
            pass
        msg = type = attr = None
        def cb(*args):
            nonlocal msg, type, attr
            msg = args[0]
            type = args[1]
            attr = args[2]
        cinder.warn_on_inst_dict(C)
        cinder.freeze_type(C)
        cinder.cinder_set_warn_handler(cb)
        C.foo = 42
        self.assertEqual(
            msg, "WARN002: Type modified that was flagged for immutability"
        )
        self.assertEqual(type, C)
        self.assertEqual(attr, "foo")
    def test_get_warn(self):
        class C:
            pass
        def cb(*args):
            pass
        # NOTE(review): uses set_warn_handler/get_warn_handler while other
        # tests use cinder_set_warn_handler — presumably aliases; confirm.
        cinder.set_warn_handler(cb)
        self.assertEqual(cinder.get_warn_handler(), cb)
        cinder.set_warn_handler(None)
        self.assertEqual(cinder.get_warn_handler(), None)
    def test_warn_on_frozen_type(self):
        # a type must be flagged before freezing, not after
        class C:
            pass
        cinder.freeze_type(C)
        with self.assertRaisesRegex(
            TypeError, "can't call warn_on_inst_dict on a frozen type"
        ):
            cinder.warn_on_inst_dict(C)
    def test_gen_free_list(self):
        # with the generator freelist knob enabled, back-to-back generator
        # free/alloc reuses the same memory (ids compare equal)
        knobs = cinder.getknobs()
        self.assertEqual(knobs["genfreelist"], False)
        cinder.setknobs({"genfreelist": True})
        knobs = cinder.getknobs()
        self.assertEqual(knobs["genfreelist"], True)
        def f():
            yield 42
        a = f()
        id1 = id(a)
        del a
        a = f()
        id2 = id(a)
        self.assertEqual(id1, id2)
        cinder.setknobs({"genfreelist": False})
        knobs = cinder.getknobs()
        self.assertEqual(knobs["genfreelist"], False)
# def test_polymorphic_cache(self):
# knobs = cinder.getknobs()
# self.assertEqual(knobs["polymorphiccache"], False)
# cinder.setknobs({"polymorphiccache": True})
# knobs = cinder.getknobs()
# self.assertEqual(knobs["polymorphiccache"], True)
# cinder.setknobs({"polymorphiccache": False})
# knobs = cinder.getknobs()
# self.assertEqual(knobs["polymorphiccache"], False)
    # --- StrictModule basics ---
    def test_strictmodule_type(self):
        # strict_module_from_module wraps a plain module in a StrictModule
        foo = strict_module_from_module(ModuleType("foo"))
        self.assertTrue(type(foo) is StrictModule)
def test_strictmodule_uninitialized(self):
# An uninitialized module has no __dict__ or __name__,
# and __doc__ is None
foo = StrictModule.__new__(StrictModule)
self.assertTrue(foo.__dict__ == None)
self.assertRaises(SystemError, dir, foo)
try:
s = foo.__name__
self.fail("__name__ = %s" % repr(s))
except AttributeError:
pass
self.assertEqual(foo.__doc__, StrictModule.__doc__)
    def test_strictmodule_uninitialized_missing_getattr(self):
        # error message differs from the initialized case: no module name
        foo = StrictModule.__new__(StrictModule)
        self.assertRaisesRegex(
            AttributeError,
            "module has no attribute 'not_here'",
            getattr,
            foo,
            "not_here",
        )
    def test_strictmodule_missing_getattr(self):
        foo = strict_module_from_module(ModuleType("foo"))
        self.assertRaisesRegex(
            AttributeError,
            "module 'foo' has no attribute 'not_here'",
            getattr,
            foo,
            "not_here",
        )
    def test_strictmodule_no_docstring(self):
        # Regularly initialized module, no docstring
        foo = strict_module_from_module(ModuleType("foo"))
        self.assertEqual(foo.__name__, "foo")
        self.assertEqual(foo.__doc__, None)
        self.assertIs(foo.__loader__, None)
        self.assertIs(foo.__package__, None)
        self.assertIs(foo.__spec__, None)
        self.assertEqual(
            foo.__dict__,
            {
                "__name__": "foo",
                "__doc__": None,
                "__loader__": None,
                "__package__": None,
                "__spec__": None,
            },
        )
    def test_strictmodule_ascii_docstring(self):
        # ASCII docstring
        foo = strict_module_from_module(ModuleType("foo", "foodoc"))
        self.assertEqual(foo.__name__, "foo")
        self.assertEqual(foo.__doc__, "foodoc")
        self.assertEqual(
            foo.__dict__,
            {
                "__name__": "foo",
                "__doc__": "foodoc",
                "__loader__": None,
                "__package__": None,
                "__spec__": None,
            },
        )
    def test_strictmodule_unicode_docstring(self):
        # Unicode docstring
        foo = strict_module_from_module(ModuleType("foo", "foodoc\u1234"))
        self.assertEqual(foo.__name__, "foo")
        self.assertEqual(foo.__doc__, "foodoc\u1234")
        self.assertEqual(
            foo.__dict__,
            {
                "__name__": "foo",
                "__doc__": "foodoc\u1234",
                "__loader__": None,
                "__package__": None,
                "__spec__": None,
            },
        )
    def test_strictmodule_weakref(self):
        # StrictModule supports weak references and is collectible
        m = strict_module_from_module(ModuleType("foo"))
        wr = weakref.ref(m)
        self.assertIs(wr(), m)
        del m
        gc_collect()
        self.assertIs(wr(), None)
    def test_strictmodule_getattr(self):
        foo = create_strict_module(x=1)
        self.assertEqual(foo.x, 1)
    def test_strictmodule_setattr(self):
        # StrictModule attributes are immutable via plain setattr
        foo = create_strict_module(x=1)
        with self.assertRaises(AttributeError):
            foo.x = 2
    def test_strictmodule_delattr(self):
        foo = create_strict_module(x=1)
        with self.assertRaises(AttributeError):
            del foo.x
    def test_strictmodule_setattr_with_patch_enabled(self):
        # even with patching enabled, plain setattr stays forbidden;
        # changes must go through strict_module_patch()
        foo = create_strict_module(x=1, enable_patching=True)
        with self.assertRaises(AttributeError):
            foo.x = 2
    def test_strictmodule_patch_disabled(self):
        foo = create_strict_module(x=1)
        with self.assertRaises(AttributeError):
            strict_module_patch(foo, "x", 2)
    def test_strictmodule_patch_enabled(self):
        # with patching enabled, strict_module_patch may rebind attributes
        foo = create_strict_module(x=1, enable_patching=True)
        strict_module_patch(foo, "x", 2)
        self.assertEqual(foo.x, 2)
def test_strictmodule_patch_enabled(self):
foo = strict_module_from_module(ModuleType("a"), enable_patching=True)
strict_module_patch(foo, "__dir__", 2)
self.assertEqual(foo.__dir__, 2)
    def test_strictmodule_patch_enabled_2(self):
        # the backing dict is shared, so mutating it directly is visible
        # through the module
        m = ModuleType("a")
        d = m.__dict__
        foo = StrictModule(m.__dict__, False)
        d["__dir__"] = 2
        self.assertEqual(foo.__dir__, 2)
    def test_strictmodule_getattr_errors(self):
        import test.bad_getattr as bga
        from test import bad_getattr2
        bga = strict_module_from_module(bga)
        bad_getattr2 = strict_module_from_module(bad_getattr2)
        self.assertEqual(bga.x, 1)
        self.assertEqual(bad_getattr2.x, 1)
        # we are not respecting module __getattr__ here
        with self.assertRaises(TypeError):
            bga.nope
        with self.assertRaises(TypeError):
            bad_getattr2.nope
        # drop the helper modules so later imports start fresh
        del sys.modules["test.bad_getattr"]
        if "test.bad_getattr2" in sys.modules:
            del sys.modules["test.bad_getattr2"]
    def test_strictmodule_dir(self):
        import test.good_getattr as gga
        gga = strict_module_from_module(gga)
        self.assertEqual(dir(gga), ["a", "b", "c"])
        del sys.modules["test.good_getattr"]
    def test_strictmodule_dir_errors(self):
        # a module __dir__ that raises propagates through dir()
        import test.bad_getattr as bga
        from test import bad_getattr2
        bga = strict_module_from_module(bga)
        bad_getattr2 = strict_module_from_module(bad_getattr2)
        with self.assertRaises(TypeError):
            dir(bga)
        with self.assertRaises(TypeError):
            dir(bad_getattr2)
        del sys.modules["test.bad_getattr"]
        if "test.bad_getattr2" in sys.modules:
            del sys.modules["test.bad_getattr2"]
    def test_strictmodule_getattr_tricky(self):
        from test import bad_getattr3
        bad_getattr3 = strict_module_from_module(bad_getattr3)
        # these lookups should not crash
        with self.assertRaises(AttributeError):
            bad_getattr3.one
        with self.assertRaises(AttributeError):
            bad_getattr3.delgetattr
        if "test.bad_getattr3" in sys.modules:
            del sys.modules["test.bad_getattr3"]
    def test_strictmodule_repr_minimal(self):
        # reprs when modules have no __file__, __name__, or __loader__
        m = ModuleType("foo")
        del m.__name__
        m = strict_module_from_module(m)
        self.assertEqual(repr(m), "<module '?'>")
    def test_strictmodule_repr_with_name(self):
        m = ModuleType("foo")
        m = strict_module_from_module(m)
        self.assertEqual(repr(m), "<module 'foo'>")
    def test_strictmodule_repr_with_name_and_filename(self):
        m = ModuleType("foo")
        m.__file__ = "/tmp/foo.py"
        m = strict_module_from_module(m)
        self.assertEqual(repr(m), "<module 'foo' from '/tmp/foo.py'>")
    def test_strictmodule_repr_with_filename_only(self):
        m = ModuleType("foo")
        del m.__name__
        m.__file__ = "/tmp/foo.py"
        m = strict_module_from_module(m)
        self.assertEqual(repr(m), "<module '?' from '/tmp/foo.py'>")
    def test_strictmodule_modify_dict_patch_disabled(self):
        # writes through the exported __dict__ don't leak into the module
        foo = create_strict_module(x=1, enable_patching=False)
        foo.__dict__["x"] = 2
        self.assertEqual(foo.x, 1)
    def test_strictmodule_modify_dict_patch_enabled(self):
        foo = create_strict_module(x=1, enable_patching=True)
        foo.__dict__["x"] = 2
        self.assertEqual(foo.x, 1)
    def test_strictmodule_unassigned_field(self):
        # a "<assigned:x>": False marker hides x from the module's dict
        d = {"<assigned:x>": False, "x": 1}
        foo = StrictModule(d, False)
        self.assertNotIn("x", foo.__dict__)
def test_const_object(self):
class MyObj:
magic = [42]
a = MyObj
b = const(a)
c = [b]
self.assertEqual(c[0], b)
self.assertEqual(b.magic, const([42]))
d = c[0]
e = d.magic
with self.assertRaises(AttributeError):
e.magic = 33
def async_test(f):
    """Decorator for async test methods: drives the coroutine function to
    completion on a fresh event loop via asyncio.run().

    Returns the coroutine's result (the original wrapper discarded it), so
    the wrapped function also remains usable outside of unittest. Test
    methods return None, so unittest behavior is unchanged.
    """
    assert inspect.iscoroutinefunction(f)
    @wraps(f)
    def impl(*args, **kwargs):
        return asyncio.run(f(*args, **kwargs))
    return impl
class AsyncCinderTest(unittest.TestCase):
    """Async counterparts of the cached-property tests: async_cached_property
    and async_cached_classproperty, plus awaiter-stack integration."""
    def setUp(self) -> None:
        # fresh event loop per test
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = loop
    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop_policy(None)
    @async_test
    async def test_cached_property(self):
        class C:
            def __init__(self):
                self.calls = 0
            @async_cached_property
            async def f(self):
                self.calls += 1
                return 42
        a = C()
        self.assertEqual(await a.f, 42)
        self.assertEqual(a.calls, 1)
        # second await returns the cached result without re-running f
        self.assertEqual(await a.f, 42)
        self.assertEqual(a.calls, 1)
    @async_test
    async def test_cached_property_loop(self):
        val = object()
        class C:
            @async_cached_property
            async def f(self):
                return val
        a = C()
        for i in range(1000):
            x = await a.f
            self.assertEqual(x, val)
    @async_test
    async def test_cached_property_raises(self):
        class C:
            @async_cached_property
            async def f(self):
                raise NoWayError()
        with self.assertRaises(NoWayError):
            await C().f
    @async_test
    async def test_cached_property_no_dict(self):
        class C:
            __slots__ = ()
            @async_cached_property
            async def f(self):
                return 42
        with self.assertRaises(AttributeError):
            a = await C().f
    @async_test
    async def test_cached_property_name(self):
        class C:
            @async_cached_property
            async def f(self):
                return 42
        self.assertEqual(C.f.name, "f")
    @async_test
    async def test_cached_property_func(self):
        class C:
            pass
        async def f(self):
            return 42
        C.f = async_cached_property(f)
        self.assertEqual(C.f.func, f)
    @async_test
    async def test_cached_property_doc(self):
        class C:
            @async_cached_property
            async def f(self):
                return 42
        self.assertEqual(C.f.__doc__, None)
        class D:
            @async_cached_property
            async def f(self):
                "hi there"
                return 42
        self.assertEqual(D.f.__doc__, "hi there")
        # __doc__ is read through to the underlying function, not copied
        D.f.func.__doc__ = "updated"
        self.assertEqual(D.f.__doc__, "updated")
    @async_test
    async def test_cached_property_slot(self):
        class C:
            __slots__ = ("f", "calls")
            def __init__(self):
                self.calls = 0
        # NOTE: f must be defined outside C — a class-body `f` would conflict
        # with the "f" slot (ValueError) and the name is used below at method
        # scope. Same applies to the other slot tests in this class.
        async def f(self):
            self.calls += 1
            return 42
        C.f = async_cached_property(f, C.f)
        a = C()
        self.assertEqual(await a.f, 42)
        self.assertEqual(a.calls, 1)
        self.assertEqual(await a.f, 42)
        self.assertEqual(a.calls, 1)
    @async_test
    async def test_cached_property_slot_raises(self):
        class C:
            __slots__ = ("f",)
        async def f(self):
            raise NoWayError()
        C.f = async_cached_property(f, C.f)
        with self.assertRaises(NoWayError):
            await C().f
    @async_test
    async def test_cached_property_slot_wrong_type(self):
        """apply a cached property from one type to another"""
        class C:
            __slots__ = ("abc",)
        class D:
            pass
        async def f(self):
            return 42
        D.abc = async_cached_property(f, C.abc)
        a = D()
        with self.assertRaises(TypeError):
            x = await a.abc
    @async_test
    async def test_cached_property_slot_name(self):
        class C:
            __slots__ = ("f",)
        async def f(self):
            return 42
        C.f = async_cached_property(f, C.f)
        self.assertEqual(C.f.name, "f")
    @async_test
    async def test_cached_property_slot_property(self):
        class C:
            __slots__ = ("f",)
        async def f(self):
            return 42
        prev_f = C.f
        C.f = async_cached_property(f, C.f)
        # the original slot descriptor stays reachable via .slot
        self.assertEqual(C.f.slot, prev_f)
    @async_test
    async def test_cached_property_no_slot_property(self):
        class C:
            @async_cached_property
            async def f(self):
                return 42
        self.assertEqual(C.f.slot, None)
    @async_test
    async def test_cached_property_non_descriptor(self):
        async def f(self):
            return 42
        with self.assertRaises(TypeError):
            async_cached_property(f, 42)
    @async_test
    async def test_cached_property_incompatible_descriptor(self):
        async def f(self):
            return 42
        with self.assertRaises(TypeError):
            async_cached_property(f, GeneratorType.gi_frame)
    @async_test
    async def test_cached_property_readonly_descriptor(self):
        async def f(self):
            return 42
        with self.assertRaises(TypeError):
            async_cached_property(f, range.start)
    @async_test
    async def test_cached_class_prop(self):
        class C:
            @async_cached_classproperty
            async def f(self):
                return 42
        self.assertEqual(await C.f, 42)
    @async_test
    async def test_cached_class_prop_called_once(self):
        class C:
            calls = 0
            @async_cached_classproperty
            async def f(cls):
                cls.calls += 1
                return 42
        self.assertEqual(await C.f, 42)
        self.assertEqual(await C.f, 42)
        self.assertEqual(C.calls, 1)
    @async_test
    async def test_cached_class_prop_descr(self):
        """verifies the descriptor protocol isn't invoked on the cached value"""
        class classproperty:
            def __get__(self, inst, ctx):
                return 42
        clsprop = classproperty()
        class C:
            @async_cached_classproperty
            async def f(cls):
                return clsprop
        self.assertEqual(await C.f, clsprop)
        self.assertEqual(await C.f, clsprop)
    @async_test
    async def test_cached_class_prop_inheritance(self):
        class C:
            @async_cached_classproperty
            async def f(cls):
                return cls.__name__
        class D(C):
            pass
        # first access through C caches "C"; D observes the cached value
        self.assertEqual(await C.f, "C")
        self.assertEqual(await D.f, "C")
    @async_test
    async def test_cached_class_prop_inheritance_reversed(self):
        class C:
            @async_cached_classproperty
            async def f(cls):
                return cls.__name__
        class D(C):
            pass
        self.assertEqual(await D.f, "D")
        self.assertEqual(await C.f, "D")
    @async_test
    async def test_cached_class_prop_inst_method_no_inst(self):
        class C:
            def __init__(self, value):
                self.value = value
            @async_cached_classproperty
            async def f(cls):
                return lambda self: self.value
        self.assertEqual(type(await C.f), FunctionType)
    @async_test
    async def test_cached_class_prop_inst(self):
        class C:
            @async_cached_classproperty
            async def f(cls):
                return 42
        self.assertEqual(await C().f, 42)
    @async_test
    async def test_cached_class_prop_frozen_type(self):
        class C:
            @async_cached_classproperty
            async def f(cls):
                return 42
        cinder.freeze_type(C)
        self.assertEqual(await C.f, 42)
    @async_test
    async def test_cached_class_prop_frozen_type_inst(self):
        class C:
            @async_cached_classproperty
            async def f(cls):
                return 42
        cinder.freeze_type(C)
        self.assertEqual(await C().f, 42)
    @async_test
    async def test_cached_class_prop_setattr_fails(self):
        # caching must not go through type.__setattr__ (which raises here)
        class metatype(type):
            def __setattr__(self, name, value):
                if name == "f":
                    raise NoWayError()
        class C(metaclass=metatype):
            @async_cached_classproperty
            async def f(self):
                return 42
        self.assertEqual(await C.f, 42)
    @async_test
    async def test_cached_class_prop_doc(self):
        class C:
            @async_cached_classproperty
            async def f(cls):
                "hi"
                return 42
        self.assertEqual(C.__dict__["f"].__doc__, "hi")
        self.assertEqual(C.__dict__["f"].name, "f")
        self.assertEqual(type(C.__dict__["f"].func), FunctionType)
    @async_test
    async def test_cached_property_awaiter(self):
        class C:
            def __init__(self, coro):
                self.coro = coro
            @async_cached_property
            async def f(self):
                return await self.coro
        coro = None
        await_stack = None
        async def g():
            nonlocal coro, await_stack
            # Force suspension. Otherwise the entire execution is eager and
            # awaiter is never set.
            await asyncio.sleep(0)
            await_stack = get_await_stack(coro)
            return 100
        async def h(c):
            return await c.f
        coro = g()
        h_coro = h(C(coro))
        res = await h_coro
        self.assertEqual(res, 100)
        # awaiter of g is the coroutine running C.f. That's created by the
        # AsyncLazyValue machinery, so we can't check the awaiter's identity
        # directly, only that it corresponds to C.f.
        self.assertIs(await_stack[0].cr_code, C.f.func.__code__)
        self.assertIs(await_stack[1], h_coro)
    @async_test
    async def test_cached_property_gathered_awaiter(self):
        class C:
            def __init__(self, coro):
                self.coro = coro
            @async_cached_property
            async def f(self):
                return await self.coro
        coros = [None, None]
        await_stacks = [None, None]
        async def g(res, idx):
            nonlocal coros, await_stacks
            # Force suspension. Otherwise the entire execution is eager and
            # awaiter is never set.
            await asyncio.sleep(0)
            await_stacks[idx] = get_await_stack(coros[idx])
            return res
        async def gatherer(c0, c1):
            return await asyncio.gather(c0.f, c1.f)
        coros[0] = g(10, 0)
        coros[1] = g(20, 1)
        gatherer_coro = gatherer(C(coros[0]), C(coros[1]))
        results = await gatherer_coro
        self.assertEqual(results[0], 10)
        self.assertEqual(results[1], 20)
        # awaiter of g is the coroutine running C.f. That's created by the
        # AsyncLazyValue machinery, so we can't check the awaiter's identity
        # directly, only that it corresponds to C.f.
        self.assertIs(await_stacks[0][0].cr_code, C.f.func.__code__)
        self.assertIs(await_stacks[0][1], gatherer_coro)
        self.assertIs(await_stacks[1][0].cr_code, C.f.func.__code__)
        self.assertIs(await_stacks[1][1], gatherer_coro)
def f():
    """Module-level fixture: a plain function whose qualname is just "f"."""
    pass
class C:
    """Fixture class exercising method, staticmethod, classmethod and
    nested-class qualnames for CodeObjectQualnameTest."""
    def x(self):
        pass
    @staticmethod
    def sm():
        pass
    @classmethod
    def cm(cls):
        # was `def cm():` — a classmethod always receives the class, so the
        # original raised TypeError if ever invoked; the tests only read its
        # __code__, whose qualname is unaffected by the added parameter.
        pass
    def f(self):
        # returns a function defined in a nested class to produce the
        # qualname "C.f.<locals>.G.y"
        class G:
            def y(self):
                pass
        return G.y
class CodeObjectQualnameTest(unittest.TestCase):
    """cinder._get_qualname reads qualified names straight off code objects
    (module-level fixtures f and C are defined above)."""
    def test_qualnames(self):
        self.assertEqual(cinder._get_qualname(f.__code__), "f")
        self.assertEqual(cinder._get_qualname(C.x.__code__), "C.x")
        self.assertEqual(cinder._get_qualname(C.sm.__code__), "C.sm")
        self.assertEqual(cinder._get_qualname(C.cm.__code__), "C.cm")
        self.assertEqual(cinder._get_qualname(C().f().__code__), "C.f.<locals>.G.y")
        c = f.__code__
        # a CodeType rebuilt via the raw constructor loses its qualname...
        co = CodeType(
            c.co_argcount,
            c.co_posonlyargcount,
            c.co_kwonlyargcount,
            c.co_nlocals,
            c.co_stacksize,
            c.co_flags,
            c.co_code,
            c.co_consts,
            c.co_names,
            c.co_varnames,
            c.co_filename,
            c.co_name,
            c.co_firstlineno,
            c.co_lnotab,
            c.co_freevars,
            c.co_cellvars,
        )
        self.assertIsNone(cinder._get_qualname(co))
        # ...while code.replace() preserves it
        co = c.replace(co_flags=c.co_flags)
        self.assertEqual(cinder._get_qualname(co), "f")
        # module toplevel reports "<module>"; a class body reports the class
        src = """\
        import sys
        import cinder
        modname = cinder._get_qualname(sys._getframe(0).f_code)
        clsname = None
        class C:
            global clsname
            clsname = cinder._get_qualname(sys._getframe(0).f_code)
        """
        g = {}
        exec(dedent(src), g)
        self.assertEqual(g["modname"], "<module>")
        self.assertEqual(g["clsname"], "C")
class TestNoShadowingInstances(unittest.TestCase):
    """cinder._has_no_shadowing_instances: a type keeps this flag until an
    instance __dict__ is materialized or a method gets shadowed/added."""
    def check_no_shadowing(self, typ, expected):
        got = cinder._has_no_shadowing_instances(typ)
        self.assertEqual(got, expected)
    def test_dict_retrieved(self):
        class Foo:
            def test(self):
                return 1234
        obj = Foo()
        self.check_no_shadowing(Foo, True)
        # merely materializing the instance dict clears the flag
        obj.__dict__
        self.check_no_shadowing(Foo, False)
    def test_dict_set(self):
        class Foo:
            def test(self):
                return 1234
        obj = Foo()
        self.check_no_shadowing(Foo, True)
        obj.__dict__ = {"testing": "123"}
        self.check_no_shadowing(Foo, False)
    def test_shadowing_method(self):
        class Foo:
            def test(self):
                return 1234
        obj = Foo()
        self.check_no_shadowing(Foo, True)
        # an instance attribute shadowing a method clears the flag
        obj.test = 1234
        self.check_no_shadowing(Foo, False)
    def test_shadowing_classvar(self):
        class Foo:
            test = 1234
        obj = Foo()
        self.check_no_shadowing(Foo, True)
        # shadowing a plain class var (not a method) keeps the flag
        obj.test = 1234
        self.check_no_shadowing(Foo, True)
    def test_method_added_on_class(self):
        class Foo:
            pass
        self.check_no_shadowing(Foo, True)
        def test(self):
            return 1234
        Foo.test = test
        self.check_no_shadowing(Foo, False)
    def test_method_added_on_base(self):
        class Foo:
            pass
        class Bar(Foo):
            pass
        class Baz(Bar):
            pass
        self.check_no_shadowing(Foo, True)
        self.check_no_shadowing(Bar, True)
        self.check_no_shadowing(Baz, True)
        def test(self):
            return 1234
        # adding a method on a base invalidates the whole subtree
        Foo.test = test
        self.check_no_shadowing(Foo, False)
        self.check_no_shadowing(Bar, False)
        self.check_no_shadowing(Baz, False)
    def test_custom_metaclass(self):
        class MyMeta(type):
            pass
        class Foo(metaclass=MyMeta):
            pass
        self.check_no_shadowing(Foo, True)
    def test_custom_metaclass_with_setattr(self):
        class MyMeta(type):
            def __setattr__(cls, name, value):
                return super().__setattr__(name, value)
        class Foo(metaclass=MyMeta):
            pass
        self.check_no_shadowing(Foo, True)
        # a non-callable class attribute is fine...
        Foo.notamethod = 1
        self.check_no_shadowing(Foo, True)
        def amethod(self):
            return 1234
        # ...but adding a method clears the flag
        Foo.amethod = amethod
        self.check_no_shadowing(Foo, False)
    def test_init_subclass(self):
        class Base:
            def amethod(self):
                return 1234
            def __init_subclass__(cls, /, **kwargs):
                cls.new_meth = Base.amethod
        class Derived(Base):
            pass
        # methods added during class creation don't count as shadowing
        self.check_no_shadowing(Derived, True)
    def test_init_subclass_that_creates_instance(self):
        import sys
        outer = None
        class Base:
            def amethod(self):
                return 1234
            def __init_subclass__(cls, /, **kwargs):
                nonlocal outer
                cls.new_meth = Base.amethod
                outer = cls()
        class Derived(Base):
            pass
        # an instance created during __init_subclass__ does clear the flag
        self.check_no_shadowing(Derived, False)
class GetCallStackTest(unittest.TestCase):
    """cinder._get_call_stack returns the code objects of the active Python
    call stack, outermost first."""
    def a(self):
        return self.b()
    def b(self):
        return self.c()
    def c(self):
        return self.d()
    def d(self):
        return cinder._get_call_stack()
    def test_get_call_stack(self):
        stack = self.a()
        # unittest-runner frames precede the chain we control, so only the
        # tail of the stack is asserted exactly
        self.assertGreater(len(stack), 5)
        expected = [
            self.test_get_call_stack.__code__,
            self.a.__code__,
            self.b.__code__,
            self.c.__code__,
            self.d.__code__,
        ]
        self.assertEqual(stack[-5:], expected)
class GetEntireCallStackTest(unittest.TestCase):
    """cinder._get_entire_call_stack_as_qualnames stitches sync frames and
    the async awaiter chain into a single qualname list."""
    def setUp(self) -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = loop
    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop_policy(None)
    def test_get_entire_call_stack_as_qualnames(self):
        a1_stack = None
        a4_stack = None
        async def a1():
            nonlocal a1_stack
            await asyncio.sleep(0.1)
            a1_stack = cinder._get_entire_call_stack_as_qualnames()
        async def a2():
            await a1()
        async def a3():
            return None
        async def a4():
            nonlocal a4_stack
            a4_stack = cinder._get_entire_call_stack_as_qualnames()
        async def drive():
            await asyncio.gather(a2(), a3(), a4())
        asyncio.run(drive())
        # each gathered branch sees its own await chain below drive()
        verify_stack(self, a1_stack, ["drive", "a2", "a1"])
        verify_stack(self, a4_stack, ["drive", "a4"])
    def test_get_entire_call_stack_as_qualnames_long_awaiter_chain(self):
        a1_stack = None
        async def a1():
            nonlocal a1_stack
            await asyncio.sleep(0.1)
            a1_stack = cinder._get_entire_call_stack_as_qualnames()
        async def a2():
            await a1()
        async def a3():
            return await a2()
        async def a4():
            return await a3()
        async def a5():
            return await a4()
        async def drive():
            await a5()
        asyncio.run(drive())
        verify_stack(self, a1_stack, ["drive", "a5", "a4", "a3", "a2", "a1"])
    def test_get_entire_call_stack_as_qualnames_mixed_awaiter_and_shadow_stacks(self):
        # a3 suspends once itself, so part of the chain is reconstructed from
        # awaiter links rather than live shadow frames
        a1_stack = None
        async def a1():
            nonlocal a1_stack
            await asyncio.sleep(0)
            a1_stack = cinder._get_entire_call_stack_as_qualnames()
        async def a2():
            await a1()
        async def a3():
            await asyncio.sleep(0)
            return await a2()
        async def a4():
            return await a3()
        async def a5():
            return await a4()
        async def drive():
            await a5()
        asyncio.run(drive())
        verify_stack(self, a1_stack, ["drive", "a5", "a4", "a3", "a2", "a1"])
    def test_get_entire_call_stack_as_qualnames_with_generator(self):
        # plain (non-async) generator frames also appear in the walked stack
        a1_stack = None
        def a1():
            nonlocal a1_stack
            a1_stack = cinder._get_entire_call_stack_as_qualnames()
        def a2():
            yield a1()
        def drive():
            for _ in a2():
                pass
        drive()
        verify_stack(self, a1_stack, ["drive", "a2", "a1"])
    def test_get_stack_across_coro_with_no_awaiter_and_eager_invoker(self):
        # We want to test the scenario where we:
        # 1. Walk the sync stack
        # 2. Transition to the await stack
        # 3. Reach a suspended coroutine with no awaiter but that was
        #    invoked eagerly.
        def a1(g):
            c = a2(g)
            # Manually start the coroutine so that no awaiter is set
            c.send(None)
            return c
        async def a2(g):
            # When a3 wakes up from the sleep it will walk the awaiter to find
            # a2. a2 won't have an awaiter set. The prev pointer in its shadow
            # frame should also be NULL since it's suspended. Thus, the stack
            # walk should terminate here.
            fut = asyncio.ensure_future(a3(g))
            return await fut
        async def a3(g):
            await asyncio.sleep(0.1)
            res = cinder._get_entire_call_stack_as_qualnames()
            g.set_result(res)
        stack = None
        async def drive():
            nonlocal stack
            f = asyncio.Future()
            c = a1(f)
            stack = await f
        asyncio.run(drive())
        verify_stack(self, stack, ["a2", "a3"])
@unittest.skipUnderCinderJIT("Profiling only works under interpreter")
class TestInterpProfiling(unittest.TestCase):
    """cinder.set_profile_interp / get_and_clear_type_profiles capture
    per-opcode operand-type profiles while the interpreter runs."""
    def tearDown(self):
        # make sure profiling can't leak into other tests
        cinder.set_profile_interp(False)
    def test_profiles_instrs(self):
        def workload(a, b, c):
            r = 0.0
            for i in range(c):
                r += a * b
        was_enabled_before = cinder.set_profile_interp(True)
        repetitions = 101
        result = workload(1, 2, repetitions)
        was_enabled_after = cinder.set_profile_interp(False)
        profiles = cinder.get_and_clear_type_profiles()
        # set_profile_interp returns the previously-enabled state
        self.assertFalse(was_enabled_before)
        self.assertTrue(was_enabled_after)
        profile_by_op = {}
        for item in profiles:
            if (
                item["normal"]["func_qualname"].endswith("<locals>.workload")
                and "opname" in item["normal"]
            ):
                opname = item["normal"]["opname"]
                self.assertNotIn(opname, profile_by_op)
                profile_by_op[opname] = item
        # We don't want to overfit to the current shape of the bytecode, so do
        # a quick sanity check of a few key instructions.
        self.assertIn("FOR_ITER", profile_by_op)
        item = profile_by_op["FOR_ITER"]
        self.assertEqual(item["int"]["count"], repetitions + 1)
        self.assertEqual(item["normvector"]["types"], ["range_iterator"])
        self.assertIn("BINARY_MULTIPLY", profile_by_op)
        item = profile_by_op["BINARY_MULTIPLY"]
        self.assertEqual(item["int"]["count"], repetitions)
        self.assertEqual(item["normvector"]["types"], ["int", "int"])
        self.assertIn("INPLACE_ADD", profile_by_op)
        item = profile_by_op["INPLACE_ADD"]
        self.assertEqual(item["int"]["count"], repetitions)
        self.assertEqual(item["normvector"]["types"], ["float", "int"])
class TestWaitForAwaiter(unittest.TestCase):
    """Checks that asyncio.tasks.wait_for appears in the awaiter stack."""

    def setUp(self) -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = loop

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop_policy(None)

    @async_test
    async def test_get_awaiter_wait_for(self):
        coro = None

        async def sleeper():
            nonlocal coro
            # Force suspension
            await asyncio.sleep(0.1)
            return get_await_stack(coro)

        async def waiter(c):
            return await asyncio.tasks.wait_for(c, 10)

        coro = sleeper()
        await_stack = await waiter(coro)
        # The awaiter chain seen from inside the sleeper should be
        # wait_for -> waiter.
        self.assertIs(await_stack[0].cr_code, asyncio.tasks.wait_for.__code__)
        self.assertIs(await_stack[1].cr_code, waiter.__code__)

    @async_test
    async def test_get_awaiter_wait_for_gather(self):
        coros = [None, None]

        async def sleeper(idx):
            nonlocal coros
            # Force suspension
            await asyncio.sleep(0.1)
            return get_await_stack(coros[idx])

        async def waiter(c0, c1):
            return await asyncio.tasks.wait_for(asyncio.gather(c0, c1), 10)

        coros[0] = sleeper(0)
        coros[1] = sleeper(1)
        await_stacks = await waiter(coros[0], coros[1])
        # Each gathered coroutine should report wait_for -> waiter.
        self.assertIs(await_stacks[0][0].cr_code, asyncio.tasks.wait_for.__code__)
        self.assertIs(await_stacks[0][1].cr_code, waiter.__code__)
        self.assertIs(await_stacks[1][0].cr_code, asyncio.tasks.wait_for.__code__)
        self.assertIs(await_stacks[1][1].cr_code, waiter.__code__)
class Rendez:
    """Two-phase rendezvous helper for the awaiter tests.

    ``started`` is resolved by a child coroutine once it is running;
    ``barrier`` is resolved by the test to let the child proceed.
    """

    def __init__(self):
        self.started, self.barrier = asyncio.Future(), asyncio.Future()
class TestClearAwaiter(unittest.TestCase):
    """Checks that a coroutine's awaiter link is cleared on completion."""

    def setUp(self) -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = loop

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop_policy(None)

    @async_test
    async def test_clear_on_throw(self):
        """Awaiter should be cleared when a coroutine completes because an exception
        was thrown into it.
        """
        class MyException(Exception):
            pass

        async def inner(rendez):
            rendez.started.set_result(None)
            await rendez.barrier
            raise MyException("Hello!")

        async def outer(rendez):
            return await asyncio.create_task(inner(rendez))

        inner_rendez = Rendez()
        outer_coro = outer(inner_rendez)
        task = asyncio.create_task(outer_coro)
        # Wait for the inner coroutine to start running before unblocking
        # it
        await inner_rendez.started
        inner_rendez.barrier.set_result(None)
        with self.assertRaises(MyException):
            await task
        # The thrown exception completed outer_coro; its awaiter must be gone.
        self.assertIs(cinder._get_coro_awaiter(outer_coro), None)
class TestAwaiterForNonExceptingGatheredTask(unittest.TestCase):
    """Awaiter bookkeeping for coroutines running under asyncio.gather()."""

    def setUp(self) -> None:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        self.loop = loop

    def tearDown(self):
        self.loop.close()
        asyncio.set_event_loop_policy(None)

    @async_test
    async def test_awaiter_for_gathered_coroutines_are_not_cleared_on_completion(self):
        """The awaiter for pending gathered coroutines should not be cleared when other
        gathered coroutines complete normally.
        """
        async def noop(rendez):
            rendez.started.set_result(None)
            await rendez.barrier

        async def gatherer(*coros):
            try:
                await asyncio.gather(*coros)
            # NOTE(review): MyException is only defined in the test below, not
            # here. Nothing in this test raises, so the handler is dead code,
            # but it would itself fail with NameError if ever reached —
            # confirm intent.
            except MyException:
                return True

        coro0_rendez = Rendez()
        coro0 = noop(coro0_rendez)
        coro0_task = asyncio.create_task(coro0)
        coro1_rendez = Rendez()
        coro1 = noop(coro1_rendez)
        gatherer_coro = gatherer(coro0_task, coro1)
        gatherer_task = asyncio.create_task(gatherer_coro)
        # Wait until both gathered coroutines have started
        await coro0_rendez.started
        await coro1_rendez.started
        self.assertIs(cinder._get_coro_awaiter(coro0), gatherer_coro)
        self.assertIs(cinder._get_coro_awaiter(coro1), gatherer_coro)
        # Unblock the first coroutine and wait for it to complete
        coro0_rendez.barrier.set_result(None)
        await coro0_task
        # coro0 shouldn't have an awaiter because it is complete, while coro1 should
        # still have an awaiter because it hasn't completed
        self.assertIs(cinder._get_coro_awaiter(coro0), None)
        self.assertIs(cinder._get_coro_awaiter(coro1), gatherer_coro)
        coro1_rendez.barrier.set_result(None)
        await gatherer_task
        # coro1 shouldn't have an awaiter now that it has completed
        self.assertIs(cinder._get_coro_awaiter(coro1), None)

    @async_test
    async def test_awaiter_for_gathered_coroutines_are_cleared_on_exception(self):
        """Ensure that the awaiter is cleared for gathered coroutines when a gathered
        coroutine raises an exception and the gather propagates exceptions.
        """
        class MyException(Exception):
            pass

        async def noop(rendez):
            rendez.started.set_result(None)
            await rendez.barrier

        async def raiser(rendez):
            rendez.started.set_result(None)
            await rendez.barrier
            raise MyException("Testing 123")

        async def gatherer(*coros):
            try:
                await asyncio.gather(*coros)
            except MyException:
                return True

        noop_rendez = Rendez()
        noop_coro = noop(noop_rendez)
        raiser_rendez = Rendez()
        raiser_coro = raiser(raiser_rendez)
        gatherer_coro = gatherer(raiser_coro, noop_coro)
        gatherer_task = asyncio.create_task(gatherer_coro)
        # Wait until both child coroutines have started
        await noop_rendez.started
        await raiser_rendez.started
        self.assertIs(cinder._get_coro_awaiter(noop_coro), gatherer_coro)
        self.assertIs(cinder._get_coro_awaiter(raiser_coro), gatherer_coro)
        # Unblock the coroutine that raises an exception. Both it and the
        # gathering coroutine should complete; the exception should be
        # propagated into the gathering coroutine. The other gathered coroutine
        # (noop) should continue running.
        raiser_rendez.barrier.set_result(None)
        await gatherer_task
        # The awaiter for lone running coroutine should be cleared; its awaiter
        # is gone.
        self.assertIs(cinder._get_coro_awaiter(noop_coro), None)
        noop_rendez.barrier.set_result(None)

    @async_test
    async def test_awaiter_for_gathered_coroutines_are_cleared_on_cancellation(self):
        """Ensure that the awaiter is cleared for gathered coroutines when a gathered
        coroutine is cancelled and the gather propagates exceptions.
        """
        async def noop(rendez):
            rendez.started.set_result(None)
            await rendez.barrier

        async def gatherer(*coros):
            await asyncio.gather(*coros)

        coro1_rendez = Rendez()
        coro1 = noop(coro1_rendez)
        coro1_task = asyncio.create_task(coro1)
        coro2_rendez = Rendez()
        coro2 = noop(coro2_rendez)
        coro2_task = asyncio.create_task(coro2)
        gatherer_coro = gatherer(coro1_task, coro2_task)
        gatherer_task = asyncio.create_task(gatherer_coro)
        # Wait until both child coroutines have started
        await coro1_rendez.started
        await coro2_rendez.started
        self.assertIs(cinder._get_coro_awaiter(coro1), gatherer_coro)
        self.assertIs(cinder._get_coro_awaiter(coro2), gatherer_coro)
        # Cancel one task. Both it and the gathering coroutine should complete;
        # the cancellation should be propagated into the gathering coroutine. The
        # other gathered coroutine should continue running.
        coro1_task.cancel()
        with self.assertRaises(asyncio.CancelledError):
            await gatherer_task
        with self.assertRaises(asyncio.CancelledError):
            await coro1_task
        # The awaiter for lone running coroutine should be cleared; its awaiter
        # is gone.
        self.assertIs(cinder._get_coro_awaiter(coro2), None)
        coro2_rendez.barrier.set_result(None)
        await coro2_task
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
import os


def count_zero_size_entries(path="."):
    """Return the number of directory entries in *path* whose size is 0 bytes.

    Replaces the original approach of parsing ``ls -l`` output with
    ``os.popen``, which was brittle (locale- and format-dependent field
    splitting) and left a large block of dead, commented-out regex code.

    Args:
        path: directory to scan (default: current directory).

    Returns:
        int: count of zero-size entries.
    """
    count = 0
    with os.scandir(path) as entries:
        for entry in entries:
            # lstat-style query (don't follow symlinks), mirroring ls -l.
            if entry.stat(follow_symlinks=False).st_size == 0:
                count += 1
    return count


if __name__ == "__main__":
    print('num of zero size files ', count_zero_size_entries())
"""
Automatic detection of natural language used in a text.
Use this program as a CLI.
Without arguments, enters into a REPL that recognises sentences.
"""
import argparse
import csv
import math
import pathlib
import sys
from typing import List
from pynapl.APL import APL
from pynapl.APLPyConnect import Connection
LANGUAGES = ["en", "fr", "es", "pt"]
DATA_FOLDER = pathlib.Path(__file__).parent / "data"
FILE_NAME_TEMPLATE = "{lang}_trigram_count_filtered.tsv"
def init_data(apl: Connection.APL) -> List[int]:
    """Initialise the data arrays on the APL side.

    As a side effect, this function defines some arrays on the APL instance.
    For each language, {lang}_trigrams and {lang}_counts arrays are created.
    The trigrams array is a nested character vector,
    and the counts array is a simple integer vector.
    The counts vector is one item longer than the trigrams array,
    having an extra 1 at the end.

    Returns an integer list, with the total trigram count for each language.
    """
    totals = []
    for lang in LANGUAGES:
        total = 0
        trigrams, counts = [], []
        # Each data file is a TSV of (trigram, raw count) rows.
        with open(DATA_FOLDER / FILE_NAME_TEMPLATE.format(lang=lang), "r") as f:
            reader = csv.reader(f, delimiter="\t")
            for trigram, count in reader:
                trigrams.append(trigram)
                total += int(count)
                # Store count + 1 (add-one smoothing) so log-probabilities
                # computed downstream never hit log(0).
                counts.append(int(count) + 1)
        totals.append(total)
        # Push the tables into the APL workspace under per-language names;
        # the trailing 1 appended to the counts is the fallback for trigrams
        # not found in the table.
        _ = apl.eval(f"{lang}_trigrams ← ⊃∆", trigrams)
        _ = apl.eval(f"{lang}_counts ← 1,⍨⊃∆", counts)
    return totals
def get_counts(apl: Connection.APL, sentence: str, language: str) -> List[int]:
    """Look up the (smoothed) count of every trigram of *sentence* in the
    given language's table on the APL side."""
    expression = f"{language}_counts[{language}_trigrams ⍳ 3,/⊃∆]"
    return apl.eval(expression, sentence.lower())
def recognise_sentence(apl: Connection.APL, totals: List[int], sentence: str) -> str:
    """Return the most likely language code for *sentence*.

    Scores each language by the log-likelihood of the sentence's trigrams
    under that language's (smoothed) trigram model.
    """
    best_lang = None
    best_score = None
    lowered = sentence.lower()
    for lang, total in zip(LANGUAGES, totals):
        score = sum(math.log(count / total)
                    for count in get_counts(apl, lowered, lang))
        # Strict '>' keeps the first language on ties, matching argmax order.
        if best_score is None or score > best_score:
            best_lang, best_score = lang, score
    return best_lang
if __name__ == "__main__":
    # Command-line interface: recognise a single sentence and/or start a REPL.
    parser = argparse.ArgumentParser()
    parser.add_argument("-s", "--sentence", help="Sentence to recognise.")
    parser.add_argument(
        "-i",
        "--interactive",
        help="Enter interactive mode.",
        action="store_true"
    )
    args = parser.parse_args()

    # Nothing to do if neither a sentence nor interactive mode was requested.
    if not args.sentence and not args.interactive:
        sys.exit()

    # Start the APL interpreter and load the per-language trigram tables.
    apl = APL()
    totals = init_data(apl)

    if args.sentence:
        print(recognise_sentence(apl, totals, args.sentence))

    if args.interactive:
        # Simple REPL: an empty input line terminates the loop.
        print("Type sentences to be recognised:")
        sentence = input(" >> ")
        while sentence:
            print(recognise_sentence(apl, totals, sentence))
            sentence = input(" >> ")
import test_support
class DiagnosticHandlerTest(test_support.TestBase):
    """Checks how colobot-lint reports clang compile diagnostics."""

    def test_compile_warning_in_source_file(self):
        # Non-void function falling off the end -> -Wall warning passthrough.
        self.assert_colobot_lint_result(
            source_file_lines = [
                'int Foo()',
                '{',
                '}',
                ''
            ],
            additional_compile_flags = ['-Wall'],
            expected_errors = [
                {
                    'id': 'compile warning',
                    'severity': 'warning',
                    'msg': "control reaches end of non-void function",
                    'line': '3'
                }
            ])

    def test_compile_error_in_source_file(self):
        # Use of an undeclared identifier -> reported as a compile error.
        self.assert_colobot_lint_result(
            source_file_lines = [
                'void Foo()',
                '{',
                '    return Bar();',
                '}',
                ''
            ],
            additional_compile_flags = ['-Wall'],
            expected_errors = [
                {
                    'id': 'compile error',
                    'severity': 'error',
                    'msg': "use of undeclared identifier 'Bar'",
                    'line': '3'
                }
            ])

    def test_fatal_compile_error_in_source_file(self):
        # A missing include is fatal to clang but still surfaces as an error.
        self.assert_colobot_lint_result(
            source_file_lines = [
                '#include "nonexistent_include_file.h"',
                ''
            ],
            expected_errors = [
                {
                    'id': 'compile error',
                    'severity': 'error',
                    'msg': "'nonexistent_include_file.h' file not found",
                    'line': '1'
                }
            ])

    def test_compile_error_in_fake_header_source(self):
        # Errors inside generated fake header sources are reported as the
        # header not being self-contained.
        self.assert_colobot_lint_result_with_custom_files(
            source_files_data = {
                'foo/bar.h' : [
                    'Bar Foo() {}',
                    ''
                ],
                'fake_header_sources/foo/bar.cpp': [
                    '#include "foo/bar.h"',
                    ''
                ]
            },
            compilation_database_files = ['fake_header_sources/foo/bar.cpp'],
            target_files = ['fake_header_sources/foo/bar.cpp'],
            additional_compile_flags = ['-I$TEMP_DIR'],
            additional_options = ['-project-local-include-path', '$TEMP_DIR'],
            expected_errors = [
                {
                    'id': 'header file not self-contained',
                    'severity': 'error',
                    'msg': "Including single header file should not result in compile error: unknown type name 'Bar'",
                    'line': '1'
                }
            ])

    def test_print_only_unique_compile_warnings(self):
        # The same warning produced via two translation units must be
        # reported only once.
        self.assert_colobot_lint_result_with_custom_files(
            source_files_data = {
                'header.h': [
                    'int Foo()',
                    '{',
                    '}',
                    ''
                ],
                'src1.cpp': [
                    '#include "header.h"',
                    ''
                ],
                'src2.cpp': [
                    '#include "header.h"',
                    ''
                ]
            },
            compilation_database_files = ['src1.cpp', 'src2.cpp'],
            target_files = ['src1.cpp', 'src2.cpp'],
            additional_compile_flags = ['-Wall'],
            additional_options = ['-project-local-include-path', '$TEMP_DIR'],
            expected_errors = [
                {
                    'id': 'compile warning',
                    'severity': 'warning',
                    'msg': "control reaches end of non-void function",
                    'line': '3'
                }
            ])
| StarcoderdataPython |
import numpy as np
import xarray as xr
from numpy import asarray
import scipy.sparse
from itertools import product
from .util import get_shape_of_data
from .grid_stretching_transforms import scs_transform
from .constants import R_EARTH_m
def get_troposphere_mask(ds):
    """
    Returns a mask array for picking out the tropospheric grid boxes.

    Args:
        ds: xarray Dataset
            Dataset containing certain met field variables (i.e.
            Met_TropLev, Met_BXHEIGHT).

    Returns:
        tropmask: numpy ndarray
            Tropospheric mask. False denotes grid boxes that are
            in the troposphere and True in the stratosphere
            (as per Python masking logic).
    """
    # -- Validation -----------------------------------------------------
    if not isinstance(ds, xr.Dataset):
        raise TypeError("The ds argument must be an xarray Dataset!")
    for varname in ("Met_BXHEIGHT", "Met_TropLev"):
        if varname not in ds.data_vars.keys():
            raise ValueError(f"{varname} could not be found!")

    # Shape of the squeezed box-height field defines the mask's shape.
    shape = get_shape_of_data(np.squeeze(ds["Met_BXHEIGHT"]))

    # GCHP output carries a cubed-sphere face dimension "nf".
    is_gchp = "nf" in ds["Met_BXHEIGHT"].dims

    # With a time dimension: GCHP is (time, lev, nf, lat, lon) = 5 dims,
    # GCC is (time, lev, lat, lon) = 4 dims.
    multi_time_slices = (is_gchp and len(shape) == 5) or \
                        (not is_gchp and len(shape) == 4)

    # -- Build the mask -------------------------------------------------
    # Met_TropLev holds 1-based tropopause levels; subtract 1 for Python
    # indexing. Columns below the tropopause level are set to False.
    if multi_time_slices:
        # Working shape (time, lev, everything-else flattened).
        tropmask = np.full(
            (shape[0], shape[1], np.prod(np.array(shape[2:]))), True)
        for t in range(tropmask.shape[0]):
            trop_lev = np.squeeze(ds["Met_TropLev"].isel(time=t).values)
            lev_1d = np.int_(trop_lev - 1).flatten()
            for col, top in enumerate(lev_1d):
                tropmask[t, 0:top, col] = False
    else:
        # Single time slice: working shape (lev, columns).
        tropmask = np.full((shape[0], np.prod(np.array(shape[1:]))), True)
        trop_lev = np.squeeze(ds["Met_TropLev"].values)
        lev_1d = np.int_(trop_lev - 1).flatten()
        for col, top in enumerate(lev_1d):
            tropmask[0:top, col] = False

    # Restore the same shape as Met_BXHEIGHT.
    return tropmask.reshape(shape)
def get_input_res(data):
    """
    Determine the horizontal resolution of a GEOS-Chem dataset.

    Args:
        data: xarray Dataset (GEOS-Chem output)

    Returns:
        (res, gridtype): res is 'latresxlonres' for lat/lon grids or the
        cubed-sphere side length for CS grids; gridtype is 'll' or 'cs'.
    """
    dims = data.dims
    if "lat" not in dims or "lon" not in dims:
        # Cubed-sphere output; GCHP data using MAPL v1.0.0+ has dims
        # time, lev, nf, Ydim, and Xdim.
        if isinstance(data.dims, tuple):
            return len(data["Xdim"].values), "cs"
        return data.dims["Xdim"], "cs"

    lat = data["lat"].values
    lon = data["lon"].values
    # GCHP data flattened onto lat/lon stacks the 6 faces along lat.
    if lat.size / 6 == lon.size:
        return lon.size, "cs"

    # Regular lat/lon grid: derive increments from interior coordinates
    # (second/third entries) to sidestep half-size polar cells.
    lat.sort()
    lon.sort()
    lat_res = np.abs(lat[2] - lat[1])
    lon_res = np.abs(lon[2] - lon[1])
    return str(lat_res) + "x" + str(lon_res), "ll"
def call_make_grid(res, gridtype, in_extent=None, out_extent=None,
                   sg_params=None):
    """
    Create a grid description for the given resolution and grid type.

    Args:
        res: str or int
            Resolution of grid (format 'latxlon' or csres)
        gridtype: str
            'll' for lat/lon or 'cs' for cubed-sphere

    Keyword Args (optional):
        in_extent: list[float, float, float, float]
            Minimum/maximum longitude and latitude of the input data as
            [minlon, maxlon, minlat, maxlat].
            Default value: [-180, 180, -90, 90]
        out_extent: list[float, float, float, float]
            Desired extent of the output grid, same format.
            Default value: [-180, 180, -90, 90]
        sg_params: list[float, float, float]
            Stretched-grid parameters [stretch_factor, target_longitude,
            target_latitude]; non-default values trigger stretched-grid
            creation. Default value: [1, 170, -90] (no stretching)

    Returns:
        [grid, grid_list]: list(dict, list(dict))
            The created grid; grid_list is a list of per-face grids when
            gridtype is 'cs', else None.
    """
    # None sentinels instead of mutable list defaults (shared-default
    # pitfall); explicit callers are unaffected.
    if in_extent is None:
        in_extent = [-180, 180, -90, 90]
    if out_extent is None:
        out_extent = [-180, 180, -90, 90]
    if sg_params is None:
        sg_params = [1, 170, -90]

    # Dispatch to the appropriate grid constructor.
    if gridtype == "ll":
        return [make_grid_LL(res, in_extent, out_extent), None]
    if sg_params == [1, 170, -90]:
        # standard (unstretched) cubed-sphere
        return make_grid_CS(res)
    return make_grid_SG(res, *sg_params)
def get_grid_extents(data, edges=True):
    """
    Get min and max lat and lon from an input GEOS-Chem xarray dataset or
    grid dict.

    Args:
        data: xarray Dataset or dict
            A GEOS-Chem dataset or a grid dict
        edges (optional): bool
            Use cell edges instead of centers. Default value: True

    Returns:
        (minlon, maxlon, minlat, maxlat): floats
    """
    if isinstance(data, dict):
        # Grid-dict input: read edge or center arrays directly.
        if "lon_b" in data and edges:
            return (np.min(data["lon_b"]), np.max(data["lon_b"]),
                    np.min(data["lat_b"]), np.max(data["lat_b"]))
        if not edges:
            return (np.min(data["lon"]), np.max(data["lon"]),
                    np.min(data["lat"]), np.max(data["lat"]))
        return -180, 180, -90, 90

    if "lat" in data.dims and "lon" in data.dims:
        lat = data["lat"].values
        lon = data["lon"].values
        if lat.size / 6 == lon.size:
            # No extents for CS plots right now
            return -180, 180, -90, 90

        lat = np.sort(lat)
        minlat = np.min(lat)
        maxlat = np.max(lat)
        # Widen by a degree where a half-size polar cell was cut off
        # (unequal spacing at the ends of the latitude axis).
        if abs(abs(lat[1]) - abs(lat[0])) != abs(abs(lat[2]) - abs(lat[1])):
            minlat = minlat - 1
        if abs(abs(lat[-1]) - abs(lat[-2])) != abs(abs(lat[-2]) - abs(lat[-3])):
            maxlat = maxlat + 1

        lon = np.sort(lon)
        minlon = np.min(lon)
        # Max longitude is the last center plus one longitude increment.
        maxlon = np.max(lon) + abs(abs(lon[-1]) - abs(lon[-2]))
        return minlon, maxlon, minlat, maxlat

    # GCHP data using MAPL v1.0.0+ has dims time, lev, nf, Ydim, and Xdim
    return -180, 180, -90, 90
def get_vert_grid(dataset, AP=None, BP=None):
    """
    Determine vertical grid of input dataset

    Args:
        dataset: xarray Dataset
            A GEOS-Chem output dataset

    Keyword Args (optional):
        AP: list-like type or None
            Hybrid grid parameter A in hPa. Default value: None
        BP: list-like type or None
            Hybrid grid parameter B (unitless). Default value: None

    Returns:
        p_edge: numpy array
            Edge pressure values for vertical grid
        p_mid: numpy array
            Midpoint pressure values for vertical grid
        nlev: int
            Number of levels in vertical grid

    Raises:
        ValueError: if the grid is not 72/73- or 47/48-level and no AP/BP
            parameters were supplied.
    """
    # None sentinels replace mutable [] defaults; len() checks also work
    # for numpy arrays, unlike the previous `AP == []` comparison.
    nlev = dataset.sizes["lev"]
    if nlev in (72, 73):
        return GEOS_72L_grid.p_edge(), GEOS_72L_grid.p_mid(), 72
    if nlev in (47, 48):
        return GEOS_47L_grid.p_edge(), GEOS_47L_grid.p_mid(), 47

    if AP is None or BP is None or len(AP) == 0 or len(BP) == 0:
        if nlev == 1:
            # Single-level data: degenerate two-edge grid.
            AP = [1, 1]
            BP = [1]
            new_grid = vert_grid(AP, BP)
            return new_grid.p_edge(), new_grid.p_mid(), np.size(AP)
        # Fixed: the original concatenated these fragments without spaces,
        # producing "determinedfrom" and "BPas" in the message.
        raise ValueError(
            "Only 72/73 or 47/48 level vertical grids are automatically "
            "determined from input dataset by get_vert_grid(), please pass "
            "grid parameters AP and BP as keyword arguments")

    new_grid = vert_grid(AP, BP)
    return new_grid.p_edge(), new_grid.p_mid(), np.size(AP)
def get_pressure_indices(pedge, pres_range):
    """
    Get indices where edge pressure values fall within a pressure range.

    Args:
        pedge: numpy array
            Edge pressure values
        pres_range: list(float, float)
            Contains minimum and maximum pressure

    Returns:
        numpy array of indices where pedge lies inside pres_range
    """
    lo = np.min(pres_range)
    hi = np.max(pres_range)
    inside = (pedge >= lo) & (pedge <= hi)
    return np.where(inside)[0]
def pad_pressure_edges(pedge_ind, max_ind, pmid_len):
    """
    Extend an edge-pressure index list with its outer neighbours.

    Args:
        pedge_ind: list-like
            Edge pressure indices
        max_ind: int
            Maximum index
        pmid_len: int
            Length of pmid which should not be exceeded by indices

    Returns:
        pedge_ind with one extra index prepended/appended where the range
        did not already touch the array bounds.
    """
    if max_ind > pmid_len:
        # don't overstep array bounds for full array
        max_ind = max_ind - 1
    lowest = min(pedge_ind)
    if lowest != 0:
        pedge_ind = np.append(lowest - 1, pedge_ind)
    highest = max(pedge_ind)
    if highest != max_ind:
        pedge_ind = np.append(pedge_ind, highest + 1)
    return pedge_ind
def get_ind_of_pres(dataset, pres):
    """
    Get the index of the pressure level closest to the requested value.

    Args:
        dataset: xarray Dataset
            GEOS-Chem dataset
        pres: int or float
            Desired pressure value

    Returns:
        int: index of the level in dataset nearest to pres
    """
    pedge, pmid, _ = get_vert_grid(dataset)
    with_pressure_levs = convert_lev_to_pres(dataset, pmid, pedge)
    distances = np.abs(with_pressure_levs['lev'] - pres).values
    return np.argmin(distances)
def convert_lev_to_pres(dataset, pmid, pedge, lev_type='pmid'):
    """
    Replace the "lev" dimension of a GEOS-Chem dataset with pressures.

    Args:
        dataset: xarray Dataset
            GEOS-Chem dataset
        pmid: np.array
            Midpoint pressure values
        pedge: np.array
            Edge pressure values
        lev_type (optional): str
            'pedge' or 'pmid' for grids that are not 72/73- or 47/48-level.
            Default value: 'pmid'

    Returns:
        The same dataset with "lev" set to pressure values (hPa).
    """
    nlev = dataset.sizes["lev"]
    if nlev in (72, 47):
        new_levels = pmid
    elif nlev in (73, 48):
        new_levels = pedge
    elif lev_type == 'pmid':
        print('Warning: Assuming levels correspond with midpoint pressures')
        new_levels = pmid
    else:
        new_levels = pedge
    dataset["lev"] = new_levels
    dataset["lev"].attrs["unit"] = "hPa"
    dataset["lev"].attrs["long_name"] = "level pressure"
    return dataset
class vert_grid:
    """Hybrid sigma-pressure vertical grid defined by coefficient vectors
    AP (hPa) and BP (unitless): edge pressure = AP + BP * surface pressure."""

    def __init__(self, AP=None, BP=None, p_sfc=1013.25):
        # Fixed: check for None *before* taking len(); the original order
        # evaluated len(AP) first, so AP=None raised TypeError before the
        # None guard could run.
        if AP is None or BP is None or len(AP) != len(BP):
            # Preserves the original best-effort behavior (warn, continue)
            # rather than raising, so existing callers are unaffected.
            print('Inconsistent vertical grid specification')
        self.AP = np.array(AP)
        self.BP = np.array(BP)
        self.p_sfc = p_sfc

    def p_edge(self):
        """Pressure at level edges (hPa), using the eta coordinate."""
        return self.AP + self.BP * self.p_sfc

    def p_mid(self):
        """Pressure at level midpoints (hPa): mean of adjacent edges."""
        p_edge = self.p_edge()
        return (p_edge[1:] + p_edge[:-1]) / 2.0
# Standard vertical grids
# GEOS 72-level hybrid-grid coefficients: 73 edge values define 72 layers,
# ordered surface -> top of atmosphere. AP is in hPa; edge pressure is
# AP + BP * surface pressure (see the vert_grid class).
_GEOS_72L_AP = np.array([
    0.000000e+00, 4.804826e-02, 6.593752e+00, 1.313480e+01, 1.961311e+01, 2.609201e+01,
    3.257081e+01, 3.898201e+01, 4.533901e+01, 5.169611e+01, 5.805321e+01, 6.436264e+01,
    7.062198e+01, 7.883422e+01, 8.909992e+01, 9.936521e+01, 1.091817e+02, 1.189586e+02,
    1.286959e+02, 1.429100e+02, 1.562600e+02, 1.696090e+02, 1.816190e+02, 1.930970e+02,
    2.032590e+02, 2.121500e+02, 2.187760e+02, 2.238980e+02, 2.243630e+02, 2.168650e+02,
    2.011920e+02, 1.769300e+02, 1.503930e+02, 1.278370e+02, 1.086630e+02, 9.236572e+01,
    7.851231e+01, 6.660341e+01, 5.638791e+01, 4.764391e+01, 4.017541e+01, 3.381001e+01,
    2.836781e+01, 2.373041e+01, 1.979160e+01, 1.645710e+01, 1.364340e+01, 1.127690e+01,
    9.292942e+00, 7.619842e+00, 6.216801e+00, 5.046801e+00, 4.076571e+00, 3.276431e+00,
    2.620211e+00, 2.084970e+00, 1.650790e+00, 1.300510e+00, 1.019440e+00, 7.951341e-01,
    6.167791e-01, 4.758061e-01, 3.650411e-01, 2.785261e-01, 2.113490e-01, 1.594950e-01,
    1.197030e-01, 8.934502e-02, 6.600001e-02, 4.758501e-02, 3.270000e-02, 2.000000e-02,
    1.000000e-02])

# BP (unitless sigma weights): 1 at the surface, decaying to 0 above the
# 32nd edge, i.e. upper levels are pure pressure levels.
_GEOS_72L_BP = np.array([
    1.000000e+00, 9.849520e-01, 9.634060e-01, 9.418650e-01, 9.203870e-01, 8.989080e-01,
    8.774290e-01, 8.560180e-01, 8.346609e-01, 8.133039e-01, 7.919469e-01, 7.706375e-01,
    7.493782e-01, 7.211660e-01, 6.858999e-01, 6.506349e-01, 6.158184e-01, 5.810415e-01,
    5.463042e-01, 4.945902e-01, 4.437402e-01, 3.928911e-01, 3.433811e-01, 2.944031e-01,
    2.467411e-01, 2.003501e-01, 1.562241e-01, 1.136021e-01, 6.372006e-02, 2.801004e-02,
    6.960025e-03, 8.175413e-09, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
    0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
    0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
    0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
    0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
    0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
    0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00, 0.000000e+00,
    0.000000e+00])
GEOS_72L_grid = vert_grid(_GEOS_72L_AP, _GEOS_72L_BP)

# Reduced grid: identical to the 72-level grid below layer 36, with the
# upper layers lumped 2:1 and then 4:1 (see _skip_size_vec below).
_GEOS_47L_AP = np.zeros(48)
_GEOS_47L_BP = np.zeros(48)

# Fill in the values for the surface
_GEOS_47L_AP[0] = _GEOS_72L_AP[0]
_GEOS_47L_BP[0] = _GEOS_72L_BP[0]

# Build the GEOS 72-layer to 47-layer mapping matrix at the same time.
# (_xmat_i, _xmat_j, _xmat_s) are the (row, col, value) triplets of a
# sparse 72x47 regridding matrix assembled below.
_xmat_i = np.zeros((72))
_xmat_j = np.zeros((72))
_xmat_s = np.zeros((72))

# Index here is the 1-indexed layer number
for _i_lev in range(1, 37):
    # Map from 1-indexing to 0-indexing
    _x_lev = _i_lev - 1
    # Sparse matrix for regridding
    # Below layer 37, it's 1:1
    _xct = _x_lev
    _xmat_i[_xct] = _x_lev
    _xmat_j[_xct] = _x_lev
    _xmat_s[_xct] = 1.0
    # Copy over the pressure edge for the top of the grid cell
    _GEOS_47L_AP[_i_lev] = _GEOS_72L_AP[_i_lev]
    _GEOS_47L_BP[_i_lev] = _GEOS_72L_BP[_i_lev]

# Now deal with the lumped layers
_skip_size_vec = [2, 4]    # 72L layers merged per 47L layer, per segment
_number_lumped = [4, 7]    # number of lumped 47L layers per segment

# Initialize: 1-indexed positions reached so far on each grid
_i_lev = 36
_i_lev_72 = 36
for _lump_seg in range(2):
    _skip_size = _skip_size_vec[_lump_seg]
    # 1-indexed starting point in the 47-layer grid
    _first_lev_47 = _i_lev + 1
    _first_lev_72 = _i_lev_72 + 1

    # Loop over the coarse vertical levels (47-layer grid)
    for _i_lev_offset in range(_number_lumped[_lump_seg]):
        # i_lev is the index for the current level on the 47-level grid
        _i_lev = _first_lev_47 + _i_lev_offset
        # Map from 1-indexing to 0-indexing
        _x_lev = _i_lev - 1

        # Get the 1-indexed location of the last layer in the 72-layer grid
        # which is below the start of the current lumping region
        _i_lev_72_base = _first_lev_72 + (_i_lev_offset * _skip_size) - 1

        # Get the 1-indexed location of the uppermost level in the 72-layer
        # grid which is within the target layer on the 47-layer grid
        _i_lev_72 = _i_lev_72_base + _skip_size

        # Do the pressure edges first
        # These are the 0-indexed locations of the upper edge for the
        # target layers in 47- and 72-layer grids
        _GEOS_47L_AP[_i_lev] = _GEOS_72L_AP[_i_lev_72]
        _GEOS_47L_BP[_i_lev] = _GEOS_72L_BP[_i_lev_72]

        # Get the total pressure delta across the layer on the lumped grid
        # We are within the fixed pressure levels so don't need to account
        # for variations in surface pressure
        _dp_total = _GEOS_47L_AP[_i_lev - 1] - _GEOS_47L_AP[_i_lev]

        # Now figure out the mapping
        for _i_lev_offset_72 in range(_skip_size):
            # Source layer in the 72 layer grid (0-indexed)
            _x_lev_72 = _i_lev_72_base + _i_lev_offset_72
            _xct = _x_lev_72
            _xmat_i[_xct] = _x_lev_72
            # Target in the 47 layer grid
            _xmat_j[_xct] = _x_lev
            # Proportion of 72-layer grid cell, by pressure, within expanded
            # layer
            _xmat_s[_xct] = (_GEOS_72L_AP[_x_lev_72] -
                             _GEOS_72L_AP[_x_lev_72 + 1]) / _dp_total

    # NOTE(review): _start_pt is never read after this point — confirm
    # whether it is leftover from a removed "last entry" step.
    _start_pt = _i_lev

# Do last entry separately (no layer to go with it)
_xmat_72to47 = scipy.sparse.coo_matrix(
    (_xmat_s, (_xmat_i, _xmat_j)), shape=(72, 47))

GEOS_47L_grid = vert_grid(_GEOS_47L_AP, _GEOS_47L_BP)

# CAM 26-layer grid, flipped so index 0 is the surface. The AP values are
# scaled by 0.01 (presumably Pa -> hPa — consistent with vert_grid's hPa
# convention; confirm against the CAM source).
_CAM_26L_AP = np.flip(np.array([219.4067, 489.5209, 988.2418, 1805.201,
                                2983.724, 4462.334, 6160.587, 7851.243,
                                7731.271, 7590.131, 7424.086, 7228.744,
                                6998.933, 6728.574, 6410.509, 6036.322,
                                5596.111, 5078.225, 4468.96, 3752.191,
                                2908.949, 2084.739, 1334.443, 708.499,
                                252.136, 0., 0.]), axis=0) * 0.01
_CAM_26L_BP = np.flip(np.array([0., 0., 0., 0.,
                                0., 0., 0., 0.,
                                0.01505309, 0.03276228, 0.05359622, 0.07810627,
                                0.1069411, 0.14086370, 0.180772, 0.227722,
                                0.2829562, 0.3479364, 0.4243822, 0.5143168,
                                0.6201202, 0.7235355, 0.8176768, 0.8962153,
                                0.9534761, 0.9851122, 1.]), axis=0)

CAM_26L_grid = vert_grid(_CAM_26L_AP, _CAM_26L_BP)
def make_grid_LL(llres, in_extent=None, out_extent=None):
    """
    Creates a lat/lon grid description.

    Args:
        llres: str
            lat/lon resolution in 'latxlon' format (e.g. '4x5')

    Keyword Args (optional):
        in_extent: list[float, float, float, float]
            Minimum and maximum latitude and longitude of the input data as
            [minlon, maxlon, minlat, maxlat].
            Default value: [-180, 180, -90, 90]
        out_extent: list[float, float, float, float]
            Desired extent of the output grid, same format; used to trim the
            grid to a sub-region. Default value: same as in_extent
            (an empty list [] is also accepted and treated the same way).

    Returns:
        llgrid: dict
            {'lat': lat midpoints, 'lon': lon midpoints,
             'lat_b': lat edges,  'lon_b': lon edges}
    """
    # None sentinels replace the mutable list defaults (shared-default
    # pitfall); explicit [] for out_extent keeps its old meaning.
    if in_extent is None:
        in_extent = [-180, 180, -90, 90]

    # get initial bounds of grid
    [minlon, maxlon, minlat, maxlat] = in_extent
    [dlat, dlon] = list(map(float, llres.split('x')))

    # Edges: lon edges are offset so centers land on multiples of dlon;
    # lat edges extend half a cell past each pole before clipping.
    lon_b = np.linspace(minlon - dlon / 2, maxlon - dlon / 2,
                        int((maxlon - minlon) / dlon) + 1)
    lat_b = np.linspace(minlat - dlat / 2, maxlat + dlat / 2,
                        int((maxlat - minlat) / dlat) + 2)
    # Clamp at the poles, producing half-size polar cells.
    if minlat <= -90:
        lat_b = lat_b.clip(-90, None)
    if maxlat >= 90:
        lat_b = lat_b.clip(None, 90)
    lat = (lat_b[1:] + lat_b[:-1]) / 2
    lon = (lon_b[1:] + lon_b[:-1]) / 2

    # trim grid bounds when the desired extent differs from the initial one
    if out_extent is None or out_extent == []:
        out_extent = in_extent
    if out_extent != in_extent:
        [minlon, maxlon, minlat, maxlat] = out_extent
        lon_inds = np.intersect1d(np.nonzero(lon >= minlon),
                                  np.nonzero(lon <= maxlon))
        lon = lon[lon_inds]
        # make sure to get edges of grid correctly
        lon_inds = np.append(lon_inds, np.max(lon_inds) + 1)
        lon_b = lon_b[lon_inds]

        lat_inds = np.intersect1d(np.nonzero(lat >= minlat),
                                  np.nonzero(lat <= maxlat))
        lat = lat[lat_inds]
        # make sure to get edges of grid correctly
        lat_inds = np.append(lat_inds, np.max(lat_inds) + 1)
        lat_b = lat_b[lat_inds]

    llgrid = {'lat': lat,
              'lon': lon,
              'lat_b': lat_b,
              'lon_b': lon_b}
    return llgrid
def make_grid_CS(csres):
    """
    Create a cubed-sphere grid description.

    Args:
        csres: int
            cubed-sphere resolution of the target grid

    Returns:
        [csgrid, csgrid_list]: list[dict, list[dict]]
            csgrid maps 'lat'/'lon' to cell midpoints and 'lat_b'/'lon_b'
            to cell edges, each carrying a leading face dimension of
            length 6. csgrid_list holds the same data split into one
            dict per face.
    """
    whole_grid = csgrid_GMAO(csres)
    per_face = [
        {
            'lat': whole_grid['lat'][face],
            'lon': whole_grid['lon'][face],
            'lat_b': whole_grid['lat_b'][face],
            'lon_b': whole_grid['lon_b'][face],
        }
        for face in range(6)
    ]
    return [whole_grid, per_face]
def make_grid_SG(csres, stretch_factor, target_lon, target_lat):
    """
    Create a stretched-grid grid description.

    Args:
        csres: int
            cubed-sphere resolution of the target grid
        stretch_factor: float
            stretch factor of the target grid
        target_lon: float
            target stretching longitude of the target grid
        target_lat: float
            target stretching latitude of the target grid

    Returns:
        [csgrid, csgrid_list]: list[dict, list[dict]]
            csgrid maps 'lat'/'lon' to cell midpoints and 'lat_b'/'lon_b'
            to cell edges, each carrying a leading face dimension of
            length 6. csgrid_list holds the same data split into one
            dict per face.
    """
    # offset=0 keeps the base grid unrotated before the stretch transform
    csgrid = csgrid_GMAO(csres, offset=0)
    csgrid_list = []
    for face in range(6):
        # Transform cell centers for this face
        mid_lon, mid_lat = scs_transform(
            csgrid['lon'][face].flatten(),
            csgrid['lat'][face].flatten(),
            stretch_factor, target_lon, target_lat)
        # Transform cell edges for this face
        edge_lon, edge_lat = scs_transform(
            csgrid['lon_b'][face].flatten(),
            csgrid['lat_b'][face].flatten(),
            stretch_factor, target_lon, target_lat)
        csgrid_list.append({
            'lat': mid_lat.reshape((csres, csres)),
            'lon': mid_lon.reshape((csres, csres)),
            'lat_b': edge_lat.reshape((csres + 1, csres + 1)),
            'lon_b': edge_lon.reshape((csres + 1, csres + 1)),
        })
    # Write the stretched coordinates back into the combined grid arrays
    for face in range(6):
        csgrid['lat'][face] = csgrid_list[face]['lat']
        csgrid['lon'][face] = csgrid_list[face]['lon']
        csgrid['lat_b'][face] = csgrid_list[face]['lat_b']
        csgrid['lon_b'][face] = csgrid_list[face]['lon_b']
    return [csgrid, csgrid_list]
def calc_rectilinear_lon_edge(lon_stride, center_at_180):
    """ Compute longitude edge vector for a rectilinear grid.
    Parameters
    ----------
    lon_stride: float
        Stride length in degrees. For example, for a standard GEOS-Chem Classic
        4x5 grid, lon_stride would be 5.
    center_at_180: bool
        Whether or not the grid should have a cell center at 180 degrees (i.e.
        on the date line). If true, the first grid cell is centered on the date
        line; if false, the first grid edge is on the date line.
    Returns
    -------
    Longitudes of cell edges in degrees East.
    Notes
    -----
    All values are forced to be between [-180,180]. For a grid with N cells in
    each band, N+1 edges will be returned, with the first and last value being
    duplicates.
    Examples
    --------
    >>> from gcpy.grid.horiz import calc_rectilinear_lon_edge
    >>> calc_rectilinear_lon_edge(5.0,True)
    np.array([177.5,-177.5,-172.5,...,177.5])
    See Also
    --------
    [NONE]
    """
    # np.round returns a float; np.linspace requires an integer `num`
    # (a float raises TypeError on modern NumPy), so cast explicitly.
    n_lon = int(np.round(360.0 / lon_stride))
    lon_edge = np.linspace(-180.0, 180.0, num=n_lon + 1)
    if center_at_180:
        # Shift edges half a cell west so a cell center lands on the date line
        lon_edge = lon_edge - (lon_stride / 2.0)
    # Wrap values back into [-180, 180]
    lon_edge[lon_edge < -180.0] = lon_edge[lon_edge < -180.0] + 360.0
    lon_edge[lon_edge > 180.0] = lon_edge[lon_edge > 180.0] - 360.0
    return lon_edge
def calc_rectilinear_lat_edge(lat_stride, half_polar_grid):
    """ Compute latitude edge vector for a rectilinear grid.
    Parameters
    ----------
    lat_stride: float
        Stride length in degrees. For example, for a standard GEOS-Chem Classic
        4x5 grid, lat_stride would be 4.
    half_polar_grid: bool
        Whether or not the grid should be "half-polar" (i.e. bands at poles are
        half the size). In either case the grid will start and end at -/+ 90,
        but when half_polar_grid is True, the first and last bands will have a
        width of 1/2 the normal lat_stride.
    Returns
    -------
    Latitudes of cell edges in degrees North.
    Notes
    -----
    All values are forced to be between [-90,90]. For a grid with N cells in
    each band, N+1 edges will be returned, with the first and last value being
    duplicates.
    Examples
    --------
    >>> from gcpy.grid.horiz import calc_rectilinear_lat_edge
    >>> calc_rectilinear_lat_edge(4.0,True)
    np.array([-90,-88,-84,-80,...,84,88,90])
    See Also
    --------
    [NONE]
    """
    if half_polar_grid:
        # Extend half a cell past the poles; the clip below folds it back
        start_pt = 90.0 + (lat_stride / 2.0)
    else:
        start_pt = 90.0
    # np.round returns a float; np.linspace requires an integer `num`
    # (a float raises TypeError on modern NumPy), so cast explicitly.
    lat_edge = np.linspace(-1.0 * start_pt, start_pt,
                           num=1 + int(np.round(2.0 * start_pt / lat_stride)))
    # Force back onto +/- 90
    lat_edge[lat_edge > 90.0] = 90.0
    lat_edge[lat_edge < -90.0] = -90.0
    return lat_edge
def calc_rectilinear_grid_area(lon_edge, lat_edge):
    """ Compute grid cell areas (in m2) for a rectilinear grid.
    Parameters
    ----------
    lon_edge: array-like of float
        Longitudes of cell edges in degrees East (N+1 values for N cells).
    lat_edge: array-like of float
        Latitudes of cell edges in degrees North (M+1 values for M cells).
    Returns
    -------
    grid_area: np.ndarray of shape (n_lat, n_lon)
        Surface area of each cell in m2, computed as
        R^2 * d(sin(lat)) * d(lon in radians).
    Notes
    -----
    Assumes a spherical Earth of radius R_EARTH_m (module-level constant;
    presumably meters given the "Convert from km to m" comment — verify).
    Longitude widths are taken from calc_delta_lon, which handles wrapping
    across the date line.
    Examples
    --------
    >>> area = calc_rectilinear_grid_area([-180, -90, 0, 90, 180],
    ...                                   [-90, 0, 90])
    >>> area.shape
    (2, 4)
    See Also
    --------
    calc_delta_lon
    """
    # Convert from km to m
    _radius_earth_m = R_EARTH_m
    lon_edge = asarray(lon_edge, dtype=float)
    lat_edge = asarray(lat_edge, dtype=float)
    n_lon = (lon_edge.size) - 1
    n_lat = (lat_edge.size) - 1
    grid_area = np.zeros((n_lat, n_lon))
    # 2*pi*R^2: full-band area factor; each band is scaled by its
    # longitude weight and the difference in sin(lat) across the band
    sfc_area_const = 2.0 * np.pi * _radius_earth_m * _radius_earth_m
    # Longitudes loop, so need to be careful
    lon_delta = calc_delta_lon(lon_edge)
    # Convert into weights relative to the total circle
    lon_delta = lon_delta / 360.0
    # Precalculate this
    sin_lat_edge = np.sin(np.deg2rad(lat_edge))
    for i_lat in range(0, n_lat):
        sin_diff = sin_lat_edge[i_lat + 1] - sin_lat_edge[i_lat]
        grid_area[i_lat, :] = sin_diff * sfc_area_const * lon_delta
    return grid_area
def calc_delta_lon(lon_edge):
    """ Compute grid cell longitude widths from an edge vector.
    Parameters
    ----------
    lon_edge: array-like of float
        Vector of longitude edges, in degrees East.
    Returns
    -------
    Width of each cell, degrees East (one fewer entry than lon_edge).
    Notes
    -----
    Accounts for looping over the domain: whenever the next edge wraps
    past the date line, 360 degrees are added so widths stay positive.
    """
    edges = asarray(lon_edge)
    n_cells = edges.size - 1
    deltas = np.zeros(n_cells)
    wrap = 0.0
    upper = edges[0]
    for idx in range(n_cells):
        lower = upper
        upper = edges[idx + 1] + wrap
        # Unwrap across the date line until edges are monotonic
        while upper < lower:
            wrap += 360.0
            upper += 360.0
        deltas[idx] = upper - lower
    return deltas
def csgrid_GMAO(res, offset=-10):
    """
    Return cubed-sphere coordinates with GMAO face orientation.

    Parameters
    ----------
    res: cubed-sphere resolution (cells per face edge)
    offset: degrees to rotate the first face edge (default -10)

    This function was originally written by <NAME> and included
    in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
    """
    grid = CSGrid(res, offset=offset)
    # Move the face axis to the front: (face, i, j)
    lon = grid.lon_center.transpose(2, 0, 1)
    lon_b = grid.lon_edge.transpose(2, 0, 1)
    lat = grid.lat_center.transpose(2, 0, 1)
    lat_b = grid.lat_edge.transpose(2, 0, 1)
    # Wrap negative longitudes into [0, 360)
    lon[lon < 0] += 360
    lon_b[lon_b < 0] += 360
    # Reorient each face to match GMAO conventions; operation order matters
    for coords in (lon, lon_b, lat, lat_b):
        for face in (0, 1, 3, 4):
            coords[face] = coords[face].T
        for face in (3, 4):
            coords[face] = np.flip(coords[face], 1)
        for face in (3, 4, 2, 5):
            coords[face] = np.flip(coords[face], 0)
        # swap north & south pole faces
        coords[2], coords[5] = coords[5].copy(), coords[2].copy()
    return {'lon': lon, 'lat': lat, 'lon_b': lon_b, 'lat_b': lat_b}
# 1/sqrt(3): coordinate magnitude of a unit-sphere-inscribed cube corner
_INV_SQRT_3 = 1.0 / np.sqrt(3.0)
# Latitude (radians) of that cube corner on the unit sphere
_ASIN_INV_SQRT_3 = np.arcsin(_INV_SQRT_3)
class CSGrid(object):
    """Generator for cubed-sphere grid geometries.
    CSGrid computes the latitude and longitudes of cell centers and edges
    on a cubed-sphere grid, providing a way to retrieve these geometries
    on-the-fly if your model output data does not include them.
    Attributes
    ----------
    {lon,lat}_center: np.ndarray
        lat/lon coordinates for each cell center along the cubed-sphere mesh
    {lon,lat}_edge: np.ndarray
        lat/lon coordinates for the midpoint of the edges separating each
        element on the cubed-sphere mesh.
    xyz_{center,edge}: np.ndarray
        As above, except coordinates are projected into a 3D cartesian space
        with common origin to the original lat/lon coordinate system, assuming
        a unit sphere.
    This class was originally written by <NAME> and included
    in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
    """

    def __init__(self, c, offset=None):
        """
        Parameters
        ----------
        c: int
            Number edges along each cubed-sphere edge.
            ======= ====================
            C       Lat/Lon Resolution
            ------- --------------------
            24      4 deg x 5 deg
            48,45   2 deg x 2.5 deg
            96,90   1 deg x 1.25 deg
            192,180 0.5 deg x 0.625 deg
            384,360 0.25 deg x 0.3125 deg
            720     0.125 deg x 0.15625 deg
        offset: float (optional)
            Degrees to offset the first faces' edge in the latitudinal
            direction. If not passed, then the western edge of the first face
            will align with the prime meridian.
        This function was originally written by <NAME> and included
        in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
        """
        self.c = c
        # Angular spacing (radians) between edges along a face boundary
        self.delta_y = 2. * _ASIN_INV_SQRT_3 / c
        self.nx = self.ny = c + 1
        self.offset = offset
        self._initialize()

    def _initialize(self):
        """Compute and cache all edge/center coordinates for the 6 faces.

        Builds the first face analytically, mirrors/rotates it onto the
        remaining five faces, then derives cell centroids from the edges.
        """
        c = self.c
        nx, ny = self.nx, self.ny
        # lambda = longitude, theta = latitude (radians) for face 0 edges
        lambda_rad = np.zeros((nx, ny))
        lambda_rad[0, :] = 3. * np.pi / 4.  # West edge
        lambda_rad[-1, :] = 5. * np.pi / 4.  # East edge
        theta_rad = np.zeros((nx, ny))
        theta_rad[0, :] = -_ASIN_INV_SQRT_3 + \
            (self.delta_y * np.arange(c + 1))  # West edge
        theta_rad[-1, :] = theta_rad[0, :]  # East edge
        # Cache the reflection points - our upper-left and lower-right corners
        lonMir1, lonMir2 = lambda_rad[0, 0], lambda_rad[-1, -1]
        latMir1, latMir2 = theta_rad[0, 0], theta_rad[-1, -1]
        xyzMir1 = latlon_to_cartesian(lonMir1, latMir1)
        xyzMir2 = latlon_to_cartesian(lonMir2, latMir2)
        # Unit normal of the mirror plane through the two corners
        xyzCross = np.cross(xyzMir1, xyzMir2)
        norm = np.sqrt(np.sum(xyzCross**2))
        xyzCross /= norm
        for i in range(1, c):
            # Reflect each west-edge point across the mirror plane to get
            # the north/south edges
            lonRef, latRef = lambda_rad[0, i], theta_rad[0, i]
            xyzRef = np.asarray(latlon_to_cartesian(lonRef, latRef, ))
            xyzDot = np.sum(xyzCross * xyzRef)
            xyzImg = xyzRef - (2. * xyzDot * xyzCross)
            xsImg, ysImg, zsImg = xyzImg
            lonImg, latImg = cartesian_to_latlon(xsImg, ysImg, zsImg)
            lambda_rad[i, 0] = lonImg
            lambda_rad[i, -1] = lonImg
            theta_rad[i, 0] = latImg
            theta_rad[i, -1] = -latImg
        # pp holds cartesian (x, y, z) for every edge intersection
        pp = np.zeros([3, c + 1, c + 1])
        # Set the four corners
        for i, j in product([0, -1], [0, -1]):
            pp[:, i, j] = latlon_to_cartesian(
                lambda_rad[i, j], theta_rad[i, j])
        # Map the edges on the sphere back to the cube.
        # Note that all intersections are at x = -rsq3
        for ij in range(1, c + 1):
            pp[:, 0, ij] = latlon_to_cartesian(
                lambda_rad[0, ij], theta_rad[0, ij])
            pp[1, 0, ij] = -pp[1, 0, ij] * _INV_SQRT_3 / pp[0, 0, ij]
            pp[2, 0, ij] = -pp[2, 0, ij] * _INV_SQRT_3 / pp[0, 0, ij]
            pp[:, ij, 0] = latlon_to_cartesian(
                lambda_rad[ij, 0], theta_rad[ij, 0])
            pp[1, ij, 0] = -pp[1, ij, 0] * _INV_SQRT_3 / pp[0, ij, 0]
            pp[2, ij, 0] = -pp[2, ij, 0] * _INV_SQRT_3 / pp[0, ij, 0]
        # Map interiors: fill the cube face from its two boundary edges
        pp[0, :, :] = -_INV_SQRT_3
        for i in range(1, c + 1):
            for j in range(1, c + 1):
                # Copy y-z face of the cube along j=1
                pp[1, i, j] = pp[1, i, 0]
                # Copy along i=1
                pp[2, i, j] = pp[2, 0, j]
        _pp = pp.copy()
        llr, ttr = vec_cartesian_to_latlon(_pp[0], _pp[1], _pp[2])
        lambda_rad, theta_rad = llr.copy(), ttr.copy()
        # Make grid symmetrical to i = im/2 + 1
        for j in range(1, c + 1):
            for i in range(1, c + 1):
                lambda_rad[i, j] = lambda_rad[i, 0]
        for j in range(c + 1):
            for i in range(c // 2):
                isymm = c - i
                # Average mirrored pairs so the face is exactly symmetric
                avgPt = 0.5 * (lambda_rad[i, j] - lambda_rad[isymm, j])
                lambda_rad[i, j] = avgPt + np.pi
                lambda_rad[isymm, j] = np.pi - avgPt
                avgPt = 0.5 * (theta_rad[i, j] + theta_rad[isymm, j])
                theta_rad[i, j] = avgPt
                theta_rad[isymm, j] = avgPt
        # Make grid symmetrical to j = im/2 + 1
        for j in range(c // 2):
            jsymm = c - j
            for i in range(1, c + 1):
                avgPt = 0.5 * (lambda_rad[i, j] + lambda_rad[i, jsymm])
                lambda_rad[i, j] = avgPt
                lambda_rad[i, jsymm] = avgPt
                avgPt = 0.5 * (theta_rad[i, j] - theta_rad[i, jsymm])
                theta_rad[i, j] = avgPt
                theta_rad[i, jsymm] = -avgPt
        # Final correction
        lambda_rad -= np.pi
        llr, ttr = lambda_rad.copy(), theta_rad.copy()
        #######################################################################
        # MIRROR GRIDS: rotate face 0 onto the other five faces
        #######################################################################
        new_xgrid = np.zeros((c + 1, c + 1, 6))
        new_ygrid = np.zeros((c + 1, c + 1, 6))
        xgrid = llr.copy()
        ygrid = ttr.copy()
        new_xgrid[..., 0] = xgrid.copy()
        new_ygrid[..., 0] = ygrid.copy()
        # radius = 6370.0e3  # (alternative: Earth radius in meters)
        radius = 1.
        for face in range(1, 6):
            for j in range(c + 1):
                for i in range(c + 1):
                    x = xgrid[i, j]
                    y = ygrid[i, j]
                    z = radius
                    if face == 1:
                        # Rotate about z only
                        new_xyz = rotate_sphere_3D(x, y, z, -np.pi / 2., 'z')
                    elif face == 2:
                        # Rotate about z, then x
                        temp_xyz = rotate_sphere_3D(x, y, z, -np.pi / 2., 'z')
                        x, y, z = temp_xyz[:]
                        new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'x')
                    elif face == 3:
                        temp_xyz = rotate_sphere_3D(x, y, z, np.pi, 'z')
                        x, y, z = temp_xyz[:]
                        new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'x')
                        # Special-case for odd resolutions on the center row;
                        # the print looks like leftover debug output — verify
                        if ((c % 2) != 0) and (j == c // 2 - 1):
                            print(i, j, face)
                            new_xyz = (np.pi, *new_xyz)
                    elif face == 4:
                        temp_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'z')
                        x, y, z = temp_xyz[:]
                        new_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'y')
                    elif face == 5:
                        temp_xyz = rotate_sphere_3D(x, y, z, np.pi / 2., 'y')
                        x, y, z = temp_xyz[:]
                        new_xyz = rotate_sphere_3D(x, y, z, 0., 'z')
                    new_x, new_y, _ = new_xyz
                    new_xgrid[i, j, face] = new_x
                    new_ygrid[i, j, face] = new_y
        lon_edge, lat_edge = new_xgrid.copy(), new_ygrid.copy()
        #######################################################################
        # CLEANUP GRID: wrap longitudes to [0, 2*pi) and snap tiny values to 0
        #######################################################################
        for i, j, f in product(range(c + 1), range(c + 1), range(6)):
            new_lon = lon_edge[i, j, f]
            if new_lon < 0:
                new_lon += 2 * np.pi
            if np.abs(new_lon) < 1e-10:
                new_lon = 0.
            lon_edge[i, j, f] = new_lon
            if np.abs(lat_edge[i, j, f]) < 1e-10:
                lat_edge[i, j, f] = 0.
        lon_edge_deg = np.rad2deg(lon_edge)
        lat_edge_deg = np.rad2deg(lat_edge)
        #######################################################################
        # COMPUTE CELL CENTROIDS from the mean of the four corner vectors
        #######################################################################
        lon_ctr = np.zeros((c, c, 6))
        lat_ctr = np.zeros((c, c, 6))
        xyz_ctr = np.zeros((3, c, c, 6))
        xyz_edge = np.zeros((3, c + 1, c + 1, 6))
        for f in range(6):
            for i in range(c):
                last_x = (i == (c - 1))
                for j in range(c):
                    last_y = (j == (c - 1))
                    # Get the four corners
                    lat_corner = [
                        lat_edge[i, j, f],
                        lat_edge[i + 1, j, f],
                        lat_edge[i + 1, j + 1, f],
                        lat_edge[i, j + 1, f]]
                    lon_corner = [
                        lon_edge[i, j, f],
                        lon_edge[i + 1, j, f],
                        lon_edge[i + 1, j + 1, f],
                        lon_edge[i, j + 1, f]]
                    # Convert from lat-lon back to cartesian
                    xyz_corner = np.asarray(
                        vec_latlon_to_cartesian(
                            lon_corner, lat_corner))
                    # Store the edge information
                    xyz_edge[:, i, j, f] = xyz_corner[:, 0]
                    if last_x:
                        xyz_edge[:, i + 1, j, f] = xyz_corner[:, 1]
                    if last_x or last_y:
                        xyz_edge[:, i + 1, j + 1, f] = xyz_corner[:, 2]
                    if last_y:
                        xyz_edge[:, i, j + 1, f] = xyz_corner[:, 3]
                    # Centroid = normalized mean of the four corner vectors
                    e_mid = np.sum(xyz_corner, axis=1)
                    e_abs = np.sqrt(np.sum(e_mid * e_mid))
                    if e_abs > 0:
                        e_mid = e_mid / e_abs
                    xyz_ctr[:, i, j, f] = e_mid
                    _lon, _lat = cartesian_to_latlon(*e_mid)
                    lon_ctr[i, j, f] = _lon
                    lat_ctr[i, j, f] = _lat
        lon_ctr_deg = np.rad2deg(lon_ctr)
        lat_ctr_deg = np.rad2deg(lat_ctr)
        if self.offset is not None:
            lon_edge_deg += self.offset
            lon_ctr_deg += self.offset
        #######################################################################
        # CACHE results on the instance
        #######################################################################
        self.lon_center = lon_ctr_deg
        self.lat_center = lat_ctr_deg
        self.lon_edge = lon_edge_deg
        self.lat_edge = lat_edge_deg
        self.xyz_center = xyz_ctr
        self.xyz_edge = xyz_edge
def latlon_to_cartesian(lon, lat):
    """ Convert (lon, lat) in radians on the unit sphere to a 3-D cartesian
    vector pointing from the sphere's center to its surface.
    This function was originally written by <NAME> and included
    in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
    """
    cos_lat = np.cos(lat)
    return cos_lat * np.cos(lon), cos_lat * np.sin(lon), np.sin(lat)


vec_latlon_to_cartesian = np.vectorize(latlon_to_cartesian)
def cartesian_to_latlon(x, y, z, ret_xyz=False):
    """ Convert a cartesian coordinate to (lon, lat) in radians, with lon in
    [0, 2*pi). Optionally also return the normalized cartesian vector.
    This function was originally written by <NAME> and included
    in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
    """
    xyz = np.array([x, y, z])
    # Normalize to the unit sphere before extracting angles
    xyz /= np.sqrt(np.sum(xyz * xyz, axis=0))
    x, y, z = xyz
    if (np.abs(x) + np.abs(y)) < 1e-20:
        # On the polar axis the longitude is undefined; use 0 by convention
        lon = 0.
    else:
        lon = np.arctan2(y, x)
        if lon < 0.:
            lon += 2 * np.pi
    lat = np.arcsin(z)
    return (lon, lat, xyz) if ret_xyz else (lon, lat)


vec_cartesian_to_latlon = np.vectorize(cartesian_to_latlon)
def spherical_to_cartesian(theta, phi, r=1):
    """ Convert spherical coordinates (theta, phi[, r]) to cartesian, with the
    origin at the center of the original spherical coordinate system.
    This function was originally written by <NAME> and included
    in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
    """
    r_cos_phi = r * np.cos(phi)
    return r_cos_phi * np.cos(theta), r_cos_phi * np.sin(theta), r * np.sin(phi)


vec_spherical_to_cartesian = np.vectorize(spherical_to_cartesian)
def cartesian_to_spherical(x, y, z):
    """ Convert cartesian coordinates to spherical form (theta, phi, r), with
    the origin remaining at the center of the original coordinate system.
    This function was originally written by <NAME> and included
    in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
    """
    radius = np.sqrt(x**2 + y**2 + z**2)
    # theta: azimuth in the x-y plane; phi: elevation above that plane
    azimuth = np.arctan2(y, x)
    elevation = np.arctan2(z, np.sqrt(x**2 + y**2))
    return azimuth, elevation, radius


vec_cartesian_to_spherical = np.vectorize(cartesian_to_spherical)
def rotate_sphere_3D(theta, phi, r, rot_ang, rot_axis='x'):
    """ Rotate a spherical coordinate (theta, phi[, r]) by rot_ang radians
    about the axis named by rot_axis ('x', 'y', or 'z').
    The rotation is done by projecting to cartesian coordinates, applying a
    solid-body rotation around the requested axis, and converting back.
    This function was originally written by <NAME> and included
    in package cubedsphere: https://github.com/JiaweiZhuang/cubedsphere
    """
    cos_a = np.cos(rot_ang)
    sin_a = np.sin(rot_ang)
    x, y, z = spherical_to_cartesian(theta, phi, r)
    if rot_axis == 'x':
        rotated = (x,
                   cos_a * y + sin_a * z,
                   -sin_a * y + cos_a * z)
    elif rot_axis == 'y':
        rotated = (cos_a * x - sin_a * z,
                   y,
                   sin_a * x + cos_a * z)
    elif rot_axis == 'z':
        rotated = (cos_a * x + sin_a * y,
                   -sin_a * x + cos_a * y,
                   z)
    return cartesian_to_spherical(*rotated)
| StarcoderdataPython |
185782 | <reponame>earth-chris/elapid
"""Backend helper and convenience functions."""
import gzip
import multiprocessing as mp
import os
import pickle
import sys
from typing import Any, Callable, Dict, Iterable, Tuple, Union
import numpy as np
import pandas as pd
import rasterio as rio
# Number of CPU cores available for parallel work
n_cpus = mp.cpu_count()

# Default Maxent hyperparameters (consumed elsewhere in the package)
MAXENT_DEFAULTS = {
    "clamp": True,
    "beta_multiplier": 1.0,
    "beta_hinge": 1.0,
    "beta_lqp": 1.0,
    "beta_threshold": 1.0,
    "beta_categorical": 1.0,
    "feature_types": ["linear", "hinge", "product"],
    "n_hinge_features": 30,
    "n_threshold_features": 20,
    "scorer": "roc_auc",
    "tau": 0.5,
    "tolerance": 1e-7,
    "use_lambdas": "last",
}

# Type aliases for numeric scalars and tabular/array inputs
# NOTE(review): `np.array` is a factory function, not a type —
# `np.ndarray` was likely intended here; verify before changing.
Number = Union[int, float]
ArrayLike = Union[np.array, pd.DataFrame]
class NoDataException(Exception):
    """Exception for no-data conditions; not raised within this module."""
    pass
def repeat_array(x: np.array, length: int = 1, axis: int = 0) -> np.ndarray:
    """Repeats a 1D numpy array along an axis to an arbitrary length
    Args:
        x: the n-dimensional array to repeat
        length: the number of times to repeat the array
        axis: the axis along which to repeat the array (valid values include 0 to n+1)
    Returns:
        An n+1 dimensional numpy array
    """
    # Insert a new axis, then tile the data `length` times along it
    expanded = np.expand_dims(x, axis=axis)
    return expanded.repeat(length, axis=axis)
def load_sample_data(name: str = "bradypus") -> Tuple[pd.DataFrame, pd.DataFrame]:
    """Loads example species presence/background and covariate data.
    Args:
        name: the sample dataset to load. options currently include ["bradypus"], from the R 'maxnet' package
    Returns:
        (x, y): a tuple of dataframes containing covariate and response data, respectively
    """
    key = name.lower()
    assert key in ["bradypus"], "Invalid sample data requested"
    # Sample data ships inside the package next to this module
    package_dir = os.path.dirname(os.path.realpath(__file__))
    if key == "bradypus":
        csv_path = os.path.join(package_dir, "data", "bradypus.csv.gz")
        raw = pd.read_csv(csv_path, compression="gzip").astype("int64")
        y = raw["presence"].astype("int8")
        x = raw.drop(columns=["presence"]).astype({"ecoreg": "category"})
        return x, y
def save_object(obj: object, path: str, compress: bool = True) -> None:
    """Writes a python object to disk for later access.
    Args:
        obj: a python object or variable to be saved (e.g., a MaxentModel() instance)
        path: the output file path
        compress: whether to gzip-compress the pickled bytes before writing
            (pair with `compressed=True` in load_object when reading back)
    """
    obj = pickle.dumps(obj)
    if compress:
        obj = gzip.compress(obj)
    with open(path, "wb") as f:
        f.write(obj)
def load_object(path: str, compressed: bool = True) -> Any:
    """Reads a python object into memory that's been saved to disk.
    Args:
        path: the file path of the object to load
        compressed: flag to specify whether the file was compressed prior to saving
    Returns:
        obj: the python object that has been saved (e.g., a MaxentModel() instance)
    """
    with open(path, "rb") as f:
        raw = f.read()
    # Mirror save_object: decompress first when the file was gzipped
    payload = gzip.decompress(raw) if compressed else raw
    return pickle.loads(payload)
def create_output_raster_profile(
    raster_paths: list,
    template_idx: int = 0,
    windowed: bool = True,
    nodata: Number = None,
    compression: str = None,
    driver: str = "GTiff",
    bigtiff: bool = True,
    dtype: str = "float32",
) -> Tuple[Iterable, Dict]:
    """Gets parameters for windowed reading/writing to output rasters.
    Args:
        raster_paths: raster paths of covariates to apply the model to
        template_idx: index of the raster file to use as a template. template_idx=0 sets the first raster as template
        windowed: perform a block-by-block data read. slower, but reduces memory use.
        nodata: output nodata value
        compression: compression type to apply to the output file
        driver: output raster file format (from rasterio.drivers.raster_driver_extensions())
        bigtiff: specify the output file as a bigtiff (for rasters > 2GB)
        dtype: rasterio data type string
    Returns:
        (windows, profile): an iterable and a dictionary for the window reads and the raster profile.
    """
    with rio.open(raster_paths[template_idx]) as src:
        if windowed:
            # Iterate the template's native block layout
            windows = src.block_windows()
        else:
            # Single window spanning the full raster, wrapped in the same
            # (index, window) iterator shape block_windows() produces
            idx = (0, 0)
            window = rio.windows.Window(0, 0, src.width, src.height)
            windows = iter([(idx, window)])
        # Start from the template's profile, then override output options
        dst_profile = src.profile
        dst_profile.update(
            count=1,
            dtype=dtype,
            nodata=nodata,
            compress=compression,
            driver=driver,
        )
        if bigtiff and driver == "GTiff":
            dst_profile.update(BIGTIFF="YES")
    return windows, dst_profile
def get_raster_band_indexes(raster_paths: list) -> Tuple[int, list]:
    """Counts the number raster bands to index multi-source, multi-band covariates.
    Args:
        raster_paths: a list of raster paths
    Returns:
        (nbands, band_idx): int and list of the total number of bands and the 0-based start/stop
            band index for each path
    """
    # Cumulative band offsets: band_idx[i] is where file i's bands start
    band_idx = [0]
    for raster_path in raster_paths:
        with rio.open(raster_path) as src:
            band_idx.append(band_idx[-1] + src.count)
    return band_idx[-1], band_idx
def check_raster_alignment(raster_paths: list) -> bool:
    """Checks whether the extent, resolution and projection of multiple rasters match exactly.
    Args:
        raster_paths: a list of raster covariate paths
    Returns:
        whether all rasters align
    """
    # Read the reference geometry from the first raster
    with rio.open(raster_paths[0]) as ref:
        ref_res = ref.res
        ref_bounds = ref.bounds
        ref_transform = ref.transform
    # Every remaining raster must match the reference exactly
    for raster_path in raster_paths[1:]:
        with rio.open(raster_path) as src:
            matches = (
                src.res == ref_res
                and src.bounds == ref_bounds
                and src.transform == ref_transform
            )
        if not matches:
            return False
    return True
def in_notebook() -> bool:
    """Evaluate whether the module is currently running in a jupyter notebook."""
    # ipykernel is only loaded when running under a jupyter kernel
    loaded_modules = sys.modules
    return "ipykernel" in loaded_modules
def get_tqdm() -> Callable:
    """Returns a context-appropriate tqdm progress tracking function.
    Determines the appropriate tqdm based on the user context, as
    behavior changes inside/outside of jupyter notebooks.
    Returns:
        tqdm: the context-specific tqdm module
    """
    # Notebooks render a widget-based progress bar; terminals use text
    if in_notebook():
        from tqdm.notebook import tqdm
        return tqdm
    from tqdm import tqdm
    return tqdm
def n_digits(number: Number) -> int:
    """Counts the number of significant integer digits of a number.
    Args:
        number: the number to evaluate.
    Returns:
        order: number of digits required to represent a number
    """
    # log10 is undefined at zero, which still needs one digit
    if number == 0:
        return 1
    return np.floor(np.log10(number)).astype(int) + 1
def count_raster_bands(raster_paths: list) -> int:
    """Returns the total number of bands from a list of rasters.
    Args:
        raster_paths: List of raster data file paths.
    Returns:
        n_bands: total band count.
    """
    # Sum the per-file band counts across all inputs
    total = 0
    for raster_path in raster_paths:
        with rio.open(raster_path) as src:
            total += src.count
    return total
def make_band_labels(n_bands: int) -> list:
    """Creates a list of band names to assign as dataframe columns.
    Args:
        n_bands: total number of raster bands to create labels for.
    Returns:
        labels: list of column names.
    """
    # Zero-pad indices so labels sort lexicographically ("band_01", ...)
    width = n_digits(n_bands)
    return [
        "band_{band_number:0{n_zeros}d}".format(band_number=band + 1, n_zeros=width)
        for band in range(n_bands)
    ]
| StarcoderdataPython |
102500 | <reponame>Maosef/qb
import json
from os import path
from luigi import LocalTarget, Task, WrapperTask, Parameter
import yaml
from sklearn.model_selection import train_test_split
from qanta.util.io import shell, get_tmp_filename, safe_path, safe_open
from qanta.util.constants import (
DATASET_PREFIX,
DS_VERSION,
QANTA_MAP_REPORT_PATH,
QANTA_MAPPED_DATASET_PATH,
QANTA_SQL_DATASET_PATH,
QANTA_TRAIN_DATASET_PATH,
QANTA_DEV_DATASET_PATH,
QANTA_TEST_DATASET_PATH,
QANTA_TORCH_TRAIN_LOCAL_PATH,
QANTA_TORCH_VAL_LOCAL_PATH,
QANTA_TORCH_DEV_LOCAL_PATH,
GUESSER_TRAIN_FOLD,
GUESSER_DEV_FOLD,
)
from qanta.pipeline.preprocess import WikipediaTitles, WikipediaRawRedirects
from qanta.ingestion.normalization import (
Protobowl,
QuizdbOrg,
merge_datasets,
assign_folds_,
)
from qanta.ingestion.answer_mapping import (
create_answer_map,
write_answer_map,
unmapped_to_mapped_questions,
)
from qanta.ingestion.annotated_mapping import PageAssigner
from qanta.ingestion.preprocess import (
format_qanta_json,
add_sentences_,
questions_to_sqlite,
)
from qanta.ingestion.protobowl import compute_question_player_counts
# Public S3 mirror hosting the QANTA JMLR datasets
S3_HTTP_PREFIX = (
    "https://s3-us-west-2.amazonaws.com/pinafore-us-west-2/qanta-jmlr-datasets/"
)
# Intermediate dataset paths for each pipeline stage
QANTA_UNMAPPED_DATASET_PATH = path.join(
    DATASET_PREFIX, f"qanta.unmapped.{DS_VERSION}.json"
)
QANTA_PREPROCESSED_DATASET_PATH = path.join(
    DATASET_PREFIX, f"qanta.processed.{DS_VERSION}.json"
)
QANTA_FOLDED_DATASET_PATH = path.join(DATASET_PREFIX, f"qanta.folded.{DS_VERSION}.json")
# Outputs of the answer-to-Wikipedia-page mapping stage
ANSWER_MAP_PATH = "data/external/answer_mapping/answer_map.json"
UNBOUND_ANSWER_PATH = "data/external/answer_mapping/unbound_answers.json"
# quizdb.org data dump: snapshot date and derived file names/paths
QDB_DATE = "04182018"
QDB_CATEGORIES = f"quizdb.org-{QDB_DATE}.categories.json"
QDB_SUBCATEGORIES = f"quizdb.org-{QDB_DATE}.subcategories.json"
QDB_TOURNAMENTS = f"quizdb.org-{QDB_DATE}.tournaments.json"
QDB_TOSSUPS = f"quizdb.org-{QDB_DATE}.tossups.json"
QDB_CATEGORIES_PATH = path.join(DATASET_PREFIX, "quizdb", QDB_CATEGORIES)
QDB_SUBCATEGORIES_PATH = path.join(DATASET_PREFIX, "quizdb", QDB_SUBCATEGORIES)
QDB_TOURNAMENTS_PATH = path.join(DATASET_PREFIX, "quizdb", QDB_TOURNAMENTS)
QDB_TOSSUPS_PATH = path.join(DATASET_PREFIX, "quizdb", QDB_TOSSUPS)
# Protobowl tossups and gameplay logs
PROTOBOWL_TOSSUPS = "protobowl-05052017.json"
PROTOBOWL_TOSSUPS_PATH = path.join(DATASET_PREFIX, "protobowl", PROTOBOWL_TOSSUPS)
PROTOBOWL_LOGS = "protobowl-042818.log"
PROTOBOWL_LOGS_PATH = path.join(DATASET_PREFIX, "protobowl", PROTOBOWL_LOGS)
PROTOBOWL_QUESTION_PLAYER_COUNTS = path.join(
    DATASET_PREFIX, "protobowl", "question_player_counts.json"
)
class Download(Task):
    """Luigi task that downloads a single file from `url` to `path`."""
    url = Parameter()  # type: str
    path = Parameter()  # type: str

    def run(self):
        # Fetch into a temp file first so a partial download never lands
        # at the final path (luigi treats the output target as done)
        tmp_file = get_tmp_filename()
        shell(f"wget {self.url} -O {tmp_file}")
        shell(f"mv {tmp_file} {self.path}")
        shell(f"rm -f {tmp_file}")

    def output(self):
        return LocalTarget(self.path)
class DownloadProtobowl(WrapperTask):
    """Download the Protobowl tossups and gameplay logs from S3."""

    def requires(self):
        yield Download(
            url=path.join(S3_HTTP_PREFIX, PROTOBOWL_TOSSUPS),
            path=safe_path(PROTOBOWL_TOSSUPS_PATH),
        )
        yield Download(
            url=path.join(S3_HTTP_PREFIX, PROTOBOWL_LOGS),
            path=safe_path(PROTOBOWL_LOGS_PATH),
        )
class DownloadQuizdbOrg(WrapperTask):
    """Download the quizdb.org dump: categories, subcategories,
    tournaments, and tossups."""

    def requires(self):
        yield Download(
            url=path.join(S3_HTTP_PREFIX, QDB_CATEGORIES),
            path=safe_path(QDB_CATEGORIES_PATH),
        )
        yield Download(
            url=path.join(S3_HTTP_PREFIX, QDB_SUBCATEGORIES),
            path=safe_path(QDB_SUBCATEGORIES_PATH),
        )
        yield Download(
            url=path.join(S3_HTTP_PREFIX, QDB_TOURNAMENTS),
            path=safe_path(QDB_TOURNAMENTS_PATH),
        )
        yield Download(
            url=path.join(S3_HTTP_PREFIX, QDB_TOSSUPS), path=safe_path(QDB_TOSSUPS_PATH)
        )
class DownloadDatasets(WrapperTask):
    """Aggregate task: download all source datasets (Protobowl + quizdb.org)."""

    def requires(self):
        yield DownloadProtobowl()
        yield DownloadQuizdbOrg()
class CreateUnmappedQantaDataset(Task):
    """Parse and merge Protobowl and quizdb.org tossups into one
    unmapped QANTA dataset JSON file."""

    def requires(self):
        yield DownloadDatasets()

    def run(self):
        protobowl_questions = Protobowl.parse_tossups(PROTOBOWL_TOSSUPS_PATH)
        # quizdb tossups reference tournament/category/subcategory IDs,
        # so those lookup tables are parsed first
        quizdb_tournaments = QuizdbOrg.parse_tournaments(QDB_TOURNAMENTS_PATH)
        quizdb_categories = QuizdbOrg.parse_categories(QDB_CATEGORIES_PATH)
        quizdb_subcategories = QuizdbOrg.parse_subcategories(QDB_SUBCATEGORIES_PATH)
        quizdb_questions = QuizdbOrg.parse_tossups(
            quizdb_tournaments,
            quizdb_categories,
            quizdb_subcategories,
            QDB_TOSSUPS_PATH,
        )
        qanta_questions = merge_datasets(protobowl_questions, quizdb_questions)
        with open(safe_path(QANTA_UNMAPPED_DATASET_PATH), "w") as f:
            json.dump(format_qanta_json(qanta_questions, DS_VERSION), f)

    def output(self):
        return LocalTarget(QANTA_UNMAPPED_DATASET_PATH)
class CreateProcessedQantaDataset(Task):
    """Add sentence tokenization to the unmapped dataset, producing the
    processed dataset JSON."""

    def requires(self):
        yield CreateUnmappedQantaDataset()

    def run(self):
        with open(QANTA_UNMAPPED_DATASET_PATH) as f:
            qanta_questions = json.load(f)["questions"]
        # In-place mutation (trailing underscore convention in this codebase)
        add_sentences_(qanta_questions)
        with open(QANTA_PREPROCESSED_DATASET_PATH, "w") as f:
            json.dump(format_qanta_json(qanta_questions, DS_VERSION), f)

    def output(self):
        return LocalTarget(QANTA_PREPROCESSED_DATASET_PATH)
class CreateAnswerMap(Task):
    """Build the automatic answer-string -> Wikipedia-page map from the
    processed questions and Wikipedia titles/redirects."""

    def requires(self):
        yield CreateProcessedQantaDataset()
        yield WikipediaRawRedirects()
        yield WikipediaTitles()

    def run(self):
        with open(QANTA_PREPROCESSED_DATASET_PATH) as f:
            unmapped_qanta_questions = json.load(f)["questions"]
        answer_map, amb_answer_map, unbound_answers, report = create_answer_map(
            unmapped_qanta_questions
        )
        # Persist a diagnostic report of the automatic mapping process
        with safe_open("data/external/answer_mapping/automatic_report.json", "w") as f:
            json.dump(report, f)
        write_answer_map(
            answer_map,
            amb_answer_map,
            unbound_answers,
            ANSWER_MAP_PATH,
            UNBOUND_ANSWER_PATH,
        )

    def output(self):
        return (
            LocalTarget(ANSWER_MAP_PATH),
            LocalTarget(UNBOUND_ANSWER_PATH),
            LocalTarget("data/external/answer_mapping/automatic_report.json"),
        )
class CreateProtobowlQuestionPlayerCounts(Task):
    """Compute per-question player counts from the Protobowl gameplay
    logs (used later for fold assignment)."""

    def requires(self):
        yield DownloadProtobowl()

    def run(self):
        question_player_counts = compute_question_player_counts(PROTOBOWL_LOGS_PATH)
        with open(PROTOBOWL_QUESTION_PLAYER_COUNTS, "w") as f:
            json.dump(question_player_counts, f)

    def output(self):
        return LocalTarget(PROTOBOWL_QUESTION_PLAYER_COUNTS)
class CreateFoldedQantaDataset(Task):
    """Assign train/dev/test folds to the processed questions, informed
    by Protobowl player counts."""

    def requires(self):
        yield CreateProcessedQantaDataset()
        yield CreateProtobowlQuestionPlayerCounts()

    def run(self):
        with open(QANTA_PREPROCESSED_DATASET_PATH) as f:
            qanta_questions = json.load(f)["questions"]
        with open(PROTOBOWL_QUESTION_PLAYER_COUNTS) as f:
            question_player_counts = json.load(f)
        # In-place mutation (trailing underscore convention in this codebase)
        assign_folds_(qanta_questions, question_player_counts)
        with open(QANTA_FOLDED_DATASET_PATH, "w") as f:
            json.dump(format_qanta_json(qanta_questions, DS_VERSION), f)

    def output(self):
        return LocalTarget(QANTA_FOLDED_DATASET_PATH)
class CreateMappedQantaDataset(Task):
    """Attach Wikipedia pages to folded questions using the automatic
    answer map, the curated page assignments, and the unmappable list;
    writes the mapped dataset plus a mapping report."""

    def requires(self):
        yield CreateFoldedQantaDataset()
        yield CreateAnswerMap()
        yield WikipediaTitles()

    def run(self):
        with open(ANSWER_MAP_PATH) as f:
            content = json.load(f)
            answer_map = content["answer_map"]
            ambig_answer_map = content["ambig_answer_map"]
        with open(QANTA_FOLDED_DATASET_PATH) as f:
            qanta_questions = json.load(f)["questions"]
        with open("data/internal/page_assignment/unmappable.yaml") as f:
            # safe_load: yaml.load() without an explicit Loader is deprecated
            # (PyYAML >= 5.1) and can construct arbitrary Python objects;
            # this curated file only needs plain YAML data types.
            unmappable = yaml.safe_load(f)
        page_assigner = PageAssigner()
        # Mutates qanta_questions in place and returns diagnostics
        mapping_report = unmapped_to_mapped_questions(
            qanta_questions, answer_map, ambig_answer_map, unmappable, page_assigner
        )
        with open(QANTA_MAPPED_DATASET_PATH, "w") as f:
            json.dump(format_qanta_json(qanta_questions, DS_VERSION), f)
        with open(QANTA_MAP_REPORT_PATH, "w") as f:
            json.dump(mapping_report, f)

    def output(self):
        return (
            LocalTarget(QANTA_MAPPED_DATASET_PATH),
            LocalTarget(QANTA_MAP_REPORT_PATH),
        )
class GenerateSqliteDB(Task):
    """Luigi task: export the mapped questions into a SQLite database."""
    def requires(self):
        yield CreateMappedQantaDataset()
    def run(self):
        with open(QANTA_MAPPED_DATASET_PATH) as f:
            qanta_questions = json.load(f)["questions"]
        # Build into a temp file first, then move into place so a partially
        # written database never appears at the final path.
        tmp_db = get_tmp_filename()
        questions_to_sqlite(qanta_questions, tmp_db)
        shell(f"mv {tmp_db} {QANTA_SQL_DATASET_PATH}")
    def output(self):
        return LocalTarget(QANTA_SQL_DATASET_PATH)
class FilterAndPartitionQantaDataset(Task):
    """Luigi task: drop unmapped questions and split the rest into train/dev/test files."""
    def requires(self):
        yield CreateMappedQantaDataset()
        # NOTE(review): not read directly in run(); presumably listed so fold
        # assignment is guaranteed fresh before partitioning -- confirm.
        yield CreateProtobowlQuestionPlayerCounts()
    def run(self):
        with open(QANTA_MAPPED_DATASET_PATH) as f:
            # Keep only questions that were successfully mapped to a page.
            questions = [q for q in json.load(f)["questions"] if q["page"] is not None]
        # Split membership is by substring match on the fold name.
        train_questions = [q for q in questions if "train" in q["fold"]]
        dev_questions = [q for q in questions if "dev" in q["fold"]]
        test_questions = [q for q in questions if "test" in q["fold"]]
        with open(QANTA_TRAIN_DATASET_PATH, "w") as f:
            json.dump(format_qanta_json(train_questions, DS_VERSION), f)
        with open(QANTA_DEV_DATASET_PATH, "w") as f:
            json.dump(format_qanta_json(dev_questions, DS_VERSION), f)
        with open(QANTA_TEST_DATASET_PATH, "w") as f:
            json.dump(format_qanta_json(test_questions, DS_VERSION), f)
    def output(self):
        return [
            LocalTarget(QANTA_TRAIN_DATASET_PATH),
            LocalTarget(QANTA_DEV_DATASET_PATH),
            LocalTarget(QANTA_TEST_DATASET_PATH),
        ]
class TorchTextDataset(Task):
    """Luigi task: write guesser train/val/dev JSON files for torchtext consumption."""
    def requires(self):
        yield FilterAndPartitionQantaDataset()
    def run(self):
        with open(QANTA_TRAIN_DATASET_PATH) as f:
            all_guess_train = [
                q for q in json.load(f)["questions"] if q["fold"] == GUESSER_TRAIN_FOLD
            ]
        # Hold out 10% of guesser-train as validation; fixed seed keeps the
        # split reproducible across runs.
        guess_train, guess_val = train_test_split(
            all_guess_train, random_state=42, train_size=0.9
        )
        with open(QANTA_DEV_DATASET_PATH) as f:
            guess_dev = [
                q for q in json.load(f)["questions"] if q["fold"] == GUESSER_DEV_FOLD
            ]
        with open(QANTA_TORCH_TRAIN_LOCAL_PATH, "w") as f:
            json.dump(format_qanta_json(guess_train, DS_VERSION), f)
        with open(QANTA_TORCH_VAL_LOCAL_PATH, "w") as f:
            json.dump(format_qanta_json(guess_val, DS_VERSION), f)
        with open(QANTA_TORCH_DEV_LOCAL_PATH, "w") as f:
            json.dump(format_qanta_json(guess_dev, DS_VERSION), f)
    def output(self):
        return [
            LocalTarget(QANTA_TORCH_TRAIN_LOCAL_PATH),
            LocalTarget(QANTA_TORCH_VAL_LOCAL_PATH),
            LocalTarget(QANTA_TORCH_DEV_LOCAL_PATH),
        ]
class QantaDataset(WrapperTask):
    """Aggregate task: the qanta dataset is complete once the splits and SQLite DB exist."""
    def requires(self):
        yield FilterAndPartitionQantaDataset()
        yield GenerateSqliteDB()
| StarcoderdataPython |
3242323 | import os
import socket
from OpenSSL import crypto, SSL
# OpenVPN is fairly simple since it works on OpenSSL. The OpenVPN server contains
# a root certificate authority that can sign sub-certificates. The certificates
# have very little or no information on who they belong to besides a filename
# and any required information. Everything else is omitted or blank.
# The client certificate and private key are inserted into the .ovpn file
# which contains some settings as well and the entire thing is then ready for
# the user.
# EasyRSA generates a standard unsigned certificate, certificate request, and private key.
# It then signs the certificate against the CA then dumps the certificate request in the trash.
# The now signed certificate and private key are returned.
# Create a new keypair of specified algorithm and number of bits.
def make_keypair(algorithm=crypto.TYPE_RSA, numbits=2048):
    """Generate and return a fresh private key (default: 2048-bit RSA)."""
    pkey = crypto.PKey()
    pkey.generate_key(algorithm, numbits)
    return pkey
# Creates a certificate signing request (CSR) given the specified subject attributes.
def make_csr(pkey, CN, C=None, ST=None, L=None, O=None, OU=None, emailAddress=None, hashalgorithm='sha256WithRSAEncryption'):
    """Build and self-sign an X509 certificate request for *pkey*.

    Only the subject fields that are truthy are set; the rest are left
    blank, matching how little identity OpenVPN actually needs.
    Returns the signed ``crypto.X509Req``.
    """
    req = crypto.X509Req()
    # BUG FIX: dropped a duplicate req.get_subject() call whose result was discarded.
    subj = req.get_subject()
    if C:
        subj.C = C
    if ST:
        subj.ST = ST
    if L:
        subj.L = L
    if O:
        subj.O = O
    if OU:
        subj.OU = OU
    if CN:
        subj.CN = CN
    if emailAddress:
        subj.emailAddress = emailAddress
    req.set_pubkey(pkey)
    req.sign(pkey, hashalgorithm)
    return req
# Create a certificate authority (if we need one)
def create_ca(CN, C="", ST="", L="", O="", OU="", emailAddress="", hashalgorithm='sha256WithRSAEncryption'):
    """Create a self-signed root CA.

    Returns a ``(cacert, cakey)`` tuple.  The certificate is valid for ten
    years and carries CA:TRUE plus subject/authority key identifiers.
    """
    cakey = make_keypair()
    # BUG FIX: make_csr's parameter is spelled `CN`; the old keyword `cn=CN`
    # raised TypeError.  Also forward the remaining subject fields, which were
    # previously accepted but silently ignored (empty-string defaults are still
    # skipped inside make_csr, so default behaviour is unchanged).
    careq = make_csr(cakey, CN, C=C, ST=ST, L=L, O=O, OU=OU,
                     emailAddress=emailAddress, hashalgorithm=hashalgorithm)
    cacert = crypto.X509()
    cacert.set_serial_number(0)
    cacert.gmtime_adj_notBefore(0)
    cacert.gmtime_adj_notAfter(60*60*24*365*10)  # 10 yrs - hard to beat this kind of cert!
    cacert.set_issuer(careq.get_subject())
    cacert.set_subject(careq.get_subject())
    cacert.set_pubkey(careq.get_pubkey())
    cacert.set_version(2)  # zero-based: X.509 v3
    # Set the extensions in two passes: the authority key identifier can only
    # be computed once the subject key identifier exists on the certificate.
    cacert.add_extensions([
        crypto.X509Extension('basicConstraints', True, 'CA:TRUE'),
        crypto.X509Extension('subjectKeyIdentifier', True, 'hash', subject=cacert)
    ])
    # ... now we can set the authority key since it depends on the subject key
    cacert.add_extensions([
        crypto.X509Extension('authorityKeyIdentifier', False, 'issuer:always, keyid:always', issuer=cacert, subject=cacert)
    ])
    cacert.sign(cakey, hashalgorithm)
    return (cacert, cakey)
# Create a new slave cert.
def create_slave_certificate(csr, cakey, cacert, serial):
    """Sign *csr* with the CA key and return a ten-year client certificate."""
    certificate = crypto.X509()
    certificate.set_serial_number(serial)
    certificate.gmtime_adj_notBefore(0)
    certificate.gmtime_adj_notAfter(60*60*24*365*10)  # 10 yrs - hard to beat this kind of cert!
    certificate.set_issuer(cacert.get_subject())
    certificate.set_subject(csr.get_subject())
    certificate.set_pubkey(csr.get_pubkey())
    certificate.set_version(2)
    certificate.add_extensions([
        crypto.X509Extension('basicConstraints', False, 'CA:FALSE'),
        crypto.X509Extension('subjectKeyIdentifier', False, 'hash', subject=certificate),
        crypto.X509Extension('authorityKeyIdentifier', False, 'keyid:always,issuer:always', subject=cacert, issuer=cacert),
    ])
    certificate.sign(cakey, 'sha256WithRSAEncryption')
    return certificate
# Dumps content to a string
def dump_file_in_mem(material, format=crypto.FILETYPE_PEM):
    """Serialize a pyOpenSSL cert/key/CSR object to PEM (or the given format)."""
    if isinstance(material, crypto.X509):
        return crypto.dump_certificate(format, material)
    if isinstance(material, crypto.PKey):
        return crypto.dump_privatekey(format, material)
    if isinstance(material, crypto.X509Req):
        return crypto.dump_certificate_request(format, material)
    raise Exception("Don't know how to dump content type to file: %s (%r)" % (type(material), material))
# Loads the file into the appropriate openssl object type.
def load_from_file(materialfile, objtype, format=crypto.FILETYPE_PEM):
    """Read *materialfile* and parse it as the pyOpenSSL type *objtype*."""
    dispatch = (
        (crypto.X509, crypto.load_certificate),
        (crypto.X509Req, crypto.load_certificate_request),
        (crypto.PKey, crypto.load_privatekey),
    )
    load_func = None
    for klass, loader in dispatch:
        if objtype is klass:
            load_func = loader
            break
    if load_func is None:
        raise Exception("Unsupported material type: %s" % (objtype,))
    with open(materialfile, 'r') as fp:
        contents = fp.read()
        return load_func(format, contents)
def retrieve_key_from_file(keyfile):
    # Convenience wrapper: load a PEM private key from disk.
    return load_from_file(keyfile, crypto.PKey)
def retrieve_csr_from_file(csrfile):
    # Convenience wrapper: load a PEM certificate signing request from disk.
    return load_from_file(csrfile, crypto.X509Req)
def retrieve_cert_from_file(certfile):
    # Convenience wrapper: load a PEM certificate from disk.
    return load_from_file(certfile, crypto.X509)
def make_new_ovpn_file(ca_cert, ca_key, clientname, serial, commonoptspath, filepath):
    """Generate a client key/cert signed by the CA and write a complete .ovpn file.

    :param ca_cert: path to the CA certificate (PEM)
    :param ca_key: path to the CA private key (PEM)
    :param clientname: common name for the new client certificate
    :param serial: serial number for the signed certificate
    :param commonoptspath: file with the shared OpenVPN client options
    :param filepath: destination path for the generated .ovpn
    """
    # Read our common options file first.
    # BUG FIX: use context managers so the handles are closed even when
    # reading/writing raises (the original leaked them on error).
    with open(commonoptspath, 'r') as f:
        common = f.read()
    cacert = retrieve_cert_from_file(ca_cert)
    cakey = retrieve_key_from_file(ca_key)
    # Generate a new private key pair for a new certificate.
    key = make_keypair()
    # Generate a certificate request
    csr = make_csr(key, clientname)
    # Sign the certificate with the new csr
    crt = create_slave_certificate(csr, cakey, cacert, serial)
    # Now we have a successfully signed certificate. We must now
    # create a .ovpn file and then dump it somewhere.
    clientkey = dump_file_in_mem(key)
    clientcert = dump_file_in_mem(crt)
    cacertdump = dump_file_in_mem(cacert)
    # NOTE(review): under Python 3 dump_file_in_mem returns bytes, so %s would
    # embed b'...' reprs; this module targets Python 2 -- confirm before porting.
    ovpn = "%s<ca>\n%s</ca>\n<cert>\n%s</cert>\n<key>\n%s</key>\n" % (common, cacertdump, clientcert, clientkey)
    # Write our file.
    with open(filepath, 'w') as f:
        f.write(ovpn)
if __name__ == "__main__":
    # Demo run: issue client cert "justasictest" (serial 0x0C) against ca.crt/ca.key.
    make_new_ovpn_file("ca.crt", "ca.key", "justasictest", 0x0C, "common.txt", "justastictest.ovpn")
    print("Done")
3222568 | <gh_stars>0
# -*- coding: utf-8 -*-
import numpy as np
from kbcr.util import make_batches
from kbcr.training.data import Data
from typing import Tuple
class Batcher:
    """Pre-computes a shuffled training curriculum spanning several epochs.

    On construction, each epoch's examples are permuted independently and the
    permuted subject/predicate/object/index arrays are concatenated into one
    long curriculum, which ``get_batch`` then slices positionally.
    """

    def __init__(self,
                 data: Data,
                 batch_size: int,
                 nb_epochs: int,
                 random_state: np.random.RandomState) -> None:
        self.data = data
        self.batch_size = batch_size
        self.random_state = random_state

        nb_examples = data.nb_examples
        total = nb_epochs * nb_examples

        # One flat int32 array per triple component, all epochs back to back.
        self.curriculum_Xs = np.zeros(total, dtype=np.int32)
        self.curriculum_Xp = np.zeros(total, dtype=np.int32)
        self.curriculum_Xo = np.zeros(total, dtype=np.int32)
        self.curriculum_Xi = np.zeros(total, dtype=np.int32)

        for epoch_no in range(nb_epochs):
            # Fresh permutation per epoch so every pass sees a new ordering.
            order = self.random_state.permutation(nb_examples)
            lo = epoch_no * nb_examples
            hi = lo + nb_examples
            self.curriculum_Xs[lo:hi] = data.Xs[order]
            self.curriculum_Xp[lo:hi] = data.Xp[order]
            self.curriculum_Xo[lo:hi] = data.Xo[order]
            self.curriculum_Xi[lo:hi] = data.Xi[order]

        self.batches = make_batches(self.curriculum_Xs.shape[0], batch_size)
        self.nb_batches = len(self.batches)

    def get_batch(self, batch_start: int, batch_end: int) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray]:
        """Return the (Xp, Xs, Xo, Xi) positive-example slices for one batch."""
        window = slice(batch_start, batch_end)
        return (self.curriculum_Xp[window],
                self.curriculum_Xs[window],
                self.curriculum_Xo[window],
                self.curriculum_Xi[window])
| StarcoderdataPython |
from os import path
#import argparse
#from collections import namedtuple
from futen import get_netlocs, execute
from Timer import Timer
from retic import Int, Void
#bg: all test files should be in current directory when tests run
def main(n:Int)->Void:
    """Benchmark driver: run the futen netloc extraction and template rendering n times each.

    NOTE(review): `Void` is referenced in the return annotation but only `Int`
    is imported at the top of the file -- evaluated annotations would raise
    NameError; confirm how retic handles this.
    """
    # Phase 1: parse the ssh config n times and check the extracted netlocs.
    testfile = path.join(path.dirname(__file__), 'ssh.config.dat')
    expect = {'web': '2200', 'app': '2201', 'db': '2202'}
    with open(testfile) as fd:
        lines = fd.readlines()
    for i in range(n):
        actual = get_netlocs(lines)
        if expect != actual:
            raise AssertionError("'%s' is not equal to '%s'" % (expect, actual))
    # Phase 2: render the inventory template n times and compare to the expected file.
    testfile = path.join(path.dirname(__file__), 'ssh.config.dat')
    template = path.join(path.dirname(__file__), 'inventory_template.dat')
    expectfile = path.join(path.dirname(__file__), 'inventory_expect.dat')
    with open(expectfile) as fd:
        expect = ''.join(fd.readlines()).strip()
    with open(testfile) as fd:
        lines = fd.readlines()
    for i in range(n):
        result = execute(lines, template)
        if result != expect:
            raise ValueError("'%s' is not equal to '%s'" % (expect, result))
    return
# Benchmark entry point: time 1900 iterations of the workload.
t = Timer()
with t:
    main(1900)
| StarcoderdataPython |
1757381 | <reponame>MioYvo/app_manager
# coding=utf-8
# __author__ = 'Mio'
from os import getenv
from pathlib import Path
from sanic import Sanic
from gino.ext.sanic import Gino
# -------------------- Application --------------------
app = Sanic()
APP_PORT = getenv("APP_PORT", "8888")
# -------------------- Database --------------------
# Connection settings come from the environment, with local-dev defaults.
DB_USER = getenv("DB_USER", "postgres")
DB_PASS = getenv("DB_PASS", "<PASSWORD>")
DB_HOST = getenv("DB_HOST", "localhost")
DB_PORT = int(getenv("DB_PORT", "5432"))
DB_NAME = getenv("DB_NAME", "Manager")
# asyncpg DSN consumed by Gino.
DB_DSN = f"postgresql+asyncpg://{DB_USER}:{DB_PASS}@{DB_HOST}:{DB_PORT}/{DB_NAME}"
db = Gino()
# -------------------- ORM --------------------
# Platform identifiers stored on app records.
PLAT_ANDROID = "android"
PLAT_IOS = "ios"
# -------------------- Path --------------------
# All paths are derived from this settings file's location.
SETTINGS_FILE_PATH = Path(__file__).absolute()
SYSPATH = SETTINGS_FILE_PATH.parent.parent
APP_PATH = SETTINGS_FILE_PATH.parent
TEMPLATE_PATH = APP_PATH / "templates"
STATIC_PATH = APP_PATH / "static"
UPLOADS_PATH = APP_PATH / "uploads"
185696 | from .translated_object import TranslatedObject
from .base_translator import BaseTranslator
# Default HTTP headers for translate requests; the User-Agent mimics the
# Android Google Translate app.
BASE_HEADERS: dict = {
    "User-Agent": "GoogleTranslate/6.6.1.RC09.302039986 (Linux; U; Android 9; Redmi Note 8)",
}
| StarcoderdataPython |
3272339 | <gh_stars>0
"""
PEP8 - Python Enhancement Proposal
São propostas de melhorias para a linguagem Python
A ideia de PEp8 é que possamos escrever códigos de forma Pythonica.
[1] - Utilize Camel Case para nomes de classes:
class Calculadora:
pass
class CalculadoraCientifica:
pass
[2] - Utilizae nomes em minúsculo, separados por underline para funções ou variáveis;
def soma():
pass
def soma_dois():
pass
numero = 4
numero_impar = 3
[3] - Ultilize 4 espaços para identação: (Use o Tab apenas se o mesmo for configurado para 4 espaços)
if "a" in "banana":
print("Tem")
[4] - Linhas em branco
-Separar funções e definições de classe com duas linhas em duas linhas em branco;
-Métodos dentro de uma classe devem ser separados com uma única linha em branco;
[5] - Imports
-Imports devem ser sempre feitos em linhas separadas;
#Maneiras corretas de fazer imports;
import math
import os
from math import sqrt, acos, tanf
#Caso haja muitos imports de um mesmo pacote, recomenda-se fazer:
from math import (
sqrt,
cos,
pow,
radians,
log
)
# imports devem ser colocados no topo do arquivo, logo depois de qualer comentário ou docstrings e antes de
constantes ou variáveis globais.
[6] - Não faça espaços desnecessário em expressões e instruções:
funcao(algo[1]), {outro: 2})
dict["chave"] = lista[indice]
[7] - Termine sempre uma instruçõo com uma nova linha
"""
| StarcoderdataPython |
3335447 | <filename>pclub/account/migrations/0001_initial.py
# Generated by Django 3.0.7 on 2020-06-23 03:04
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial schema migration: creates the custom ``Account`` user model table."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Account',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('password', models.CharField(max_length=128, verbose_name='password')),
                ('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
                ('username', models.CharField(max_length=220, unique=True)),
                ('email', models.EmailField(max_length=254, unique=True)),
                ('fullname', models.CharField(blank=True, max_length=220, null=True)),
                ('cf_username', models.CharField(blank=True, max_length=220, null=True, unique=True)),
                ('is_active', models.BooleanField(default=True)),
                ('staff', models.BooleanField(default=False)),
                ('is_superuser', models.BooleanField(default=False)),
                ('is_activated', models.BooleanField(default=False)),
            ],
            options={
                'abstract': False,
            },
        ),
    ]
| StarcoderdataPython |
182002 | <gh_stars>0
###############################################################################
# Author: CallMeCCLemon
# Date: 2019
# Copyright: 2019 <NAME> (@CallMeCCLemon) - Modified BSD License
###############################################################################
import os
from PythonApp.pillar.PillarMessage import PillarMessage
from PythonApp.util.Config import Config
class MessageClient:
    """Sends pillar messages to the local named pipe (FIFO) consumer."""

    def __init__(self):
        # The pipe path comes from the pillar section of the shared config.
        self.config = Config()
        self.pipe_name = self.config.get_pillar_config_value("PipeName")

    def send_message_to_queue(self, message: PillarMessage):
        """Serialize *message* and write it to the named pipe.

        BUG FIX: close the file descriptor in a ``finally`` block so it is
        released even when ``os.write`` raises (the original leaked it).
        """
        print("New message: {}".format(message))
        pipe = os.open(self.pipe_name, os.O_WRONLY)
        try:
            os.write(pipe, message.to_bytes())
        finally:
            os.close(pipe)
| StarcoderdataPython |
1645301 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
#
# Author : <NAME>
# E-mail : <EMAIL>
# Date : 15/12/21 12:16:48
# Desc :
#
import sae.storage
def Courses(Content):
    """Look up a student's timetable for one weekday.

    ``Content`` is expected to be "<student-number> <day>", where day is
    1-5 (Monday-Friday).  The timetable ``<student-number>.txt`` is fetched
    from SAE storage and every line mentioning the requested weekday is
    returned, separated by rules.  Any failure (bad input, missing data,
    storage errors) yields a friendly apology message instead of raising.
    """
    kebiao = Content.split()
    try:
        numxh = int(kebiao[0])
        shijian = str(kebiao[1])
        s = sae.storage.Client()
        s.list_domain()
        ob = s.get('201403kebiao', str(numxh) + '.txt')
        ob = ob.data
        hello1 = ob.splitlines()
        # Map the requested day index onto the weekday marker used in the file.
        day_markers = {'1': '周一', '2': '周二', '3': '周三', '4': '周四', '5': '周五'}
        marker = day_markers.get(shijian)
        Content = ''
        for shi in hello1:
            if marker and marker in shi:
                Content += shi
                Content += '\n++++++++++++\n'
        if len(Content) == 0:
            return '好羡慕T_T!!!!!!\n你!今!天!居!然!没!课!'
        # BUG FIX: the original fell off the end and returned None when the
        # student *did* have classes; return the collected timetable.
        return Content
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit propagate.
        return '非常抱歉!您的数据没有录入或者学号不存在!\n\n欢迎加开发者帐号QQ:563125575^_^\nAnyway,谢谢支持!\n输入help查看具体功能'
| StarcoderdataPython |
3346343 | import bz2
import codecs
import gzip
import io
import lzma
import typing
import urllib.request
import warnings
from http.client import HTTPResponse
from typing import BinaryIO, ByteString, Dict, Optional, Union, cast
import chardet
# File-format magic numbers used to sniff the compression type from a stream prefix.
MAGIC_GZIP = bytearray([0x1F, 0x8B])
MAGIC_LZMA = bytearray([0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00, 0x00])
MAGIC_BZIP2 = bytearray([0x42, 0x5A, 0x68])
class BufferedReader(io.BufferedReader):
    """A patch for `io.BufferedReader` supporting `http.client.HTTPResponse`."""
    def read(self, size: Optional[int] = -1) -> bytes:
        # HTTPResponse raises ValueError when read after close; translate a
        # read-on-closed-stream into EOF (b"") instead of propagating.
        try:
            return super(BufferedReader, self).read(size)
        except ValueError:
            if typing.cast(io.BufferedReader, self.closed):
                return b""
            raise
class EncodedFile(codecs.StreamRecoder):
    """Stream recoder that transcodes *file* from file_encoding to data_encoding.

    Also normalizes CRLF line endings to LF on read.
    """
    def __init__(
        self,
        file: BinaryIO,
        data_encoding: str,
        file_encoding: Optional[str] = None,
        errors: str = "strict",
    ):
        if file_encoding is None:
            file_encoding = data_encoding
        data_info = codecs.lookup(data_encoding)
        file_info = codecs.lookup(file_encoding)
        super().__init__(
            file,
            data_info.encode,
            data_info.decode,
            file_info.streamreader,
            file_info.streamwriter,
            errors,
        )
        # Add attributes to simplify introspection
        self.data_encoding = data_encoding
        self.file_encoding = file_encoding
    def read(self, size: Optional[int] = -1) -> bytes:
        # NOTE(review): a CRLF split across two read() chunks would not be
        # normalized -- confirm callers always read whole lines or everything.
        chunk = super().read(-1 if size is None else size)
        return chunk.replace(b"\r\n", b"\n")
    def readinto(self, buffer: ByteString) -> int:
        # Only fill half the buffer -- presumably headroom because recoding can
        # expand the data; confirm.
        chunk = self.read(len(buffer) // 2)
        typing.cast(bytearray, buffer)[: len(chunk)] = chunk
        return len(chunk)
def get_handle(path: str, timeout: int = 2) -> BinaryIO:
    """Given a path or URL, get a binary handle for that path.

    A local filesystem open is tried first; on failure *path* is treated as a
    URL and fetched over HTTP.  Gzip/deflate-encoded HTTP responses are
    transparently wrapped for decompression.

    :raises ValueError: if the URL answers with a non-200 status.
    """
    try:
        # Unbuffered so callers can layer their own BufferedReader on top.
        return open(path, "rb", buffering=0)
    except Exception:
        # Not an openable file -- fall through and treat `path` as a URL.
        # (Dropped the unused `as err` binding from the original.)
        headers = {"Keep-Alive": f"timeout={timeout}"}
        request = urllib.request.Request(path, headers=headers)
        res: HTTPResponse = urllib.request.urlopen(request, timeout=timeout)
        if not res.status == 200:
            raise ValueError(f"could not open {path}: {res.status} ({res.msg})")
        if res.headers.get("Content-Encoding") in {"gzip", "deflate"}:
            f = gzip.GzipFile(filename=res.geturl(), mode="rb", fileobj=res)
            return typing.cast(BinaryIO, f)
        return res
def get_location(reader: BinaryIO) -> Optional[str]:
    """Given a binary file-handle, try to extract the path/URL to the file."""
    # Try each known attribute in turn; empty strings fall through just like
    # the original `or` chain did.
    name = getattr(reader, "name", None)
    if name:
        return name
    url = getattr(reader, "url", None)
    if url:
        return url
    return getattr(reader, "geturl", lambda: None)()
def decompress(
    reader: io.RawIOBase, path: Optional[str] = None, encoding: Optional[str] = None
) -> BinaryIO:
    """Given a binary file-handle, decompress it if it is compressed.

    Compression is sniffed from the stream's magic bytes (gzip/xz/bzip2);
    text encoding is then detected with chardet unless *encoding* is given,
    and non-UTF-8 streams are wrapped in an EncodedFile recoding to UTF-8.
    The *path* parameter is currently unused.
    """
    buffered = BufferedReader(reader)
    # Decompress the stream if it is compressed (peek() does not consume).
    if buffered.peek().startswith(MAGIC_GZIP):
        decompressed = BufferedReader(
            typing.cast(
                io.RawIOBase,
                gzip.GzipFile(mode="rb", fileobj=typing.cast(BinaryIO, buffered)),
            )
        )
    elif buffered.peek().startswith(MAGIC_LZMA):
        decompressed = BufferedReader(
            typing.cast(
                io.RawIOBase, lzma.LZMAFile(typing.cast(BinaryIO, buffered), mode="rb")
            )
        )
    elif buffered.peek().startswith(MAGIC_BZIP2):
        decompressed = BufferedReader(
            typing.cast(
                io.RawIOBase, bz2.BZ2File(typing.cast(BinaryIO, buffered), mode="rb")
            )
        )
    else:
        decompressed = buffered
    # Attempt to detect the encoding and decode the stream
    det: Dict[str, Union[str, float]] = chardet.detect(decompressed.peek())
    # A caller-supplied encoding is trusted fully (confidence 1.0, no warning).
    confidence = 1.0 if encoding is not None else cast(float, det["confidence"])
    encoding = encoding if encoding is not None else cast(str, det["encoding"])
    if encoding == "ascii":
        encoding = "UTF-8"
    if confidence < 1.0:
        warnings.warn(
            f"unsound encoding, assuming {encoding} ({confidence:.0%} confidence)",
            UnicodeWarning,
            stacklevel=3,
        )
    if encoding == "UTF-8":
        return typing.cast(BinaryIO, decompressed)
    else:
        # NOTE(review): the recoder is given the *detected* det["encoding"],
        # not the possibly caller-overridden/ascii-normalized `encoding` used
        # above -- confirm this divergence is intentional.
        return typing.cast(
            BinaryIO,
            BufferedReader(
                typing.cast(
                    io.RawIOBase,
                    EncodedFile(
                        typing.cast(typing.BinaryIO, decompressed),
                        "UTF-8",
                        typing.cast(str, det["encoding"]),
                    ),
                )
            ),
        )
| StarcoderdataPython |
3357748 | """
determine_data_frequency
"""
import logging
import traceback
from collections import Counter
# @added 20210619 - Feature #4148: analyzer.metrics_manager.resolutions
# Bug #4146: check_data_sparsity - incorrect on low fidelity and inconsistent metrics
# Feature #3870: metrics_manager - check_data_sparsity
from settings import FULLY_POPULATED_PERCENTAGE
# @added 20210419 - Feature #4014: Ionosphere - inference
# Branch #3590: inference
# A common pattern to determine metric_resolution
def determine_data_frequency(current_skyline_app, timeseries, log=False):
    """
    Determine the data frequency of a timeseries:
    :param current_skyline_app: the app calling the function so the function
        knows which log to write too.
    :param timeseries: the timeseries as a list [[1618761782, 123],...,[1618848182, 78]]
    :param log: whether to log or not, optional, defaults to False
    :type current_skyline_app: str
    :type timeseries: list
    :type log: boolean
    :return: metric_resolution
    :rtype: int
    """
    function_str = 'functions.timeseries.determine_data_frequency'
    metric_resolution = None
    timestamp_resolutions_count = {}
    current_logger = None
    if log:
        current_skyline_app_logger = current_skyline_app + 'Log'
        current_logger = logging.getLogger(current_skyline_app_logger)
    try:
        # Deal with lower frequency data
        # Determine resolution from the last 100 data points
        resolution_timestamps = []
        # @modified 20210619 - Bug #4146: check_data_sparsity - incorrect on low fidelity and inconsistent metrics
        # for metric_datapoint in timeseries[-100:]:
        for metric_datapoint in timeseries[-101:]:
            timestamp = int(metric_datapoint[0])
            resolution_timestamps.append(timestamp)
        # @added 20210618 - Bug #4146: check_data_sparsity - incorrect on low fidelity and inconsistent metrics
        # Identify and handle low fidelity, inconsistent and
        # sparsely populated metrics
        fully_populated_sample = False
        # @modified 20210619 - Feature #4148: analyzer.metrics_manager.resolutions
        # Bug #4146: check_data_sparsity - incorrect on low fidelity and inconsistent metrics
        # Feature #3870: metrics_manager - check_data_sparsity
        # if len(resolution_timestamps) > 94:
        if len(resolution_timestamps) > FULLY_POPULATED_PERCENTAGE:
            fully_populated_sample = True
            if log:
                current_logger.info('%s :: %s :: timeseries sample is fully populated' % (
                    current_skyline_app, function_str))
        # If the sample is not fully populated use the whole
        # time series
        if not fully_populated_sample:
            if log:
                current_logger.info('%s :: %s :: timeseries sample is not fully populated, using entire timeseries' % (
                    current_skyline_app, function_str))
            resolution_timestamps = []
            for metric_datapoint in timeseries:
                timestamp = int(metric_datapoint[0])
                resolution_timestamps.append(timestamp)
        if log:
            current_logger.info('%s :: %s :: calculating resolution from %s timestamp' % (
                current_skyline_app, function_str, str(len(resolution_timestamps))))
        # Build the list of gaps (in seconds) between consecutive timestamps.
        timestamp_resolutions = []
        if resolution_timestamps:
            last_timestamp = None
            for timestamp in resolution_timestamps:
                if last_timestamp:
                    resolution = timestamp - last_timestamp
                    timestamp_resolutions.append(resolution)
                    last_timestamp = timestamp
                else:
                    last_timestamp = timestamp
            try:
                del resolution_timestamps
            except:
                pass
        if timestamp_resolutions:
            timestamp_resolutions_count = {}
            try:
                # The modal inter-sample gap is taken as the resolution.
                timestamp_resolutions_count = Counter(timestamp_resolutions)
                if log:
                    current_logger.info('%s :: %s :: timestamp_resolutions_count: %s' % (
                        current_skyline_app, function_str,
                        str(dict(timestamp_resolutions_count))))
                ordered_timestamp_resolutions_count = timestamp_resolutions_count.most_common()
                metric_resolution = int(ordered_timestamp_resolutions_count[0][0])
                if log:
                    # NOTE(review): this logs `resolution` (the last gap seen),
                    # not the computed `metric_resolution` -- likely a bug; confirm.
                    current_logger.info('%s :: %s :: resolution determined as %s' % (
                        current_skyline_app, function_str, str(resolution)))
            except Exception as e:
                if log:
                    current_logger.error(traceback.format_exc())
                    current_logger.error('error :: %s :: functions.timeseries.determine_data_frequency :: failed to determine metric_resolution from timeseries - %s' % (
                        current_skyline_app, e))
            try:
                del timestamp_resolutions
            except:
                pass
        # @added 20210617 - Bug #4146: check_data_sparsity - incorrect on low fidelity and inconsistent metrics
        # Feature #4144: webapp - stale_metrics API endpoint
        # Feature #4076: CUSTOM_STALE_PERIOD
        # Branch #1444: thunder
        if timestamp_resolutions_count:
            # @added 20210618 - Bug #4146: check_data_sparsity - incorrect on low fidelity and inconsistent metrics
            # Identify low fidelity and inconsistent
            # frequency metrics and set the metric
            # resolution according
            if len(dict(timestamp_resolutions_count)) > 1:
                timestamp_resolutions_count_dict = dict(timestamp_resolutions_count)
                timestamp_resolutions = []
                resolution_counts = []
                for resolution in list(timestamp_resolutions_count_dict.keys()):
                    timestamp_resolutions.append([resolution, timestamp_resolutions_count_dict[resolution]])
                    resolution_counts.append(timestamp_resolutions_count_dict[resolution])
                # If there is equal counts for each resolution
                # use the highest resolution/frequency
                if len(list(set(resolution_counts))) == 1:
                    sorted_timestamp_resolutions = sorted(timestamp_resolutions, key=lambda x: (x[1], x[0]))
                    metric_resolution = sorted_timestamp_resolutions[0][0]
                    if log:
                        # NOTE(review): logs loop variable `resolution`, not
                        # the adjusted metric_resolution -- confirm.
                        current_logger.info('%s :: %s :: equal resolution counts - resolution adjusted to %s' % (
                            current_skyline_app, function_str, str(resolution)))
                # If there are multiple resolutions/frequencies
                # set the metric resolution to the
                # highest resolution with the highest
                # count, e.g,
                # {2700: 4, 900: 25, 1500: 6, 1200: 4, 3300: 1, 300: 25, 600: 22, 1800: 4, 2400: 1, 3600: 1}
                # Should be set to 300
                if len(list(set(resolution_counts))) > 1:
                    max_count = max([count for resolution, count in ordered_timestamp_resolutions_count])
                    metric_resolution = min([resolution for resolution, count in ordered_timestamp_resolutions_count if count == max_count])
                    if log:
                        # NOTE(review): logs loop variable `resolution`, not
                        # the adjusted metric_resolution -- confirm.
                        current_logger.info('%s :: %s :: multiple resolution counts - resolution adjusted to %s' % (
                            current_skyline_app, function_str, str(resolution)))
        # @added 20201215 - Feature #3870: metrics_manager - check_data_sparsity
        # Handle the slight variances that occur in real time
        # metric streams
        if metric_resolution and metric_resolution != 60:
            if metric_resolution in range(51, 69):
                metric_resolution = 60
                if log:
                    # NOTE(review): logs `resolution`, not 60 -- confirm.
                    current_logger.info('%s :: %s :: resolution in range(51, 69) - resolution adjusted to %s' % (
                        current_skyline_app, function_str, str(resolution)))
    except Exception as e:
        if log:
            current_logger.error(traceback.format_exc())
            current_logger.error('error :: %s :: functions.timeseries.determine_data_frequency :: failed to determine metric_resolution from timeseries - %s' % (
                current_skyline_app, e))
    # The analyzer caller additionally receives the gap histogram.
    if current_skyline_app == 'analyzer':
        return metric_resolution, dict(timestamp_resolutions_count)
    return metric_resolution
| StarcoderdataPython |
1628859 | <filename>mtl_coherency.py
import time
import os
import operator
import random
import datetime
import logging
import sys
import argparse
import numpy as np
import pandas as pd
from copy import deepcopy
from collections import Counter
from ast import literal_eval
from tqdm import tqdm, trange
from nltk.corpus import stopwords
from sklearn.metrics import mean_squared_error, f1_score, accuracy_score, label_ranking_average_precision_score, confusion_matrix, average_precision_score
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import torchtext as tt
from torch.optim.lr_scheduler import MultiStepLR
from torch.utils.data.distributed import DistributedSampler
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset, Dataset)
from torch.nn.modules.distance import CosineSimilarity
from torch.nn.modules import HingeEmbeddingLoss
from model.mtl_models import CosineCoherence, MTL_Model3
from data_preparation import get_dataloader
test_amount = 1
def main():
args = parse_args()
init_logging(args)
# Init randomization
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda != -1:
cuda_device_name = "cuda:{}".format(args.cuda)
device = torch.device(
cuda_device_name if torch.cuda.is_available() else 'cpu')
else:
device = torch.device('cpu') # if torch.cuda.is_available() else 'cpu')
logging.info("Used Device = {}".format(str(device)))
train_datasetfile = os.path.join(
args.datadir, "train", "coherency_dset_{}.txt".format(str(args.task)))
val_datasetfile = os.path.join(
args.datadir, "validation", "coherency_dset_{}.txt".format(str(args.task)))
test_datasetfile = os.path.join(
args.datadir, "test", "coherency_dset_{}.txt".format(str(args.task)))
if args.model == "cosine":
if args.do_train:
logging.info("cannot train the cosine model!, we ignore --do_train ")
args.do_train = False
model = CosineCoherence(args, device).to(device)
elif args.model == "random":
if args.do_train:
logging.info("cannot train the random model!, we ignore --do_train ")
args.do_train = False
model = None
elif args.model == "model-3":
model = MTL_Model3(args, device).to(device)
else:
raise NotImplementedError("specified model is not supported")
logging.info("Used Model = {}".format(str(args.model)))
best_epoch = -1
train_dl = None
val_dl = None
test_dl = None
if args.do_train:
logging.info('load training data from: %s' % train_datasetfile)
train_dl = get_dataloader(train_datasetfile, args)
logging.info('load validation data from: %s' % train_datasetfile)
val_dl = get_dataloader(val_datasetfile, args)
sigma_1 = nn.Parameter(torch.tensor(
args.mtl_sigma, requires_grad=True).to(device))
sigma_2 = nn.Parameter(torch.tensor(
args.mtl_sigma, requires_grad=True).to(device))
if args.loss == "mtl":
optimizer = torch.optim.Adam(list(model.parameters())+[
sigma_1, sigma_2], lr=args.learning_rate)
else:
optimizer = torch.optim.Adam(model.parameters(), lr=args.learning_rate)
hinge = torch.nn.MarginRankingLoss(reduction='none', margin=0.1).to(device)
epoch_scores = dict()
for epoch in trange(args.epochs, desc="Epoch"):
output_model_file_epoch = os.path.join(
args.datadir, "{}_task-{}_loss-{}_epoch-{}.ckpt".format(str(model), str(args.task), str(args.loss), str(epoch)))
for i, ((utts_left, utts_right),
(coh_ixs, (acts_left, acts_right)), (len_u1, len_u2, len_d1, len_d2)) in tqdm(enumerate(train_dl),
total=len(train_dl), desc='Training', postfix="LR: {}".format(args.learning_rate)):
if args.test and i >= test_amount:
break
coh_ixs = coh_ixs.to(device)
coh1, (da1, loss1) = model(utts_left.to(device),
acts_left.to(device),
(len_u1.to(device), len_d1.to(device)))
coh2, (da2, loss2) = model(utts_right.to(device),
acts_right.to(device),
(len_u2.to(device), len_d2.to(device)))
# coh_ixs is of the form [0,1,1,0,1], where 0 indicates the first one is the more coherent one
# for this loss, the input is expected as [1,-1,-1,1,-1], where 1 indicates the first to be coherent, while -1 the second
# therefore, we need to transform the coh_ixs accordingly
loss_coh_ixs = torch.add(torch.add(coh_ixs*(-1), torch.ones(
coh_ixs.size()).to(device))*2, torch.ones(coh_ixs.size()).to(device)*(-1))
loss_da = loss1+loss2
loss_coh = hinge(coh1, coh2, loss_coh_ixs)
if args.loss == "da":
loss = loss_da
elif args.loss == "coh":
loss = hinge(coh1, coh2, loss_coh_ixs)
elif args.loss == "mtl":
loss = torch.div(loss_da, sigma_1**2) + torch.div(loss_coh,
sigma_2**2) + torch.log(sigma_1) + torch.log(sigma_2)
elif args.loss == 'coin':
d = random.uniform(0, 1)
if d < 0.5:
loss = loss_da
else:
loss = loss_coh
elif args.loss == 'sum':
loss = loss_da + loss_coh
optimizer.zero_grad()
loss.mean().backward()
optimizer.step()
# save after every epoch
torch.save(model.state_dict(), output_model_file_epoch)
if args.do_eval:
if train_dl == None:
train_dl = get_dataloader(train_datasetfile, args)
if val_dl == None:
val_dl = get_dataloader(val_datasetfile, args)
test_dl = get_dataloader(test_datasetfile, args)
def _eval_datasource(dl, desc_str):
coh_y_true = []
coh_y_pred = []
da_f1_scores = []
da_y_true = []
da_y_pred = []
for i, ((utts_left, utts_right),
(coh_ixs, (acts_left, acts_right)),
(len_u1, len_u2, len_d1, len_d2)) in tqdm(enumerate(dl),
total=len(dl), desc=desc_str, postfix="LR: {}".format(args.learning_rate)):
# additional code to shorten context/ dialogue
if args.test and i >= test_amount:
break
if model == None: # generate random values
pred = [random.randint(0, 1) for _ in range(coh_ixs.size(0))]
coh_y_pred += pred
coh_y_true += coh_ixs.detach().cpu().numpy().tolist()
else:
coh1, lda1 = model(utts_left.to(device), acts_left.to(
device), (len_u1.to(device), len_d1.to(device)))
coh2, lda2 = model(utts_right.to(device), acts_right.to(
device), (len_u2.to(device), len_d2.to(device)))
_, pred = torch.max(
torch.cat([coh1.unsqueeze(1), coh2.unsqueeze(1)], dim=1), dim=1)
coh_y_pred += pred.detach().cpu().numpy().tolist()
coh_y_true += coh_ixs.detach().cpu().numpy().tolist()
if lda1 != None and lda2 != None:
da1 = lda1[0].view(acts_left.size(
0)*acts_left.size(1)).detach().cpu().numpy()
da2 = lda2[0].view(acts_left.size(
0)*acts_left.size(1)).detach().cpu().numpy()
acts_left = acts_left.view(acts_left.size(
0)*acts_left.size(1)).detach().cpu().numpy()
acts_right = acts_right.view(acts_right.size(
0)*acts_right.size(1)).detach().cpu().numpy()
acts_left, da1 = da_filter_zero(acts_left.tolist(), da1.tolist())
acts_right, da2 = da_filter_zero(acts_right.tolist(), da2.tolist())
da_y_pred += da1 + da2
da_y_true += acts_left + acts_right
return (coh_y_true, coh_y_pred), (da_y_true, da_y_pred)
def _log_dataset_scores(name, coh_y_true, coh_y_pred, da_y_true, da_y_pred):
logging.info("%s size: %d" % (name, len(coh_y_true)))
coh_acc = accuracy_score(coh_y_true, coh_y_pred)
logging.info("%s coherence accuracy: %2.2f" % (name, coh_acc*100))
da_acc = accuracy_score(da_y_true, da_y_pred)
logging.info("%s DA accuracy: %2.2f" % (name, da_acc*100))
da_f1 = f1_score(da_y_true, da_y_pred, average='weighted')
logging.info("%s DA MicroF1: %2.2f" % (name, da_f1*100))
# choose the best epoch
if args.model != "random" and args.model != "cosine" and args.oot_model == None:
best_epoch = 0
if args.best_epoch:
best_epoch = args.best_epoch
else:
best_coh_acc, best_da_acc = None, None
for i in range(args.epochs):
model_file_epoch = os.path.join(args.datadir, "{}_task-{}_loss-{}_epoch-{}.ckpt".format(
str(model), str(args.task), str(args.loss), str(i)))
model.load_state_dict(torch.load(model_file_epoch))
model.to(device)
model.eval()
(coh_y_true, coh_y_pred), (da_y_true, da_y_pred) = _eval_datasource(
val_dl, "Validation {}:".format(i))
if i == 0:
best_coh_acc = accuracy_score(coh_y_true, coh_y_pred)
best_da_acc = accuracy_score(da_y_true, da_y_pred)
elif args.loss == 'da':
curr_da_acc = accuracy_score(da_y_true, da_y_pred)
if curr_da_acc > best_da_acc:
best_epoch = i
elif args.loss == 'coh':
curr_coh_acc = accuracy_score(coh_y_true, coh_y_pred)
if curr_coh_acc > best_coh_acc:
best_epoch = i
elif args.loss == 'mtl' or args.loss == 'coin' or args.loss == 'sum':
curr_coh_acc = accuracy_score(coh_y_true, coh_y_pred)
curr_da_acc = accuracy_score(da_y_true, da_y_pred)
if curr_coh_acc+curr_da_acc > best_coh_acc+best_da_acc:
best_epoch = i
logging.info("Best Epoch = {}".format(best_epoch))
# evaluate all sets on the best epoch
model_file_epoch = os.path.join(args.datadir, "{}_task-{}_loss-{}_epoch-{}.ckpt".format(
str(model), str(args.task), str(args.loss), str(best_epoch)))
model.load_state_dict(torch.load(model_file_epoch))
model.to(device)
model.eval()
elif args.oot_model:
model.load_state_dict(torch.load(args.oot_model))
model.to(device)
model.eval()
datasets = [('train', train_dl), ('validation', val_dl), ('test', test_dl)]
for (name, dl) in datasets:
(coh_y_true, coh_y_pred), (da_y_true, da_y_pred) = _eval_datasource(
dl, "Final Eval {}".format(name))
_log_dataset_scores(name, coh_y_true, coh_y_pred, da_y_true, da_y_pred)
def da_filter_zero(y_true, y_pred):
    """Drop every (label, prediction) pair whose true label is 0.

    Label 0 marks padding / "no dialogue act"; those positions must not
    contribute to accuracy or F1.

    Returns:
        Two parallel lists: the kept true labels and the kept predictions.
    """
    kept_pairs = [(label, pred)
                  for label, pred in zip(y_true, y_pred)
                  if label != 0]
    kept_true = [label for label, _ in kept_pairs]
    kept_pred = [pred for _, pred in kept_pairs]
    return kept_true, kept_pred
def init_logging(args):
    """Set up file-based logging for this run and record all hyperparameters.

    Creates the log directory if needed, removes any previously installed
    root handlers (basicConfig is a no-op when handlers already exist),
    and routes DEBUG+ records to a timestamped logfile.
    """
    timestamp = datetime.datetime.now().strftime("%Y-%m-%d-%H-%M-%S")
    mode = "train" if args.do_train else "eval"
    if not os.path.exists(args.logdir):
        os.makedirs(args.logdir)
    logfile = os.path.join(args.logdir, 'coherency_{}_{}_loss_{}_task_{}_{}.log'.format(
        mode, args.model, args.loss, args.task, timestamp))
    # Clear stale handlers so basicConfig actually installs the file handler.
    for handler in logging.root.handlers[:]:
        logging.root.removeHandler(handler)
    logging.basicConfig(filename=logfile, filemode='w',
                        level=logging.DEBUG, format='%(levelname)s:%(message)s')
    print("Logging to ", logfile)
    logging.info("Used Hyperparameters:")
    settings = [
        "learning_rate = {}".format(args.learning_rate),
        "num_epochs = {}".format(args.epochs),
        "lstm_hidden_sent = {}".format(args.lstm_sent_size),
        "lstm_hidden_utt = {}".format(args.lstm_utt_size),
        "lstm_layers = {}".format(args.lstm_layers),
        "batch_size = {}".format(args.batch_size),
        "dropout probability = {}".format(args.dropout_prob),
        "MTL Sigma Init = {}".format(args.mtl_sigma),
    ]
    for line in settings:
        logging.info(line)
    if args.oot_model:
        logging.info("using OOT Model = {}".format(args.oot_model))
    logging.info("========================")
    logging.info("task = {}".format(args.task))
    logging.info("loss = {}".format(args.loss))
    logging.info("seed = {}".format(args.seed))
    logging.info("embedding = {}".format(args.embedding))
    logging.info("========================")
def parse_args():
    """Build and parse the command-line arguments for a coherency run.

    The returned namespace also serves as the run-configuration object, so
    a few parameters are fixed and marked DONT CHANGE. When --num_classes
    is left at its 0 sentinel, the dialogue-act class count is inferred
    from the dataset directory name.

    Returns:
        argparse.Namespace: all hyperparameters and flags.
    """
    parser = argparse.ArgumentParser()
    # This also serves as a kind of configuration object, so some parameters are not ought to be changed (listed below)
    parser.add_argument("--datadir",
                        required=True,
                        type=str,
                        help="""The input directory where the files of daily
                        dialog are located. the folder should have
                        train/test/validation as subfolders""")
    parser.add_argument("--logdir",
                        default="./logs",
                        type=str,
                        help="the folder to save the logfile to.")
    parser.add_argument('--seed',
                        type=int,
                        default=80591,
                        help="random seed for initialization")
    parser.add_argument('--batch_size',
                        type=int,
                        default=128,
                        help="training/evaluation batch size")
    parser.add_argument('--epochs',
                        type=int,
                        default=20,
                        help="amount of epochs")
    parser.add_argument('--learning_rate',
                        type=float,
                        default=0.0005,
                        help="optimizer learning rate")
    parser.add_argument('--dropout_prob',
                        type=float,
                        default=0.1,
                        help="the dropout probability for DA classification")
    parser.add_argument('--lstm_sent_size',
                        type=int,
                        default=128,
                        help="hidden size for the lstm models")
    parser.add_argument('--lstm_utt_size',
                        type=int,
                        default=256,
                        help="hidden size for the lstm models")
    parser.add_argument('--mtl_sigma',
                        type=float,
                        default=2.0,
                        help="initialization value for the two sigma values when using MTL Loss")
    parser.add_argument('--embedding',
                        type=str,
                        default="glove",
                        help="""from which embedding should the word ids be used.
                        alternatives: glove """)
    parser.add_argument('--model',
                        type=str,
                        default="cosine",
                        help="""with which model the dataset should be trained/evaluated.
                        alternatives: random | cosine | model-3 """)
    parser.add_argument('--loss',
                        type=str,
                        default="mtl",
                        help="""with which loss the dataset should be trained/evaluated.
                        alternatives: mtl | coin | sum | da | coh """)
    parser.add_argument('--task',
                        required=True,
                        type=str,
                        default="up",
                        help="""for which task the dataset should be created.
                        alternatives: up (utterance permutation)
                        us (utterance sampling)
                        ui (utterance insertion)
                        hup (half utterance perturbation) """)
    parser.add_argument('--oot_model',
                        required=False,
                        type=str,
                        default=None,
                        help="""when doing Out-Of-Task evaluations, this provides the model file""")
    parser.add_argument('--best_epoch',
                        type=int,
                        default=None,
                        help="when evaluating, tell the best epoch to choose the file")
    parser.add_argument('--test',
                        action='store_true',
                        help="just do a test run on small amount of data")
    parser.add_argument('--cuda',
                        type=int,
                        default=-1,
                        help="which cuda device to take")
    parser.add_argument('--do_train',
                        action='store_true',
                        help="run the training loop")
    parser.add_argument('--do_eval',
                        action='store_true',
                        help="run evaluation on the train/validation/test sets")
    # usually unmodified parameters, kept here to have a config like object
    parser.add_argument('--num_classes',
                        type=int,
                        default=0,
                        help="number of DA classes; 0 (default) infers 5 for daily_dialog and 50 otherwise (e.g. Switchboard)")
    parser.add_argument('--lstm_layers',
                        type=int,
                        default=1,
                        help="DONT CHANGE. amount of layers for LSTM models")
    parser.add_argument('--embedding_dim',
                        type=int,
                        default=300,
                        help="DONT CHANGE. embedding dimension for GloVe vectors")
    args = parser.parse_args()
    # 0 acts as a sentinel: derive the DA class count from the dataset folder name.
    if args.num_classes == 0:
        dataset = os.path.split(args.datadir)[-1]
        if dataset == "daily_dialog":
            args.num_classes = 5
        else:
            args.num_classes = 50
    return args
##########################################
# Script entry point: run the training/evaluation driver and log the
# total wall-clock time before flushing and closing the log handlers.
if __name__ == "__main__":
    start_time = time.time()
    main()
    end_time = time.time()
    logging.info("Took %5.2f seconds" % (end_time - start_time))
    logging.shutdown()
| StarcoderdataPython |
3382587 | <reponame>phillipjhl/artoo_engine<gh_stars>0
#!/usr/bin/env python3
import board
import busio
import time
import adafruit_dht
from datetime import datetime
class DHT_SENSOR:
    """Thin wrapper around an Adafruit DHT11/DHT22 sensor wired to GPIO D17."""

    def __init__(self, sensor_type="DHT11", temp_format="F"):
        """Select the driver class for the given sensor model.

        Args:
            sensor_type: "DHT11" or "DHT22"; anything else raises.
            temp_format: "F" (default) or "C" — unit returned by read_sensor.
        """
        self.sensor_type = sensor_type
        self.temp_format = temp_format
        if sensor_type == "DHT22":
            driver = adafruit_dht.DHT22
        elif sensor_type == "DHT11":
            driver = adafruit_dht.DHT11
        else:
            raise Exception("Unavailable sensor_type.")
        self.dht = driver(board.D17)

    def read_sensor(self):
        """Poll the sensor once and print the reading.

        Returns:
            [temperature, humidity, timestamp] on success, with the
            temperature in Fahrenheit unless temp_format is "C".
            On a RuntimeError (DHT reads routinely fail) the driver's
            message string is printed and returned instead.
        """
        try:
            celsius = self.dht.temperature
            reading_time = datetime.now()
            fahrenheit = celsius * (9/5) + 32
            humidity = self.dht.humidity
        except RuntimeError as error:
            print(error.args[0])
            return error.args[0]
        print(
            "Temp: {:.1f}F / {:.1f}C Humidity: {}% ".format(fahrenheit, celsius, humidity))
        if self.temp_format == "C":
            return [celsius, humidity, reading_time]
        return [fahrenheit, humidity, reading_time]
# sensor = DHT_SENSOR("DHT22")
# while True:
# data = sensor.read_sensor()
# print(data)
# time.sleep(3.0)
| StarcoderdataPython |
173374 | <gh_stars>0
from django_filters import rest_framework as fl
from django_filters.filters import CharFilter, NumberFilter
from reviews.models import Title
class TitleFilter(fl.FilterSet):
    """Query-string filtering for Title querysets.

    Exposed query parameters:
      * category -- substring match on the related category's slug
      * genre    -- substring match on the related genre's slug
      * name     -- substring match on the title's name
      * year     -- exact match on the release year
    """

    category = CharFilter(field_name='category__slug',
                          lookup_expr='contains')
    genre = CharFilter(field_name='genre__slug',
                       lookup_expr='contains')
    name = CharFilter(field_name='name',
                      lookup_expr='contains')
    year = NumberFilter(field_name='year',
                        lookup_expr='exact')

    class Meta:
        # Names under which the filters are registered on the FilterSet.
        model = Title
        fields = ['name', 'year', 'genre', 'category']
| StarcoderdataPython |
3282267 | # -*- coding: utf-8 -*- #
# Copyright 2016 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A command that prints an access token for Application Default Credentials.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
from googlecloudsdk.api_lib.auth import util as auth_util
from googlecloudsdk.calliope import base
from googlecloudsdk.calliope import exceptions as c_exc
from googlecloudsdk.core import http
from googlecloudsdk.core import log
from googlecloudsdk.core import properties
from oauth2client import client
import six
class PrintAccessToken(base.Command):
    r"""Print an access token for your current Application Default Credentials.

    {command} generates and prints an access token for the current
    Application Default Credential (ADC). The ADC can be specified either by
    setting the `GOOGLE_APPLICATION_CREDENTIALS` environment variable to the path
    of a service account key file (JSON) or using
    `gcloud auth application-default login`.

    The access token generated by {command} is useful for manually testing
    APIs via curl or similar tools.

    In order to print details of the access token, such as the associated account
    and the token's expiration time in seconds, run:

      $ curl https://www.googleapis.com/oauth2/v1/tokeninfo?access_token=\
      $(gcloud auth application-default print-access-token)

    This command should be used sparingly and for debugging alone.
    """

    @staticmethod
    def Args(parser):
        # Print only the raw token value so the output can be piped to curl.
        parser.display_info.AddFormat('value(access_token)')

    def Run(self, args):
        """Run the helper command."""
        # This command always mints a token for the ADC itself; warn when an
        # impersonation property is set, since it is ignored here.
        impersonate_service_account = (
            properties.VALUES.auth.impersonate_service_account.Get())
        if impersonate_service_account:
            log.warning(
                "Impersonate service account '{}' is detected. This command cannot be"
                ' used to print the access token for an impersonate account. The '
                "token below is still the application default credentials' access "
                'token.'.format(impersonate_service_account))
        try:
            creds = client.GoogleCredentials.get_application_default()
        except client.ApplicationDefaultCredentialsError as e:
            # No ADC available: log the full traceback, surface a user error.
            log.debug(e, exc_info=True)
            raise c_exc.ToolException(six.text_type(e))
        if creds.create_scoped_required():
            # Scope-less credentials must be narrowed to cloud-platform.
            creds_type = creds.serialization_data['type']
            token_uri_override = properties.VALUES.auth.token_host.Get()
            # NOTE(review): only service-account credentials appear to accept a
            # token_uri override here — confirm against the oauth2client API.
            if creds_type == client.SERVICE_ACCOUNT and token_uri_override:
                creds = creds.create_scoped([auth_util.CLOUD_PLATFORM_SCOPE],
                                            token_uri=token_uri_override)
            else:
                creds = creds.create_scoped([auth_util.CLOUD_PLATFORM_SCOPE])
        access_token_info = creds.get_access_token(http.Http())
        if not access_token_info:
            raise c_exc.ToolException(
                'No access token could be obtained from the current credentials.')
        # Rendered via the 'value(access_token)' format declared in Args.
        return access_token_info
| StarcoderdataPython |
81848 | <gh_stars>0
# -*- coding: utf-8 -*-
from typing import Dict, List
# Entries whose id differs from their number are alternate forms of the Pokémon.
# https://www.pokemon.com/es/pokedex/, https://bulbagarden.net/
# National Pokédex, Generation I (Nos. 1-151) plus the armored Mewtwo form.
# Fixed against the sources cited above: entries 132-139 (Ditto through
# Omastar) previously all carried the duplicated id/number '131', and the
# names for No. 122 and the armored Mewtwo form were missing placeholders.
KANTO = [
    {'id': '1', 'name': 'Bulbasaur', 'number': '1'},
    {'id': '2', 'name': 'Ivysaur', 'number': '2'},
    {'id': '3', 'name': 'Venusaur', 'number': '3'},
    {'id': '4', 'name': 'Charmander', 'number': '4'},
    {'id': '5', 'name': 'Charmeleon', 'number': '5'},
    {'id': '6', 'name': 'Charizard', 'number': '6'},
    {'id': '7', 'name': 'Squirtle', 'number': '7'},
    {'id': '8', 'name': 'Wartortle', 'number': '8'},
    {'id': '9', 'name': 'Blastoise', 'number': '9'},
    {'id': '10', 'name': 'Caterpie', 'number': '10'},
    {'id': '11', 'name': 'Metapod', 'number': '11'},
    {'id': '12', 'name': 'Butterfree', 'number': '12'},
    {'id': '13', 'name': 'Weedle', 'number': '13'},
    {'id': '14', 'name': 'Kakuna', 'number': '14'},
    {'id': '15', 'name': 'Beedrill', 'number': '15'},
    {'id': '16', 'name': 'Pidgey', 'number': '16'},
    {'id': '17', 'name': 'Pidgeotto', 'number': '17'},
    {'id': '18', 'name': 'Pidgeot', 'number': '18'},
    {'id': '19', 'name': 'Rattata', 'number': '19'},
    {'id': '20', 'name': 'Raticate', 'number': '20'},
    {'id': '21', 'name': 'Spearow', 'number': '21'},
    {'id': '22', 'name': 'Fearow', 'number': '22'},
    {'id': '23', 'name': 'Ekans', 'number': '23'},
    {'id': '24', 'name': 'Arbok', 'number': '24'},
    {'id': '25', 'name': 'Pikachu', 'number': '25'},
    {'id': '26', 'name': 'Raichu', 'number': '26'},
    {'id': '27', 'name': 'Sandshrew', 'number': '27'},
    {'id': '28', 'name': 'Sandslash', 'number': '28'},
    {'id': '29', 'name': 'Nidoran♀', 'number': '29'},
    {'id': '30', 'name': 'Nidorina', 'number': '30'},
    {'id': '31', 'name': 'Nidoqueen', 'number': '31'},
    {'id': '32', 'name': 'Nidoran♂', 'number': '32'},
    {'id': '33', 'name': 'Nidorino', 'number': '33'},
    {'id': '34', 'name': 'Nidoking', 'number': '34'},
    {'id': '35', 'name': 'Clefairy', 'number': '35'},
    {'id': '36', 'name': 'Clefable', 'number': '36'},
    {'id': '37', 'name': 'Vulpix', 'number': '37'},
    {'id': '38', 'name': 'Ninetales', 'number': '38'},
    {'id': '39', 'name': 'Jigglypuff', 'number': '39'},
    {'id': '40', 'name': 'Wigglytuff', 'number': '40'},
    {'id': '41', 'name': 'Zubat', 'number': '41'},
    {'id': '42', 'name': 'Golbat', 'number': '42'},
    {'id': '43', 'name': 'Oddish', 'number': '43'},
    {'id': '44', 'name': 'Gloom', 'number': '44'},
    {'id': '45', 'name': 'Vileplume', 'number': '45'},
    {'id': '46', 'name': 'Paras', 'number': '46'},
    {'id': '47', 'name': 'Parasect', 'number': '47'},
    {'id': '48', 'name': 'Venonat', 'number': '48'},
    {'id': '49', 'name': 'Venomoth', 'number': '49'},
    {'id': '50', 'name': 'Diglett', 'number': '50'},
    {'id': '51', 'name': 'Dugtrio', 'number': '51'},
    {'id': '52', 'name': 'Meowth', 'number': '52'},
    {'id': '53', 'name': 'Persian', 'number': '53'},
    {'id': '54', 'name': 'Psyduck', 'number': '54'},
    {'id': '55', 'name': 'Golduck', 'number': '55'},
    {'id': '56', 'name': 'Mankey', 'number': '56'},
    {'id': '57', 'name': 'Primeape', 'number': '57'},
    {'id': '58', 'name': 'Growlithe', 'number': '58'},
    {'id': '59', 'name': 'Arcanine', 'number': '59'},
    {'id': '60', 'name': 'Poliwag', 'number': '60'},
    {'id': '61', 'name': 'Poliwhirl', 'number': '61'},
    {'id': '62', 'name': 'Poliwrath', 'number': '62'},
    {'id': '63', 'name': 'Abra', 'number': '63'},
    {'id': '64', 'name': 'Kadabra', 'number': '64'},
    {'id': '65', 'name': 'Alakazam', 'number': '65'},
    {'id': '66', 'name': 'Machop', 'number': '66'},
    {'id': '67', 'name': 'Machoke', 'number': '67'},
    {'id': '68', 'name': 'Machamp', 'number': '68'},
    {'id': '69', 'name': 'Bellsprout', 'number': '69'},
    {'id': '70', 'name': 'Weepinbell', 'number': '70'},
    {'id': '71', 'name': 'Victreebel', 'number': '71'},
    {'id': '72', 'name': 'Tentacool', 'number': '72'},
    {'id': '73', 'name': 'Tentacruel', 'number': '73'},
    {'id': '74', 'name': 'Geodude', 'number': '74'},
    {'id': '75', 'name': 'Graveler', 'number': '75'},
    {'id': '76', 'name': 'Golem', 'number': '76'},
    {'id': '77', 'name': 'Ponyta', 'number': '77'},
    {'id': '78', 'name': 'Rapidash', 'number': '78'},
    {'id': '79', 'name': 'Slowpoke', 'number': '79'},
    {'id': '80', 'name': 'Slowbro', 'number': '80'},
    {'id': '81', 'name': 'Magnemite', 'number': '81'},
    {'id': '82', 'name': 'Magneton', 'number': '82'},
    {'id': '83', 'name': "Farfetch'd", 'number': '83'},
    {'id': '84', 'name': 'Doduo', 'number': '84'},
    {'id': '85', 'name': 'Dodrio', 'number': '85'},
    {'id': '86', 'name': 'Seel', 'number': '86'},
    {'id': '87', 'name': 'Dewgong', 'number': '87'},
    {'id': '88', 'name': 'Grimer', 'number': '88'},
    {'id': '89', 'name': 'Muk', 'number': '89'},
    {'id': '90', 'name': 'Shellder', 'number': '90'},
    {'id': '91', 'name': 'Cloyster', 'number': '91'},
    {'id': '92', 'name': 'Gastly', 'number': '92'},
    {'id': '93', 'name': 'Haunter', 'number': '93'},
    {'id': '94', 'name': 'Gengar', 'number': '94'},
    {'id': '95', 'name': 'Onix', 'number': '95'},
    {'id': '96', 'name': 'Drowzee', 'number': '96'},
    {'id': '97', 'name': 'Hypno', 'number': '97'},
    {'id': '98', 'name': 'Krabby', 'number': '98'},
    {'id': '99', 'name': 'Kingler', 'number': '99'},
    {'id': '100', 'name': 'Voltorb', 'number': '100'},
    {'id': '101', 'name': 'Electrode', 'number': '101'},
    {'id': '102', 'name': 'Exeggcute', 'number': '102'},
    {'id': '103', 'name': 'Exeggutor', 'number': '103'},
    {'id': '104', 'name': 'Cubone', 'number': '104'},
    {'id': '105', 'name': 'Marowak', 'number': '105'},
    {'id': '106', 'name': 'Hitmonlee', 'number': '106'},
    {'id': '107', 'name': 'Hitmonchan', 'number': '107'},
    {'id': '108', 'name': 'Lickitung', 'number': '108'},
    {'id': '109', 'name': 'Koffing', 'number': '109'},
    {'id': '110', 'name': 'Weezing', 'number': '110'},
    {'id': '111', 'name': 'Rhyhorn', 'number': '111'},
    {'id': '112', 'name': 'Rhydon', 'number': '112'},
    {'id': '113', 'name': 'Chansey', 'number': '113'},
    {'id': '114', 'name': 'Tangela', 'number': '114'},
    {'id': '115', 'name': 'Kangaskhan', 'number': '115'},
    {'id': '116', 'name': 'Horsea', 'number': '116'},
    {'id': '117', 'name': 'Seadra', 'number': '117'},
    {'id': '118', 'name': 'Goldeen', 'number': '118'},
    {'id': '119', 'name': 'Seaking', 'number': '119'},
    {'id': '120', 'name': 'Staryu', 'number': '120'},
    {'id': '121', 'name': 'Starmie', 'number': '121'},
    {'id': '122', 'name': 'Mr. Mime', 'number': '122'},
    {'id': '123', 'name': 'Scyther', 'number': '123'},
    {'id': '124', 'name': 'Jynx', 'number': '124'},
    {'id': '125', 'name': 'Electabuzz', 'number': '125'},
    {'id': '126', 'name': 'Magmar', 'number': '126'},
    {'id': '127', 'name': 'Pinsir', 'number': '127'},
    {'id': '128', 'name': 'Tauros', 'number': '128'},
    {'id': '129', 'name': 'Magikarp', 'number': '129'},
    {'id': '130', 'name': 'Gyarados', 'number': '130'},
    {'id': '131', 'name': 'Lapras', 'number': '131'},
    {'id': '132', 'name': 'Ditto', 'number': '132'},
    {'id': '133', 'name': 'Eevee', 'number': '133'},
    {'id': '134', 'name': 'Vaporeon', 'number': '134'},
    {'id': '135', 'name': 'Jolteon', 'number': '135'},
    {'id': '136', 'name': 'Flareon', 'number': '136'},
    {'id': '137', 'name': 'Porygon', 'number': '137'},
    {'id': '138', 'name': 'Omanyte', 'number': '138'},
    {'id': '139', 'name': 'Omastar', 'number': '139'},
    {'id': '140', 'name': 'Kabuto', 'number': '140'},
    {'id': '141', 'name': 'Kabutops', 'number': '141'},
    {'id': '142', 'name': 'Aerodactyl', 'number': '142'},
    {'id': '143', 'name': 'Snorlax', 'number': '143'},
    {'id': '144', 'name': 'Articuno', 'number': '144'},
    {'id': '145', 'name': 'Zapdos', 'number': '145'},
    {'id': '146', 'name': 'Moltres', 'number': '146'},
    {'id': '147', 'name': 'Dratini', 'number': '147'},
    {'id': '148', 'name': 'Dragonair', 'number': '148'},
    {'id': '149', 'name': 'Dragonite', 'number': '149'},
    {'id': '150', 'name': 'Mewtwo', 'number': '150'},
    # Name per the armored Mewtwo form's id; verify against the cited sources.
    {'id': 'mewtwo-armor', 'name': 'Armored Mewtwo', 'number': '150'},
    {'id': '151', 'name': 'Mew', 'number': '151'},
]
JOHTO = [
{'id': '152', 'name': 'Chikorita', 'number': '152'},
{'id': '153', 'name': 'Bayleef', 'number': '153'},
{'id': '154', 'name': 'Meganium', 'number': '154'},
{'id': '155', 'name': 'Cyndaquil', 'number': '155'},
{'id': '156', 'name': 'Quilava', 'number': '156'},
{'id': '157', 'name': 'Typhlosion', 'number': '157'},
{'id': '158', 'name': 'Totodile', 'number': '158'},
{'id': '159', 'name': 'Croconaw', 'number': '159'},
{'id': '160', 'name': 'Feraligatr', 'number': '160'},
{'id': '161', 'name': 'Sentret', 'number': '161'},
{'id': '162', 'name': 'Furret', 'number': '162'},
{'id': '163', 'name': 'Hoothoot', 'number': '163'},
{'id': '164', 'name': 'Noctowl', 'number': '164'},
{'id': '165', 'name': 'Ledyba', 'number': '165'},
{'id': '166', 'name': 'Ledian', 'number': '166'},
{'id': '167', 'name': 'Spinarak', 'number': '167'},
{'id': '168', 'name': 'Ariados', 'number': '168'},
{'id': '169', 'name': 'Crobat', 'number': '169'},
{'id': '170', 'name': 'Chinchou', 'number': '170'},
{'id': '171', 'name': 'Lanturn', 'number': '171'},
{'id': '172', 'name': 'Pichu', 'number': '172'},
{'id': '173', 'name': 'Cleffa', 'number': '173'},
{'id': '174', 'name': 'Igglybuff', 'number': '174'},
{'id': '175', 'name': 'Togepi', 'number': '175'},
{'id': '176', 'name': 'Togetic', 'number': '176'},
{'id': '177', 'name': 'Natu', 'number': '177'},
{'id': '178', 'name': 'Xatu', 'number': '178'},
{'id': '179', 'name': 'Mareep', 'number': '179'},
{'id': '180', 'name': 'Flaaffy', 'number': '180'},
{'id': '181', 'name': 'Ampharos', 'number': '181'},
{'id': '182', 'name': 'Bellossom', 'number': '182'},
{'id': '183', 'name': 'Marill', 'number': '183'},
{'id': '184', 'name': 'Azumarill', 'number': '184'},
{'id': '185', 'name': 'Sudowoodo', 'number': '185'},
{'id': '186', 'name': 'Politoed', 'number': '186'},
{'id': '187', 'name': 'Hoppip', 'number': '187'},
{'id': '188', 'name': 'Skiploom', 'number': '188'},
{'id': '189', 'name': 'Jumpluff', 'number': '189'},
{'id': '190', 'name': 'Aipom', 'number': '190'},
{'id': '191', 'name': 'Sunkern', 'number': '191'},
{'id': '192', 'name': 'Sunflora', 'number': '192'},
{'id': '193', 'name': 'Yanma', 'number': '193'},
{'id': '194', 'name': 'Wooper', 'number': '194'},
{'id': '195', 'name': 'Quagsire', 'number': '195'},
{'id': '196', 'name': 'Espeon', 'number': '196'},
{'id': '197', 'name': 'Umbreon', 'number': '197'},
{'id': '198', 'name': 'Murkrow', 'number': '198'},
{'id': '199', 'name': 'Slowking', 'number': '199'},
{'id': '200', 'name': 'Misdreavus', 'number': '200'},
{'id': 'unown-a', 'name': 'Unown A', 'number': '201'},
{'id': 'unown-b', 'name': 'Unown B', 'number': '201'},
{'id': 'unown-c', 'name': 'Unown C', 'number': '201'},
{'id': 'unown-d', 'name': 'Unown D', 'number': '201'},
{'id': 'unown-e', 'name': 'Unown E', 'number': '201'},
{'id': 'unown-f', 'name': 'Unown F', 'number': '201'},
{'id': 'unown-g', 'name': 'Unown G', 'number': '201'},
{'id': 'unown-h', 'name': 'Unown H', 'number': '201'},
{'id': 'unown-i', 'name': 'Unown I', 'number': '201'},
{'id': 'unown-j', 'name': 'Unown J', 'number': '201'},
{'id': 'unown-k', 'name': 'Unown K', 'number': '201'},
{'id': 'unown-l', 'name': 'Unown L', 'number': '201'},
{'id': 'unown-m', 'name': 'Unown M', 'number': '201'},
{'id': 'unown-n', 'name': 'Unown N', 'number': '201'},
{'id': 'unown-o', 'name': 'Unown O', 'number': '201'},
{'id': 'unown-p', 'name': 'Unown P', 'number': '201'},
{'id': 'unown-q', 'name': 'Unown Q', 'number': '201'},
{'id': 'unown-r', 'name': 'Unown R', 'number': '201'},
{'id': 'unown-s', 'name': 'Unown S', 'number': '201'},
{'id': 'unown-t', 'name': 'Unown T', 'number': '201'},
{'id': 'unown-u', 'name': 'Unown U', 'number': '201'},
{'id': 'unown-v', 'name': 'Unown V', 'number': '201'},
{'id': 'unown-w', 'name': 'Unown W', 'number': '201'},
{'id': 'unown-x', 'name': 'Unown X', 'number': '201'},
{'id': 'unown-y', 'name': 'Unown Y', 'number': '201'},
{'id': 'unown-z', 'name': 'Unown Z', 'number': '201'},
{'id': '202', 'name': 'Wobbuffet', 'number': '202'},
{'id': '203', 'name': 'Girafarig', 'number': '203'},
{'id': '204', 'name': 'Pineco', 'number': '204'},
{'id': '205', 'name': 'Forretress', 'number': '205'},
{'id': '206', 'name': 'Dunsparce', 'number': '206'},
{'id': '207', 'name': 'Gligar', 'number': '207'},
{'id': '208', 'name': 'Steelix', 'number': '208'},
{'id': '209', 'name': 'Snubbull', 'number': '209'},
{'id': '210', 'name': 'Granbull', 'number': '210'},
{'id': '211', 'name': 'Qwilfish', 'number': '211'},
{'id': '212', 'name': 'Scizor', 'number': '212'},
{'id': '213', 'name': 'Shuckle', 'number': '213'},
{'id': '214', 'name': 'Heracross', 'number': '214'},
{'id': '215', 'name': 'Sneasel', 'number': '215'},
{'id': '216', 'name': 'Teddiursa', 'number': '216'},
{'id': '217', 'name': 'Ursaring', 'number': '217'},
{'id': '218', 'name': 'Slugma', 'number': '218'},
{'id': '219', 'name': 'Magcargo', 'number': '219'},
{'id': '220', 'name': 'Swinub', 'number': '220'},
{'id': '221', 'name': 'Piloswine', 'number': '221'},
{'id': '222', 'name': 'Corsola', 'number': '222'},
{'id': '223', 'name': 'Remoraid', 'number': '223'},
{'id': '224', 'name': 'Octillery', 'number': '224'},
{'id': '225', 'name': 'Delibird', 'number': '225'},
{'id': '226', 'name': 'Mantine', 'number': '226'},
{'id': '227', 'name': 'Skarmory', 'number': '227'},
{'id': '228', 'name': 'Houndour', 'number': '228'},
{'id': '229', 'name': 'Houndoom', 'number': '229'},
{'id': '230', 'name': 'Kingdra', 'number': '230'},
{'id': '231', 'name': 'Phanpy', 'number': '231'},
{'id': '232', 'name': 'Donphan', 'number': '232'},
{'id': '233', 'name': 'Porygon2', 'number': '233'},
{'id': '234', 'name': 'Stantler', 'number': '234'},
{'id': '235', 'name': 'Smeargle', 'number': '235'},
{'id': '236', 'name': 'Tyrogue', 'number': '236'},
{'id': '237', 'name': 'Hitmontop', 'number': '237'},
{'id': '238', 'name': 'Smoochum', 'number': '238'},
{'id': '239', 'name': 'Elekid', 'number': '239'},
{'id': '240', 'name': 'Magby', 'number': '240'},
{'id': '241', 'name': 'Miltank', 'number': '241'},
{'id': '242', 'name': 'Blissey', 'number': '242'},
{'id': '243', 'name': 'Raikou', 'number': '243'},
{'id': '244', 'name': 'Entei', 'number': '244'},
{'id': '245', 'name': 'Suicune', 'number': '245'},
{'id': '246', 'name': 'Larvitar', 'number': '246'},
{'id': '247', 'name': 'Pupitar', 'number': '247'},
{'id': '248', 'name': 'Tyranitar', 'number': '248'},
{'id': '249', 'name': 'Lugia', 'number': '249'},
{'id': '250', 'name': 'Ho-Oh', 'number': '250'},
{'id': '251', 'name': 'Celebi', 'number': '251'},
]
HOENN = [
{'id': 'unown-em', 'name': 'Unown !', 'number': '201'},
{'id': 'unown-qm', 'name': 'Unown ?', 'number': '201'},
{'id': '252', 'name': 'Treecko', 'number': '252'},
{'id': '253', 'name': 'Grovyle', 'number': '253'},
{'id': '254', 'name': 'Sceptile', 'number': '254'},
{'id': '255', 'name': 'Torchic', 'number': '255'},
{'id': '256', 'name': 'Combusken', 'number': '256'},
{'id': '257', 'name': 'Blaziken', 'number': '257'},
{'id': '258', 'name': 'Mudkip', 'number': '258'},
{'id': '259', 'name': 'Marshtomp', 'number': '259'},
{'id': '260', 'name': 'Swampert', 'number': '260'},
{'id': '261', 'name': 'Poochyena', 'number': '261'},
{'id': '262', 'name': 'Mightyena', 'number': '262'},
{'id': '263', 'name': 'Zigzagoon', 'number': '263'},
{'id': '264', 'name': 'Linoone', 'number': '264'},
{'id': '265', 'name': 'Wurmple', 'number': '265'},
{'id': '266', 'name': 'Silcoon', 'number': '266'},
{'id': '267', 'name': 'Beautifly', 'number': '267'},
{'id': '268', 'name': 'Cascoon', 'number': '268'},
{'id': '269', 'name': 'Dustox', 'number': '269'},
{'id': '270', 'name': 'Lotad', 'number': '270'},
{'id': '271', 'name': 'Lombre', 'number': '271'},
{'id': '272', 'name': 'Ludicolo', 'number': '272'},
{'id': '273', 'name': 'Seedot', 'number': '273'},
{'id': '274', 'name': 'Nuzleaf', 'number': '274'},
{'id': '275', 'name': 'Shiftry', 'number': '275'},
{'id': '276', 'name': 'Taillow', 'number': '276'},
{'id': '277', 'name': 'Swellow', 'number': '277'},
{'id': '278', 'name': 'Wingull', 'number': '278'},
{'id': '279', 'name': 'Pelipper', 'number': '279'},
{'id': '280', 'name': 'Ralts', 'number': '280'},
{'id': '281', 'name': 'Kirlia', 'number': '281'},
{'id': '282', 'name': 'Gardevoir', 'number': '282'},
{'id': '283', 'name': 'Surskit', 'number': '283'},
{'id': '284', 'name': 'Masquerain', 'number': '284'},
{'id': '285', 'name': 'Shroomish', 'number': '285'},
{'id': '286', 'name': 'Breloom', 'number': '286'},
{'id': '287', 'name': 'Slakoth', 'number': '287'},
{'id': '288', 'name': 'Vigoroth', 'number': '288'},
{'id': '289', 'name': 'Slaking', 'number': '289'},
{'id': '290', 'name': 'Nincada', 'number': '290'},
{'id': '291', 'name': 'Ninjask', 'number': '291'},
{'id': '292', 'name': 'Shedinja', 'number': '292'},
{'id': '293', 'name': 'Whismur', 'number': '293'},
{'id': '294', 'name': 'Loudred', 'number': '294'},
{'id': '295', 'name': 'Exploud', 'number': '295'},
{'id': '296', 'name': 'Makuhita', 'number': '296'},
{'id': '297', 'name': 'Hariyama', 'number': '297'},
{'id': '298', 'name': 'Azurill', 'number': '298'},
{'id': '299', 'name': 'Nosepass', 'number': '299'},
{'id': '300', 'name': 'Skitty', 'number': '300'},
{'id': '301', 'name': 'Delcatty', 'number': '301'},
{'id': '302', 'name': 'Sableye', 'number': '302'},
{'id': '303', 'name': 'Mawile', 'number': '303'},
{'id': '304', 'name': 'Aron', 'number': '304'},
{'id': '305', 'name': 'Lairon', 'number': '305'},
{'id': '306', 'name': 'Aggron', 'number': '306'},
{'id': '307', 'name': 'Meditite', 'number': '307'},
{'id': '308', 'name': 'Medicham', 'number': '308'},
{'id': '309', 'name': 'Electrike', 'number': '309'},
{'id': '310', 'name': 'Manectric', 'number': '310'},
{'id': '311', 'name': 'Plusle', 'number': '311'},
{'id': '312', 'name': 'Minun', 'number': '312'},
{'id': '313', 'name': 'Volbeat', 'number': '313'},
{'id': '314', 'name': 'Illumise', 'number': '314'},
{'id': '315', 'name': 'Roselia', 'number': '315'},
{'id': '316', 'name': 'Gulpin', 'number': '316'},
{'id': '317', 'name': 'Swalot', 'number': '317'},
{'id': '318', 'name': 'Carvanha', 'number': '318'},
{'id': '319', 'name': 'Sharpedo', 'number': '319'},
{'id': '320', 'name': 'Wailmer', 'number': '320'},
{'id': '321', 'name': 'Wailord', 'number': '321'},
{'id': '322', 'name': 'Numel', 'number': '322'},
{'id': '323', 'name': 'Camerupt', 'number': '323'},
{'id': '324', 'name': 'Torkoal', 'number': '324'},
{'id': '325', 'name': 'Spoink', 'number': '325'},
{'id': '326', 'name': 'Grumpig', 'number': '326'},
{'id': '327', 'name': 'Spinda', 'number': '327'},
{'id': '328', 'name': 'Trapinch', 'number': '328'},
{'id': '329', 'name': 'Vibrava', 'number': '329'},
{'id': '330', 'name': 'Flygon', 'number': '330'},
{'id': '331', 'name': 'Cacnea', 'number': '331'},
{'id': '332', 'name': 'Cacturne', 'number': '332'},
{'id': '333', 'name': 'Swablu', 'number': '333'},
{'id': '334', 'name': 'Altaria', 'number': '334'},
{'id': '335', 'name': 'Zangoose', 'number': '335'},
{'id': '336', 'name': 'Seviper', 'number': '336'},
{'id': '337', 'name': 'Lunatone', 'number': '337'},
{'id': '338', 'name': 'Solrock', 'number': '338'},
{'id': '339', 'name': 'Barboach', 'number': '339'},
{'id': '340', 'name': 'Whiscash', 'number': '340'},
{'id': '341', 'name': 'Corphish', 'number': '341'},
{'id': '342', 'name': 'Crawdaunt', 'number': '342'},
{'id': '343', 'name': 'Baltoy', 'number': '343'},
{'id': '344', 'name': 'Claydol', 'number': '344'},
{'id': '345', 'name': 'Lileep', 'number': '345'},
{'id': '346', 'name': 'Cradily', 'number': '346'},
{'id': '347', 'name': 'Anorith', 'number': '347'},
{'id': '348', 'name': 'Armaldo', 'number': '348'},
{'id': '349', 'name': 'Feebas', 'number': '349'},
{'id': '350', 'name': 'Milotic', 'number': '350'},
{'id': '351', 'name': 'Normal Castform', 'number': '351'},
{'id': 'castform-rainy', 'name': 'Rainy Castform', 'number': '351'},
{'id': 'castform-snowy', 'name': 'Snowy Castform', 'number': '351'},
{'id': 'castform-sunny', 'name': 'Sunny Castform', 'number': '351'},
{'id': '352', 'name': 'Kecleon', 'number': '352'},
{'id': '353', 'name': 'Shuppet', 'number': '353'},
{'id': '354', 'name': 'Banette', 'number': '354'},
{'id': '355', 'name': 'Duskull', 'number': '355'},
{'id': '356', 'name': 'Dusclops', 'number': '356'},
{'id': '357', 'name': 'Tropius', 'number': '357'},
{'id': '358', 'name': 'Chimecho', 'number': '358'},
{'id': '359', 'name': 'Absol', 'number': '359'},
{'id': '360', 'name': 'Wynaut', 'number': '360'},
{'id': '361', 'name': 'Snorunt', 'number': '361'},
{'id': '362', 'name': 'Glalie', 'number': '362'},
{'id': '363', 'name': 'Spheal', 'number': '363'},
{'id': '364', 'name': 'Sealeo', 'number': '364'},
{'id': '365', 'name': 'Walrein', 'number': '365'},
{'id': '366', 'name': 'Clamperl', 'number': '366'},
{'id': '367', 'name': 'Huntail', 'number': '367'},
{'id': '368', 'name': 'Gorebyss', 'number': '368'},
{'id': '369', 'name': 'Relicanth', 'number': '369'},
{'id': '370', 'name': 'Luvdisc', 'number': '370'},
{'id': '371', 'name': 'Bagon', 'number': '371'},
{'id': '372', 'name': 'Shelgon', 'number': '372'},
{'id': '373', 'name': 'Salamence', 'number': '373'},
{'id': '374', 'name': 'Beldum', 'number': '374'},
{'id': '375', 'name': 'Metang', 'number': '375'},
{'id': '376', 'name': 'Metagross', 'number': '376'},
{'id': '377', 'name': 'Regirock', 'number': '377'},
{'id': '378', 'name': 'Regice', 'number': '378'},
{'id': '379', 'name': 'Registeel', 'number': '379'},
{'id': '380', 'name': 'Latias', 'number': '380'},
{'id': '381', 'name': 'Latios', 'number': '381'},
{'id': '382', 'name': 'Kyogre', 'number': '382'},
{'id': '383', 'name': 'Groudon', 'number': '383'},
{'id': '384', 'name': 'Rayquaza', 'number': '384'},
{'id': '385', 'name': 'Jirachi', 'number': '385'},
{'id': '386', 'name': 'Deoxys (Normal Forme)', 'number': '386'},
{'id': 'deoxys-attack', 'name': 'Deoxys (Attack Forme)', 'number': '386'},
{'id': 'deoxys-defense', 'name': 'Deoxys (Defense Forme)', 'number': '386'},
{'id': 'deoxys-speed', 'name': 'Deoxys (Speed Forme)', 'number': '386'},
]
# Generation IV (Sinnoh) Pokédex entries, National Dex #387-#493.
# Each entry: 'id' is a lowercase slug unique per visual form, 'name' is the
# display name, 'number' is the National Dex number (shared across forms,
# e.g. the five Rotom appliance forms all carry number '479').
SINNOH = [
    {'id': '387', 'name': 'Turtwig', 'number': '387'},
    {'id': '388', 'name': 'Grotle', 'number': '388'},
    {'id': '389', 'name': 'Torterra', 'number': '389'},
    {'id': '390', 'name': 'Chimchar', 'number': '390'},
    {'id': '391', 'name': 'Monferno', 'number': '391'},
    {'id': '392', 'name': 'Infernape', 'number': '392'},
    {'id': '393', 'name': 'Piplup', 'number': '393'},
    {'id': '394', 'name': 'Prinplup', 'number': '394'},
    {'id': '395', 'name': 'Empoleon', 'number': '395'},
    {'id': '396', 'name': 'Starly', 'number': '396'},
    {'id': '397', 'name': 'Staravia', 'number': '397'},
    {'id': '398', 'name': 'Staraptor', 'number': '398'},
    {'id': '399', 'name': 'Bidoof', 'number': '399'},
    {'id': '400', 'name': 'Bibarel', 'number': '400'},
    {'id': '401', 'name': 'Kricketot', 'number': '401'},
    {'id': '402', 'name': 'Kricketune', 'number': '402'},
    {'id': '403', 'name': 'Shinx', 'number': '403'},
    {'id': '404', 'name': 'Luxio', 'number': '404'},
    {'id': '405', 'name': 'Luxray', 'number': '405'},
    {'id': '406', 'name': 'Budew', 'number': '406'},
    {'id': '407', 'name': 'Roserade', 'number': '407'},
    {'id': '408', 'name': 'Cranidos', 'number': '408'},
    {'id': '409', 'name': 'Rampardos', 'number': '409'},
    {'id': '410', 'name': 'Shieldon', 'number': '410'},
    {'id': '411', 'name': 'Bastiodon', 'number': '411'},
    {'id': 'burmy-plant', 'name': 'Burmy (Plant Cloak)', 'number': '412'},
    {'id': 'burmy-sandy', 'name': 'Burmy (Sandy Cloak)', 'number': '412'},
    {'id': 'burmy-trash', 'name': 'Burmy (Trash Cloak)', 'number': '412'},
    {'id': 'wormadam-plant', 'name': 'Wormadam (Plant Cloak)', 'number': '413'},
    {'id': 'wormadam-sandy', 'name': 'Wormadam (Sandy Cloak)', 'number': '413'},
    {'id': 'wormadam-trash', 'name': 'Wormadam (Trash Cloak)', 'number': '413'},
    {'id': '414', 'name': 'Mothim', 'number': '414'},
    {'id': '415', 'name': 'Combee', 'number': '415'},
    {'id': '416', 'name': 'Vespiquen', 'number': '416'},
    {'id': '417', 'name': 'Pachirisu', 'number': '417'},
    {'id': '418', 'name': 'Buizel', 'number': '418'},
    {'id': '419', 'name': 'Floatzel', 'number': '419'},
    {'id': '420', 'name': 'Cherubi', 'number': '420'},
    {'id': 'cherrim-overcast', 'name': 'Cherrim (Overcast Form)', 'number': '421'},
    {'id': 'cherrim-sunshine', 'name': 'Cherrim (Sunshine Form)', 'number': '421'},
    {'id': 'shellos-west-sea', 'name': 'Shellos (West Sea)', 'number': '422'},
    {'id': 'shellos-east-sea', 'name': 'Shellos (East Sea)', 'number': '422'},
    {'id': 'gastrodon-west-sea', 'name': 'Gastrodon (West Sea)', 'number': '423'},
    {'id': 'gastrodon-east-sea', 'name': 'Gastrodon (East Sea)', 'number': '423'},
    {'id': '424', 'name': 'Ambipom', 'number': '424'},
    {'id': '425', 'name': 'Drifloon', 'number': '425'},
    {'id': '426', 'name': 'Drifblim', 'number': '426'},
    {'id': '427', 'name': 'Buneary', 'number': '427'},
    {'id': '428', 'name': 'Lopunny', 'number': '428'},
    {'id': '429', 'name': 'Mismagius', 'number': '429'},
    {'id': '430', 'name': 'Honchkrow', 'number': '430'},
    {'id': '431', 'name': 'Glameow', 'number': '431'},
    {'id': '432', 'name': 'Purugly', 'number': '432'},
    {'id': '433', 'name': 'Chingling', 'number': '433'},
    {'id': '434', 'name': 'Stunky', 'number': '434'},
    {'id': '435', 'name': 'Skuntank', 'number': '435'},
    {'id': '436', 'name': 'Bronzor', 'number': '436'},
    {'id': '437', 'name': 'Bronzong', 'number': '437'},
    {'id': '438', 'name': 'Bonsly', 'number': '438'},
    # Restored from placeholder corruption; #439 is Mime Jr.
    {'id': '439', 'name': 'Mime Jr.', 'number': '439'},
    {'id': '440', 'name': 'Happiny', 'number': '440'},
    {'id': '441', 'name': 'Chatot', 'number': '441'},
    {'id': '442', 'name': 'Spiritomb', 'number': '442'},
    {'id': '443', 'name': 'Gible', 'number': '443'},
    {'id': '444', 'name': 'Gabite', 'number': '444'},
    {'id': '445', 'name': 'Garchomp', 'number': '445'},
    {'id': '446', 'name': 'Munchlax', 'number': '446'},
    {'id': '447', 'name': 'Riolu', 'number': '447'},
    {'id': '448', 'name': 'Lucario', 'number': '448'},
    {'id': '449', 'name': 'Hippopotas', 'number': '449'},
    {'id': '450', 'name': 'Hippowdon', 'number': '450'},
    {'id': '451', 'name': 'Skorupi', 'number': '451'},
    {'id': '452', 'name': 'Drapion', 'number': '452'},
    {'id': '453', 'name': 'Croagunk', 'number': '453'},
    {'id': '454', 'name': 'Toxicroak', 'number': '454'},
    {'id': '455', 'name': 'Carnivine', 'number': '455'},
    {'id': '456', 'name': 'Finneon', 'number': '456'},
    {'id': '457', 'name': 'Lumineon', 'number': '457'},
    {'id': '458', 'name': 'Mantyke', 'number': '458'},
    {'id': '459', 'name': 'Snover', 'number': '459'},
    {'id': '460', 'name': 'Abomasnow', 'number': '460'},
    {'id': '461', 'name': 'Weavile', 'number': '461'},
    {'id': '462', 'name': 'Magnezone', 'number': '462'},
    {'id': '463', 'name': 'Lickilicky', 'number': '463'},
    {'id': '464', 'name': 'Rhyperior', 'number': '464'},
    {'id': '465', 'name': 'Tangrowth', 'number': '465'},
    {'id': '466', 'name': 'Electivire', 'number': '466'},
    {'id': '467', 'name': 'Magmortar', 'number': '467'},
    {'id': '468', 'name': 'Togekiss', 'number': '468'},
    {'id': '469', 'name': 'Yanmega', 'number': '469'},
    {'id': '470', 'name': 'Leafeon', 'number': '470'},
    {'id': '471', 'name': 'Glaceon', 'number': '471'},
    {'id': '472', 'name': 'Gliscor', 'number': '472'},
    {'id': '473', 'name': 'Mamoswine', 'number': '473'},
    {'id': '474', 'name': 'Porygon-Z', 'number': '474'},
    {'id': '475', 'name': 'Gallade', 'number': '475'},
    {'id': '476', 'name': 'Probopass', 'number': '476'},
    {'id': '477', 'name': 'Dusknoir', 'number': '477'},
    {'id': '478', 'name': 'Froslass', 'number': '478'},
    {'id': '479', 'name': 'Rotom', 'number': '479'},
    # Rotom appliance forms: display names restored from the form slugs.
    {'id': 'rotom-fan', 'name': 'Fan Rotom', 'number': '479'},
    {'id': 'rotom-frost', 'name': 'Frost Rotom', 'number': '479'},
    {'id': 'rotom-heat', 'name': 'Heat Rotom', 'number': '479'},
    {'id': 'rotom-mow', 'name': 'Mow Rotom', 'number': '479'},
    {'id': 'rotom-wash', 'name': 'Wash Rotom', 'number': '479'},
    {'id': '480', 'name': 'Uxie', 'number': '480'},
    {'id': '481', 'name': 'Mesprit', 'number': '481'},
    {'id': '482', 'name': 'Azelf', 'number': '482'},
    {'id': '483', 'name': 'Dialga', 'number': '483'},
    {'id': '484', 'name': 'Palkia', 'number': '484'},
    {'id': '485', 'name': 'Heatran', 'number': '485'},
    {'id': '486', 'name': 'Regigigas', 'number': '486'},
    {'id': 'giratina-altered', 'name': 'Giratina (Altered Forme)', 'number': '487'},
    {'id': 'giratina-origin', 'name': 'Giratina (Origin Forme)', 'number': '487'},
    {'id': '488', 'name': 'Cresselia', 'number': '488'},
    {'id': '489', 'name': 'Phione', 'number': '489'},
    {'id': '490', 'name': 'Manaphy', 'number': '490'},
    {'id': '491', 'name': 'Darkrai', 'number': '491'},
    {'id': 'shaymin-land', 'name': 'Shaymin (Land Forme)', 'number': '492'},
    {'id': 'shaymin-sky', 'name': 'Shaymin (Sky Forme)', 'number': '492'},
    {'id': '493', 'name': 'Arceus', 'number': '493'},
]
# Generation V (Unova) Pokédex entries, National Dex #494-#649.
# Same record shape as the other regional lists: form variants share a
# 'number' but have distinct slug 'id's (e.g. the four Deerling seasons).
UNOVA = [
    {'id': '494', 'name': 'Victini', 'number': '494'},
    {'id': '495', 'name': 'Snivy', 'number': '495'},
    {'id': '496', 'name': 'Servine', 'number': '496'},
    {'id': '497', 'name': 'Serperior', 'number': '497'},
    {'id': '498', 'name': 'Tepig', 'number': '498'},
    {'id': '499', 'name': 'Pignite', 'number': '499'},
    {'id': '500', 'name': 'Emboar', 'number': '500'},
    {'id': '501', 'name': 'Oshawott', 'number': '501'},
    {'id': '502', 'name': 'Dewott', 'number': '502'},
    {'id': '503', 'name': 'Samurott', 'number': '503'},
    {'id': '504', 'name': 'Patrat', 'number': '504'},
    {'id': '505', 'name': 'Watchog', 'number': '505'},
    {'id': '506', 'name': 'Lillipup', 'number': '506'},
    {'id': '507', 'name': 'Herdier', 'number': '507'},
    {'id': '508', 'name': 'Stoutland', 'number': '508'},
    {'id': '509', 'name': 'Purrloin', 'number': '509'},
    {'id': '510', 'name': 'Liepard', 'number': '510'},
    {'id': '511', 'name': 'Pansage', 'number': '511'},
    {'id': '512', 'name': 'Simisage', 'number': '512'},
    {'id': '513', 'name': 'Pansear', 'number': '513'},
    {'id': '514', 'name': 'Simisear', 'number': '514'},
    {'id': '515', 'name': 'Panpour', 'number': '515'},
    {'id': '516', 'name': 'Simipour', 'number': '516'},
    {'id': '517', 'name': 'Munna', 'number': '517'},
    {'id': '518', 'name': 'Musharna', 'number': '518'},
    {'id': '519', 'name': 'Pidove', 'number': '519'},
    {'id': '520', 'name': 'Tranquill', 'number': '520'},
    {'id': '521', 'name': 'Unfezant (Both)', 'number': '521'},
    {'id': 'unfezant-male', 'name': 'Unfezant (Male)', 'number': '521'},
    {'id': 'unfezant-female', 'name': 'Unfezant (Female)', 'number': '521'},
    {'id': '522', 'name': 'Blitzle', 'number': '522'},
    {'id': '523', 'name': 'Zebstrika', 'number': '523'},
    {'id': '524', 'name': 'Roggenrola', 'number': '524'},
    {'id': '525', 'name': 'Boldore', 'number': '525'},
    {'id': '526', 'name': 'Gigalith', 'number': '526'},
    {'id': '527', 'name': 'Woobat', 'number': '527'},
    {'id': '528', 'name': 'Swoobat', 'number': '528'},
    {'id': '529', 'name': 'Drilbur', 'number': '529'},
    {'id': '530', 'name': 'Excadrill', 'number': '530'},
    {'id': '531', 'name': 'Audino', 'number': '531'},
    {'id': '532', 'name': 'Timburr', 'number': '532'},
    {'id': '533', 'name': 'Gurdurr', 'number': '533'},
    {'id': '534', 'name': 'Conkeldurr', 'number': '534'},
    {'id': '535', 'name': 'Tympole', 'number': '535'},
    {'id': '536', 'name': 'Palpitoad', 'number': '536'},
    {'id': '537', 'name': 'Seismitoad', 'number': '537'},
    {'id': '538', 'name': 'Throh', 'number': '538'},
    {'id': '539', 'name': 'Sawk', 'number': '539'},
    {'id': '540', 'name': 'Sewaddle', 'number': '540'},
    {'id': '541', 'name': 'Swadloon', 'number': '541'},
    {'id': '542', 'name': 'Leavanny', 'number': '542'},
    {'id': '543', 'name': 'Venipede', 'number': '543'},
    {'id': '544', 'name': 'Whirlipede', 'number': '544'},
    {'id': '545', 'name': 'Scolipede', 'number': '545'},
    {'id': '546', 'name': 'Cottonee', 'number': '546'},
    {'id': '547', 'name': 'Whimsicott', 'number': '547'},
    {'id': '548', 'name': 'Petilil', 'number': '548'},
    {'id': '549', 'name': 'Lilligant', 'number': '549'},
    # Typo fix: "Froms" -> "Forms".
    {'id': '550', 'name': 'Basculin (Both Forms)', 'number': '550'},
    {'id': 'basculin-red', 'name': 'Basculin (Red-Striped Form)', 'number': '550'},
    {'id': 'basculin-blue', 'name': 'Basculin (Blue-Striped Form)', 'number': '550'},
    {'id': '551', 'name': 'Sandile', 'number': '551'},
    {'id': '552', 'name': 'Krokorok', 'number': '552'},
    {'id': '553', 'name': 'Krookodile', 'number': '553'},
    {'id': '554', 'name': 'Darumaka', 'number': '554'},
    {'id': '555', 'name': 'Darmanitan (Standard Mode)', 'number': '555'},
    {'id': 'darmanitan-zen', 'name': 'Darmanitan (Zen Mode)', 'number': '555'},
    {'id': '556', 'name': 'Maractus', 'number': '556'},
    {'id': '557', 'name': 'Dwebble', 'number': '557'},
    {'id': '558', 'name': 'Crustle', 'number': '558'},
    {'id': '559', 'name': 'Scraggy', 'number': '559'},
    {'id': '560', 'name': 'Scrafty', 'number': '560'},
    {'id': '561', 'name': 'Sigilyph', 'number': '561'},
    {'id': '562', 'name': 'Yamask', 'number': '562'},
    {'id': '563', 'name': 'Cofagrigus', 'number': '563'},
    {'id': '564', 'name': 'Tirtouga', 'number': '564'},
    {'id': '565', 'name': 'Carracosta', 'number': '565'},
    {'id': '566', 'name': 'Archen', 'number': '566'},
    {'id': '567', 'name': 'Archeops', 'number': '567'},
    {'id': '568', 'name': 'Trubbish', 'number': '568'},
    {'id': '569', 'name': 'Garbodor', 'number': '569'},
    {'id': '570', 'name': 'Zorua', 'number': '570'},
    {'id': '571', 'name': 'Zoroark', 'number': '571'},
    {'id': '572', 'name': 'Minccino', 'number': '572'},
    {'id': '573', 'name': 'Cinccino', 'number': '573'},
    {'id': '574', 'name': 'Gothita', 'number': '574'},
    {'id': '575', 'name': 'Gothorita', 'number': '575'},
    {'id': '576', 'name': 'Gothitelle', 'number': '576'},
    {'id': '577', 'name': 'Solosis', 'number': '577'},
    {'id': '578', 'name': 'Duosion', 'number': '578'},
    {'id': '579', 'name': 'Reuniclus', 'number': '579'},
    {'id': '580', 'name': 'Ducklett', 'number': '580'},
    {'id': '581', 'name': 'Swanna', 'number': '581'},
    {'id': '582', 'name': 'Vanillite', 'number': '582'},
    {'id': '583', 'name': 'Vanillish', 'number': '583'},
    {'id': '584', 'name': 'Vanilluxe', 'number': '584'},
    {'id': 'deerling-spring', 'name': 'Deerling (Spring Form)', 'number': '585'},
    {'id': 'deerling-autumn', 'name': 'Deerling (Autumn Form)', 'number': '585'},
    {'id': 'deerling-summer', 'name': 'Deerling (Summer Form)', 'number': '585'},
    {'id': 'deerling-winter', 'name': 'Deerling (Winter Form)', 'number': '585'},
    {'id': 'sawsbuck-spring', 'name': 'Sawsbuck (Spring Form)', 'number': '586'},
    {'id': 'sawsbuck-autumn', 'name': 'Sawsbuck (Autumn Form)', 'number': '586'},
    {'id': 'sawsbuck-summer', 'name': 'Sawsbuck (Summer Form)', 'number': '586'},
    {'id': 'sawsbuck-winter', 'name': 'Sawsbuck (Winter Form)', 'number': '586'},
    {'id': '587', 'name': 'Emolga', 'number': '587'},
    {'id': '588', 'name': 'Karrablast', 'number': '588'},
    {'id': '589', 'name': 'Escavalier', 'number': '589'},
    {'id': '590', 'name': 'Foongus', 'number': '590'},
    {'id': '591', 'name': 'Amoonguss', 'number': '591'},
    {'id': '592', 'name': 'Frillish (Both)', 'number': '592'},
    {'id': 'frillish-male', 'name': 'Frillish (Male)', 'number': '592'},
    {'id': 'frillish-female', 'name': 'Frillish (Female)', 'number': '592'},
    {'id': '593', 'name': 'Jellicent (Both)', 'number': '593'},
    {'id': 'jellicent-male', 'name': 'Jellicent (Male)', 'number': '593'},
    {'id': 'jellicent-female', 'name': 'Jellicent (Female)', 'number': '593'},
    {'id': '594', 'name': 'Alomomola', 'number': '594'},
    {'id': '595', 'name': 'Joltik', 'number': '595'},
    {'id': '596', 'name': 'Galvantula', 'number': '596'},
    {'id': '597', 'name': 'Ferroseed', 'number': '597'},
    {'id': '598', 'name': 'Ferrothorn', 'number': '598'},
    {'id': '599', 'name': 'Klink', 'number': '599'},
    {'id': '600', 'name': 'Klang', 'number': '600'},
    {'id': '601', 'name': 'Klinklang', 'number': '601'},
    {'id': '602', 'name': 'Tynamo', 'number': '602'},
    {'id': '603', 'name': 'Eelektrik', 'number': '603'},
    {'id': '604', 'name': 'Eelektross', 'number': '604'},
    {'id': '605', 'name': 'Elgyem', 'number': '605'},
    {'id': '606', 'name': 'Beheeyem', 'number': '606'},
    {'id': '607', 'name': 'Litwick', 'number': '607'},
    {'id': '608', 'name': 'Lampent', 'number': '608'},
    {'id': '609', 'name': 'Chandelure', 'number': '609'},
    {'id': '610', 'name': 'Axew', 'number': '610'},
    {'id': '611', 'name': 'Fraxure', 'number': '611'},
    {'id': '612', 'name': 'Haxorus', 'number': '612'},
    {'id': '613', 'name': 'Cubchoo', 'number': '613'},
    {'id': '614', 'name': 'Beartic', 'number': '614'},
    {'id': '615', 'name': 'Cryogonal', 'number': '615'},
    {'id': '616', 'name': 'Shelmet', 'number': '616'},
    {'id': '617', 'name': 'Accelgor', 'number': '617'},
    {'id': '618', 'name': 'Stunfisk', 'number': '618'},
    {'id': '619', 'name': 'Mienfoo', 'number': '619'},
    {'id': '620', 'name': 'Mienshao', 'number': '620'},
    {'id': '621', 'name': 'Druddigon', 'number': '621'},
    {'id': '622', 'name': 'Golett', 'number': '622'},
    {'id': '623', 'name': 'Golurk', 'number': '623'},
    {'id': '624', 'name': 'Pawniard', 'number': '624'},
    {'id': '625', 'name': 'Bisharp', 'number': '625'},
    {'id': '626', 'name': 'Bouffalant', 'number': '626'},
    {'id': '627', 'name': 'Rufflet', 'number': '627'},
    {'id': '628', 'name': 'Braviary', 'number': '628'},
    {'id': '629', 'name': 'Vullaby', 'number': '629'},
    {'id': '630', 'name': 'Mandibuzz', 'number': '630'},
    {'id': '631', 'name': 'Heatmor', 'number': '631'},
    {'id': '632', 'name': 'Durant', 'number': '632'},
    {'id': '633', 'name': 'Deino', 'number': '633'},
    {'id': '634', 'name': 'Zweilous', 'number': '634'},
    {'id': '635', 'name': 'Hydreigon', 'number': '635'},
    {'id': '636', 'name': 'Larvesta', 'number': '636'},
    {'id': '637', 'name': 'Volcarona', 'number': '637'},
    {'id': '638', 'name': 'Cobalion', 'number': '638'},
    {'id': '639', 'name': 'Terrakion', 'number': '639'},
    {'id': '640', 'name': 'Virizion', 'number': '640'},
    {'id': 'tornadus-incarnate', 'name': 'Tornadus (Incarnate Forme)', 'number': '641'},
    {'id': 'tornadus-therian', 'name': 'Tornadus (Therian Forme)', 'number': '641'},
    {'id': 'thundurus-incarnate', 'name': 'Thundurus (Incarnate Forme)', 'number': '642'},
    {'id': 'thundurus-therian', 'name': 'Thundurus (Therian Forme)', 'number': '642'},
    {'id': '643', 'name': 'Reshiram', 'number': '643'},
    {'id': '644', 'name': 'Zekrom', 'number': '644'},
    {'id': 'landorus-incarnate', 'name': 'Landorus (Incarnate Forme)', 'number': '645'},
    {'id': 'landorus-therian', 'name': 'Landorus (Therian Forme)', 'number': '645'},
    {'id': '646', 'name': 'Kyurem', 'number': '646'},
    # Restored from placeholder corruption; matches the 'White Kyurem' entry below.
    {'id': 'kyurem-black', 'name': 'Black Kyurem', 'number': '646'},
    {'id': 'kyurem-white', 'name': 'White Kyurem', 'number': '646'},
    {'id': '647', 'name': 'Keldeo (Ordinary Form)', 'number': '647'},
    {'id': 'keldeo-resolute', 'name': 'Keldeo (Resolute Form)', 'number': '647'},
    {'id': 'meloetta-aria', 'name': 'Meloetta (Aria Forme)', 'number': '648'},
    {'id': 'meloetta-pirouette', 'name': 'Meloetta (Pirouette Forme)', 'number': '648'},
    {'id': '649', 'name': 'Genesect (Normal)', 'number': '649'},
    {'id': 'genesect-burn', 'name': 'Genesect (Burn Drive)', 'number': '649'},
    {'id': 'genesect-chill', 'name': 'Genesect (Chill Drive)', 'number': '649'},
    {'id': 'genesect-douse', 'name': 'Genesect (Douse Drive)', 'number': '649'},
    {'id': 'genesect-shock', 'name': 'Genesect (Shock Drive)', 'number': '649'},
]
# https://bulbapedia.bulbagarden.net/wiki/Mega_Evolution#Pok.C3.A9mon_capable_of_Mega_Evolution
# Mega Evolutions introduced in Gen VI. Every name is 'Mega <Species>'
# (with an X/Y suffix for Charizard and Mewtwo); 'number' is the base
# species' National Dex number.
KALOS_MEGA = [
    {'id': 'venusaur-mega', 'name': 'Mega Venusaur', 'number': '3'},
    {'id': 'charizard-mega-x', 'name': 'Mega Charizard X', 'number': '6'},
    {'id': 'charizard-mega-y', 'name': 'Mega Charizard Y', 'number': '6'},
    {'id': 'blastoise-mega', 'name': 'Mega Blastoise', 'number': '9'},
    {'id': 'beedrill-mega', 'name': 'Mega Beedrill', 'number': '15'},
    {'id': 'pidgeot-mega', 'name': 'Mega Pidgeot', 'number': '18'},
    {'id': 'alakazam-mega', 'name': 'Mega Alakazam', 'number': '65'},
    {'id': 'slowbro-mega', 'name': 'Mega Slowbro', 'number': '80'},
    {'id': 'gengar-mega', 'name': 'Mega Gengar', 'number': '94'},
    {'id': 'kangaskhan-mega', 'name': 'Mega Kangaskhan', 'number': '115'},
    {'id': 'pinsir-mega', 'name': 'Mega Pinsir', 'number': '127'},
    {'id': 'gyarados-mega', 'name': 'Mega Gyarados', 'number': '130'},
    {'id': 'aerodactyl-mega', 'name': 'Mega Aerodactyl', 'number': '142'},
    {'id': 'mewtwo-mega-x', 'name': 'Mega Mewtwo X', 'number': '150'},
    {'id': 'mewtwo-mega-y', 'name': 'Mega Mewtwo Y', 'number': '150'},
    {'id': 'ampharos-mega', 'name': 'Mega Ampharos', 'number': '181'},
    {'id': 'steelix-mega', 'name': 'Mega Steelix', 'number': '208'},
    {'id': 'scizor-mega', 'name': 'Mega Scizor', 'number': '212'},
    {'id': 'heracross-mega', 'name': 'Mega Heracross', 'number': '214'},
    {'id': 'houndoom-mega', 'name': 'Mega Houndoom', 'number': '229'},
    {'id': 'tyranitar-mega', 'name': 'Mega Tyranitar', 'number': '248'},
    {'id': 'sceptile-mega', 'name': 'Mega Sceptile', 'number': '254'},
    {'id': 'blaziken-mega', 'name': 'Mega Blaziken', 'number': '257'},
    {'id': 'swampert-mega', 'name': 'Mega Swampert', 'number': '260'},
    {'id': 'gardevoir-mega', 'name': 'Mega Gardevoir', 'number': '282'},
    {'id': 'sableye-mega', 'name': 'Mega Sableye', 'number': '302'},
    {'id': 'mawile-mega', 'name': 'Mega Mawile', 'number': '303'},
    {'id': 'aggron-mega', 'name': 'Mega Aggron', 'number': '306'},
    {'id': 'medicham-mega', 'name': 'Mega Medicham', 'number': '308'},
    {'id': 'manectric-mega', 'name': 'Mega Manectric', 'number': '310'},
    {'id': 'sharpedo-mega', 'name': 'Mega Sharpedo', 'number': '319'},
    {'id': 'camerupt-mega', 'name': 'Mega Camerupt', 'number': '323'},
    {'id': 'altaria-mega', 'name': 'Mega Altaria', 'number': '334'},
    {'id': 'banette-mega', 'name': 'Mega Banette', 'number': '354'},
    {'id': 'absol-mega', 'name': 'Mega Absol', 'number': '359'},
    {'id': 'glalie-mega', 'name': 'Mega Glalie', 'number': '362'},
    {'id': 'salamence-mega', 'name': 'Mega Salamence', 'number': '373'},
    {'id': 'metagross-mega', 'name': 'Mega Metagross', 'number': '376'},
    {'id': 'latias-mega', 'name': 'Mega Latias', 'number': '380'},
    {'id': 'latios-mega', 'name': 'Mega Latios', 'number': '381'},
    {'id': 'rayquaza-mega', 'name': 'Mega Rayquaza', 'number': '384'},
    {'id': 'lopunny-mega', 'name': 'Mega Lopunny', 'number': '428'},
    {'id': 'garchomp-mega', 'name': 'Mega Garchomp', 'number': '445'},
    {'id': 'lucario-mega', 'name': 'Mega Lucario', 'number': '448'},
    {'id': 'abomasnow-mega', 'name': 'Mega Abomasnow', 'number': '460'},
    {'id': 'gallade-mega', 'name': 'Mega Gallade', 'number': '475'},
    {'id': 'audino-mega', 'name': 'Mega Audino', 'number': '531'},
    {'id': 'diancie-mega', 'name': 'Mega Diancie', 'number': '719'},
]
# https://bulbapedia.bulbagarden.net/wiki/Primal_Reversion#Pok.C3.A9mon_capable_of_Primal_Reversion
# The only two Primal Reversion forms (ORAS); names restored from
# placeholder corruption as 'Primal <Species>'.
KALOS_PRIMAL_REVERSION = [
    {'id': 'kyogre-primal', 'name': 'Primal Kyogre', 'number': '382'},
    {'id': 'groudon-primal', 'name': 'Primal Groudon', 'number': '383'},
]
# Generation VI (Kalos) Pokédex entries, National Dex #650-#721.
# Same record shape as the other regional lists; cosmetic variants
# (Vivillon patterns, Flabébé/Floette/Florges flowers, Furfrou trims,
# Pumpkaboo/Gourgeist sizes) share a 'number' with distinct slug 'id's.
KALOS = [
    {'id': '650', 'name': 'Chespin', 'number': '650'},
    {'id': '651', 'name': 'Quilladin', 'number': '651'},
    {'id': '652', 'name': 'Chesnaught', 'number': '652'},
    {'id': '653', 'name': 'Fennekin', 'number': '653'},
    {'id': '654', 'name': 'Braixen', 'number': '654'},
    {'id': '655', 'name': 'Delphox', 'number': '655'},
    {'id': '656', 'name': 'Froakie', 'number': '656'},
    {'id': '657', 'name': 'Frogadier', 'number': '657'},
    {'id': '658', 'name': 'Greninja', 'number': '658'},
    {'id': 'greninja-ash', 'name': 'Ash-Greninja', 'number': '658'},
    {'id': '659', 'name': 'Bunnelby', 'number': '659'},
    {'id': '660', 'name': 'Diggersby', 'number': '660'},
    {'id': '661', 'name': 'Fletchling', 'number': '661'},
    {'id': '662', 'name': 'Fletchinder', 'number': '662'},
    {'id': '663', 'name': 'Talonflame', 'number': '663'},
    {'id': '664', 'name': 'Scatterbug', 'number': '664'},
    {'id': '665', 'name': 'Spewpa', 'number': '665'},
    {'id': 'vivillon-archipelago', 'name': 'Vivillon (Archipelago Pattern)', 'number': '666'},
    {'id': 'vivillon-continental', 'name': 'Vivillon (Continental Pattern)', 'number': '666'},
    {'id': 'vivillon-elegant', 'name': 'Vivillon (Elegant Pattern)', 'number': '666'},
    {'id': 'vivillon-garden', 'name': 'Vivillon (Garden Pattern)', 'number': '666'},
    {'id': 'vivillon-high', 'name': 'Vivillon (High Plains Pattern)', 'number': '666'},
    {'id': 'vivillon-icy', 'name': 'Vivillon (Icy Snow Pattern)', 'number': '666'},
    {'id': 'vivillon-jungle', 'name': 'Vivillon (Jungle Pattern)', 'number': '666'},
    {'id': 'vivillon-marine', 'name': 'Vivillon (Marine Pattern)', 'number': '666'},
    {'id': 'vivillon-meadow', 'name': 'Vivillon (Meadow Pattern)', 'number': '666'},
    {'id': 'vivillon-modern', 'name': 'Vivillon (Modern Pattern)', 'number': '666'},
    {'id': 'vivillon-monsoon', 'name': 'Vivillon (Monsoon Pattern)', 'number': '666'},
    {'id': 'vivillon-ocean', 'name': 'Vivillon (Ocean Pattern)', 'number': '666'},
    {'id': 'vivillon-polar', 'name': 'Vivillon (Polar Pattern)', 'number': '666'},
    {'id': 'vivillon-river', 'name': 'Vivillon (River Pattern)', 'number': '666'},
    {'id': 'vivillon-sandstorm', 'name': 'Vivillon (Sandstorm Pattern)', 'number': '666'},
    {'id': 'vivillon-savanna', 'name': 'Vivillon (Savanna Pattern)', 'number': '666'},
    {'id': 'vivillon-sun', 'name': 'Vivillon (Sun Pattern)', 'number': '666'},
    {'id': 'vivillon-tundra', 'name': 'Vivillon (Tundra Pattern)', 'number': '666'},
    {'id': '667', 'name': 'Litleo', 'number': '667'},
    {'id': '668', 'name': 'Pyroar (Both)', 'number': '668'},
    {'id': 'pyroar-male', 'name': 'Pyroar (Male)', 'number': '668'},
    {'id': 'pyroar-female', 'name': 'Pyroar (Female)', 'number': '668'},
    {'id': 'flabebe-red', 'name': 'Flabébé (Red Flower)', 'number': '669'},
    {'id': 'flabebe-yellow', 'name': 'Flabébé (Yellow Flower)', 'number': '669'},
    {'id': 'flabebe-orange', 'name': 'Flabébé (Orange Flower)', 'number': '669'},
    {'id': 'flabebe-blue', 'name': 'Flabébé (Blue Flower)', 'number': '669'},
    {'id': 'flabebe-white', 'name': 'Flabébé (White Flower)', 'number': '669'},
    {'id': 'floette-red', 'name': 'Floette (Red Flower)', 'number': '670'},
    {'id': 'floette-yellow', 'name': 'Floette (Yellow Flower)', 'number': '670'},
    {'id': 'floette-orange', 'name': 'Floette (Orange Flower)', 'number': '670'},
    {'id': 'floette-blue', 'name': 'Floette (Blue Flower)', 'number': '670'},
    {'id': 'floette-white', 'name': 'Floette (White Flower)', 'number': '670'},
    {'id': 'florges-red', 'name': 'Florges (Red Flower)', 'number': '671'},
    {'id': 'florges-yellow', 'name': 'Florges (Yellow Flower)', 'number': '671'},
    {'id': 'florges-orange', 'name': 'Florges (Orange Flower)', 'number': '671'},
    {'id': 'florges-blue', 'name': 'Florges (Blue Flower)', 'number': '671'},
    {'id': 'florges-white', 'name': 'Florges (White Flower)', 'number': '671'},
    {'id': '672', 'name': 'Skiddo', 'number': '672'},
    {'id': '673', 'name': 'Gogoat', 'number': '673'},
    {'id': '674', 'name': 'Pancham', 'number': '674'},
    {'id': '675', 'name': 'Pangoro', 'number': '675'},
    {'id': '676', 'name': 'Furfrou (Natural Trim)', 'number': '676'},
    {'id': 'furfrou-dandy', 'name': 'Furfrou (Dandy Trim)', 'number': '676'},
    {'id': 'furfrou-debutante', 'name': 'Furfrou (Debutante Trim)', 'number': '676'},
    {'id': 'furfrou-diamond', 'name': 'Furfrou (Diamond Trim)', 'number': '676'},
    {'id': 'furfrou-heart', 'name': 'Furfrou (Heart Trim)', 'number': '676'},
    {'id': 'furfrou-kabuki', 'name': 'Furfrou (Kabuki Trim)', 'number': '676'},
    {'id': 'furfrou-la-reine', 'name': 'Furfrou (La Reine Trim)', 'number': '676'},
    {'id': 'furfrou-matron', 'name': 'Furfrou (Matron Trim)', 'number': '676'},
    {'id': 'furfrou-pharaoh', 'name': 'Furfrou (Pharaoh Trim)', 'number': '676'},
    {'id': 'furfrou-star', 'name': 'Furfrou (Star Trim)', 'number': '676'},
    {'id': '677', 'name': 'Espurr', 'number': '677'},
    {'id': '678', 'name': 'Meowstic (Both)', 'number': '678'},
    {'id': 'meowstic-male', 'name': 'Meowstic (Male)', 'number': '678'},
    {'id': 'meowstic-female', 'name': 'Meowstic (Female)', 'number': '678'},
    {'id': '679', 'name': 'Honedge', 'number': '679'},
    {'id': '680', 'name': 'Doublade', 'number': '680'},
    {'id': '681', 'name': 'Aegislash (Both)', 'number': '681'},
    {'id': 'aegislash-blade', 'name': 'Aegislash (Blade Forme)', 'number': '681'},
    {'id': 'aegislash-shield', 'name': 'Aegislash (Shield Forme)', 'number': '681'},
    {'id': '682', 'name': 'Spritzee', 'number': '682'},
    {'id': '683', 'name': 'Aromatisse', 'number': '683'},
    {'id': '684', 'name': 'Swirlix', 'number': '684'},
    {'id': '685', 'name': 'Slurpuff', 'number': '685'},
    {'id': '686', 'name': 'Inkay', 'number': '686'},
    {'id': '687', 'name': 'Malamar', 'number': '687'},
    {'id': '688', 'name': 'Binacle', 'number': '688'},
    {'id': '689', 'name': 'Barbaracle', 'number': '689'},
    {'id': '690', 'name': 'Skrelp', 'number': '690'},
    {'id': '691', 'name': 'Dragalge', 'number': '691'},
    {'id': '692', 'name': 'Clauncher', 'number': '692'},
    {'id': '693', 'name': 'Clawitzer', 'number': '693'},
    {'id': '694', 'name': 'Helioptile', 'number': '694'},
    {'id': '695', 'name': 'Heliolisk', 'number': '695'},
    {'id': '696', 'name': 'Tyrunt', 'number': '696'},
    {'id': '697', 'name': 'Tyrantrum', 'number': '697'},
    {'id': '698', 'name': 'Amaura', 'number': '698'},
    {'id': '699', 'name': 'Aurorus', 'number': '699'},
    {'id': '700', 'name': 'Sylveon', 'number': '700'},
    {'id': '701', 'name': 'Hawlucha', 'number': '701'},
    {'id': '702', 'name': 'Dedenne', 'number': '702'},
    {'id': '703', 'name': 'Carbink', 'number': '703'},
    {'id': '704', 'name': 'Goomy', 'number': '704'},
    {'id': '705', 'name': 'Sliggoo', 'number': '705'},
    {'id': '706', 'name': 'Goodra', 'number': '706'},
    {'id': '707', 'name': 'Klefki', 'number': '707'},
    {'id': '708', 'name': 'Phantump', 'number': '708'},
    {'id': '709', 'name': 'Trevenant', 'number': '709'},
    {'id': 'pumpkaboo-average', 'name': 'Pumpkaboo (Average Size)', 'number': '710'},
    {'id': 'pumpkaboo-large', 'name': 'Pumpkaboo (Large Size)', 'number': '710'},
    {'id': 'pumpkaboo-small', 'name': 'Pumpkaboo (Small Size)', 'number': '710'},
    {'id': 'pumpkaboo-super', 'name': 'Pumpkaboo (Super Size)', 'number': '710'},
    {'id': 'gourgeist-average', 'name': 'Gourgeist (Average Size)', 'number': '711'},
    {'id': 'gourgeist-large', 'name': 'Gourgeist (Large Size)', 'number': '711'},
    {'id': 'gourgeist-small', 'name': 'Gourgeist (Small Size)', 'number': '711'},
    {'id': 'gourgeist-super', 'name': 'Gourgeist (Super Size)', 'number': '711'},
    {'id': '712', 'name': 'Bergmite', 'number': '712'},
    {'id': '713', 'name': 'Avalugg', 'number': '713'},
    {'id': '714', 'name': 'Noibat', 'number': '714'},
    {'id': '715', 'name': 'Noivern', 'number': '715'},
    {'id': '716', 'name': 'Xerneas', 'number': '716'},
    {'id': '717', 'name': 'Yveltal', 'number': '717'},
    {'id': '718', 'name': 'Zygarde (50% Forme)', 'number': '718'},
    {'id': 'zygarde-10', 'name': 'Zygarde (10% Forme)', 'number': '718'},
    {'id': 'zygarde-complete', 'name': 'Zygarde (Complete Forme)', 'number': '718'},
    {'id': '719', 'name': 'Diancie', 'number': '719'},
    {'id': 'hoopa-confined', 'name': 'Hoopa (Confined)', 'number': '720'},
    {'id': 'hoopa-unbound', 'name': 'Hoopa (Unbound)', 'number': '720'},
    {'id': '721', 'name': 'Volcanion', 'number': '721'},
]
# https://bulbapedia.bulbagarden.net/wiki/Regional_form#List_of_Alolan_Forms
# Alolan regional variants of earlier-generation species.  'number' is the
# National Pokedex number of the base species; 'id' is the slug used for
# sprite/API lookups.  (Names restored from the dataset's <NAME> placeholders.)
ALOLA_FORMS = [
    {'id': 'rattata-alola', 'name': 'Alolan Rattata', 'number': '19'},
    {'id': 'raticate-alola', 'name': 'Alolan Raticate', 'number': '20'},
    {'id': 'raichu-alola', 'name': 'Alolan Raichu', 'number': '26'},
    {'id': 'sandshrew-alola', 'name': 'Alolan Sandshrew', 'number': '27'},
    {'id': 'sandslash-alola', 'name': 'Alolan Sandslash', 'number': '28'},
    {'id': 'vulpix-alola', 'name': 'Alolan Vulpix', 'number': '37'},
    {'id': 'ninetales-alola', 'name': 'Alolan Ninetales', 'number': '38'},
    {'id': 'diglett-alola', 'name': 'Alolan Diglett', 'number': '50'},
    {'id': 'dugtrio-alola', 'name': 'Alolan Dugtrio', 'number': '51'},
    {'id': 'meowth-alola', 'name': 'Alolan Meowth', 'number': '52'},
    {'id': 'persian-alola', 'name': 'Alolan Persian', 'number': '53'},
    {'id': 'geodude-alola', 'name': 'Alolan Geodude', 'number': '74'},
    {'id': 'graveler-alola', 'name': 'Alolan Graveler', 'number': '75'},
    {'id': 'golem-alola', 'name': 'Alolan Golem', 'number': '76'},
    {'id': 'grimer-alola', 'name': 'Alolan Grimer', 'number': '88'},
    {'id': 'muk-alola', 'name': 'Alolan Muk', 'number': '89'},
    {'id': 'exeggutor-alola', 'name': 'Alolan Exeggutor', 'number': '103'},
    {'id': 'marowak-alola', 'name': 'Alolan Marowak', 'number': '105'},
]
# Species introduced in the Alola region (National Pokedex 722-809), plus
# their notable alternate forms.  'number' is the National Pokedex number;
# form entries share the number of their base species.
# (Tapu and Necrozma names restored from the dataset's <NAME> placeholders.)
ALOLA = [
    {'id': '722', 'name': 'Rowlet', 'number': '722'},
    {'id': '723', 'name': 'Dartrix', 'number': '723'},
    {'id': '724', 'name': 'Decidueye', 'number': '724'},
    {'id': '725', 'name': 'Litten', 'number': '725'},
    {'id': '726', 'name': 'Torracat', 'number': '726'},
    {'id': '727', 'name': 'Incineroar', 'number': '727'},
    {'id': '728', 'name': 'Popplio', 'number': '728'},
    {'id': '729', 'name': 'Brionne', 'number': '729'},
    {'id': '730', 'name': 'Primarina', 'number': '730'},
    {'id': '731', 'name': 'Pikipek', 'number': '731'},
    {'id': '732', 'name': 'Trumbeak', 'number': '732'},
    {'id': '733', 'name': 'Toucannon', 'number': '733'},
    {'id': '734', 'name': 'Yungoos', 'number': '734'},
    {'id': '735', 'name': 'Gumshoos', 'number': '735'},
    {'id': '736', 'name': 'Grubbin', 'number': '736'},
    {'id': '737', 'name': 'Charjabug', 'number': '737'},
    {'id': '738', 'name': 'Vikavolt', 'number': '738'},
    {'id': '739', 'name': 'Crabrawler', 'number': '739'},
    {'id': '740', 'name': 'Crabominable', 'number': '740'},
    {'id': 'oricorio-baile', 'name': 'Oricorio (Baile Style)', 'number': '741'},
    {'id': 'oricorio-pom-pom', 'name': 'Oricorio (Pom-Pom Style)', 'number': '741'},
    {'id': "oricorio-pa'u", 'name': "Oricorio (Pa'u Style)", 'number': '741'},
    {'id': 'oricorio-sensu', 'name': 'Oricorio (Sensu Style)', 'number': '741'},
    {'id': '742', 'name': 'Cutiefly', 'number': '742'},
    {'id': '743', 'name': 'Ribombee', 'number': '743'},
    {'id': '744', 'name': 'Rockruff', 'number': '744'},
    {'id': '745', 'name': 'Lycanroc (Midday Form)', 'number': '745'},
    {'id': 'lycanroc-dusk', 'name': 'Lycanroc (Dusk Form)', 'number': '745'},
    {'id': 'lycanroc-midnight', 'name': 'Lycanroc (Midnight Form)', 'number': '745'},
    {'id': 'wishiwashi-solo', 'name': 'Wishiwashi (Solo Form)', 'number': '746'},
    {'id': 'wishiwashi-school', 'name': 'Wishiwashi (School Form)', 'number': '746'},
    {'id': '747', 'name': 'Mareanie', 'number': '747'},
    {'id': '748', 'name': 'Toxapex', 'number': '748'},
    {'id': '749', 'name': 'Mudbray', 'number': '749'},
    {'id': '750', 'name': 'Mudsdale', 'number': '750'},
    {'id': '751', 'name': 'Dewpider', 'number': '751'},
    {'id': '752', 'name': 'Araquanid', 'number': '752'},
    {'id': '753', 'name': 'Fomantis', 'number': '753'},
    {'id': '754', 'name': 'Lurantis', 'number': '754'},
    {'id': '755', 'name': 'Morelull', 'number': '755'},
    {'id': '756', 'name': 'Shiinotic', 'number': '756'},
    {'id': '757', 'name': 'Salandit', 'number': '757'},
    {'id': '758', 'name': 'Salazzle', 'number': '758'},
    {'id': '759', 'name': 'Stufful', 'number': '759'},
    {'id': '760', 'name': 'Bewear', 'number': '760'},
    {'id': '761', 'name': 'Bounsweet', 'number': '761'},
    {'id': '762', 'name': 'Steenee', 'number': '762'},
    {'id': '763', 'name': 'Tsareena', 'number': '763'},
    {'id': '764', 'name': 'Comfey', 'number': '764'},
    {'id': '765', 'name': 'Oranguru', 'number': '765'},
    {'id': '766', 'name': 'Passimian', 'number': '766'},
    {'id': '767', 'name': 'Wimpod', 'number': '767'},
    {'id': '768', 'name': 'Golisopod', 'number': '768'},
    {'id': '769', 'name': 'Sandygast', 'number': '769'},
    {'id': '770', 'name': 'Palossand', 'number': '770'},
    {'id': '771', 'name': 'Pyukumuku', 'number': '771'},
    {'id': '772', 'name': 'Type: Null', 'number': '772'},
    {'id': '773', 'name': 'Silvally', 'number': '773'},
    {'id': '774', 'name': 'Minior (Meteor Form)', 'number': '774'},
    {'id': 'minior-core', 'name': 'Minior (Core)', 'number': '774'},
    {'id': '775', 'name': 'Komala', 'number': '775'},
    {'id': '776', 'name': 'Turtonator', 'number': '776'},
    {'id': '777', 'name': 'Togedemaru', 'number': '777'},
    {'id': '778', 'name': 'Mimikyu', 'number': '778'},
    {'id': '779', 'name': 'Bruxish', 'number': '779'},
    {'id': '780', 'name': 'Drampa', 'number': '780'},
    {'id': '781', 'name': 'Dhelmise', 'number': '781'},
    {'id': '782', 'name': 'Jangmo-o', 'number': '782'},
    {'id': '783', 'name': 'Hakamo-o', 'number': '783'},
    {'id': '784', 'name': 'Kommo-o', 'number': '784'},
    {'id': '785', 'name': 'Tapu Koko', 'number': '785'},
    {'id': '786', 'name': 'Tapu Lele', 'number': '786'},
    {'id': '787', 'name': 'Tapu Bulu', 'number': '787'},
    {'id': '788', 'name': 'Tapu Fini', 'number': '788'},
    {'id': '789', 'name': 'Cosmog', 'number': '789'},
    {'id': '790', 'name': 'Cosmoem', 'number': '790'},
    {'id': '791', 'name': 'Solgaleo', 'number': '791'},
    {'id': '792', 'name': 'Lunala', 'number': '792'},
    {'id': '793', 'name': 'Nihilego', 'number': '793'},
    {'id': '794', 'name': 'Buzzwole', 'number': '794'},
    {'id': '795', 'name': 'Pheromosa', 'number': '795'},
    {'id': '796', 'name': 'Xurkitree', 'number': '796'},
    {'id': '797', 'name': 'Celesteela', 'number': '797'},
    {'id': '798', 'name': 'Kartana', 'number': '798'},
    {'id': '799', 'name': 'Guzzlord', 'number': '799'},
    {'id': '800', 'name': 'Necrozma', 'number': '800'},
    {'id': 'necrozma-dawn', 'name': 'Dawn Wings Necrozma', 'number': '800'},
    {'id': 'necrozma-dusk', 'name': 'Dusk Mane Necrozma', 'number': '800'},
    {'id': 'necrozma-ultra', 'name': 'Ultra Necrozma', 'number': '800'},
    {'id': '801', 'name': 'Magearna', 'number': '801'},
    {'id': '802', 'name': 'Marshadow', 'number': '802'},
    {'id': '803', 'name': 'Poipole', 'number': '803'},
    {'id': '804', 'name': 'Naganadel', 'number': '804'},
    {'id': '805', 'name': 'Stakataka', 'number': '805'},
    {'id': '806', 'name': 'Blacephalon', 'number': '806'},
    {'id': '807', 'name': 'Zeraora', 'number': '807'},
    {'id': '808', 'name': 'Meltan', 'number': '808'},
    {'id': '809', 'name': 'Melmetal', 'number': '809'},
]
# https://bulbapedia.bulbagarden.net/wiki/Regional_form#List_of_Galarian_Forms
# Galarian regional variants of earlier-generation species.  'number' is the
# National Pokedex number of the base species.
# (Names restored from the dataset's <NAME> placeholders.)
GALAR_FORMS = [
    {'id': 'meowth-galar', 'name': 'Galarian Meowth', 'number': '52'},
    {'id': 'ponyta-galar', 'name': 'Galarian Ponyta', 'number': '77'},
    {'id': 'rapidash-galar', 'name': 'Galarian Rapidash', 'number': '78'},
    {'id': 'slowpoke-galar', 'name': 'Galarian Slowpoke', 'number': '79'},
    {'id': 'slowbro-galar', 'name': 'Galarian Slowbro', 'number': '80'},
    {'id': 'farfetchd-galar', 'name': "Galarian Farfetch'd", 'number': '83'},
    {'id': 'weezing-galar', 'name': 'Galarian Weezing', 'number': '110'},
    {'id': 'mr-mime-galar', 'name': 'Galarian Mr. Mime', 'number': '122'},
    {'id': 'articuno-galar', 'name': 'Galarian Articuno', 'number': '144'},
    {'id': 'zapdos-galar', 'name': 'Galarian Zapdos', 'number': '145'},
    {'id': 'moltres-galar', 'name': 'Galarian Moltres', 'number': '146'},
    {'id': 'slowking-galar', 'name': 'Galarian Slowking', 'number': '199'},
    {'id': 'corsola-galar', 'name': 'Galarian Corsola', 'number': '222'},
    {'id': 'zigzagoon-galar', 'name': 'Galarian Zigzagoon', 'number': '263'},
    {'id': 'linoone-galar', 'name': 'Galarian Linoone', 'number': '264'},
    {'id': 'darumaka-galar', 'name': 'Galarian Darumaka', 'number': '554'},
    {'id': 'darmanitan-galar', 'name': 'Galarian Darmanitan (Standard Mode)', 'number': '555'},
    {'id': 'darmanitan-zen-galar', 'name': 'Galarian Darmanitan (Zen Mode)', 'number': '555'},
    {'id': 'yamask-galar', 'name': 'Galarian Yamask', 'number': '562'},
    {'id': 'stunfisk-galar', 'name': 'Galarian Stunfisk', 'number': '618'},
]
# https://bulbapedia.bulbagarden.net/wiki/Gigantamax#Gigantamax_Pok.C3.A9mon
# Gigantamax forms.  'number' is the National Pokedex number of the species.
# (Names restored from the dataset's <NAME> placeholders; the #842 entry
# previously duplicated the 'flapple-gigantamax' id and is now
# 'appletun-gigantamax', matching Appletun.)
GALAR_GIGANTAMAX = [
    {'id': 'venusaur-gigantamax', 'name': 'Gigantamax Venusaur', 'number': '3'},
    {'id': 'charizard-gigantamax', 'name': 'Gigantamax Charizard', 'number': '6'},
    {'id': 'blastoise-gigantamax', 'name': 'Gigantamax Blastoise', 'number': '9'},
    {'id': 'butterfree-gigantamax', 'name': 'Gigantamax Butterfree', 'number': '12'},
    {'id': 'pikachu-gigantamax', 'name': 'Gigantamax Pikachu', 'number': '25'},
    {'id': 'meowth-gigantamax', 'name': 'Gigantamax Meowth', 'number': '52'},
    {'id': 'machamp-gigantamax', 'name': 'Gigantamax Machamp', 'number': '68'},
    {'id': 'gengar-gigantamax', 'name': 'Gigantamax Gengar', 'number': '94'},
    {'id': 'kingler-gigantamax', 'name': 'Gigantamax Kingler', 'number': '99'},
    {'id': 'lapras-gigantamax', 'name': 'Gigantamax Lapras', 'number': '131'},
    {'id': 'eevee-gigantamax', 'name': 'Gigantamax Eevee', 'number': '133'},
    {'id': 'snorlax-gigantamax', 'name': 'Gigantamax Snorlax', 'number': '143'},
    {'id': 'garbodor-gigantamax', 'name': 'Gigantamax Garbodor', 'number': '569'},
    {'id': 'melmetal-gigantamax', 'name': 'Gigantamax Melmetal', 'number': '809'},
    {'id': 'rillaboom-gigantamax', 'name': 'Gigantamax Rillaboom', 'number': '812'},
    {'id': 'cinderace-gigantamax', 'name': 'Gigantamax Cinderace', 'number': '815'},
    {'id': 'inteleon-gigantamax', 'name': 'Gigantamax Inteleon', 'number': '818'},
    {'id': 'corviknight-gigantamax', 'name': 'Gigantamax Corviknight', 'number': '823'},
    {'id': 'orbeetle-gigantamax', 'name': 'Gigantamax Orbeetle', 'number': '826'},
    {'id': 'drednaw-gigantamax', 'name': 'Gigantamax Drednaw', 'number': '834'},
    {'id': 'coalossal-gigantamax', 'name': 'Gigantamax Coalossal', 'number': '839'},
    {'id': 'flapple-gigantamax', 'name': 'Gigantamax Flapple', 'number': '841'},
    {'id': 'appletun-gigantamax', 'name': 'Gigantamax Appletun', 'number': '842'},
    {'id': 'sandaconda-gigantamax', 'name': 'Gigantamax Sandaconda', 'number': '844'},
    {'id': 'toxtricity-gigantamax', 'name': 'Gigantamax Toxtricity', 'number': '849'},
    {'id': 'centiskorch-gigantamax', 'name': 'Gigantamax Centiskorch', 'number': '851'},
    {'id': 'hatterene-gigantamax', 'name': 'Gigantamax Hatterene', 'number': '858'},
    {'id': 'grimmsnarl-gigantamax', 'name': 'Gigantamax Grimmsnarl', 'number': '861'},
    {'id': 'alcremie-gigantamax', 'name': 'Gigantamax Alcremie', 'number': '869'},
    {'id': 'copperajah-gigantamax', 'name': 'Gigantamax Copperajah', 'number': '879'},
    {'id': 'duraludon-gigantamax', 'name': 'Gigantamax Duraludon', 'number': '884'},
    {'id': 'urshifu-rapid-gigantamax', 'name': 'Gigantamax Urshifu (Rapid Strike Style)', 'number': '892'},
    {'id': 'urshifu-single-gigantamax', 'name': 'Gigantamax Urshifu (Single Strike Style)', 'number': '892'},
]
# Species introduced in the Galar region (National Pokedex 810-898), plus
# their notable alternate forms.  'number' is the National Pokedex number;
# form entries share the number of their base species.
# (The #866 name is restored from a <NAME> placeholder; Zamazenta's crowned
# form is the Crowned Shield, not Crowned Sword.)
GALAR = [
    {'id': '810', 'name': 'Grookey', 'number': '810'},
    {'id': '811', 'name': 'Thwackey', 'number': '811'},
    {'id': '812', 'name': 'Rillaboom', 'number': '812'},
    {'id': '813', 'name': 'Scorbunny', 'number': '813'},
    {'id': '814', 'name': 'Raboot', 'number': '814'},
    {'id': '815', 'name': 'Cinderace', 'number': '815'},
    {'id': '816', 'name': 'Sobble', 'number': '816'},
    {'id': '817', 'name': 'Drizzile', 'number': '817'},
    {'id': '818', 'name': 'Inteleon', 'number': '818'},
    {'id': '819', 'name': 'Skwovet', 'number': '819'},
    {'id': '820', 'name': 'Greedent', 'number': '820'},
    {'id': '821', 'name': 'Rookidee', 'number': '821'},
    {'id': '822', 'name': 'Corvisquire', 'number': '822'},
    {'id': '823', 'name': 'Corviknight', 'number': '823'},
    {'id': '824', 'name': 'Blipbug', 'number': '824'},
    {'id': '825', 'name': 'Dottler', 'number': '825'},
    {'id': '826', 'name': 'Orbeetle', 'number': '826'},
    {'id': '827', 'name': 'Nickit', 'number': '827'},
    {'id': '828', 'name': 'Thievul', 'number': '828'},
    {'id': '829', 'name': 'Gossifleur', 'number': '829'},
    {'id': '830', 'name': 'Eldegoss', 'number': '830'},
    {'id': '831', 'name': 'Wooloo', 'number': '831'},
    {'id': '832', 'name': 'Dubwool', 'number': '832'},
    {'id': '833', 'name': 'Chewtle', 'number': '833'},
    {'id': '834', 'name': 'Drednaw', 'number': '834'},
    {'id': '835', 'name': 'Yamper', 'number': '835'},
    {'id': '836', 'name': 'Boltund', 'number': '836'},
    {'id': '837', 'name': 'Rolycoly', 'number': '837'},
    {'id': '838', 'name': 'Carkol', 'number': '838'},
    {'id': '839', 'name': 'Coalossal', 'number': '839'},
    {'id': '840', 'name': 'Applin', 'number': '840'},
    {'id': '841', 'name': 'Flapple', 'number': '841'},
    {'id': '842', 'name': 'Appletun', 'number': '842'},
    {'id': '843', 'name': 'Silicobra', 'number': '843'},
    {'id': '844', 'name': 'Sandaconda', 'number': '844'},
    {'id': '845', 'name': 'Cramorant', 'number': '845'},
    {'id': '846', 'name': 'Arrokuda', 'number': '846'},
    {'id': '847', 'name': 'Barraskewda', 'number': '847'},
    {'id': '848', 'name': 'Toxel', 'number': '848'},
    {'id': '849', 'name': 'Toxtricity (Both Forms)', 'number': '849'},
    {'id': 'toxtricity-amped', 'name': 'Toxtricity (Amped Form)', 'number': '849'},
    {'id': 'toxtricity-low', 'name': 'Toxtricity (Low Key Form)', 'number': '849'},
    {'id': '850', 'name': 'Sizzlipede', 'number': '850'},
    {'id': '851', 'name': 'Centiskorch', 'number': '851'},
    {'id': '852', 'name': 'Clobbopus', 'number': '852'},
    {'id': '853', 'name': 'Grapploct', 'number': '853'},
    {'id': '854', 'name': 'Sinistea', 'number': '854'},
    {'id': '855', 'name': 'Polteageist', 'number': '855'},
    {'id': '856', 'name': 'Hatenna', 'number': '856'},
    {'id': '857', 'name': 'Hattrem', 'number': '857'},
    {'id': '858', 'name': 'Hatterene', 'number': '858'},
    {'id': '859', 'name': 'Impidimp', 'number': '859'},
    {'id': '860', 'name': 'Morgrem', 'number': '860'},
    {'id': '861', 'name': 'Grimmsnarl', 'number': '861'},
    {'id': '862', 'name': 'Obstagoon', 'number': '862'},
    {'id': '863', 'name': 'Perrserker', 'number': '863'},
    {'id': '864', 'name': 'Cursola', 'number': '864'},
    {'id': '865', 'name': "Sirfetch'd", 'number': '865'},
    {'id': '866', 'name': 'Mr. Rime', 'number': '866'},
    {'id': '867', 'name': 'Runerigus', 'number': '867'},
    {'id': '868', 'name': 'Milcery', 'number': '868'},
    {'id': '869', 'name': 'Alcremie', 'number': '869'},
    {'id': '870', 'name': 'Falinks', 'number': '870'},
    {'id': '871', 'name': 'Pincurchin', 'number': '871'},
    {'id': '872', 'name': 'Snom', 'number': '872'},
    {'id': '873', 'name': 'Frosmoth', 'number': '873'},
    {'id': '874', 'name': 'Stonjourner', 'number': '874'},
    {'id': 'eiscue-ice', 'name': 'Eiscue (Ice Face)', 'number': '875'},
    {'id': 'eiscue-noice', 'name': 'Eiscue (Noice Face)', 'number': '875'},
    {'id': '876', 'name': 'Indeedee (Both)', 'number': '876'},
    {'id': 'indeedee-male', 'name': 'Indeedee (Male)', 'number': '876'},
    {'id': 'indeedee-female', 'name': 'Indeedee (Female)', 'number': '876'},
    {'id': '877', 'name': 'Morpeko (Both)', 'number': '877'},
    {'id': 'morpeko-full', 'name': 'Morpeko (Full Belly Mode)', 'number': '877'},
    {'id': 'morpeko-hangry', 'name': 'Morpeko (Hangry Mode)', 'number': '877'},
    {'id': '878', 'name': 'Cufant', 'number': '878'},
    {'id': '879', 'name': 'Copperajah', 'number': '879'},
    {'id': '880', 'name': 'Dracozolt', 'number': '880'},
    {'id': '881', 'name': 'Arctozolt', 'number': '881'},
    {'id': '882', 'name': 'Dracovish', 'number': '882'},
    {'id': '883', 'name': 'Arctovish', 'number': '883'},
    {'id': '884', 'name': 'Duraludon', 'number': '884'},
    {'id': '885', 'name': 'Dreepy', 'number': '885'},
    {'id': '886', 'name': 'Drakloak', 'number': '886'},
    {'id': '887', 'name': 'Dragapult', 'number': '887'},
    {'id': '888', 'name': 'Zacian (Crowned Sword)', 'number': '888'},
    {'id': 'zacian-hero', 'name': 'Zacian (Hero of Many Battles)', 'number': '888'},
    {'id': '889', 'name': 'Zamazenta (Crowned Shield)', 'number': '889'},
    {'id': 'zamazenta-hero', 'name': 'Zamazenta (Hero of Many Battles)', 'number': '889'},
    {'id': '890', 'name': 'Eternatus', 'number': '890'},
    {'id': '891', 'name': 'Kubfu', 'number': '891'},
    {'id': 'urshifu-rapid', 'name': 'Urshifu (Rapid Strike Style)', 'number': '892'},
    {'id': 'urshifu-single', 'name': 'Urshifu (Single Strike Style)', 'number': '892'},
    {'id': '893', 'name': 'Zarude', 'number': '893'},
    {'id': '894', 'name': 'Regieleki', 'number': '894'},
    {'id': '895', 'name': 'Regidrago', 'number': '895'},
    {'id': '896', 'name': 'Glastrier', 'number': '896'},
    {'id': '897', 'name': 'Spectrier', 'number': '897'},
    {'id': '898', 'name': 'Calyrex', 'number': '898'},
    {'id': 'calyrex-ice', 'name': 'Ice Rider Calyrex', 'number': '898'},
    {'id': 'calyrex-shadow', 'name': 'Shadow Rider Calyrex', 'number': '898'},
]
# Hisui: the name given to the Sinnoh region at the time of its first human
# settlements (Pokemon Legends: Arceus).
# Hisuian regional variants; 'number' is the National Pokedex number of the
# base species.  (Names restored from the dataset's <NAME> placeholders.)
HISUI_FORMS = [
    {'id': 'growlithe-hisui', 'name': 'Hisuian Growlithe', 'number': '58'},
    {'id': 'arcanine-hisui', 'name': 'Hisuian Arcanine', 'number': '59'},
    {'id': 'voltorb-hisui', 'name': 'Hisuian Voltorb', 'number': '100'},
    {'id': 'electrode-hisui', 'name': 'Hisuian Electrode', 'number': '101'},
    {'id': 'typhlosion-hisui', 'name': 'Hisuian Typhlosion', 'number': '157'},
    {'id': 'qwilfish-hisui', 'name': 'Hisuian Qwilfish', 'number': '211'},
    {'id': 'sneasel-hisui', 'name': 'Hisuian Sneasel', 'number': '215'},
    {'id': 'samurott-hisui', 'name': 'Hisuian Samurott', 'number': '503'},
    {'id': 'lilligant-hisui', 'name': 'Hisuian Lilligant', 'number': '549'},
    {'id': 'zorua-hisui', 'name': 'Hisuian Zorua', 'number': '570'},
    {'id': 'zoroark-hisui', 'name': 'Hisuian Zoroark', 'number': '571'},
    {'id': 'braviary-hisui', 'name': 'Hisuian Braviary', 'number': '628'},
    {'id': 'sliggoo-hisui', 'name': 'Hisuian Sliggoo', 'number': '705'},
    {'id': 'goodra-hisui', 'name': 'Hisuian Goodra', 'number': '706'},
    {'id': 'avalugg-hisui', 'name': 'Hisuian Avalugg', 'number': '713'},
    {'id': 'decidueye-hisui', 'name': 'Hisuian Decidueye', 'number': '724'},
]
# Species introduced in the Hisui region (National Pokedex 899-905);
# Enamorus is listed once per forme, sharing the same number.
HISUI = [
    {'id': '899', 'name': 'Wyrdeer', 'number': '899'},
    {'id': '900', 'name': 'Kleavor', 'number': '900'},
    {'id': '901', 'name': 'Ursaluna', 'number': '901'},
    {'id': '902', 'name': 'Basculegion', 'number': '902'},
    {'id': '903', 'name': 'Sneasler', 'number': '903'},
    {'id': '904', 'name': 'Overqwil', 'number': '904'},
    {'id': 'enamorus-incarnate', 'name': 'Enamorus (Incarnate Forme)', 'number': '905'},
    {'id': 'enamorus-therian', 'name': 'Enamorus (Therian Forme)', 'number': '905'},
]
def get_region(region: str) -> Dict[str, List[Dict[str, str]]]:
    """Return the pokedex groups available for *region*.

    Every region maps a group label (e.g. "Local", "Mega Evolutions") to its
    list of entry dicts.  Unknown region names yield an empty dict, matching
    the previous if/elif ladder's fallthrough.
    """
    # A literal mapping replaces the long if/elif chain; a fresh dict is
    # built per call, so callers may mutate the result safely (as before).
    region_groups = {
        "Kanto": {"Local": KANTO},
        "Johto": {"Local": JOHTO},
        "Hoenn": {"Local": HOENN},
        "Sinnoh": {"Local": SINNOH},
        "Unova": {"Local": UNOVA},
        "Kalos": {"Local": KALOS, "Mega Evolutions": KALOS_MEGA,
                  "Primal Reversion": KALOS_PRIMAL_REVERSION},
        "Alola": {"Local": ALOLA, "Alolan Forms": ALOLA_FORMS},
        "Galar": {"Local": GALAR, "Galarian Forms": GALAR_FORMS,
                  "Gigantamax Forms": GALAR_GIGANTAMAX},
        "Hisui": {"Local": HISUI, "Hisuian Forms": HISUI_FORMS},
    }
    return region_groups.get(region, {})
| StarcoderdataPython |
3388016 | <reponame>Zhylkaaa/nboost<filename>nboost/plugins/qa/base.py<gh_stars>0
from typing import Tuple
import time
from nboost.plugins import Plugin
from nboost.delegates import ResponseDelegate
from nboost.database import DatabaseRow
from nboost import defaults
from nboost.logger import set_logger
class QAModelPlugin(Plugin):
    """Base plugin that runs an extractive question-answering model over the
    candidate values of a search response.

    Subclasses implement :meth:`get_answer`; this class handles iterating
    the candidates, timing, logging, and attaching the answers to the
    response body under ``body.nboost.qa``.
    """

    def __init__(self,
                 max_query_length: type(defaults.max_query_length) = defaults.max_query_length,
                 model_dir: str = defaults.qa_model_dir,
                 max_seq_len: int = defaults.max_seq_len,
                 **kwargs):
        """Store model configuration and set up a logger.

        :param max_query_length: maximum token length allowed for the query
        :param model_dir: directory of the QA model to load
        :param max_seq_len: maximum combined sequence length for the model
        """
        super().__init__(**kwargs)
        self.model_dir = model_dir
        self.max_query_length = max_query_length
        self.max_seq_len = max_seq_len
        self.logger = set_logger('qamodel', verbose=True)

    def on_response(self, response: ResponseDelegate, db_row: DatabaseRow):
        """Extract an answer from each candidate value and attach the results.

        Records the total QA wall-clock time on *db_row* and writes one
        answer dict per candidate to ``body.nboost.qa``.
        """
        if not response.cvalues:
            return
        start_time = time.perf_counter()
        responses = []
        for cvalue in response.cvalues:
            answer, start_pos, stop_pos, score = self.get_answer(response.request.query, cvalue)
            # Lazy %-style args defer string formatting until the record is
            # actually emitted.
            self.logger.info('%s \t %s, %s, %s, %s',
                             response.request.qa_threshold,
                             answer, start_pos, stop_pos, score)
            responses.append({
                'answer_text': answer,
                'answer_start_pos': start_pos,
                'answer_stop_pos': stop_pos,
                'answer_score': score,
            })
        db_row.qa_time = time.perf_counter() - start_time
        response.set_path('body.nboost.qa', responses)

    def get_answer(self, query: str, cvalue: str) -> Tuple[str, int, int, float]:
        """Return (answer, start_pos, end_pos, score); implemented by subclasses."""
        raise NotImplementedError()
| StarcoderdataPython |
86796 | # -*- coding: utf-8 -*-
"""Module defining Interval model and operations"""
# ActiveState recipe 576816
class Interval(object):
    """
    Represents a closed interval [start, end]: both the start and the end
    position are included (see ``__contains__``).

    ``start`` and ``end`` do not have to be numeric types, but they must be
    mutually comparable and satisfy ``start <= end``.
    """

    __slots__ = ('_start', '_end')

    def __init__(self, start, end):
        "Construct, start must be <= end."
        if start > end:
            raise ValueError('Start (%s) must not be greater than end (%s)' % (start, end))
        self._start = start
        self._end = end

    @property
    def start(self):
        """The interval's start"""
        return self._start

    @property
    def end(self):
        """The interval's end"""
        return self._end

    def __str__(self):
        "As string."
        return '[%s,%s]' % (self.start, self.end)

    def __repr__(self):
        "String representation."
        return '[%s,%s]' % (self.start, self.end)

    # Rich comparison methods replace the original Python 2 ``__cmp__``
    # protocol, which Python 3 ignores entirely (its implementation also
    # relied on the removed ``cmp`` builtin, so every ``self > other`` in
    # the methods below raised TypeError under Python 3).  Ordering is
    # lexicographic on (start, end), matching the old behaviour.
    def _key(self):
        """Comparison key: intervals order by start, then by end."""
        return (self._start, self._end)

    def __eq__(self, other):
        if not isinstance(other, Interval):
            return NotImplemented
        return self._key() == other._key()

    def __lt__(self, other):
        if not isinstance(other, Interval):
            return NotImplemented
        return self._key() < other._key()

    def __le__(self, other):
        if not isinstance(other, Interval):
            return NotImplemented
        return self._key() <= other._key()

    def __gt__(self, other):
        if not isinstance(other, Interval):
            return NotImplemented
        return self._key() > other._key()

    def __ge__(self, other):
        if not isinstance(other, Interval):
            return NotImplemented
        return self._key() >= other._key()

    def __hash__(self):
        "Hash."
        return hash(self.start) ^ hash(self.end)

    def intersection(self, other):
        "Intersection. @return: An empty intersection if there is none."
        if self > other:
            other, self = self, other
        if self.end <= other.start:
            # Disjoint: return a degenerate (empty) interval.
            return Interval(self.start, self.start)
        return Interval(other.start, min(self.end, other.end))

    def hull(self, other):
        "@return: Interval containing both self and other."
        if self > other:
            other, self = self, other
        return Interval(self.start, max(self.end, other.end))

    def overlap(self, other):
        "@return: True iff self intersects other."
        if self > other:
            other, self = self, other
        return self.end > other.start

    def overlapm(self, other):
        "@return: True iff selfs overlaps or meets other."
        if self > other:
            other, self = self, other
        return self.end >= other.start

    def move(self, offset):
        "@return: Interval displaced offset to start and end"
        return Interval(self.start + offset, self.end + offset)

    def __contains__(self, item):
        "@return: True iff item in self."
        return self.start <= item <= self.end

    @property
    def zero_in(self):
        "@return: True iff 0 in self."
        return self.start <= 0 <= self.end

    def subset(self, other):
        "@return: True iff self is subset of other."
        return self.start >= other.start and self.end <= other.end

    def proper_subset(self, other):
        "@return: True iff self is proper subset of other."
        return self.start > other.start and self.end < other.end

    @property
    def empty(self):
        "@return: True iff self is empty."
        return self.start == self.end

    @property
    def length(self):
        """@return: Difference between end and start"""
        return self.end - self.start

    @property
    def singleton(self):
        "@return: True iff self.end - self.start == 1."
        return self.end - self.start == 1

    def separation(self, other):
        "@return: The distance between self and other."
        if self > other:
            other, self = self, other
        if self.end > other.start:
            return 0
        else:
            return other.start - self.end
| StarcoderdataPython |
1619845 | <filename>bakkes_rcon/__init__.py
# Re-export the package's public names from its submodules.
from .client import *
from .exceptions import *
from .inventory import *

# Package version string.
__version__ = '0.1.0'

# Explicit public API of the package.
__all__ = [
    'BakkesRconClient',
    'Quality',
]
| StarcoderdataPython |
120196 | <filename>src/02/count.py
import sys
# Count the alphabetic and numeric characters read from stdin and report
# both totals.  ("huruf" = letters, "angka" = digits.)
n_huruf = 0
n_angka = 0

for line in sys.stdin:
    for chara in line:
        if chara.isdigit():
            n_angka += 1
        elif chara.isalpha():
            n_huruf += 1

# Output format kept byte-identical to the original (no trailing newline
# after the second line).
sys.stdout.write('Jumlah huruf :' + str(n_huruf))
sys.stdout.write('\n')
sys.stdout.write('Jumlah angka :' + str(n_angka))
| StarcoderdataPython |
146975 | <filename>RS_scpFile.py<gh_stars>1-10
import paramiko
#hostname = '10.57.29.175'
# NOTE(review): credentials and paths are hard-coded; move to config or
# environment variables before any real use.
hostname = '10.57.29.175'
password = '<PASSWORD>'
username = "coding4"
# NOTE(review): `port` is defined but the Transport below hard-codes 22.
port = 22
mypath='/Users/alan/Desktop/my_file'
remotepath='/Users/coding4/my_file'
# Open an SSH transport and an SFTP session over it.
t = paramiko.Transport((hostname, 22))
t.connect(username=username, password=password)
sftp = paramiko.SFTPClient.from_transport(t)
def trasnfer_file(path):
    """ Transfer the local file at `path` over SFTP.

    Args:
        path(String): local path of the file to transfer

    NOTE(review): `sftp.put(localpath, remotepath)` uploads `path` to the
    remote location `mypath`, while `remotepath` is never used -- presumably
    the second argument should be `remotepath`; confirm the intended
    direction/destination.  The function name "trasnfer_file" is also a
    typo, kept for caller compatibility.
    """
    sftp.put(path, mypath);
3267261 | # Copyright 2022 The T5X Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Train state for passing around objects during training."""
from typing import Any, Dict, Optional
from flax import core as flax_core
from flax import optim
from flax import struct
from flax.core import scope as flax_scope
import jax
import jax.numpy as jnp
# Concrete class of a JAX pytree structure, obtained from a trivial pytree
# (jax does not export the treedef class directly).
PyTreeDef = type(jax.tree_structure(None))

# Shared frozen empty dict used as the default for `flax_mutables`.
EMPTY_DICT = flax_core.freeze({})


class TrainState(struct.PyTreeNode):
  """Simple train state for holding parameters, step, optimizer state."""

  # Wrapped flax optimizer; holds both the target params and optimizer state.
  _optimizer: optim.Optimizer
  # Variables related with axes specification.
  axes_variables: Optional[flax_scope.FrozenVariableDict] = None
  # Flax mutable fields.
  flax_mutables: Optional[flax_scope.FrozenDict] = EMPTY_DICT

  @property
  def step(self) -> jnp.ndarray:
    """Current training step, as tracked by the optimizer state."""
    return self._optimizer.state.step

  @property
  def params(self) -> PyTreeDef:
    """Model parameters (the optimizer target)."""
    return self._optimizer.target

  @property
  def param_states(self) -> PyTreeDef:
    """Per-parameter optimizer state (e.g. momentum slots)."""
    return self._optimizer.state.param_states

  @property
  def optimizer_name(self):
    """Returns the name of the used optimizer."""
    return self._optimizer.optimizer_def.__class__.__name__

  def state_dict(self) -> Dict[str, Any]:
    """Serialize the optimizer state (plus mutables, if any) to a dict."""
    state_dict = self._optimizer.state_dict()
    if self.flax_mutables:
      state_dict['flax_mutables'] = flax_core.unfreeze(self.flax_mutables)
    return state_dict

  def apply_gradient(self,
                     grads,
                     learning_rate,
                     flax_mutables=EMPTY_DICT) -> 'TrainState':
    """Apply *grads* and return an updated TrainState.

    NOTE(review): the default resets `flax_mutables` to empty rather than
    carrying the current ones forward -- callers that use mutables must
    pass them explicitly; confirm this is intended.
    """
    new_optimizer = self._optimizer.apply_gradient(
        grads, learning_rate=learning_rate)
    return self.replace(_optimizer=new_optimizer, flax_mutables=flax_mutables)

  def restore_state(self, state_dict: Dict[str, Any]) -> 'TrainState':
    """Rebuild a TrainState from a dict produced by `state_dict`."""
    new_optimizer = self._optimizer.restore_state(state_dict)
    return self.replace(
        _optimizer=new_optimizer,
        flax_mutables=flax_core.freeze(state_dict['flax_mutables'])
        if 'flax_mutables' in state_dict else EMPTY_DICT)

  def update_step(self, step: int) -> 'TrainState':
    """Return a copy of this state with the step counter set to *step*."""
    return self.replace(
        _optimizer=self._optimizer.replace(
            state=self._optimizer.state.replace(step=step)),
        flax_mutables=self.flax_mutables)

  @classmethod
  def from_flax_optimizer(
      cls,
      optimizer: optim.Optimizer,
      axes_variables: Optional[flax_scope.FrozenVariableDict] = None,
      flax_mutables: Optional[flax_scope.FrozenDict] = EMPTY_DICT
  ) -> 'TrainState':
    """Alternate constructor wrapping an existing flax optimizer."""
    return cls(
        _optimizer=optimizer,
        axes_variables=axes_variables,
        flax_mutables=flax_mutables)
| StarcoderdataPython |
172741 | import io
from PIL import Image, ImageDraw, ImageFont
from fastapi.param_functions import File
from app.models.schemas.words import (
WordOutWithIdDate
)
from app.resources import strings
from typing import List
# import datetime as dt
from O365 import calendar
import textwrap
""" Return byte buffer if output=='__buffer', else export file 'output' @"""
def create_bitmap_from_word(word: WordOutWithIdDate, output="temp.bmp"):
    """Render *word* as an 800x480 1-bit bitmap (for an e-ink display).

    Layout: big headword + italic type, a rule, the full word, up to
    MAX_CONTENT_LINES wrapped lines of content, a rule, and the last-update
    timestamp.

    Returns the BMP bytes when output == "__buffer", the PIL Image when
    output == "__image_object"; otherwise writes to PATH_STATIC_FOLDER/output.
    """
    MAX_CONTENT_LINES = 9  # content lines that fit between the two rules

    # Mode "1" = 1-bit pixels; 255 = white background, fill=0 draws black.
    out = Image.new("1", (800, 480), 255)
    fmb60 = ImageFont.truetype(strings.PATH_FONTS_FOLDER +
                               "freemono/FreeMonoBold.ttf", 60)
    fm30 = ImageFont.truetype(strings.PATH_FONTS_FOLDER + "freemono/FreeMono.ttf", 30)
    fmi30 = ImageFont.truetype(strings.PATH_FONTS_FOLDER +
                               "freemono/FreeMonoOblique.ttf", 30)
    fmi15 = ImageFont.truetype(strings.PATH_FONTS_FOLDER +
                               "freemono/FreeMonoOblique.ttf", 15)
    # get a drawing context
    d = ImageDraw.Draw(out)
    # Header: headword and word type.
    d.text((5, 5), word.word, font=fmb60, fill=0)
    d.text((5, 60), word.type, font=fmi30, fill=0)
    d.line((0, 100, 800, 100), fill=0)
    # Body: full word followed by wrapped content lines.
    d.text((5, 120), word.fullword, font=fm30, fill=0)
    offset = 0
    count = 0
    for line in textwrap.wrap(str(word.content), break_long_words=False, width=43):
        d.text((5, 160 + offset), line, font=fm30, fill=0)
        offset += fm30.getsize(line)[1]
        count += 1
        if count == MAX_CONTENT_LINES:
            break
    d.line((0, 435, 800, 435), fill=0)
    # Footer: last-update timestamp.
    d.text((5, 445), "Last update: " +
           word.updated_at.strftime("%m/%d/%Y, %H:%M:%S"),
           font=fmi15, fill=0
           )
    if output == "__buffer":
        img_byte_arr = io.BytesIO()
        out.save(img_byte_arr, format='bmp')
        img_byte_arr = img_byte_arr.getvalue()
        return img_byte_arr
    elif output == "__image_object":
        return out
    else:
        out.save(strings.PATH_STATIC_FOLDER + output)
# https://stackoverflow.com/questions/46385999/transform-an-image-to-a-bitmap
def process_bitmap_from_file(file: File, output="temp.bmp"):
    """Convert an uploaded image to an 800x480 1-bit BMP.

    Returns the BMP bytes when output == "__buffer"; otherwise writes the
    bitmap to PATH_STATIC_FOLDER/output.
    """
    out = Image.open(io.BytesIO(file))
    # Resize first (in 8-bit grayscale, so interpolation has gray levels to
    # work with) and only then threshold to 1-bit.  The previous order --
    # convert('1') before resize -- discarded gray levels early and caused
    # the poor quality noted in the original TODO.
    o2 = out.convert('L').resize((800, 480)).convert('1')
    if output == "__buffer":
        img_byte_arr = io.BytesIO()
        o2.save(img_byte_arr, format='bmp')
        img_byte_arr = img_byte_arr.getvalue()
        return img_byte_arr
    else:
        o2.save(strings.PATH_STATIC_FOLDER + output, format='bmp')
def create_bitmap_from_calendar(all_events: List[calendar.Event], output="temp.bmp"):
    """Render a Mon-Fri weekly timetable (800x480, 1-bit) of *all_events*.

    Events are placed on a 5-column (weekday) x 6-row (two-hour slot,
    08:00-20:00) grid; events outside that window are skipped.

    Returns None when there are no events; the BMP bytes when
    output == "__buffer"; otherwise writes to PATH_STATIC_FOLDER/output.
    """
    if len(all_events) == 0:
        return None
    out = Image.new("1", (800, 480), 255)
    # NOTE(review): font paths are relative to a Pillow source checkout --
    # presumably only valid in the dev environment; confirm.
    fnt = ImageFont.truetype("Pillow/Tests/fonts/FreeMonoBold.ttf", 25)
    fnt20 = ImageFont.truetype("Pillow/Tests/fonts/FreeMonoBold.ttf", 20)
    fnt10 = ImageFont.truetype("Pillow/Tests/fonts/FreeMonoBold.ttf", 15)
    # get a drawing context
    d = ImageDraw.Draw(out)
    d1 = 130  # column width (one weekday)
    y1 = 5    # y position of the weekday header row
    x1 = 110  # x position of the first weekday label
    d.text((x1 + 0 * d1, y1), "Mon", font=fnt, fill=0)
    d.text((x1 + 1 * d1, y1), "Tue", font=fnt, fill=0)
    d.text((x1 + 2 * d1, y1), "Wed", font=fnt, fill=0)
    d.text((x1 + 3 * d1, y1), "Thurs", font=fnt, fill=0)
    d.text((x1 + 4 * d1 + 20, y1), "Frid", font=fnt, fill=0)
    # Outer border of the grid area.
    d.line((80, 35, 800, 35), fill=0, width=2)
    d.line((80, 35, 80, 480), fill=0, width=2)
    # Time labels along the left edge.
    d2 = 70  # row height (one two-hour slot)
    x2 = 5
    y2 = 27
    d.text((x2, y2 + 0 * d2), "08:00", font=fnt20, fill=0)
    d.text((x2, y2 + 1 * d2), "10:00", font=fnt20, fill=0)
    d.text((x2, y2 + 2 * d2), "12:00", font=fnt20, fill=0)
    d.text((x2, y2 + 3 * d2), "14:00", font=fnt20, fill=0)
    d.text((x2, y2 + 4 * d2), "16:00", font=fnt20, fill=0)
    d.text((x2, y2 + 5 * d2), "18:00", font=fnt20, fill=0)
    d.text((x2, y2 + 6 * d2), "20:00", font=fnt20, fill=0)
    # Horizontal slot separators.
    d.line((80, 35 + 1 * d2, 800, 35 + 1 * d2), fill=0, width=1)
    d.line((80, 35 + 2 * d2, 800, 35 + 2 * d2), fill=0, width=1)
    d.line((80, 35 + 3 * d2, 800, 35 + 3 * d2), fill=0, width=1)
    d.line((80, 35 + 4 * d2, 800, 35 + 4 * d2), fill=0, width=1)
    d.line((80, 35 + 5 * d2, 800, 35 + 5 * d2), fill=0, width=1)
    d.line((80, 35 + 6 * d2, 800, 35 + 6 * d2), fill=0, width=1)
    # Vertical weekday separators.
    d.line((80 + 1 * (d1 - 0), 35, 80 + 1 * (d1 - 0), 480), fill=0, width=1)
    d.line((80 + 2 * (d1 - 0), 35, 80 + 2 * (d1 - 0), 480), fill=0, width=1)
    d.line((80 + 3 * (d1 - 0), 35, 80 + 3 * (d1 - 0), 480), fill=0, width=1)
    d.line((80 + 4 * (d1 - 0), 35, 80 + 4 * (d1 - 0), 480), fill=0, width=1)
    d.line((80 + 5 * (d1 - 0), 35, 80 + 5 * (d1 - 0), 480), fill=0, width=1)
    # Two-hour slot boundaries: 08-10, 10-12, ..., 18-20.  The final 20 is
    # required so events starting between 18:00 and 20:00 land in the last
    # row (previously they were silently dropped even though the grid draws
    # that row and the `y <= 5` guard below anticipates 6 rows).
    time_range = [8, 10, 12, 14, 16, 18, 20]  # TODO: move to config
    for event in all_events:
        y = -1
        for j in range(len(time_range) - 1):
            if time_range[j] <= event.start.hour < time_range[j + 1]:
                y = j
                break
        x = event.start.weekday()  # 0 = Monday ... 4 = Friday
        if 0 <= y <= 5 and 0 <= x <= 4:
            # Draw this event into its grid cell: wrapped subject lines,
            # then the location below them.
            offset = 0
            for line in textwrap.wrap(event.subject, break_long_words=False, width=11):
                d.text((90 + x * d1, 40 + y * d2 + offset), line, font=fnt10, fill=0)
                offset += fnt10.getsize(line)[1]
            d.text((90 + 30 + x * d1, 87 + y * d2),
                   event.location['displayName'], font=fnt10, fill=0)
    if output == "__buffer":
        img_byte_arr = io.BytesIO()
        out.save(img_byte_arr, format='bmp')
        img_byte_arr = img_byte_arr.getvalue()
        return img_byte_arr
    else:
        out.save(strings.PATH_STATIC_FOLDER + output, format='bmp')
| StarcoderdataPython |
3383349 | from unittest import TestCase
from os.path import dirname, realpath
from fil_io.json import load_single
from jsonschema.exceptions import ValidationError
class TestSchemaValidation(TestCase):
    # Shared base class for all SchemaValidator suites below; holds no tests
    # itself, it only lets the concrete suites share a common parent.
    pass
class TestFullSchemaValidation(TestSchemaValidation):
    """Validate complete items against the basic and nested JSON schemas."""

    def test_basic_schema(self):
        """A conforming item passes validation against the basic schema."""
        from aws_schema import SchemaValidator
        test_item = load_single(
            f"{dirname(realpath(__file__))}/test_data/database/item_basic.json"
        )
        schema_file = (
            f"{dirname(realpath(__file__))}//test_data/database/schema_basic.json"
        )
        validator = SchemaValidator(file=schema_file)
        validator.validate(test_item)

    def test_basic_schema_wrong_data(self):
        """A non-conforming item raises jsonschema ValidationError."""
        from aws_schema import SchemaValidator
        test_item = load_single(
            f"{dirname(realpath(__file__))}/test_data/database/item_basic_wrong.json"
        )
        schema_file = (
            f"{dirname(realpath(__file__))}/test_data/database/schema_basic.json"
        )
        validator = SchemaValidator(file=schema_file)
        # assertRaises is clearer than try/self.fail()/except and consistent
        # with the other suites in this module.
        with self.assertRaises(ValidationError):
            validator.validate(test_item)

    def test_nested_schema(self):
        """Nested schema resolves relative $ref files from the test directory."""
        from os import chdir, getcwd
        # The nested schema references sibling files by relative path, so the
        # working directory must be the test directory while validating.
        actual_cwd = getcwd()
        chdir(dirname(realpath(__file__)))
        try:
            from aws_schema import SchemaValidator
            test_item = load_single(
                f"{dirname(realpath(__file__))}/test_data/database/item_nested.json"
            )
            schema_file = (
                f"{dirname(realpath(__file__))}/test_data/database/schema_nested.json"
            )
            validator = SchemaValidator(file=schema_file)
            validator.validate(test_item)
        finally:
            # BUG FIX: the original caught BaseException into a local `exc`
            # and then checked `"exc" in globals()`, which is always False for
            # a local name, so real failures were silently swallowed.  A plain
            # try/finally restores the cwd and lets any error propagate.
            chdir(actual_cwd)

    def test_basic_schema_without_required(self):
        """Missing required top-level key is tolerated with no_required_check."""
        from aws_schema import SchemaValidator
        test_item = load_single(
            f"{dirname(realpath(__file__))}/test_data/database/item_basic.json"
        )
        test_item.pop("some_float")
        schema_file = (
            f"{dirname(realpath(__file__))}/test_data/database/schema_basic.json"
        )
        validator = SchemaValidator(file=schema_file)
        validator.validate(test_item, no_required_check=True)

    def test_basic_schema_without_required_nested(self):
        """Missing required nested key is tolerated with no_required_check."""
        from aws_schema import SchemaValidator
        test_item = load_single(
            f"{dirname(realpath(__file__))}/test_data/database/item_basic.json"
        )
        test_item["some_nested_dict"]["KEY1"].pop("subKEY2")
        schema_file = (
            f"{dirname(realpath(__file__))}/test_data/database/schema_basic.json"
        )
        validator = SchemaValidator(file=schema_file)
        validator.validate(test_item, no_required_check=True)
class TestGetSubSchema(TestSchemaValidation):
    """Resolve sub-schemas from the nested schema via key paths."""

    @classmethod
    def setUpClass(cls) -> None:
        # Load the nested schema once and build a single shared validator
        # for all tests in this suite.
        cls.raw_schema_file = (
            f"{dirname(realpath(__file__))}/test_data/database/schema_nested.json"
        )
        cls.raw_schema = load_single(cls.raw_schema_file)
        from aws_schema import SchemaValidator
        cls.validator = SchemaValidator(file=cls.raw_schema_file)

    def test_get_first_level(self):
        """A single-key path returns the top-level property schema, depth 1."""
        sub_schema, depth = self.validator.get_sub_schema(["some_dict"])
        self.assertEqual(self.raw_schema["properties"]["some_dict"], sub_schema)
        self.assertEqual(1, depth)

    def test_get_nested_dict(self):
        """A three-key path descends through nested `properties`, depth 3."""
        sub_schema, depth = self.validator.get_sub_schema(
            ["some_nested_dict", "KEY1", "subKEY2"]
        )
        self.assertEqual(
            self.raw_schema["properties"]["some_nested_dict"]["properties"]["KEY1"][
                "properties"
            ]["subKEY2"],
            sub_schema,
        )
        self.assertEqual(3, depth)

    def test_get_array(self):
        """Array-typed properties resolve like any other top-level property."""
        sub_schema, depth = self.validator.get_sub_schema(["some_array"])
        self.assertEqual(self.raw_schema["properties"]["some_array"], sub_schema)
        self.assertEqual(1, depth)

    def test_get_referenced_sub_schema_from_dict(self):
        """A $ref inside a dict resolves into the referenced definitions file."""
        sub_schema, depth = self.validator.get_sub_schema(
            ["some_nested_dict", "KEY1", "subKEY3"]
        )
        nested_schema = load_single(
            f"{dirname(realpath(__file__))}/test_data/database/schema_nested_definitions.json"
        )
        self.assertEqual(
            nested_schema["definitions"]["third_nested_dict_key"], sub_schema
        )
        self.assertEqual(3, depth)

    def test_get_referenced_sub_schema_from_array(self):
        """A $ref on array items resolves into the referenced child schema."""
        sub_schema, depth = self.validator.get_sub_schema(["nested_array", "KEY1"])
        nested_schema = load_single(
            f"{dirname(realpath(__file__))}/test_data/database/schema_nested_array_child.json"
        )
        self.assertEqual(nested_schema["properties"]["KEY1"], sub_schema)
        self.assertEqual(2, depth)

    def test_get_one_of_sub_schema(self):
        """oneOf branches are merged into a single {'oneOf': [...]} schema."""
        sub_schema, depth = self.validator.get_sub_schema(["oneOfKey", "oneOfKey1"])
        self.assertEqual(
            {
                "oneOf": [
                    {"type": "integer"},
                    {"type": "string"}
                ]
            },
            sub_schema
        )
        # NOTE(review): depth is 3 for a two-key path — presumably oneOf
        # resolution counts as an extra level; confirm against SchemaValidator.
        self.assertEqual(3, depth)

    def test_get_subschema_with_more_depth_than_available(self):
        """Paths beyond the schema's depth stop at the deepest schema found."""
        sub_schema, depth = self.validator.get_sub_schema(
            ["some_nested_dict", "KEY2", "anySubKey", "nextSubLevel"]
        )
        self.assertEqual(
            self.raw_schema["properties"]["some_nested_dict"]["properties"]["KEY2"],
            sub_schema,
        )
        self.assertEqual(2, depth)
class TestCheckSubItemType(TestSchemaValidation):
    """Validate partial items (sub-parts) against the nested schema."""

    @classmethod
    def setUpClass(cls) -> None:
        # One shared validator over the nested schema for all tests.
        cls.raw_schema_file = (
            f"{dirname(realpath(__file__))}/test_data/database/schema_nested.json"
        )
        cls.raw_schema = load_single(cls.raw_schema_file)
        from aws_schema import SchemaValidator
        cls.validator = SchemaValidator(file=cls.raw_schema_file)

    def test_first_level_string(self):
        """A lone top-level string property validates on its own."""
        self.validator.validate_sub_part({"some_string": "abcdef"})

    def test_first_level_int(self):
        """A lone top-level integer property validates on its own."""
        self.validator.validate_sub_part({"some_int": 3})

    def test_nested_dict_end_value(self):
        """A single leaf value deep inside a nested dict validates."""
        self.validator.validate_sub_part({"some_nested_dict": {"KEY1": {"subKEY2": 4}}})

    def test_nested_dict_unspecified_sub_type(self):
        """Keys without an explicit sub-schema accept arbitrary values."""
        self.validator.validate_sub_part({"some_nested_dict": {"KEY2": {"anyKey": "any string at lowest level"}}})

    def test_nested_dict_end_value_wrong_value_with_schema_error_path(self):
        """A wrong leaf type raises and reports the full error path."""
        from jsonschema import ValidationError
        with self.assertRaises(ValidationError) as VE:
            self.validator.validate_sub_part(
                {"some_nested_dict": {"KEY1": {"subKEY3": ["string_value", 4]}}}
            )
        self.assertEqual("4 is not of type 'string'", VE.exception.args[0])
        self.assertEqual(
            ["some_nested_dict", "KEY1", "subKEY3", 1], list(VE.exception.path)
        )

    def test_nested_dict_pattern_properties(self):
        """A key matching the declared patternProperties regex validates."""
        new_sub_dict = {
            "some_nested_dict": {
                "KEY1": {"subKEY4": {"abc": [{"sub_sub_key": "some_string_value"}]}}
            }
        }
        self.validator.validate_sub_part(new_sub_dict)

    def test_nested_dict_pattern_properties_wrong_pattern(self):
        """A key matching no patternProperties regex raises with both patterns."""
        from jsonschema import ValidationError
        new_sub_dict = {
            "some_nested_dict": {
                "KEY1": {"subKEY4": {"Abc": [{"sub_sub_key": "some_string_value"}]}}
            }
        }
        with self.assertRaises(ValidationError) as VE:
            self.validator.validate_sub_part(new_sub_dict)
        self.assertEqual(
            "none of the patternProperties matched: ['^[a-z]+$', '^[a-z0-9]+$']",
            VE.exception.args[0],
        )
        # ToDo path isn't added when checking patternProperties
        # self.assertEqual(
        #     ["some_nested_dict", "KEY1", "subKEY4"], list(VE.exception.path)
        # )

    def test_nested_dict_dict_value(self):
        """Several sibling leaves of a nested dict validate together."""
        self.validator.validate_sub_part(
            {"some_nested_dict": {"KEY1": {"subKEY1": "some_string", "subKEY2": 5}}}
        )

    def test_array_item1(self):
        """An array of strings satisfies the array item schema."""
        self.validator.validate_sub_part({"some_array": ["some_string"]})

    def test_array_item2(self):
        """An array of integers satisfies the array item schema."""
        self.validator.validate_sub_part({"some_array": [34]})

    def test_array_item3(self):
        """An array of conforming dicts satisfies the array item schema."""
        self.validator.validate_sub_part(
            {"some_array": [{"KEY1": {"subKEY1": "string", "subKEY2": 45}}]}
        )

    def test_array_item_not_given_in_list(self):
        """A bare scalar where an array is required raises."""
        from jsonschema import ValidationError
        with self.assertRaises(ValidationError):
            self.validator.validate_sub_part(
                {"some_array": "some_string_not_in_an_array"}
            )

    def test_array_item_wrong_type(self):
        """A wrongly-typed array element raises."""
        from jsonschema import ValidationError
        with self.assertRaises(ValidationError):
            self.validator.validate_sub_part({"some_array": [[[1]]]})
class TestCustomValidator(TestSchemaValidation):
    """Exercise SchemaValidator with a custom jsonschema type checker.

    The schema declares a non-standard "set" type: the stock Draft7 validator
    rejects it, while an extended validator with an `is_set` type check
    accepts Python set instances.
    """

    @staticmethod
    def is_set(checker, instance):
        # Type-checker callback for the custom "set" JSON-schema type.
        return isinstance(instance, set)

    # Schema using the non-standard "set" type alongside a normal string.
    schema = {
        "properties": {
            "some_string": {"type": "string"},
            "some_set": {"type": "set"}
        }
    }
    # Item containing a real Python set to validate against the schema above.
    item = {
        "some_string": "abc",
        "some_set": {"a", "b", "c"}
    }

    def test_with_standard_validator(self):
        """The default validator does not know the "set" type and raises."""
        from aws_schema import SchemaValidator
        from jsonschema.exceptions import UnknownType
        validator = SchemaValidator(raw=self.schema)
        with self.assertRaises(UnknownType):
            validator.validate(self.item)

    def test_with_custom_validator(self):
        """An extended Draft7 validator with a "set" checker validates the item."""
        from jsonschema.validators import Draft7Validator, extend
        from aws_schema import SchemaValidator
        custom_validator = extend(
            Draft7Validator,
            type_checker=Draft7Validator.TYPE_CHECKER.redefine_many(
                {
                    u"set": self.is_set
                }
            )
        )
        validator = SchemaValidator(raw=self.schema, custom_validator=custom_validator)
        validator.validate(self.item)

    def test_part_with_custom_validator(self):
        """The custom type checker also applies to sub-part validation."""
        from jsonschema.validators import Draft7Validator, extend
        from aws_schema import SchemaValidator
        custom_validator = extend(
            Draft7Validator,
            type_checker=Draft7Validator.TYPE_CHECKER.redefine_many(
                {
                    u"set": self.is_set
                }
            )
        )
        validator = SchemaValidator(raw=self.schema, custom_validator=custom_validator)
        validator.validate_sub_part({"some_set": self.item["some_set"]})
| StarcoderdataPython |
90967 | <filename>src/drivers/ssc_interface/launch/ssc_interface.launch.py<gh_stars>1-10
# Copyright 2020 The Autoware Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from launch import LaunchDescription
from launch.substitutions import LaunchConfiguration
from launch.actions import DeclareLaunchArgument
from launch_ros.actions import Node
from ament_index_python import get_package_share_directory
import os
def get_share_file(package_name, file_name):
    """Return the absolute path of *file_name* inside *package_name*'s ROS share directory."""
    return os.path.join(get_package_share_directory(package_name), file_name)
def generate_launch_description():
    """
    Launch necessary dependencies for working with AutonomouStuff SSC and ROS 2/Autoware.Auto.

    The SSC interface, which translates inputs and outputs to and from Autoware messages.

    :return: a LaunchDescription with the launch arguments and the SSC interface node
    """
    # --------------------------------- Params -------------------------------
    # In combination 'raw', 'basic' and 'high_level' control
    # in what mode of control comands to operate in,
    # only one of them can be active at a time with a value
    control_command_param = DeclareLaunchArgument(
        'control_command',
        default_value="basic",  # use "raw", "basic" or "high_level"
        description='command control mode')

    # Default ssc_interface params (YAML shipped in this package's share dir)
    ssc_interface_param = DeclareLaunchArgument(
        'ssc_interface_param',
        default_value=[
            get_share_file('ssc_interface', 'param/defaults.param.yaml')
        ],
        description='Path to config file for SSC interface')

    # -------------------------------- Nodes-----------------------------------

    # SSC interface: bridges Autoware topics in the /vehicle namespace to the
    # AutonomouStuff SSC topics under /ssc.
    ssc_interface = Node(
        package='ssc_interface',
        node_name='ssc_interface_node',
        node_executable='ssc_interface_node_exe',
        node_namespace='vehicle',
        output='screen',
        parameters=[LaunchConfiguration('ssc_interface_param')],
        remappings=[
            ('gear_select', '/ssc/gear_select'),
            ('arbitrated_speed_commands', '/ssc/arbitrated_speed_commands'),
            ('arbitrated_steering_commands', '/ssc/arbitrated_steering_commands'),
            ('turn_signal_command', '/ssc/turn_signal_command'),
            ('dbw_enabled_feedback', '/ssc/dbw_enabled_fedback'),
            ('gear_feedback', '/ssc/gear_feedback'),
            ('velocity_accel_cov', '/ssc/velocity_accel_cov'),
            ('steering_feedback', '/ssc/steering_feedback'),
            ('vehicle_kinematic_state_cog', '/vehicle/vehicle_kinematic_state'),
            ('state_report_out', '/vehicle/vehicle_state_report'),
            ('state_command', '/vehicle/vehicle_state_command')
        ]
    )

    ld = LaunchDescription([
        control_command_param,
        ssc_interface_param,
        ssc_interface
    ])
    return ld
| StarcoderdataPython |
1600838 | <gh_stars>0
import requests
import re

# OverTheWire natas2 solution: the challenge web root exposes
# /files/users.txt, which contains the password for the next level.
username = 'natas2'
password = '<PASSWORD>'
url = f'http://{username}.natas.labs.overthewire.org/files/users.txt'

# Download the users file with HTTP basic auth and print the natas3 password.
content = requests.get(url, auth=(username, password)).text
matches = re.findall('natas3:(.*)', content)
print(matches[0])
67130 | # Print N reverse
# https://www.acmicpc.net/problem/2742
# Read N from stdin and print N, N-1, ..., 1, one number per line.
n = int(input())
for value in range(n, 0, -1):
    print(value)
| StarcoderdataPython |
3298196 | <reponame>gem763/bmatch-api
from django.shortcuts import render
from django.http import HttpResponse, JsonResponse
from django.conf import settings
from django.views.generic import View
from gensim.models import Doc2Vec #, Word2Vec
import os
import time
import json
import re
import numpy as np
# Create your views here.
# Load the pre-trained doc2vec model once at import time; it is shared by
# every view below.
model_path = os.path.join(settings.BASE_DIR, 'doc2vec.model')
d2v = Doc2Vec.load(model_path)
def test(request):
    """Debug endpoint: return the doc2vec model path as a plain-text response."""
    return HttpResponse(model_path)
# class SearchView(View):
# def post(self, request):
# qry = request.POST.get('qry', None)
# brands = request.POST.get('brands', None)
#
# if (qry is None) | (brands is None):
# return JsonResponse({})
#
# else:
# brands = json.loads(brands)
# qry = qry.split(' ')
# sims = {}
#
# for bname, keywords in brands.items():
# try:
# sims[bname] = float(w2v.wv.n_similarity(keywords, qry))
# except:
# _keywords = [k for k in keywords if k in w2v.wv.vocab]
# _qry = [k for k in qry if k in w2v.wv.vocab]
# if len(_keywords)*len(_qry) != 0:
# sims[bname] = float(w2v.wv.n_similarity(_keywords, _qry))
#
# return JsonResponse(sims)
#
#
# class SearchView_old(View):
# def get(self, request):
# qry = request.GET.get('q', None)
# bnames = request.GET.get('b', None)
#
# if (qry is None) | (bnames is None):
# return JsonResponse({})
#
# else:
# qry = qry.split(' ')
# bnames = bnames.split(' ')
# sims = {}
#
# for bname in bnames:
# try:
# sims[bname] = float(w2v.wv.n_similarity([bname], qry))
# except:
# pass
#
# return JsonResponse(sims)
# class SimwordsView_old(View):
# def post(self, request):
# words = request.POST.get('w', None)
# topn = request.POST.get('topn', 100)
# min = request.POST.get('min', 0.5)
#
# if words is None:
# return JsonResponse({})
#
# else:
# words = words.split(' ')
#
# try:
# simwords = {k:v for k,v in w2v.wv.most_similar(words, topn=int(topn)) if v > float(min)}
# return JsonResponse(simwords)
#
# except:
# _words = [k for k in words if k in w2v.wv.vocab]
# if len(_words) != 0:
# simwords = {k:v for k,v in w2v.wv.most_similar(_words, topn=int(topn)) if v > float(min)}
# return JsonResponse(simwords)
#
# else:
# return JsonResponse({})
class SimwordsView(View):
    """POST endpoint: words most similar to a brand's doc2vec vector.

    POST params: ``bname`` (brand tag, required), ``topn`` (default 100),
    ``min`` (similarity cutoff, default 0.5).  Returns {word: similarity}
    as JSON; an empty object when the brand is missing or unknown.
    """

    def post(self, request):
        bname = request.POST.get('bname', None)
        # topn/min arrive as strings from POST but default to numbers; both
        # forms are accepted by the int()/float() conversions below.
        topn = request.POST.get('topn', 100)
        min = request.POST.get('min', 0.5)
        if bname is None:
            return JsonResponse({})
        else:
            if bname in d2v.docvecs:
                simwords = d2v.wv.most_similar(positive=[d2v.docvecs[bname]], topn=int(topn))
                return JsonResponse({k:v for k,v in simwords if v > float(min)})
            else:
                return JsonResponse({})
class SimbrandsView(View):
    """POST endpoint: brands most similar to a brand or to a free-text query.

    Exactly one of ``bname`` or ``qry`` must be given; any other combination
    returns an empty JSON object.  The result maps brand tag -> similarity.
    """

    def post(self, request):
        qry = request.POST.get('qry', None)
        bname = request.POST.get('bname', None)
        # topn exceeds the number of doc vectors so every brand is returned.
        topn = len(d2v.docvecs) + 10
        if (qry is None) & (bname is not None):
            if bname in d2v.docvecs:
                sims = d2v.docvecs.most_similar(positive=[bname], topn=topn)
                return JsonResponse(dict(sims))
            else:
                return JsonResponse({})
        elif (qry is not None) & (bname is None):
            # Split the query on non-word characters and drop empty tokens.
            # NOTE(review): the pattern should be a raw string r'\W+'; it works
            # here only because '\W' is not a recognized escape.
            qry = [w for w in re.split('\W+', qry) if w!='']
            qry_vec = d2v.infer_vector(qry, epochs=500)
            sims = d2v.docvecs.most_similar(positive=[qry_vec], topn=topn)
            return JsonResponse(dict(sims))
        else:
            return JsonResponse({})
# class SimbrandsView_old(View):
# def _simbrands(self, mykeywords, keywords_dict):
# sims = {}
# for bname, keywords in keywords_dict.items():
# try:
# sims[bname] = float(w2v.wv.n_similarity(keywords, mykeywords))
# except:
# _keywords = [k for k in keywords if k in w2v.wv.vocab]
# _mykeywords = [k for k in mykeywords if k in w2v.wv.vocab]
# if len(_keywords)*len(_mykeywords) != 0:
# sims[bname] = float(w2v.wv.n_similarity(_keywords, _mykeywords))
#
# return sims
#
# def post(self, request):
# qry = request.POST.get('qry', None)
# mybname = request.POST.get('mybname', None)
# brands = request.POST.get('brands', None)
#
# if (qry is None) & (mybname is not None) & (brands is not None):
# keywords_dict = json.loads(brands)
# mykeywords = keywords_dict[mybname]
# sims = self._simbrands(mykeywords, keywords_dict)
# return JsonResponse(sims)
#
# elif (qry is not None) & (mybname is None) & (brands is not None):
# keywords_dict = json.loads(brands)
# mykeywords = qry.split(' ')
# sims = self._simbrands(mykeywords, keywords_dict)
# return JsonResponse(sims)
#
# else:
# return JsonResponse({})
def minmax_scale(dic, max=100, min=0, type=0):
    """Rescale the values of *dic* and return a new dict with int values.

    type == 0: linear min-max rescale of the values onto [min, max].
    type in (1, 2): dampen the value range (shrink the low end toward 0,
    stretch the high end by 20%), multiply by 100 and clip to [min, max].
    Any other type leaves the values unscaled (only truncated to int).

    NOTE: parameter names shadow the builtins ``max``/``min``/``type``; they
    are kept unchanged because callers pass them as keyword arguments.
    """
    scaled = np.array(list(dic.values()))
    if type == 0:
        scaled = np.interp(scaled, (scaled.min(), scaled.max()), (min, max))
    elif type in (1, 2):
        lowest = scaled.min()
        lower_bound = 0 if lowest < 0 else lowest * 0.8
        upper_bound = scaled.max() * 1.2
        scaled = np.interp(scaled, (lowest, scaled.max()), (lower_bound, upper_bound))
        scaled = np.clip(scaled * 100, min, max)
    return {key: int(value) for key, value in zip(dic.keys(), scaled)}
def normalized(scores_pair):
    """Normalize a pair of identity score maps so each brand's two scores sum to 1.

    *scores_pair* maps exactly two identity names to {brand: score} dicts.
    Mutates the input dicts in place and returns the same object.
    """
    bnames = d2v.docvecs.doctags.keys()
    id1, id2 = scores_pair.keys()
    for bname in bnames:
        # NOTE(review): raises ZeroDivisionError if both scores are 0 and
        # KeyError if a model brand is missing from either dict — confirm
        # callers always pass complete, non-degenerate score maps.
        _sum = scores_pair[id1][bname] + scores_pair[id2][bname]
        scores_pair[id1][bname] /= _sum
        scores_pair[id2][bname] /= _sum
    return scores_pair
class IdentityView(View):
    """POST endpoint computing brand-identity scores from identity keywords.

    Without ``bname``: scores every brand in the model against each identity
    word pair and returns {brand: {identity: score}}.  With ``bname``: scores
    only that brand and returns {identity: score}.  Scores are scaled to
    [30, 100] via minmax_scale.
    """

    def post(self, request):
        bname = request.POST.get('bname', None)
        idwords = request.POST.get('idwords', None)
        id_scaletype= int(request.POST.get('id_scaletype', 0))
        # NOTE(review): `weights` is parsed below but never used afterwards.
        weights = request.POST.get('weights', None)
        if idwords is None:
            return JsonResponse({})
        elif bname is None:
            # Score ALL brands: for each identity-word pair, rank every brand
            # vector against the inferred word vector.
            idwords = json.loads(idwords)
            weights = json.loads(weights)
            # topn exceeds the model size so every brand is returned.
            topn = len(d2v.docvecs) + 10
            idty = {}
            for _idwords in idwords:
                scores_pair = {}
                for k,v in _idwords.items():
                    word_vec = d2v.infer_vector([w.strip() for w in v.split(' ')], epochs=500)
                    _scores = dict(d2v.docvecs.most_similar(positive=[word_vec], topn=topn))
                    scores_pair[k] = _scores
                    # print(_scores['engineeredgarment'])
                # Caution! Some brands are missing from the d2v model because
                # they have no Twitter posts.
                if (id_scaletype==0) | (id_scaletype==1):
                    scores_pair = normalized(scores_pair)
                # Pivot {identity: {brand: score}} into {brand: {identity: score}}.
                for idname, scores in scores_pair.items():
                    for _bname, _score in scores.items():
                        if _bname not in idty: idty[_bname] = {}
                        idty[_bname][idname] = _score
            for _bname, _idty in idty.items():
                idty[_bname] = minmax_scale(_idty, max=100, min=30, type=id_scaletype)
            return JsonResponse(idty)
        elif bname in d2v.docvecs:
            # Score a single brand: cosine similarity between the brand vector
            # and each identity word's inferred vector.
            idwords = json.loads(idwords)
            brand_vec = d2v.docvecs[bname]
            idty = {}
            for _idwords in idwords:
                _idty = {}
                for k,v in _idwords.items():
                    word_vec = d2v.infer_vector([w.strip() for w in v.split(' ')], epochs=500)
                    _idty[k] = float(d2v.wv.cosine_similarities(brand_vec, [word_vec])[0])
                if (id_scaletype==0) | (id_scaletype==1):
                    # Normalize each identity pair so its scores sum to 1.
                    _idty_sum = sum(_idty.values())
                    _idty = {k:v/_idty_sum for k,v in _idty.items()}
                idty.update(_idty)
            idty = minmax_scale(idty, max=100, min=30, type=id_scaletype)
            return JsonResponse(idty)
        else:
            return JsonResponse({})
| StarcoderdataPython |
165180 | <reponame>shell909090/acme-tiny<gh_stars>0
#!/usr/bin/python
# -*- coding: utf-8 -*-
'''
@date: 2019-04-28
@author: Shell.Xu
@copyright: 2019, Shell.Xu <<EMAIL>>
@license: MIT
'''
import os
import re
import logging
from os import path
import acme
class Validator(object):
    """ACME http-01 challenge validator.

    Writes each challenge key authorization into a well-known directory
    served over HTTP, optionally self-checks the URL, then submits the
    challenge and waits for it to become valid.
    """

    # Characters outside this set are replaced with '_' when the challenge
    # token is used as a filename.
    re_token = re.compile(r'[^A-Za-z0-9_\-]')

    def __init__(self, path, nocheck=False):
        """*path* is the served .well-known/acme-challenge directory;
        *nocheck* skips the local pre-submission URL check."""
        logging.debug('init validator file: (path=%s, nocheck=%s)', path, nocheck)
        self.acme_path = path
        self.nocheck = nocheck

    def check(self, domain, wkpath, token, keyauth):
        """Fetch the well-known URL ourselves and verify it serves *keyauth*.

        Raises acme.WellKnownCheckError when the file cannot be downloaded
        or its content differs from the expected key authorization.
        """
        logging.info('check wellknown url')
        try:
            wkurl = 'http://{0}/.well-known/acme-challenge/{1}'.format(domain, token)
            data, _, _ = acme.httpget(wkurl)
            if data != keyauth:
                raise acme.WellKnownCheckError()
        except (AssertionError, ValueError) as e:
            raise acme.WellKnownCheckError(
                "couldn't download wellknown file: {}".format(e))
        logging.info('wellknown check passed')

    def auth_domain(self, od, auth_url):
        """Complete the http-01 challenge for one authorization URL.

        *od* carries the account (od.acct) used for signed ACME requests.
        The token file is always removed afterwards (finally block), and
        acme.ChallengeError is raised when the challenge does not validate.
        """
        logging.info('get challenge')
        auth, _, _ = od.acct.signed_get(auth_url, None, err_msg='get challenges error')
        domain = auth['identifier']['value']
        logging.warning('verify %s', domain)
        challenge = [c for c in auth['challenges'] if c['type'] == 'http-01'][0]
        # Sanitize the token for use as a filename.
        token = self.re_token.sub('_', challenge['token'])
        keyauth = '%s.%s' %(token, od.acct.get_thumbprint())
        wkpath = path.join(self.acme_path, token)
        logging.info('write token to %s', wkpath)
        with open(wkpath, 'w') as fo:
            fo.write(keyauth)
        try:
            if not self.nocheck:
                self.check(domain, wkpath, token, keyauth)
            logging.info('submit challenge')
            od.acct.signed_get(challenge['url'], {},
                               'submit challenge error: %s' % domain)
            # Poll until the authorization leaves the 'pending' state.
            auth = od.acct.wait(
                auth_url, {'pending',},
                'check challenge status error for %s' % domain)
            if auth['status'] != 'valid':
                raise acme.ChallengeError(
                    'challenge did not pass for {0}: {1}'.format(domain, auth))
            logging.warning('%s verified!', domain)
        finally:
            logging.info('remove wellknown file %s', wkpath)
            os.remove(wkpath)

    def __call__(self, od):
        """Validate every authorization attached to the order in *od*."""
        for auth_url in od.order['authorizations']:
            self.auth_domain(od, auth_url)
| StarcoderdataPython |
1797823 | <filename>dvt/byod_dvt/fargate/validation/src/app.py
from clevercsv import read_dataframe
from rules import CsvHeaderRule
from rules import FileSizeEncodingRule
import boto3
import s3fs
import csv
import os
import time
import datetime
# Runtime configuration injected via the Fargate task definition environment.
TABLE_NAME = os.environ['TABLE_NAME']  # DynamoDB table holding validation jobs
QUEUE_URL = os.environ['QUEUE_URL']  # SQS queue delivering pending job ids
SOURCE_BUCKET_NAME = os.environ['SOURCE_BUCKET_NAME']  # bucket with uploaded CSVs
TARGET_BUCKET_NAME = os.environ['TARGET_BUCKET_NAME']  # bucket for validation reports
REGION = os.environ['REGION']

# AWS clients/resources shared by the polling loop below.
sqs = boto3.client('sqs', region_name=REGION)
s3 = boto3.client('s3')
dynamodb = boto3.resource('dynamodb', region_name=REGION)
if __name__ == "__main__":
    # collecting queue message one by one until queue is empty
    # NOTE(review): the loop never terminates, even when the queue is empty —
    # presumably intentional for a long-running worker task; confirm.
    while True:
        response = sqs.receive_message(
            QueueUrl=QUEUE_URL,
            AttributeNames=[
                'SentTimestamp'
            ],
            MaxNumberOfMessages=1,
            MessageAttributeNames=[
                'All'
            ],
            # NOTE(review): VisibilityTimeout=0 leaves the message visible to
            # other consumers while it is being processed — confirm only one
            # worker runs, otherwise a job may be processed twice.
            VisibilityTimeout=0,
            WaitTimeSeconds=0
        )
        try:
            # read SQS message; raises KeyError when the queue returned no
            # messages, which is handled below as the empty-queue case
            receipt_handle = response['Messages'][0]['ReceiptHandle']
            job_id = response['Messages'][0]['Body']
            # get job from DDB
            print('Retrieving job %s from DynamoDB...' % (job_id))
            table = dynamodb.Table(TABLE_NAME)
            response = table.get_item(Key={'id': job_id})
            print('Retrieving csv file from S3...')
            filename = '%s.csv' % (job_id)
            obj = s3.get_object(
                Bucket=SOURCE_BUCKET_NAME,
                Key=response['Item']['filename'],
                VersionId=response['Item']['filename_version']
            )
            # run validation rules
            rule1 = CsvHeaderRule()
            print('Validating if header exist and header names...')
            r1_err, r1_messages = rule1.validate(obj)
            rule2 = FileSizeEncodingRule()
            print('Validating file size and encoding...')
            r2_err, r2_messages = rule2.validate(obj)
            print('Validation done')
            # if there are errors...
            if (r1_err or r2_err):
                print('Error found')
                # generate csv report with one (type, message) row per finding
                print('Generating error messages csv file...')
                error_messages = r1_messages + r2_messages
                with open(filename, mode='w') as result_csv:
                    writer = csv.writer(
                        result_csv, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
                    writer.writerow(['type', 'message'])
                    for err in error_messages:
                        writer.writerow(err)
                # upload csv to S3
                print('Uploading to S3...')
                response = s3.upload_file(
                    filename, TARGET_BUCKET_NAME, 'validation/%s' % (filename))
                print('S3 response: %s' % (response))
                # count errors and warnings; each message is (type, text)
                errors = 0
                warnings = 0
                for err in error_messages:
                    if err[0] == 'error':
                        errors = errors + 1
                    else:
                        warnings = warnings + 1
                print('Found %d warnings and %d errors' % (warnings, errors))
                # update DDB table with the report URI, counts and status
                print('Updating job in DynamoDB...')
                response = table.update_item(
                    Key={
                        'id': job_id
                    },
                    UpdateExpression="set result_uri = :r, warnings = :w, errors = :e, #status = :s, end_ts = :d",
                    ExpressionAttributeValues={
                        ':r': 'https://%s.s3-%s.amazonaws.com/validation/%s' % (TARGET_BUCKET_NAME, REGION, filename),
                        ':w': warnings,
                        ':e': errors,
                        # warnings alone do not fail the job, only errors do
                        ':s': 'failed' if errors > 0 else 'success',
                        ':d': datetime.datetime.utcnow().isoformat()
                    },
                    # 'status' is a DynamoDB reserved word, hence the alias
                    ExpressionAttributeNames={
                        '#status': 'status'
                    },
                    ReturnValues="UPDATED_NEW"
                )
                print('DynamoDB response: %s' % (response))
            else:
                print('No error found')
                print('Updating job in DynamoDB...')
                response = table.update_item(
                    Key={
                        'id': job_id
                    },
                    UpdateExpression="set #status = :s, end_ts = :d",
                    ExpressionAttributeValues={
                        ':s': 'success',
                        ':d': datetime.datetime.utcnow().isoformat()
                    },
                    ExpressionAttributeNames={
                        '#status': 'status'
                    },
                    ReturnValues="UPDATED_NEW"
                )
                print('DynamoDB response: %s' % (response))
            # Delete received message from queue
            print('Deleting SQS message from queue...')
            sqs.delete_message(
                QueueUrl=QUEUE_URL,
                ReceiptHandle=receipt_handle
            )
            print('Validation job done')
        # Manage case queue is empty
        except KeyError:
            print('No messages anymore')
        except Exception as error:
            # NOTE(review): on failure the message is not deleted, so SQS will
            # redeliver it — verify a DLQ/retry policy handles poison jobs.
            print('Uncaught exception: %s' % (error))
        time.sleep(1)
1780685 | """
project = Election scraper
author = <NAME>
"""
from time import time
import requests
from bs4 import BeautifulSoup as BS
import csv
import sys
# training website
INPUT = "https://volby.cz/pls/ps2017nss/ps3?xjazyk=CZ"
def get_districts(web):
    """Return election-district page URLs extracted from the overview page.

    E.g. from the volby.cz overview returns
    ([district_1.cz, district_2.cz, ... district_N.cz], base_url).

    :param web: URL of the national results overview page
    :return adr_okresu: list of district page URLs
    :return base_adr: base URL (everything before the last '/')
    """
    base_adr = web[0:web.rfind("/")]
    soup = suck_n_chop(web)
    tab_lines = [tr.find_all("a") for tr in soup.find_all("tr")]
    # District rows start with a link whose text is a NUTS code ("CZ...");
    # the third link in such a row leads to the district page.
    adr_okresu = [base_adr + "/" + group[2]["href"] for group in tab_lines
                  if len(group) != 0 if group[0].text[0:2] == "CZ"]
    return adr_okresu, base_adr
def get_municipalities(web_list, base_adr):
    """Return municipality page URLs for every district page in *web_list*.

    E.g. for the brno-venkov district page contributes
    [Opatovice.cz, Ostopovice.cz, ...]; results of all districts are
    concatenated in input order.

    :param web_list: list of district result-page URLs
    :param base_adr: base URL used to build absolute links
    :return adr_obci: list of municipality page URLs across all districts
    """
    assert web_list != [], "Problém: seznam adres je prázdný."
    adr_obci = []
    for web in web_list:
        soup = suck_n_chop(web)
        tab_lines = [tr.find_all("a") for tr in soup.find_all("tr")]
        # Municipality links have a numeric text (the municipality number).
        # BUG FIX: the original re-assigned adr_obci on every iteration, so
        # only the municipalities of the LAST district were ever returned.
        adr_obci.extend(base_adr + "/" + element["href"] for group in tab_lines
                        if len(group) != 0 for element in group
                        if element.text.isnumeric())
    return adr_obci
def extract_data(web_list):
    """Scrape the municipality result pages and return rows for the CSV.

    E.g. returns [[code, location, ...], [62500, Brno, ...], ...] — the first
    row is the header built from the first page's party list.

    :param web_list: list of municipality result-page URLs
    :return db: header row followed by one data row per municipality
    """
    assert web_list != [], "Problém: seznam adres je prázdný."
    db = [(complete_first_line(suck_n_chop(web_list[0])))]
    for web in web_list:
        soup = suck_n_chop(web)
        # extract the municipality code from the "obec=" query parameter
        code = []
        start = web.find("obec=") + 5
        i = start
        while web[i].isnumeric():
            code.append(web[i])
            i += 1
        code = "".join(code)
        # build one CSV row for this municipality
        db.append(make_package(soup, code))
    return db
def complete_first_line(soup):
    """Build the CSV header row: fixed columns plus one column per party.

    :param soup: BeautifulSoup of a municipality results page
    :return first_line: list of column labels for the CSV X axis
    """
    first_line = ["code", "location", "registered", "envelopes", "valid"]
    tabs = soup.find_all("div", {"class": "t2_470"})
    # Party rows have exactly 11 cells; the party name is in the second cell.
    par_lines = [tr.find_all("td") for tab in tabs for tr in tab.find_all("tr")
                 if len(tr) == 11]
    for party in range(len(par_lines)):
        first_line.append(par_lines[party][1].text)
    return first_line
def make_package(soup, code):
    """Collect one municipality's results as an ordered CSV row.

    :param soup: BeautifulSoup of the municipality results page
    :param code: municipality code extracted from the page URL
    :return package: [code, location, registered, envelopes, valid, votes...]
    """
    # code
    package = [code]
    # location: taken from the "Obec:" heading
    for title in soup.find_all("h3"):
        if title.text[1:6] == "Obec:":
            package.append(title.text[7:].strip())
    # registered voters; "\xa0" is the thousands separator on the page and
    # is stripped by split+join
    tab = soup.find("table")
    lines = [tr for tr in tab.find_all("tr")]
    inventory = [td for td in lines[2]]
    package.append("".join(inventory[7].text.split("\xa0")))
    # envelopes
    package.append("".join(inventory[9].text.split("\xa0")))
    # valid votes
    package.append("".join(inventory[15].text.split("\xa0")))
    # per-party vote counts: rows with exactly 11 cells, count in third cell
    tabs = soup.find_all("table")
    par_lines = [tr.find_all("td") for tab in tabs[1:]
                 for tr in tab.find_all("tr") if len(tr) == 11]
    for party in range(len(par_lines)):
        package.append("".join(par_lines[party][2].text.split("\xa0")))
    return package
def suck_n_chop(web):
    """Fetch *web*, verify the request succeeded, and return its soup.

    :param web: URL to download
    :return BS(resp.text, "html.parser"): parsed HTML document
    """
    resp = requests.get(web)
    # NOTE(review): assert is stripped under `python -O`; an explicit raise
    # would be more robust for runtime validation.
    assert resp.status_code == 200, "Problém se spojením: {}".format(resp.status_code)
    return BS(resp.text, "html.parser")
def save_csv(db, file_name):
    """Write the election rows in *db* to ``<file_name>.csv``.

    :param db: iterable of rows (each an iterable of cell values)
    :param file_name: output file name without the ``.csv`` extension
    """
    target = file_name + ".csv"
    with open(target, "w", newline="") as out:
        csv.writer(out).writerows(db)
def main(CSV_NAME):
    """Run the whole scrape: districts -> municipalities -> data -> CSV.

    Roughly 200 municipalities per 20 s; all ~6300 municipalities take
    about 13 minutes.

    :param CSV_NAME: output file name (``.csv`` is appended by save_csv)
    """
    # fixtures
    # web = "https://volby.cz/pls/ps2017nss"
    # okresy = ["https://volby.cz/pls/ps2017nss/ps32?xjazyk=CZ&xkraj=11&xnumnuts=6203"]
    # obce = ["https://volby.cz/pls/ps2017nss/ps311?xjazyk=CZ&xkraj=11&xobec=582786&xvyber=6202"]
    # db = [["name", "age", "sex"], ["Ondrej", "23", "M"], ["Lucie", "21", "F"]]
    time_1 = time()
    okresy, web = get_districts(INPUT)
    print("Nalezeno okresů:", len(okresy))
    obce = get_municipalities(okresy, web)
    print("Nalezeno obcí:", len(obce))
    print("Zpracovávám. Čekejte prosím...")
    db = extract_data(obce)
    save_csv(db, CSV_NAME)
    print("Úspěšně dokončeno a uloženo.")
    time_2 = time()
    time_total = int(time_2 - time_1)
    print("TOTAL TIME:", time_total, "secs")
if __name__ == "__main__":
    # Expect exactly one argument: the output CSV base name.
    if len(sys.argv) != 2:
        print("\nUSAGE: Web_scraper.py OutputFileName\n")
    else:
        # BUG FIX: the original passed the slice sys.argv[1:2] (a one-element
        # LIST) into main(); save_csv then crashes on `list + ".csv"`.
        # Pass the argument string itself instead.
        main(sys.argv[1])
| StarcoderdataPython |
51821 | from .index import index
from .dashboard import dashboard
from .player_list import player_list
from .player_info import player_info
from .server_statistics import server_statistics | StarcoderdataPython |
4811494 | <reponame>computer-geek64/scripts<filename>src/python/git_backup.py
#!/usr/bin/python3
# git-backup.py v1.4
# <NAME>
# December 29th, 2018
# Import stdlib first so the error handler below can rely on os/sys even when
# a third-party import fails.
import os
import sys

# Import dependencies; on ImportError try to install the missing third-party
# packages and ask the user to rerun the script.
try:
    import git
    import requests
    import json
    from datetime import datetime
except ImportError:
    print("[-] Installing dependencies...")
    # BUG FIX: the pip package providing the `git` module is GitPython (pip
    # has no usable `git` package), and `datetime` is part of the standard
    # library so it must not be pip-installed.
    if os.system("pip install --upgrade requests GitPython; pip3 install --upgrade requests GitPython") == 0:
        print("[+] Dependencies successfully installed. Script restart required.")
    else:
        print("[!] Dependency installation failed. Script restart required.")
    # BUG FIX: `sys` used to be imported inside the failed try-block, so
    # sys.exit() raised NameError here; we also must exit in BOTH cases
    # because the names above were never bound in this process.
    sys.exit(0)
# Script metadata used by the banner, usage text and developer-info option.
name = os.path.split(sys.argv[0])[-1]  # script filename as invoked
version = 1.4
developer = "<NAME>"
developer_info_url = "https://computer-geek64.github.io/info"
rights = "All rights reserved."
def print_banner():
    """Print the program name/version line and the copyright line."""
    print(f"{name} v{version}")
    print(f"Copyright {datetime.now().year} {developer}. {rights}\n")
def print_usage():
    """Print the usage line followed by the tab-formatted option table."""
    option_table = (
        "Option\t\tLong Option\t\tDescription",
        "-h\t\t--help\t\t\tShow this help screen",
        "-b\t\t--no-banner\t\tSuppress banner",
        "-d\t\t--developer\t\tDisplay information about developer",
        "-y\t\t--yes\t\t\tDo not ask for confirmation (batch mode)",
        "-p [path]\t--path [path]\t\tSpecify git backup path (default is current working directory)",
        "-u [username]\t--user [username]\tSpecify user for git backup (default is developer: @computer-geek64)",
        "-P [file]\t--private [file]\tSpecify private repositories in [file]",
    )
    print("Usage: " + name + " [options]\n")
    for row in option_table:
        print(row)
# Default option values, possibly overridden by the command-line flags below.
banner = True
usage = False
developer_info = False
confirm = True
path = os.getcwd()
user = "computer-geek64"
private_repositories = ""

# Hand-rolled argument scan over sys.argv; value-taking flags consume the
# following argument.
# NOTE(review): a value flag given as the last argument raises IndexError
# instead of printing usage — confirm whether that is acceptable.
arg = 1
while arg < len(sys.argv):
    if sys.argv[arg] == "-h" or sys.argv[arg] == "--help":
        usage = True
    elif sys.argv[arg] == "-b" or sys.argv[arg] == "--no-banner":
        banner = False
    elif sys.argv[arg] == "-d" or sys.argv[arg] == "--developer":
        developer_info = True
    elif sys.argv[arg] == "-y" or sys.argv[arg] == "--yes":
        confirm = False
    elif sys.argv[arg] == "-p" or sys.argv[arg] == "--path":
        arg += 1
        path = sys.argv[arg]
        # chdir happens immediately, even if a later flag aborts the run
        os.chdir(path)
    elif sys.argv[arg] == "-u" or sys.argv[arg] == "--user":
        arg += 1
        user = sys.argv[arg]
    elif sys.argv[arg] == "-P" or sys.argv[arg] == "--private":
        arg += 1
        private_repositories = sys.argv[arg]
    else:
        # unknown flag: fall back to printing usage
        usage = True
    arg += 1
# Main flow: banner/usage/info handling, confirmation, then the backup itself.
if banner:
    print_banner()
if usage:
    print_usage()
    sys.exit(0)
if developer_info:
    print(requests.get(developer_info_url).text)
    sys.exit(0)
if confirm:
    if input("Are you sure you want to delete everything in \"" + os.getcwd() + "\"? Y/N >> ").lower() == "y":
        print("[+] Proceed.")
    else:
        print("[!] Abort.")
        sys.exit(0)

# Clean directory
# SECURITY NOTE(review): `rm -f *` on the current working directory is
# destructive and shell-dependent; with -y this runs without confirmation.
os.system("rm -rf *")

# Get repositories JSON response from GitHub API
# NOTE(review): `user` may be "name:token"; embedding it in the URL leaks the
# credential into process listings and logs.
json_response = json.loads(requests.get("https://" + user + "@api.github.com/users/" + user.split(":")[0] + "/repos").text)

# Extract and clone repositories (the clone target is the repo name, i.e.
# the last non-empty path segment of the html_url)
repos = [json_response[i]["html_url"] for i in range(len(json_response))]
for repo in repos:
    git.Repo.clone_from(repo.replace("://", "://" + user + "@"), [x for x in repo.split("/") if x][-1])

# Extract and clone private repositories listed one-per-line in the file
if len(private_repositories) > 0:
    with open(private_repositories, "r") as file:
        lines = file.readlines()
        # redundant: the with-statement already closes the file
        file.close()
    for line in lines:
        git.Repo.clone_from("https://" + user + "@github.com/" + user.split(":")[0] + "/" + line.strip(), line.strip())
29519 | import argparse
import glob
import os
import random
import re
from dataclasses import dataclass
from functools import partial
from math import ceil
from typing import List, Optional
import numpy as np
import torch
from torch.optim.lr_scheduler import ReduceLROnPlateau
from tqdm import tqdm
import util
# Disable tqdm's background monitor thread and fix the bar format for all
# progress bars created via this module-level alias.
tqdm.monitor_interval = 0
tqdm = partial(tqdm, bar_format="{l_bar}{r_bar}")

# Dataset split identifiers used throughout the trainer.
TRAIN = "train"
DEV = "dev"
TEST = "test"
class Optimizer(util.NamedEnum):
    """Supported optimizers, selectable via ``--optimizer``."""
    sgd = "sgd"
    adadelta = "adadelta"
    adam = "adam"
    amsgrad = "amsgrad"
class Scheduler(util.NamedEnum):
    """Supported learning-rate schedulers, selectable via ``--scheduler``."""
    reducewhenstuck = "reducewhenstuck"
    warmupinvsqr = "warmupinvsqr"
def setup_seed(seed):
    """Seed every RNG used here (stdlib, numpy, torch, CUDA) for reproducibility."""
    for seed_fn in (random.seed, np.random.seed, torch.manual_seed):
        seed_fn(seed)
    if torch.cuda.is_available():
        torch.cuda.manual_seed_all(seed)
@dataclass
class Evaluation:
    """A saved checkpoint: its file path, dev loss and evaluation results."""
    filepath: str
    devloss: float
    evaluation_result: Optional[List[util.Eval]]
class BaseTrainer(object):
    """Generic training driver: wires together data, model, optimizer,
    scheduler and evaluation. Subclasses implement the task-specific hooks
    (load_data, build_model, setup_evalutator, evaluate, decode, select_model).
    """

    def __init__(self):
        super().__init__()
        self.parser = argparse.ArgumentParser()
        self.set_args()
        self.params = self.get_params()
        util.maybe_mkdir(self.params.model)
        self.logger = util.get_logger(
            self.params.model + ".log", log_level=self.params.loglevel
        )
        for key, value in vars(self.params).items():
            self.logger.info("command line argument: %s - %r", key, value)
        setup_seed(self.params.seed)
        # The fields below are populated by load_data / build_model /
        # setup_training / setup_scheduler / setup_evalutator before run().
        self.data = None
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.model = None
        self.optimizer = None
        self.min_lr = 0
        self.scheduler = None
        self.evaluator = None
        self.global_steps = 0
        self.last_devloss = float("inf")
        self.models: List[Evaluation] = list()

    def set_args(self):
        """
        Register all command line arguments on ``self.parser``.
        """
        # fmt: off
        parser = self.parser
        parser.add_argument('--seed', default=0, type=int)
        parser.add_argument('--train', required=True, type=str, nargs='+')
        parser.add_argument('--dev', required=True, type=str, nargs='+')
        parser.add_argument('--test', default=None, type=str, nargs='+')
        parser.add_argument('--model', required=True, help='dump model filename')
        parser.add_argument('--load', default='', help='load model and continue training; with `smart`, recover training automatically')
        parser.add_argument('--bs', default=20, type=int, help='training batch size')
        parser.add_argument('--epochs', default=20, type=int, help='maximum training epochs')
        parser.add_argument('--max_steps', default=0, type=int, help='maximum training steps')
        parser.add_argument('--warmup_steps', default=4000, type=int, help='number of warm up steps')
        parser.add_argument('--total_eval', default=-1, type=int, help='total number of evaluation')
        parser.add_argument('--optimizer', default=Optimizer.adam, type=Optimizer, choices=list(Optimizer))
        parser.add_argument('--scheduler', default=Scheduler.reducewhenstuck, type=Scheduler, choices=list(Scheduler))
        parser.add_argument('--lr', default=1e-3, type=float, help='learning rate')
        parser.add_argument('--min_lr', default=1e-5, type=float, help='minimum learning rate')
        parser.add_argument('--momentum', default=0.9, type=float, help='momentum of SGD')
        parser.add_argument('--beta1', default=0.9, type=float, help='beta1 of Adam')
        parser.add_argument('--beta2', default=0.999, type=float, help='beta2 of Adam')
        parser.add_argument('--estop', default=1e-8, type=float, help='early stopping criterion')
        parser.add_argument('--cooldown', default=0, type=int, help='cooldown of `ReduceLROnPlateau`')
        parser.add_argument('--patience', default=0, type=int, help='patience of `ReduceLROnPlateau`')
        parser.add_argument('--discount_factor', default=0.5, type=float, help='discount factor of `ReduceLROnPlateau`')
        parser.add_argument('--max_norm', default=0, type=float, help='gradient clipping max norm')
        parser.add_argument('--gpuid', default=[], nargs='+', type=int, help='choose which GPU to use')
        parser.add_argument('--loglevel', default='info', choices=['info', 'debug'])
        parser.add_argument('--saveall', default=False, action='store_true', help='keep all models')
        parser.add_argument('--shuffle', default=False, action='store_true', help='shuffle the data')
        parser.add_argument('--cleanup_anyway', default=False, action='store_true', help='cleanup anyway')
        # fmt: on

    def get_params(self):
        """Parse and return the command line arguments."""
        return self.parser.parse_args()

    def checklist_before_run(self):
        """Sanity-check that every setup hook was called before training."""
        assert self.data is not None, "call load_data before run"
        assert self.model is not None, "call build_model before run"
        assert self.optimizer is not None, "call setup_training before run"
        assert self.scheduler is not None, "call setup_scheduler before run"
        assert self.evaluator is not None, "call setup_evalutator before run"

    def load_data(self, dataset, train, dev, test):
        """Hook: populate ``self.data`` from the given dataset splits."""
        raise NotImplementedError

    def build_model(self):
        """Hook: construct ``self.model``."""
        raise NotImplementedError

    def load_model(self, model):
        """Load a serialized model from *model*, move it to the device and
        return the epoch number encoded as the filename suffix after '_'."""
        assert self.model is None
        self.logger.info("load model in %s", model)
        self.model = torch.load(model, map_location=self.device)
        self.model = self.model.to(self.device)
        epoch = int(model.split("_")[-1])
        return epoch

    def smart_load_model(self, model_prefix):
        """Recover the latest checkpoint matching ``{model_prefix}.nll*`` by
        parsing loss/eval/epoch fields out of each filename; rebuilds
        ``self.models`` in epoch order and loads the newest checkpoint."""
        assert self.model is None
        models = []
        for model in glob.glob(f"{model_prefix}.nll*"):
            # Filenames look like: <prefix>.nll_<loss>.<desc>_<res>...epoch_<n>
            res = re.findall(r"\w*_\d+\.?\d*", model[len(model_prefix) :])
            loss_ = res[0].split("_")
            evals_ = res[1:-1]
            epoch_ = res[-1].split("_")
            assert loss_[0] == "nll" and epoch_[0] == "epoch"
            loss, epoch = float(loss_[1]), int(epoch_[1])
            evals = []
            for ev in evals_:
                ev = ev.split("_")
                evals.append(util.Eval(ev[0], ev[0], float(ev[1])))
            models.append((epoch, Evaluation(model, loss, evals)))
        self.models = [x[1] for x in sorted(models)]
        return self.load_model(self.models[-1].filepath)

    def setup_training(self):
        """Instantiate optimizer and LR scheduler from ``self.params``."""
        assert self.model is not None
        params = self.params
        if params.optimizer == Optimizer.sgd:
            self.optimizer = torch.optim.SGD(
                self.model.parameters(), params.lr, momentum=params.momentum
            )
        elif params.optimizer == Optimizer.adadelta:
            self.optimizer = torch.optim.Adadelta(self.model.parameters(), params.lr)
        elif params.optimizer == Optimizer.adam:
            self.optimizer = torch.optim.Adam(
                self.model.parameters(), params.lr, betas=(params.beta1, params.beta2)
            )
        elif params.optimizer == Optimizer.amsgrad:
            # AMSGrad is Adam with the amsgrad flag enabled.
            self.optimizer = torch.optim.Adam(
                self.model.parameters(),
                params.lr,
                betas=(params.beta1, params.beta2),
                amsgrad=True,
            )
        else:
            raise ValueError
        self.min_lr = params.min_lr
        if params.scheduler == Scheduler.reducewhenstuck:
            self.scheduler = ReduceLROnPlateau(
                self.optimizer,
                "min",
                patience=params.patience,
                cooldown=params.cooldown,
                factor=params.discount_factor,
                min_lr=params.min_lr,
            )
        elif params.scheduler == Scheduler.warmupinvsqr:
            self.scheduler = util.WarmupInverseSquareRootSchedule(
                self.optimizer, params.warmup_steps
            )
        else:
            raise ValueError

    def save_training(self, model_fp):
        """Persist optimizer + scheduler state to ``{model_fp}.progress``."""
        save_objs = (self.optimizer.state_dict(), self.scheduler.state_dict())
        torch.save(save_objs, f"{model_fp}.progress")

    def load_training(self, model_fp):
        """Restore optimizer + scheduler state saved by :meth:`save_training`,
        warning (rather than failing) if the progress file is missing."""
        assert self.model is not None
        if os.path.isfile(f"{model_fp}.progress"):
            optimizer_state, scheduler_state = torch.load(f"{model_fp}.progress")
            self.optimizer.load_state_dict(optimizer_state)
            self.scheduler.load_state_dict(scheduler_state)
        else:
            self.logger.warning("cannot find optimizer & scheduler file")

    def setup_evalutator(self):
        """Hook: construct ``self.evaluator``. (Name typo kept for API compat.)"""
        raise NotImplementedError

    def get_lr(self):
        """Return the current learning rate of the first param group."""
        if isinstance(self.scheduler, ReduceLROnPlateau):
            # ReduceLROnPlateau exposes no get_last_lr; read it off the optimizer.
            return self.optimizer.param_groups[0]["lr"]
        try:
            return self.scheduler.get_last_lr()[0]
        except AttributeError:
            # Older torch schedulers only provide get_lr().
            return self.scheduler.get_lr()[0]

    def train(self, epoch_idx, batch_size, max_norm):
        """Run one training epoch and return the running-average loss."""
        logger, model = self.logger, self.model
        logger.info("At %d-th epoch with lr %f.", epoch_idx, self.get_lr())
        model.train()
        sampler, nb_batch = self.iterate_batch(TRAIN, batch_size)
        losses, cnt = 0, 0
        for batch in tqdm(sampler(batch_size), total=nb_batch):
            loss = model.get_loss(batch)
            self.optimizer.zero_grad()
            loss.backward()
            if max_norm > 0:
                torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm)
            logger.debug(
                "loss %f with total grad norm %f",
                loss,
                util.grad_norm(model.parameters()),
            )
            self.optimizer.step()
            # Plateau schedulers step once per epoch on dev loss; all others
            # step per batch.
            if not isinstance(self.scheduler, ReduceLROnPlateau):
                self.scheduler.step()
            self.global_steps += 1
            losses += loss.item()
            cnt += 1
        loss = losses / cnt
        self.logger.info(f"Running average train loss is {loss} at epoch {epoch_idx}")
        return loss

    def iterate_batch(self, mode, batch_size):
        """Return ``(batch_sampler, nb_batches)`` for the given split."""
        if mode == TRAIN:
            return (self.data.train_batch_sample, ceil(self.data.nb_train / batch_size))
        elif mode == DEV:
            return (self.data.dev_batch_sample, ceil(self.data.nb_dev / batch_size))
        elif mode == TEST:
            return (self.data.test_batch_sample, ceil(self.data.nb_test / batch_size))
        else:
            raise ValueError(f"wrong mode: {mode}")

    def calc_loss(self, mode, batch_size, epoch_idx) -> float:
        """Average the model loss over one split (model put in eval mode;
        callers wrap this in ``torch.no_grad()``)."""
        self.model.eval()
        sampler, nb_batch = self.iterate_batch(mode, batch_size)
        loss, cnt = 0.0, 0
        for batch in tqdm(sampler(batch_size), total=nb_batch):
            loss += self.model.get_loss(batch).item()
            cnt += 1
        loss = loss / cnt
        self.logger.info(f"Average {mode} loss is {loss} at epoch {epoch_idx}")
        return loss

    def iterate_instance(self, mode):
        """Return ``(instance_sampler, nb_instances)`` for the given split."""
        if mode == TRAIN:
            return self.data.train_sample, self.data.nb_train
        elif mode == DEV:
            return self.data.dev_sample, self.data.nb_dev
        elif mode == TEST:
            return self.data.test_sample, self.data.nb_test
        else:
            raise ValueError(f"wrong mode: {mode}")

    def evaluate(self, mode, epoch_idx, decode_fn) -> List[util.Eval]:
        """Hook: compute task-specific evaluation metrics for a split."""
        raise NotImplementedError

    def decode(self, mode, write_fp, decode_fn):
        """Hook: decode a split and write predictions to *write_fp*."""
        raise NotImplementedError

    def update_lr_and_stop_early(self, epoch_idx, devloss, estop):
        """Step a ReduceLROnPlateau scheduler on *devloss* and decide early
        stopping: stop once the dev-loss improvement drops below *estop*
        while the LR is already pinned at ``self.min_lr``. Non-plateau
        schedulers never trigger early stopping here."""
        stop_early = True
        if isinstance(self.scheduler, ReduceLROnPlateau):
            prev_lr = self.get_lr()
            self.scheduler.step(devloss)
            curr_lr = self.get_lr()
            if (
                self.last_devloss - devloss
            ) < estop and prev_lr == curr_lr == self.min_lr:
                self.logger.info(
                    "Early stopping triggered with epoch %d (previous dev loss: %f, current: %f)",
                    epoch_idx,
                    self.last_devloss,
                    devloss,
                )
                stop_status = stop_early
            else:
                stop_status = not stop_early
            self.last_devloss = devloss
        else:
            stop_status = not stop_early
        return stop_status

    def save_model(
        self, epoch_idx, devloss: float, eval_res: List[util.Eval], model_fp
    ):
        """Serialize the model, encoding devloss/eval results/epoch into the
        filename (the format :meth:`smart_load_model` parses back)."""
        eval_tag = "".join(["{}_{}.".format(e.desc, e.res) for e in eval_res])
        fp = f"{model_fp}.nll_{devloss:.4f}.{eval_tag}epoch_{epoch_idx}"
        torch.save(self.model, fp)
        self.models.append(Evaluation(fp, devloss, eval_res))

    def select_model(self):
        """Hook: return ``(best_filepath, filepaths_to_keep)``."""
        raise NotImplementedError

    def reload_and_test(self, model_fp, best_fp, bs, decode_fn):
        """Reload the best checkpoint, then decode and evaluate the dev set
        (and the test set, when one was provided)."""
        self.model = None
        self.logger.info(f"loading {best_fp} for testing")
        self.load_model(best_fp)
        self.calc_loss(DEV, bs, -1)
        self.logger.info("decoding dev set")
        self.decode(DEV, f"{model_fp}.decode", decode_fn)
        results = self.evaluate(DEV, -1, decode_fn)
        if results:
            results = " ".join([f"{r.desc} {r.res}" for r in results])
            self.logger.info(f'DEV {model_fp.split("/")[-1]} {results}')
        if self.data.test_file is not None:
            self.calc_loss(TEST, bs, -1)
            self.logger.info("decoding test set")
            self.decode(TEST, f"{model_fp}.decode", decode_fn)
            results = self.evaluate(TEST, -1, decode_fn)
            if results:
                results = " ".join([f"{r.desc} {r.res}" for r in results])
                self.logger.info(f'TEST {model_fp.split("/")[-1]} {results}')

    def cleanup(self, saveall, save_fps, model_fp):
        """Delete all checkpoints not listed in *save_fps* (unless *saveall*)
        plus the optimizer/scheduler progress file."""
        if not saveall:
            for model in self.models:
                if model.filepath in save_fps:
                    continue
                os.remove(model.filepath)
        os.remove(f"{model_fp}.progress")

    def run(self, start_epoch, decode_fn=None):
        """
        Main training loop: train up to max_steps/epochs, periodically
        evaluate on dev with possible early stopping, then reload the best
        model, test it and clean up checkpoints.
        """
        self.checklist_before_run()
        finish = False
        params = self.params
        steps_per_epoch = ceil(self.data.nb_train / params.bs)
        if params.max_steps > 0:
            max_epochs = ceil(params.max_steps / steps_per_epoch)
        else:
            max_epochs = params.epochs
            params.max_steps = max_epochs * steps_per_epoch
        self.logger.info(
            f"maximum training {params.max_steps} steps ({max_epochs} epochs)"
        )
        if params.total_eval > 0:
            eval_every = max(max_epochs // params.total_eval, 1)
        else:
            eval_every = 1
        self.logger.info(f"evaluate every {eval_every} epochs")
        for epoch_idx in range(start_epoch, max_epochs):
            self.train(epoch_idx, params.bs, params.max_norm)
            # Skip evaluation except every `eval_every` epochs (and the last).
            if not (
                epoch_idx
                and (epoch_idx % eval_every == 0 or epoch_idx + 1 == max_epochs)
            ):
                continue
            with torch.no_grad():
                devloss = self.calc_loss(DEV, params.bs, epoch_idx)
                eval_res = self.evaluate(DEV, epoch_idx, decode_fn)
            if self.update_lr_and_stop_early(epoch_idx, devloss, params.estop):
                finish = True
                break
            self.save_model(epoch_idx, devloss, eval_res, params.model)
            self.save_training(params.model)
        if finish or params.cleanup_anyway:
            best_fp, save_fps = self.select_model()
            with torch.no_grad():
                self.reload_and_test(params.model, best_fp, params.bs, decode_fn)
            self.cleanup(params.saveall, save_fps, params.model)
| StarcoderdataPython |
1600147 | # -*- encoding: utf-8 -*-
from sympy import symbols, simplify

# Symbolic sanity check that a set of two-step transition probabilities
# (parameterised by p and n) remain properly normalised, then numeric
# evaluation at a few values of p with n = 4.
n = symbols('n', integer=True)
p = symbols('p')
# Step-1 probabilities.
a1_a1 = 1 - p + p/(n+1)
a1na1 = p - p/(n+1)
# Step-2 probabilities, conditioned on the step-1 outcome.
a2_a2__a1_a1 = 1 - p + p/(n+1)
a2_a2__a1na1 = p / (n+1)
a2na2__a1_a1 = p - p/(n+1)
a2na2__a1na1 = 1 - p/(n+1)
# Each conditional pair must sum to 1.
simplify(a2_a2__a1_a1 + a2na2__a1_a1)
# 1
simplify(a2_a2__a1na1 + a2na2__a1na1)
# 1
simplify(a1_a1 + a1na1)
# 1
# Marginal step-2 probabilities by total probability.
a2_a2 = a2_a2__a1_a1 * a1_a1 + a2_a2__a1na1 * a1na1
a2na2 = a2na2__a1_a1 * a1_a1 + a2na2__a1na1 * a1na1
simplify(a2_a2 + a2na2)
# 1
a2_a2.evalf(subs={p: 0.1, n: 4})
# 0.848
a2_a2.evalf(subs={p: 0.3, n: 4})
# 0.592
# >>> simplify(a2_a2)
# (n*p**2 - 2*n*p + n + 1) / (n + 1)
for pval in [0.01, 0.1, 0.3]:
    print("{} & {:.3f} & & {:.3f} & \\\\".format(pval, a1_a1.evalf(subs={p: pval, n: 4}),
                                                 a2_a2.evalf(subs={p: pval, n: 4}),))
| StarcoderdataPython |
3258099 | # -*- coding: utf-8 -*-
import collections
import itertools
class LineNumbering(object):
    """A class responsible for managing line numbers while disassembling a
    code object, driven by its ``co_lnotab`` table."""

    # One decoded co_lnotab entry: (bytecode increment, line increment).
    Step = collections.namedtuple('Increment', 'bytecode_step, line_step')

    def __init__(self, code_object):
        """
        Initializes the LineNumbering manager.
        Args:
            code_object (code): The code object whose line numbering to manage.
        """
        # co_lnotab is a bytes object of (bytecode increment, line increment)
        # pairs, relative to (0, co_firstlineno). We decode it into Step
        # namedtuples and walk it lazily as the disassembler advances.
        # NOTE(review): co_lnotab is deprecated in newer CPython versions in
        # favour of co_lines() -- confirm the targeted interpreter version.
        steps = list(bytearray(code_object.co_lnotab))
        steps = zip(steps[::2], steps[1::2])
        self._iterator = itertools.starmap(LineNumbering.Step, steps)
        # The first pair of steps
        self._steps = next(self._iterator)
        # The current (absolute) line number
        self.line_number = code_object.co_firstlineno
        self.line_number += self._steps.line_step
        # Bytecode offset at which the next source line starts
        # (tells at_new_line() when step() should be called).
        self.next_address = self._steps.bytecode_step
        # Whether or not we've exhausted the iterator
        self.is_exhausted = False

    def step(self):
        """
        Increments the line numbering iterator by one step.
        Raises:
            StopIteration if an attempt to step an exhausted iterator is made.
        """
        if self.is_exhausted:
            raise StopIteration('Line numbering exhausted')
        # Advance the absolute line number by the pending line increment.
        self.line_number += self._steps.line_step
        # Try to get a look at where the next instruction starts
        # so we know when to increment the iterator next
        self._try_step()

    def at_new_line(self, program_counter):
        """
        Tests if a given program counter is at the next line address.
        Args:
            program_counter (int): The current program counter of the
            disassembler.
        Returns:
            True if the program counter has reached the next line, else false.
        """
        if self.is_exhausted:
            return False
        return program_counter >= self.next_address

    def _try_step(self):
        """
        Attempts to increment the iterator to fetch the next address.
        This method is necessary to get a look at the next increment value, so
        that we know when the program counter has reached a new line in the
        original code.
        """
        try:
            self._steps = next(self._iterator)
            self.next_address += self._steps.bytecode_step
        except StopIteration:
            # This just means there's no lines left to disassemble
            self.is_exhausted = True
1750041 | <gh_stars>10-100
# Package init: re-export the shared logger and declare the public API.
from .logging import logs

__all__ = ['logs', 'models', 'utils']
| StarcoderdataPython |
187161 | <gh_stars>0
# Code from Chapter 6 of Machine Learning: An Algorithmic Perspective (2nd Edition)
# by <NAME> (http://stephenmonika.net)
# You are free to use, change, or redistribute the code in any way you wish for
# non-commercial purposes, but please maintain the name of the original author.
# This code comes with no warranty of any kind.
# <NAME>, 2008, 2014
# Various dimensionality reductions running on the Iris dataset
import pylab as pl
import numpy as np

# Load the preprocessed Iris data: 4 feature columns + 1 label column.
iris = np.loadtxt('../3 MLP/iris_proc.data', delimiter=',')
# Centre the features, then scale them into [-1, 1] by the per-column extreme.
iris[:, :4] = iris[:, :4] - iris[:, :4].mean(axis=0)
imax = np.concatenate((iris.max(axis=0) * np.ones((1, 5)), iris.min(axis=0) * np.ones((1, 5))), axis=0).max(axis=0)
iris[:, :4] = iris[:, :4] / imax[:4]
labels = iris[:, 4:]
iris = iris[:, :4]
# Shuffle instances (features and labels with the same permutation).
order = list(range(np.shape(iris)[0]))
np.random.shuffle(order)
iris = iris[order, :]
labels = labels[order, 0]
# Index sets for the three classes, reused for plotting.
w0 = np.where(labels == 0)
w1 = np.where(labels == 1)
w2 = np.where(labels == 2)
# --- LDA projection to 2D ---------------------------------------------------
import lda
newData, w = lda.lda(iris, labels, 2)
print(np.shape(newData))
# Figure 1: the first two raw (normalised) features.
pl.plot(iris[w0, 0], iris[w0, 1], 'ok')
pl.plot(iris[w1, 0], iris[w1, 1], '^k')
pl.plot(iris[w2, 0], iris[w2, 1], 'vk')
pl.axis([-1.5, 1.8, -1.5, 1.8])
pl.axis('off')
# Figure 2: the LDA projection.
pl.figure(2)
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis([-1.5, 1.8, -1.5, 1.8])
pl.axis('off')
# --- PCA --------------------------------------------------------------------
import pca
x, y, evals, evecs = pca.pca(iris, 2)
pl.figure(3)
pl.plot(y[w0, 0], y[w0, 1], 'ok')
pl.plot(y[w1, 0], y[w1, 1], '^k')
pl.plot(y[w2, 0], y[w2, 1], 'vk')
pl.axis('off')
# --- Kernel PCA (Gaussian kernel) -------------------------------------------
import kernelpca
newData = kernelpca.kernelpca(iris, 'gaussian', 2)
pl.figure(4)
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis('off')
# --- Factor analysis --------------------------------------------------------
import factoranalysis
newData = factoranalysis.factoranalysis(iris, 2)
# print newData
pl.figure(5)
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis('off')
# --- Locally linear embedding (12 neighbours) -------------------------------
import lle
print(np.shape(iris))
a, b, newData = lle.lle(iris, 2, 12)
print(np.shape(newData))
print(newData[w0, :])
print("---")
print(newData[w1, :])
print("---")
print(newData[w2, :])
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis('off')
# --- Isomap (may drop disconnected points, so labels are re-derived) --------
import isomap
print(labels)
newData, newLabels = isomap.isomap(iris, 2, 100)
print(np.shape(newData))
print(newLabels)
w0 = np.where(newLabels == 0)
w1 = np.where(newLabels == 1)
w2 = np.where(newLabels == 2)
pl.plot(newData[w0, 0], newData[w0, 1], 'ok')
pl.plot(newData[w1, 0], newData[w1, 1], '^k')
pl.plot(newData[w2, 0], newData[w2, 1], 'vk')
pl.axis('off')
print("Done")
pl.show()
| StarcoderdataPython |
4800471 | <filename>.silver-system-programs/disk-info.py
import os
import subprocess
class color():
    """ANSI escape sequences used to colourise terminal output."""
    purple = '\033[95m'
    cyan = '\033[96m'
    darkcyan = '\033[36m'
    blue = '\033[94m'
    blued = '\033[92m'  # NOTE(review): '92m' is actually green -- name looks wrong, verify
    yellow = '\033[93m'
    red = '\033[91m'
    silver = '\033[3;30;47m'
    orange = '\033[31;43m'
    bold = '\033[1m'
    green = '\033[32m'
    underline = '\033[4m'
    end = '\033[0m'  # reset all attributes
# Interactive disk-usage menu: keep showing `df` output in the chosen unit
# until the user picks Exit or declines the continue prompt.
ch = 'Y'
while ch == 'Y':
    print(color.yellow + 'In which format you want information' + color.end)
    print(color.red + '0.\t' + color.green + 'Exit' + color.end)
    print(color.red + '1.\t' + color.green + 'Megabyte' + color.end)
    print(color.red + '2.\t' + color.green + 'Bytes' + color.end)
    y = int(input(color.purple + 'Enter your choice:\t' + color.end))
    if y == 0:
        print(color.cyan + 'Exiting................' + color.end)
        # Bug fix: originally the loop still asked "continue?" after Exit.
        break
    elif y == 1:
        os.system('df -m')
    elif y == 2:
        os.system('df')
    # Bug fix: normalise the answer so lowercase "y" also continues,
    # as the "(Y/n)" prompt implies.
    ch = input(color.blue + color.bold + 'Do you want to continue seeing disk informations in different formats(Y/n):\t' + color.end).strip().upper()
| StarcoderdataPython |
1638241 | import sys
import os
from gittra.script import parse_back
# Thin wrapper around gittra.script.parse_back.
def gittra_merge(initial_dir, final_dir):
    """Run the merge-back step from *initial_dir* into *final_dir* by
    delegating to :func:`gittra.script.parse_back`."""
    parse_back(initial_dir, final_dir)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.