id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1706484 | <reponame>abhiWriteCode/Num-Eng-Machine-Translation
from bs4 import BeautifulSoup
from requests import get
import random
import asyncio
import pandas as pd
from time import time
random.seed(12345)


async def convert2text(number):
    """Convert *number* to its lowercase English word form via calculatorsoup.com.

    Returns the text of the page element with id="answer".

    NOTE: requests.get is a blocking call; calling it directly inside a
    coroutine would stall the event loop and serialize all requests, so it
    is offloaded to the default thread-pool executor, letting the gathered
    requests in main() actually overlap.
    """
    url = ('https://www.calculatorsoup.com/calculators/conversions/numberstowords.php?'
           f'number={number}&format=words&letter_case=lowercase&action=solve')
    loop = asyncio.get_event_loop()
    html_body = await loop.run_in_executor(None, get, url)
    soup = BeautifulSoup(html_body.text, features="lxml")
    # The converted words sit in the element with id="answer".
    return soup.find(id="answer").text
async def main(numbers):
    """Fetch the word form of every number concurrently; return a DataFrame.

    Columns: 'number' (the input) and 'text' (its English words).
    """
    async def fetch_pair(number):
        # Pair each number with its textual representation.
        return number, await convert2text(number)

    pairs = await asyncio.gather(*(fetch_pair(n) for n in numbers))
    return pd.DataFrame(pairs, columns=['number', 'text'])
if __name__ == '__main__':
    START_TIME = time()

    # Sample SAMPLE_PERCENTAGE percent of the half-open range [MIN, MAX).
    MIN = 500_000
    MAX = MIN + 500_000
    SAMPLE_PERCENTAGE = 1
    SAMPLE_SIZE = ((MAX - MIN) * SAMPLE_PERCENTAGE) // 100
    print(MIN, MAX, SAMPLE_SIZE)

    numbers = random.sample(range(MIN, MAX), k=SAMPLE_SIZE)

    # asyncio.run creates and tears down a fresh event loop;
    # get_event_loop().run_until_complete is deprecated for scripts.
    df = asyncio.run(main(numbers))
    df.to_csv(f'data/num_text_pair - {MIN} - {MAX} - {SAMPLE_PERCENTAGE}.csv', index=False)

    print(f'required time: {time() - START_TIME:.2f} sec')
85877 | #
# Copyright 2022 Autodesk
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import maya.cmds as cmds
# These names should not be localized as Usd only accepts [a-z,A-Z] as valid characters.
kDefaultMayaReferencePrimName = 'MayaReference1'
kDefaultVariantSetName = 'Representation'
kDefaultVariantName = 'MayaReference'


def defaultMayaReferencePrimName():
    """Return the default prim name used for a Maya reference."""
    return kDefaultMayaReferencePrimName


def defaultVariantSetName():
    """Return the default variant-set name for Maya references."""
    return kDefaultVariantSetName


def defaultVariantName():
    """Return the default variant name for Maya references."""
    return kDefaultVariantName
class SetParentContext():
    '''Simple context helper to go up one parent level when exiting.'''

    def __init__(self, parent):
        # The UI parent is set immediately at construction time;
        # __exit__ restores the previous level with setParent('..').
        cmds.setParent(parent)

    def __enter__(self):
        pass

    def __exit__(self, exc_type, exc_value, exc_tb):
        cmds.setParent('..')
def pushOptionsUITemplate():
    '''Standardize the look of the options UI.

    Python translation of fileOptions.mel:pushOptionsUITemplate(),
    which is not a global proc.
    '''
    template = 'optionsTemplate'
    if not cmds.uiTemplate(template, exists=True):
        # First use: create the template and register default styling
        # for frame and column layouts.
        cmds.uiTemplate(template)
        cmds.frameLayout(defineTemplate=template,
                         collapsable=True,
                         collapse=False,
                         labelVisible=True,
                         labelIndent=5,
                         marginWidth=5,
                         marginHeight=5)
        cmds.columnLayout(defineTemplate=template,
                          adjustableColumn=True)
    cmds.setUITemplate(template, pushTemplate=True)
| StarcoderdataPython |
119356 | from .guitar_spec import GuitarSpec, GuitarType, Wood, Builder
class Guitar:
    """An inventory record pairing a serial number and price with a spec."""

    # Declared attributes; all are assigned in __init__.
    serial_number: str
    price: float
    spec: GuitarSpec

    def __init__(self, serial_number: str, price: float, spec: GuitarSpec) -> None:
        """Store the guitar's serial number, price and specification."""
        self.serial_number = serial_number
        self.price = price
        self.spec = spec
# def main():
# g0 = Guitar()
# g = Guitar("001", 200.56, GuitarSpec("Zen", "V12", GuitarType.Electric, Wood.Oak, Wood.Olive))
| StarcoderdataPython |
1641938 | import logging
import os
import sys
logger = logging.getLogger(__name__)

from bert import constants, remote_webservice

# Announce startup configuration before attempting to load anything.
logger.info(f'Starting service[{constants.SERVICE_NAME}] Daemon. Debug[{constants.DEBUG}]')
logger.info(f'Loading Service Module[{constants.SERVICE_MODULE}]')

# SERVICE_MODULE must be provided via the environment; fail fast otherwise.
if constants.SERVICE_MODULE is None:
    raise NotImplementedError(f'Missing ENVVar[SERVICE_MODULE]')

remote_webservice.load_service_module(constants.SERVICE_MODULE)
MIDDLEWARE = remote_webservice.setup_service()
| StarcoderdataPython |
4800810 | <filename>codes/Baseline Model Example/scoring/matching.py
"""
Copyright 2018 Defense Innovation Unit Experimental
All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from collections import defaultdict
from rectangle import Rectangle
import numpy as np
class Matching(object):
    """Greedy IoU-based matching between detections and groundtruth boxes."""

    def __init__(self, groundtruth_rects, rects):
        """Constructs a Matching instance.

        Args:
            groundtruth_rects: a list of groundtruth rectangles.
            rects: a list of rectangles to be matched against the groundtruth_rects.

        Raises:
            ValueError: if any item inside the groundtruth_rects or rects are not
                Rectangle type.
        """
        for rect in groundtruth_rects:
            if not isinstance(rect, Rectangle):
                raise ValueError('Invalid instance type: should be Rectangle.')
        for rect in rects:
            if not isinstance(rect, Rectangle):
                raise ValueError('Invalid instance type: should be Rectangle.')
        self.groundtruth_rects_ = groundtruth_rects
        self.rects_ = rects
        # Precompute the full (n detections) x (m groundtruths) IoU matrix once.
        self._compute_iou_from_rectangle_pairs()

    def _compute_iou_from_rectangle_pairs(self):
        """Computes the iou scores between all pairs of rectangles."""
        # try to extract a matrix nx4 from rects
        m = len(self.groundtruth_rects_)
        n = len(self.rects_)
        self.n = n
        self.m = m
        self.iou_rectangle_pair_indices_ = defaultdict(list)
        if not(n == 0 or m == 0):
            # One row per rectangle; given the width/height arithmetic below,
            # columns are assumed (xmin, ymin, xmax, ymax) — TODO confirm
            # against the Rectangle.coords definition.
            mat2 = np.array([j.coords for j in self.groundtruth_rects_])
            mat1 = np.array([j.coords for j in self.rects_])
            # i,j axes correspond to #boxes, #coords per rect
            # compute the areas
            w1 = mat1[:, 2] - mat1[:, 0]
            w2 = mat2[:, 2] - mat2[:, 0]
            h1 = mat1[:, 3] - mat1[:, 1]
            h2 = mat2[:, 3] - mat2[:, 1]
            a1 = np.multiply(h1, w1)
            a2 = np.multiply(h2, w2)
            # cartesian (module-level helper) pairs every detection value with
            # every groundtruth value, giving an (n, m, 2) stack per quantity.
            w_h_matrix = cartesian([a1, a2]).reshape((n, m, 2))
            a_matrix = w_h_matrix.sum(axis=2)
            # now calculate the intersection rectangle
            i_xmin = cartesian([mat1[:, 0], mat2[:, 0]]).reshape((n, m, 2))
            i_xmax = cartesian([mat1[:, 2], mat2[:, 2]]).reshape((n, m, 2))
            i_ymin = cartesian([mat1[:, 1], mat2[:, 1]]).reshape((n, m, 2))
            i_ymax = cartesian([mat1[:, 3], mat2[:, 3]]).reshape((n, m, 2))
            i_w = np.min(i_xmax, axis=2) - np.max(i_xmin, axis=2)
            i_h = np.min(i_ymax, axis=2) - np.max(i_ymin, axis=2)
            # Negative extents mean the boxes do not overlap.
            i_w[i_w < 0] = 0
            i_h[i_h < 0] = 0
            i_a_matrix = np.multiply(i_w, i_h)
            # IoU = intersection / union, union = a1 + a2 - intersection.
            # NOTE(review): if both boxes have zero area this is 0/0 -> nan.
            iou_matrix = np.divide(i_a_matrix, (a_matrix - i_a_matrix))
            self.iou_matrix = iou_matrix
        else:
            self.iou_matrix = np.zeros((n, m))

    def greedy_match(self, iou_threshold):
        # Returns (rects_matched, gt_rects_matched): per-rectangle boolean
        # flags marking which detections/groundtruths found a partner.
        gt_rects_matched = [False for gt_index in range(self.m)]
        rects_matched = [False for r_index in range(self.n)]
        if self.n == 0:
            return [], []
        elif self.m == 0:
            return rects_matched, []
        # Greedily pair each detection with its best-IoU groundtruth,
        # first come first served; pairs below the threshold are skipped.
        for i, gt_index in enumerate(np.argmax(self.iou_matrix, axis=1)):
            if self.iou_matrix[i, gt_index] >= iou_threshold:
                if gt_rects_matched[gt_index] is False and rects_matched[i] is False:
                    rects_matched[i] = True
                    gt_rects_matched[gt_index] = True
        return rects_matched, gt_rects_matched
def cartesian(arrays, out=None):
    """Generate a cartesian product of input arrays.

    Parameters
    ----------
    arrays : list of array-like
        1-D arrays to form the cartesian product of.
    out : ndarray
        Array to place the cartesian product in.

    Returns
    -------
    out : ndarray
        2-D array of shape (M, len(arrays)) containing cartesian products
        formed of input arrays.
    """
    arrays = [np.asarray(a) for a in arrays]
    dtype = arrays[0].dtype
    # np.indices enumerates every coordinate combination; flattening and
    # transposing yields one row of per-array indices per product element.
    index_grid = np.indices(tuple(len(a) for a in arrays))
    index_rows = index_grid.reshape(len(arrays), -1).T
    if out is None:
        out = np.empty_like(index_rows, dtype=dtype)
    for col, arr in enumerate(arrays):
        out[:, col] = arr[index_rows[:, col]]
    return out
1706536 | """
Identify the fewest combined steps the wires
must take to reach an intersection
"""
from part_01_solution import (
positions,
crossed_paths,
ORIGIN
)
# Drop the shared origin from both wires so it is not counted as a step.
for wire in positions:
    positions[wire].remove(ORIGIN)

# Combined step count for every intersection: a point's index in a wire's
# position list equals the number of steps that wire took to reach it.
steps = {
    "path": list(crossed_paths),
    "steps": [positions["wire_a"].index(p) + positions["wire_b"].index(p)
              for p in crossed_paths],
}

# Report the cheapest intersection.
print(f"""
The min. number of steps to reach an intersection is {min(steps["steps"])}.
""")
| StarcoderdataPython |
1760376 | from _skelet_functions import *
def worst_case_strategy(guesses, answer, possibilities=None):
    """Play one round of the worst-case strategy.

    First round: fixed opening guess from the full combination list.
    Later rounds: prune candidates inconsistent with past feedback, then
    pick the candidate selected by calc_worst_guess.

    Returns (guess, feedback, remaining_answers).
    """
    if len(guesses) < 1:
        remaining_answers = create_list_of_combinations(COMBINATIONS, 4)
        guess = remaining_answers[7]
    else:
        remaining_answers = remove_impossible_guesses(guesses, possibilities)
        guess = remaining_answers[calc_worst_guess(guesses, remaining_answers)]
    feedback = feedback_calculate(answer, guess)
    return guess, feedback, remaining_answers
def calc_worst_guess(guesses, possibilities):
    """Return the index of a possibility producing the worst-case feedback.

    Histograms, over the remaining possibilities, the feedback each would
    give to the last guess; the most frequent feedback is the worst case,
    and the index of the first possibility producing it is returned
    (0 if, unexpectedly, none matches).

    :param guesses: list of dicts, query under 'Q' and its answer under 'A'
    :param possibilities: remaining candidate answers
    :return: index into possibilities
    """
    last_guess = guesses[-1]['Q']

    # Count occurrences of each feedback value (stringified so it is
    # hashable) — dict.get replaces the original try/except KeyError.
    occurrences = {}
    for possible in possibilities:
        key = str(feedback_calculate(last_guess, possible))
        occurrences[key] = occurrences.get(key, 0) + 1

    worst_case_feedback = max(occurrences, key=occurrences.get)
    for i, possible in enumerate(possibilities):
        if worst_case_feedback == str(feedback_calculate(last_guess, possible)):
            return i
    return 0
def remove_impossible_guesses(guesses, possibilities):
    """Keep only possibilities consistent with the last guess's feedback.

    A candidate survives when it would have produced exactly the observed
    feedback and is not the guess itself.

    :param guesses: list of dicts, query under 'Q' and its answer under 'A'
    :param possibilities: current candidate answers
    :return: the filtered list of candidates
    """
    last_guess = guesses[-1]['Q']
    feedback = guesses[-1]['A']
    return [possible for possible in possibilities
            if feedback == feedback_calculate(last_guess, possible)
            and last_guess != possible]
| StarcoderdataPython |
4812440 | import json
import time, datetime
import csv
import os
import preProcess
import dataVis
from pandas import DataFrame
from pandas import TimeGrouper
import pandas as pd
from matplotlib import pyplot
def readWholeCSV(docName):
    """Return the list of data-file names inside dataset/<docName>.

    Side effect: changes the process working directory to that folder
    (downstream readers open the files by bare name).
    """
    # Folder to concatenate; note: absolute, machine-specific path.
    Folder_Path = r'/Users/siqiyaoyao/git/python3/fnirs/fnirsAnalysis/dataset/' + docName
    os.chdir(Folder_Path)
    file_list = os.listdir()
    # Fix: only drop the macOS metadata file when it is actually present;
    # list.remove raises ValueError otherwise.
    if '.DS_Store' in file_list:
        file_list.remove('.DS_Store')
    print('doc list:', file_list)
    return file_list
def getWholeParticipents(fileList):
    """Load every participant file.

    Returns (allGroupData, allDataSet): per-file grouped series and raw data.
    """
    allGroupData = []
    allDataSet = []
    for name in fileList:
        series_groups, data_set = dataVis.readDataFromcsv(name)
        allGroupData.append(series_groups)
        allDataSet.append(data_set)
    return allGroupData, allDataSet
def processPergroup(allSets, allParicipants):
    """Split every participant's data into the 8 label groups.

    Returns eight lists (one per group); element i of each list holds
    participant i's data for that group.  Each group entry's layout:
    [0] group index, [1] time, [2:19] channels.
    """
    groups = [[] for _ in range(8)]
    for i in range(len(allParicipants)):
        index_list = dataVis.findGroupIndex(allParicipants[i].label)
        group_label = dataVis.groupData(index_list, allSets[i])
        for k in range(8):
            groups[k].append(group_label[k])
    return tuple(groups)
def normalizeData_devide(groups):
    """Work-in-progress normalisation: currently only prints group stats.

    groups: 8 label groups; each entry's layout is [0] group index,
    [1] time, [2:19] channels.
    """
    labelArr = []  # NOTE(review): never filled — presumably a stub for later work
    for index, data in enumerate(groups):
        n = len(data)
        # groupIndex is computed but unused beyond the print below.
        groupIndex = data[0].mean()
        print(n, data[0].mean())
# def normalizeData_score(data):
def plotPerGroup(group):
    """Plot each of the 18 channels across all participants in one group.

    For every channel (columns 2..19 of a participant's group data) a
    DataFrame is built whose columns are the participants, then each
    channel is drawn in its own figure.  Replaces the original 18
    copy-pasted labelN DataFrames with a loop.
    """
    NUM_CHANNELS = 18
    channel_frames = [DataFrame() for _ in range(NUM_CHANNELS)]

    for index, g in enumerate(group):
        for ch in range(NUM_CHANNELS):
            # Channel data lives in columns 2..19 of each participant entry.
            channel_frames[ch][index] = g[ch + 2]

    for frame in channel_frames:
        frame.plot(subplots=False, legend=False)

    pyplot.show()
def main():
    """Load all participant files from 'finaldata' and split them by group."""
    print('test')
    file_list = readWholeCSV('finaldata')
    all_paricipants, all_sets = getWholeParticipents(file_list)
    g1, g2, g3, g4, g5, g6, g7, g8 = processPergroup(all_sets, all_paricipants)
    print(len(all_paricipants))


if __name__ == "__main__":
    main()
90230 | <gh_stars>0
"""Tabular QL agent"""
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import framework
import utils
DEBUG = False

# Hyper-parameters for tabular Q-learning on the text game.
GAMMA = 0.5  # discounted factor
TRAINING_EP = 0.5  # epsilon-greedy parameter for training
TESTING_EP = 0.05  # epsilon-greedy parameter for testing
NUM_RUNS = 10
NUM_EPOCHS = 200
NUM_EPIS_TRAIN = 25  # number of episodes for training at each epoch
NUM_EPIS_TEST = 50  # number of episodes for testing
ALPHA = 0.1  # learning rate for training

# Command space: every command is an (action, object) pair.
ACTIONS = framework.get_actions()
OBJECTS = framework.get_objects()
NUM_ACTIONS = len(ACTIONS)
NUM_OBJECTS = len(OBJECTS)
# pragma: coderesponse template
def epsilon_greedy(state_1, state_2, q_func, epsilon):
    """Returns an action selected by an epsilon-Greedy exploration policy

    Args:
        state_1, state_2 (int, int): two indices describing the current state
        q_func (np.ndarray): current Q-function
        epsilon (float): the probability of choosing a random command

    Returns:
        (int, int): the indices describing the action/object to take
    """
    if np.random.rand() <= epsilon:
        # Explore: pick a uniformly random command.
        action_index = np.random.randint(NUM_ACTIONS)
        object_index = np.random.randint(NUM_OBJECTS)
    else:
        # Exploit: pick the (action, object) pair holding the maximal Q-value.
        # Fix: the original combined independent argmaxes of the row maxima
        # and the column maxima, which can address a NON-maximal cell when
        # ties occur (e.g. st = [[0, 1], [1, 0]] -> (0, 0) with value 0).
        # unravel_index over the joint argmax always addresses one maximal cell.
        st = q_func[state_1, state_2, :, :]
        action_index, object_index = np.unravel_index(np.argmax(st), st.shape)
    return (action_index, object_index)
# pragma: coderesponse end
# pragma: coderesponse template
def tabular_q_learning(q_func, current_state_1, current_state_2, action_index,
                       object_index, reward, next_state_1, next_state_2,
                       terminal):
    """Update q_func in place for a single observed transition.

    Args:
        q_func (np.ndarray): current Q-function
        current_state_1, current_state_2 (int, int): indices of the current state
        action_index (int): index of the current action
        object_index (int): index of the current object
        reward (float): immediate reward for playing the current command
        next_state_1, next_state_2 (int, int): indices of the next state
        terminal (bool): True if this episode is over

    Returns:
        None
    """
    # A terminal transition has no successor, so its future value is zero;
    # otherwise bootstrap from the best command available in the next state.
    if terminal:
        target = reward
    else:
        best_next = q_func[next_state_1, next_state_2].max(axis=1).max(axis=0)
        target = reward + GAMMA * best_next

    old_value = q_func[current_state_1, current_state_2, action_index, object_index]
    q_func[current_state_1, current_state_2, action_index, object_index] = (
        (1 - ALPHA) * old_value + ALPHA * target)
    return None  # This function shouldn't return anything
# pragma: coderesponse end
# pragma: coderesponse template
def run_episode(for_training):
    """Runs one episode

    If for training, update Q function
    If for testing, computes and return cumulative discounted reward

    Args:
        for_training (bool): True if for training

    Returns:
        None when training, otherwise the episode's discounted reward (float).
    """
    epsilon = TRAINING_EP if for_training else TESTING_EP
    epi_reward = 0

    (current_room_desc, current_quest_desc, terminal) = framework.newGame()
    t = 0
    while not terminal:
        current_state_1 = dict_room_desc[current_room_desc]
        current_state_2 = dict_quest_desc[current_quest_desc]

        # Choose the next command (epsilon already reflects train/test mode).
        action_index, object_index = epsilon_greedy(
            state_1=current_state_1,
            state_2=current_state_2,
            q_func=q_func,
            epsilon=epsilon)

        next_room_desc, next_quest_desc, reward, terminal = framework.step_game(
            current_room_desc,
            current_quest_desc,
            action_index,
            object_index)

        if for_training:
            # Fix 1: only update the Q-function while training, so that
            # evaluation episodes do not alter the learned policy.
            # Fix 2: pass the *immediate* reward — the Bellman backup expects
            # R(s, c), not the running discounted return epi_reward.
            next_state_1 = dict_room_desc[next_room_desc]
            next_state_2 = dict_quest_desc[next_quest_desc]
            tabular_q_learning(q_func,
                               current_state_1=current_state_1,
                               current_state_2=current_state_2,
                               action_index=action_index,
                               object_index=object_index,
                               reward=reward,
                               next_state_1=next_state_1,
                               next_state_2=next_state_2,
                               terminal=terminal)
        else:
            # Accumulate the discounted return for evaluation.
            epi_reward += (GAMMA ** t) * reward

        current_room_desc = next_room_desc
        current_quest_desc = next_quest_desc
        t += 1

    if not for_training:
        return epi_reward
# pragma: coderesponse end
def run_epoch():
    """Runs one epoch and returns reward averaged over test episodes"""
    # Train first, then evaluate with frozen exploration settings.
    for _ in range(NUM_EPIS_TRAIN):
        run_episode(for_training=True)
    rewards = [run_episode(for_training=False) for _ in range(NUM_EPIS_TEST)]
    return np.mean(np.array(rewards))
def run():
    """Returns array of test reward per epoch for one run"""
    # Fresh Q-table per run, shared with run_episode via the module global.
    global q_func
    q_func = np.zeros((NUM_ROOM_DESC, NUM_QUESTS, NUM_ACTIONS, NUM_OBJECTS))

    epoch_rewards = []
    progress = tqdm(range(NUM_EPOCHS), ncols=80)
    for _ in progress:
        epoch_rewards.append(run_epoch())
        progress.set_description(
            "Avg reward: {:0.6f} | Ewma reward: {:0.6f}".format(
                np.mean(epoch_rewards),
                utils.ewma(epoch_rewards)))
    return epoch_rewards
if __name__ == '__main__':
    # Data loading and build the dictionaries that use unique index for each state
    (dict_room_desc, dict_quest_desc) = framework.make_all_states_index()
    NUM_ROOM_DESC = len(dict_room_desc)
    NUM_QUESTS = len(dict_quest_desc)

    # set up the game
    framework.load_game_data()

    epoch_rewards_test = []  # shape NUM_RUNS * NUM_EPOCHS

    for _ in range(NUM_RUNS):
        print("==================== RUN ", _, "==========================")
        epoch_rewards_test.append(run())

    epoch_rewards_test = np.array(epoch_rewards_test)

    x = np.arange(NUM_EPOCHS)
    fig, axis = plt.subplots()
    # plot reward per epoch averaged per run
    axis.plot(x, np.mean(epoch_rewards_test, axis=0))
    axis.set_xlabel('Epochs')
    axis.set_ylabel('reward')
    axis.set_title(('Tablular: nRuns=%d, Epilon=%.2f, Epi=%d, alpha=%.4f' %
                    (NUM_RUNS, TRAINING_EP, NUM_EPIS_TRAIN, ALPHA)))
    plt.show()
| StarcoderdataPython |
3289711 | <reponame>NEISSproject/tf_neiss
from trainer.trainer_base import TrainerBase
import model_fn.model_fn_nlp.model_fn_pos as models
import util.flags as flags
from input_fn.input_fn_nlp.input_fn_pos import InputFnPOS
# Model parameter
# ===============
flags.define_string('model_type', 'ModelPOS', 'Model Type to use choose from: ModelTriangle')
flags.define_string('tags', 'stts_tiger.txt', 'path to tag vocabulary')
flags.define_string('word_embeddings', '../../../data/word_embeddings/cc.de.300.bin', 'path to word embeddings')
flags.define_string('graph', 'KerasGraphFF3', 'class name of graph architecture')
flags.define_list('add_types', str, 'types that are add features int or float',
                  "", [])
flags.define_integer('buffer', 1,
                     'number of training samples hold in the cache. (effects shuffling)')
flags.define_boolean('predict_mode', False, 'If and only if true the prediction will be accomplished')
# Parse immediately so flag values are available at import/construction time.
flags.FLAGS.parse_flags()
class TrainerPOS(TrainerBase):
    """Trainer wiring the POS-tagging input pipeline to the chosen model."""

    def __init__(self):
        super(TrainerPOS, self).__init__()
        self._input_fn_generator = InputFnPOS(self._flags)
        self._input_fn_generator.print_params()
        # Expose the tag-set size and the tag mapper to the model via params.
        self._params['num_tags'] = self._input_fn_generator.get_num_tags()
        self._params['tags'] = self._input_fn_generator.getTagMapper()
        # Resolve the model class by its flag name, e.g. 'ModelPOS'.
        self._model_class = getattr(models, self._flags.model_type)
        # self._graph.info()
if __name__ == '__main__':
    # logging.basicConfig(level=logging.INFO)
    # Build the trainer and run the training loop defined by TrainerBase.
    trainer = TrainerPOS()
    trainer.train()
| StarcoderdataPython |
124478 | <reponame>franzihe/Python_Masterthesis<gh_stars>0
# coding: utf-8
# In[2]:
import sys
sys.path.append('/Volumes/SANDISK128/Documents/Thesis/Python/')
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
import colormaps as cmaps
import save_fig as SF
import datetime
from datetime import date
# In[3]:
### Define colorbar colors
champ = 255.  # scale factor: 0..255 RGB bytes -> matplotlib 0..1 floats
blue = np.array([1, 74, 159]) / champ  # for the date
vert_col = np.array([197, 197, 197]) / champ  # vertical line for day marker
def dates_plt(time_ml):
    """Build x-tick labels: a 'Y-M-D' date every 24 h, bare hours in between.

    Samples every 6th entry of *time_ml* (epoch seconds, numpy array);
    sampled entries 0, 4 and 8 become full dates, the rest hour strings.
    """
    stamps = [datetime.datetime.utcfromtimestamp(time_ml[i])
              for i in range(0, time_ml.shape[0], 6)]
    hours = [s.hour for s in stamps]

    def day_label(s):
        return '%s-%s-%s' % (s.year, s.month, s.day)

    xt = [day_label(stamps[0])]
    xt.extend('%s' % hours[i] for i in range(1, 4))
    xt.append(day_label(stamps[4]))
    xt.extend('%s' % hours[i] for i in range(5, 8))
    if len(stamps) > 8:
        # NOTE(review): the original also had an `elif size > 9` branch that
        # was unreachable (shadowed by this condition); it is omitted here.
        xt.append(day_label(stamps[8]))
    return xt
# def dates_plt_18(time_ml):
# dt = []
# dd = []
# dm = []
# dy = []
# for i in range(0,time_ml.shape[0],6):
# dt.append(datetime.datetime.utcfromtimestamp(time_ml[i]).hour)
# dd.append(datetime.datetime.utcfromtimestamp(time_ml[i]).day)
# dm.append(datetime.datetime.utcfromtimestamp(time_ml[i]).month)
# dy.append(datetime.datetime.utcfromtimestamp(time_ml[i]).year)
#
#
# xt = []
# for i in range(0,1):
# xt.append('%s' %dt[i])
# t1 = '%s-%s-%s' %(dy[1],dm[1],dd[1])
# xt.append(t1)
# for i in range(2,5):
# xt.append('%s' %dt[i])
# t2 = '%s-%s-%s' %(dy[5],dm[5],dd[5])
# xt.append(t2)
# for i in range(6,9):
# xt.append('%s' %dt[i])
# if np.asarray(dt).size >9:
# t3 = '%s-%s-%s' %(dy[9],dm[9],dd[9])
# xt.append(t3)
# elif np.asarray(dt).size >10:
# for i in range(10,12):
# xt.append('%s' %dt[i])
# else:
# xt
# return(xt);
def dates_plt_00(h_p00, m_p00, d_p00, y_p00, ini_day):
    """Tick labels for a 00 UTC run: full 'Y-M-D' at 00, bare hours at 06/12/18.

    Covers three consecutive days centred on *ini_day* (columns ini_day-1,
    ini_day, ini_day+1); hour rows are sampled at indices 6, 12 and 18.
    """
    labels = []
    for day in (ini_day - 1, ini_day):
        labels.append('%s-%s-%s' % (y_p00[0][day], m_p00[0][day], d_p00[0][day]))
        for hour_row in range(6, 24, 6):
            labels.append('%s' % h_p00[hour_row][day])
    labels.append('%s-%s-%s' % (y_p00[0][ini_day + 1], m_p00[0][ini_day + 1],
                                d_p00[0][ini_day + 1]))
    return labels
def dates_plt_18(h_p18, m_p18, d_p18, y_p18, ini_day):
    """Tick labels for an 18 UTC run: hour at 00, full date at 06, hours at 12/18.

    Covers three consecutive days centred on *ini_day*; the date label is
    taken from row 6 of the month/day/year tables.
    """
    labels = []
    for day in (ini_day - 1, ini_day):
        labels.append('%s' % h_p18[0][day])
        labels.append('%s-%s-%s' % (y_p18[6][day], m_p18[6][day], d_p18[6][day]))
        for hour_row in range(12, 24, 6):
            labels.append('%s' % h_p18[hour_row][day])
    labels.append('%s' % h_p18[0][ini_day + 1])
    return labels
# Shared contour levels for all plot_vertical_* helpers below.
levels = np.arange(0, 0.6, 0.02)  # snowfall amount not divided by thickness
#levels = np.arange(0,9.5,0.32) # snowfall amount divided by thickness
# In[ ]:
def plot_vertical_EM0_1(time, height, result, time_ml, var_name, unit, maxim, Xmax, title):
    """Contour the vertical profiles of ensemble members 0 and 1, stacked.

    time/height/result are per-member sequences; time_ml (epoch seconds)
    only feeds the x tick labels.  `maxim` is unused here — presumably kept
    for a uniform signature across the plot_vertical_* helpers.
    """
    fig = plt.figure(figsize=(20., 14.15))
    gs = GridSpec(2, 2)

    # title
    fig.suptitle(title, y=0.95, color=blue, fontsize=26)

    for ens_memb in range(0, 2):
        if len(result[ens_memb]) == 0:
            continue  # member missing -> leave its panel empty
        ### first 2 ens_memb: each spans the full row
        ax0 = plt.subplot(gs[ens_memb, :])
        im0 = ax0.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels, cmap=cmaps.viridis)
        # Member label in the panel's upper-right corner.
        ax0.text(Xmax - 0.5, Xmax + 50, 'EM%s' % (ens_memb),
                 verticalalignment='bottom', horizontalalignment='right',
                 color=blue, fontsize=22,
                 bbox={'facecolor': 'white', 'alpha': .8, 'pad': 1})

        # set the limits of the plot to the limits of the data
        ax0.axis([time[ens_memb].min(), Xmax, height[ens_memb].min(), 3000.])

        # Vertical line to show end of day
        ax0.axvline(24, color=vert_col, linewidth=3)
        ax0.axvline(48, color=vert_col, linewidth=3)

        # label ticks for plotting
        dates = dates_plt(time_ml)
        yl = [0., '', 1.0, '', 2., '', 3.]

        # labels: x ticks every 6 h; only the bottom panel shows them
        ax0.set_xticks(np.arange(0, Xmax + 1, 6))
        if ens_memb == 1:
            ax0.tick_params(axis='both', which='both', bottom='on', top='off', labelbottom='on', labelsize=20)
            ax0.set_xticklabels(dates, rotation=25, fontsize=20)
            ax0.set_xlabel('time', fontsize=22)
        else:
            ax0.tick_params(axis='both', which='both', bottom='on', top='off', labelbottom='off', labelsize=20)
        ax0.set_ylabel('height [km]', fontsize=22)
        ax0.set_yticks(np.arange(0, 3500., 500.))
        ax0.set_yticklabels(yl, fontsize=20)

    plt.subplots_adjust(hspace=0.08)

    # Add Colorbar
    cbaxes = fig.add_axes([0.14, 0.03, .75, .02])  # [left, bottom, width, height]
    cbar = plt.colorbar(im0, orientation='horizontal', cax=cbaxes)
    cbar.ax.set_xlabel('%s %s %s' % (var_name[0], var_name[1], unit), fontsize=22)
    cbar.ax.tick_params(labelsize=20)
# In[ ]:
def plot_vertical_EM0_9(time, height, result, time_ml, var_name, unit, maxim, title):
    """Contour vertical profiles of all 10 ensemble members on one page.

    Members 0 and 1 get full-width rows at the top; members 2-9 are placed
    pairwise below (even members left column, odd members right).  `maxim`
    is unused — presumably kept for a uniform plot_vertical_* signature.
    """
    fig = plt.figure(figsize=(14.15, 20.))
    gs = GridSpec(6, 2)

    # title
    fig.suptitle(title, y=0.9, color=blue, fontsize=20)

    ### first 2 ens_memb: full-width rows
    for ens_memb in range(0, 2):
        if len(result[ens_memb]) == 0:
            continue  # member missing -> leave its panel empty
        ax0 = plt.subplot(gs[ens_memb, :])
        im0 = ax0.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels, cmap=cmaps.viridis)
        ax0.text(time[ens_memb].max() - 0.5, time[ens_memb].min() + 50, 'EM%s' % (ens_memb),
                 verticalalignment='bottom', horizontalalignment='right',
                 color=blue, fontsize=20,
                 bbox={'facecolor': 'white', 'alpha': .8, 'pad': 1})

        # set the limits of the plot to the limits of the data
        ax0.axis([time[ens_memb].min(), time[ens_memb].max(), height[ens_memb].min(), 3000.])

        # Vertical line to show end of day
        ax0.axvline(24, color=vert_col, linewidth=3)
        ax0.axvline(48, color=vert_col, linewidth=3)

        # label ticks for plotting (dates/yl reused by the loops below)
        dates = dates_plt(time_ml)
        yl = [0., '', 1.0, '', 2., '', 3.]

        # labels
        ax0.set_xticks(np.arange(0, time[ens_memb].max() + 1, 6))
        if ens_memb == 1:
            ax0.tick_params(axis='both', which='both', bottom='on', top='off', labelbottom='on', labelsize=16)
            ax0.set_xticklabels(dates, rotation=25, fontsize=16)
        else:
            ax0.tick_params(axis='both', which='both', bottom='on', top='off', labelbottom='off', labelsize=16)
        ax0.set_ylabel('height [km]', fontsize=20)
        ax0.set_yticks(np.arange(0, 3500., 500.))
        ax0.set_yticklabels(yl, fontsize=16)

    plt.subplots_adjust(hspace=0.5)

    # Add Colorbar
    cbaxes = fig.add_axes([0.14, 0.03, .75, .02])  # [left, bottom, width, height]
    cbar = plt.colorbar(im0, orientation='horizontal', cax=cbaxes)
    cbar.ax.set_xlabel('%s %s %s' % (var_name[0], var_name[1], unit), fontsize=20)
    cbar.ax.tick_params(labelsize=18)

    # GridSpec row index for members 2..9 (each pair shares one row).
    pos = []
    pos.append(0)
    pos.append(0)
    for i in range(2, 6):
        pos.append(i)
        pos.append(i)

    ### left column: even members
    for ens_memb in range(2, 10, 2):
        if len(result[ens_memb]) == 0:
            continue
        ax2 = plt.subplot(gs[pos[ens_memb], :-1])
        im2 = ax2.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels, cmap=cmaps.viridis)
        ax2.text(time[ens_memb].max() - 0.5, time[ens_memb].min() + 50, 'EM%s' % (ens_memb),
                 verticalalignment='bottom', horizontalalignment='right',
                 color=blue, fontsize=20,
                 bbox={'facecolor': 'white', 'alpha': .8, 'pad': 1})

        # set the limits of the plot to the limits of the data
        ax2.axis([time[ens_memb].min(), time[ens_memb].max(), height[ens_memb].min(), 3000.])

        # Vertical line to show end of day
        ax2.axvline(24, color=vert_col, linewidth=3)
        ax2.axvline(48, color=vert_col, linewidth=3)

        # label ticks for plotting: sparse date labels for the small panels
        if np.asarray(dates).size <= 8.:
            dates2 = [dates[0], '', '', '', dates[4], '', '', '']
        else:
            dates2 = [dates[0], '', '', '', dates[4], '', '', '', dates[8]]

        # labels
        ax2.set_xticks(np.arange(0, time[ens_memb].max() + 1, 6))
        ax2.set_ylabel('height [km]', fontsize=20)
        ax2.set_yticks(np.arange(0, 3500., 500.))
        ax2.set_yticklabels(yl, fontsize=18)
        if ens_memb == 8:
            ax2.tick_params(axis='both', which='both', bottom='on', top='off', labelbottom='on', labelsize=16)
            ax2.set_xticklabels(dates2, rotation=25, fontsize=16)
            ax2.set_xlabel('time', fontsize=20)
        else:
            ax2.tick_params(axis='both', which='both', bottom='on', top='off', labelbottom='off', labelsize=16)

    # right column: odd members (y tick labels hidden)
    for ens_memb in range(3, 10, 2):
        if len(result[ens_memb]) == 0:
            continue
        ax3 = plt.subplot(gs[pos[ens_memb], -1:])
        im2 = ax3.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels, cmap=cmaps.viridis)
        ax3.text(time[ens_memb].max() - 0.5, time[ens_memb].min() + 50, 'EM%s' % (ens_memb),
                 verticalalignment='bottom', horizontalalignment='right',
                 color=blue, fontsize=20,
                 bbox={'facecolor': 'white', 'alpha': .8, 'pad': 1})

        # set the limits of the plot to the limits of the data
        ax3.axis([time[ens_memb].min(), time[ens_memb].max(), height[ens_memb].min(), 3000.])

        # Vertical line to show end of day
        ax3.axvline(24, color=vert_col, linewidth=3)
        ax3.axvline(48, color=vert_col, linewidth=3)

        # labels
        ax3.set_xticks(np.arange(0, time[ens_memb].max() + 1, 6))
        ax3.set_ylabel('height [km]', fontsize=20)
        ax3.set_yticks(np.arange(0, 3500., 500.))
        ax3.set_yticklabels(yl, fontsize=18)
        if ens_memb == 9:
            ax3.tick_params(axis='both', which='both', bottom='on', top='off', left='off', labelbottom='on', labelleft='off', labelsize=16)
            ax3.set_xticklabels(dates2, rotation=25, fontsize=16)
            ax3.set_xlabel('time', fontsize=20)
        else:
            ax3.tick_params(axis='both', which='both', bottom='on', top='off', left='off', labelbottom='off', labelleft='off', labelsize=16)
# In[ ]:
def plot_vertical_EM0_9_48h(time, height, result, time_ml, var_name, unit, maxim, Xmax, title):
    """Plot time/height cross-sections for ensemble members 0-9 in one column.

    One filled-contour panel per ensemble member (skipping empty results),
    sharing a single horizontal colorbar at the bottom.

    Parameters (as used by the code; units not stated in this file):
        time, height, result -- per-member sequences; ``result[m]`` is plotted
            transposed against ``time[m]`` and ``height[m]``.
        time_ml  -- passed to the module-level ``dates_plt`` to build x labels.
        var_name -- two-element sequence used in the colorbar label.
        unit     -- unit string appended to the colorbar label.
        maxim    -- unused here (only referenced by the commented-out levels).
        Xmax     -- right-hand x-axis limit (hours).
        title    -- figure title.

    NOTE(review): ``levels``, ``blue``, ``cmaps``, ``vert_col`` and
    ``dates_plt`` are module-level globals not defined in this function --
    confirm they are set before calling.
    """
    fig = plt.figure(figsize=(14.15,20.))
    gs = GridSpec(10, 2)
    # title
    fig.suptitle(title, y =0.9, color =blue, fontsize = 20)
    # levels = np.arange(0,np.nanmax(maxim),0.015)
    for ens_memb in range(0,10):
        # Skip members with no data.
        if len(result[ens_memb]) == 0:
            continue
        ### first all ens_memb
        ax0 = plt.subplot(gs[ens_memb, :])
        im0 = ax0.contourf(time[ens_memb], np.transpose(height[ens_memb]), result[ens_memb].T, levels,cmap=cmaps.viridis)
        # NOTE(review): the y position uses Xmax+50 while sibling plotters use
        # time.min()+50 -- confirm this placement is intended.
        ax0.text(Xmax-0.5, Xmax+50, 'EM%s' %(ens_memb), # x, y
                 verticalalignment = 'bottom', horizontalalignment='right',
                 #transform = ax0.transAxes,
                 color = blue, fontsize = 20,
                 bbox={'facecolor':'white','alpha':.8, 'pad':1})
        # set the limits of the plot to the limits of the data
        ax0.axis([time[ens_memb].min(), Xmax, height[ens_memb].min(), 3000.])
        # ax0.yaxis.grid()
        # Vertical line to show end of day
        ax0.axvline(24,color = vert_col, linewidth = 3)
        ax0.axvline(48,color = vert_col, linewidth = 3)
        # label ticks for plotting
        dates = dates_plt(time_ml)
        yl = [0., '' , 1.0, '' , 2., '' , 3.]
        # labels: one x tick every 6 hours
        ax0.set_xticks(np.arange(0,Xmax+1,6))
        # Only the bottom panel (member 9) shows x tick labels and the x label.
        if ens_memb == 9:
            ax0.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='on',labelsize = 16)
            ax0.set_xticklabels(dates, rotation = 25, fontsize = 16)
            ax0.set_xlabel('time', fontsize = 20)
        else:
            ax0.tick_params(axis='both',which='both',bottom='on',top='off',labelbottom='off',labelsize = 16)
        # Put the shared y label on the middle panel only.
        if ens_memb == 4:
            plt.ylabel('height [km]', fontsize = 20)
        # ax0.set_ylabel('height [km]', fontsize = 22)
        ax0.set_yticks(np.arange(0,3500.,500.))
        ax0.set_yticklabels(yl, fontsize = 16)
    plt.subplots_adjust(hspace = 0.15)
    # Add Colorbar (shared across all panels, placed below the figure)
    cbaxes = fig.add_axes([0.14, 0.03, .75, .02] ) #[left, bottom, width, height]
    cbar = plt.colorbar(im0, orientation = 'horizontal', cax=cbaxes)
    cbar.ax.set_xlabel('%s %s %s' %(var_name[0], var_name[1], unit), fontsize = 20)
    cbar.ax.tick_params(labelsize = 18)
| StarcoderdataPython |
3333365 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
-------------------------------------------------------------------------------
@Name: dump_db_pickle.py
@Desc:
@Author: <EMAIL>
@Create: 2020.05.06 14:26
-------------------------------------------------------------------------------
@Change: 2020.05.06
-------------------------------------------------------------------------------
"""
import pickle

# Load the pickled "people" database and print its contents.
# Fix: the file handle was opened and never closed; a context manager
# guarantees it is released even if unpickling fails.
with open('people-file', 'rb') as dbfile:
    db = pickle.load(dbfile)

for key in db:
    print(key, '=>\n ', db[key])
print(db['sue']['name'])
1650449 | from setuptools import find_packages, setup
# Package metadata for the NIFR project (Null-sampling for Interpretable and
# Fair Representations).
setup(
    name="NIFR",
    version="0.2.0",
    author="<NAME>, <NAME>, <NAME>",
    packages=find_packages(),
    description="Null-sampling for Interpretable and Fair Representations",
    python_requires=">=3.8",
    # PEP 561 marker file so type checkers pick up the inline annotations.
    package_data={"nifr": ["py.typed"]},
    install_requires=[
        "captum",
        "EthicML",
        "gitpython",
        "numpy >= 1.15",
        "pandas >= 0.24",
        "scikit-image >= 0.14",
        "scikit-learn >= 0.20",
        "scipy >= 1.2.1",
        "torch >= 1.2",
        "torchvision >= 0.4.0",
        "tqdm >= 4.31",
        "typed-argument-parser == 1.4",
        "typing-extensions >= 3.7.4",
        "typing-inspect >= 0.5",
        "wandb == 0.8.27",
    ],
)
| StarcoderdataPython |
3381138 | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fake transport for testing Switchboard."""
import copy
import time
from gazoo_device.switchboard import switchboard_process
from gazoo_device.switchboard import transport_properties
from gazoo_device.utility import multiprocessing_utils
EXCEPTION_MESSAGE = "Something bad happened during read"
def _produce_data(byte_rate, bytes_per_second, exit_flag,
                  generate_raw_log_lines, read_queue):
  """Generates dummy data to imitate reading from a device.

  Runs until ``exit_flag`` is set, pushing one fake log line per iteration
  onto ``read_queue`` while self-regulating toward ``byte_rate`` bytes/s:
  if output is too fast it sleeps a little longer, if too slow it first
  shortens the sleep and then pads lines with '*' to emit more bytes.

  Args:
    byte_rate: target output rate in bytes per second.
    bytes_per_second: shared Value updated with the measured rate.
    exit_flag: shared Event; the loop stops once it is set.
    generate_raw_log_lines: optional callable producing the line text;
      when not callable, a zero-padded line counter is used instead.
    read_queue: queue that receives each generated raw log line.
  """
  byte_count = 0
  delay = 0.0
  line_count = 0
  padding = 0
  start_time = time.time()
  while not exit_flag.is_set():
    line_count += 1
    if callable(generate_raw_log_lines):
      raw_log_line = u"{}{}\n".format(generate_raw_log_lines(), "*" * padding)
    else:
      raw_log_line = u"{:08d}{}\n".format(line_count, "*" * padding)
    byte_count += len(raw_log_line)
    # Measured average rate since the loop started.
    bytes_per_second.value = byte_count / (time.time() - start_time)
    if bytes_per_second.value > byte_rate:
      delay += 0.001
    elif bytes_per_second.value < byte_rate:
      if delay > 0.0:
        delay -= 0.001
      else:
        # Already running with no delay: grow the lines instead.
        padding += 1
    time.sleep(delay)
    read_queue.put(raw_log_line)
class FakeTransport:
  """Mock Switchboard transport class for testing.

  Tracks open/close/is_open call counts and all written data in
  multiprocessing-safe values and queues, can synthesize read data at a
  configurable rate, and can be told to fail on open or read.
  """

  def __init__(self,
               baudrate=115200,
               generate_lines=False,
               generate_raw_log_lines=None,
               fail_open=False,
               fail_read=False,
               failure_message=EXCEPTION_MESSAGE,
               write_read_func=None,
               open_on_start=True,
               read_only_if_raw_data_queue_enabled=False):
    self.comms_address = "/some/serial/path"
    # Shared counters/queues so tests can inspect activity across processes.
    self.bytes_per_second = multiprocessing_utils.get_context().Value("f", 0.0)
    self.is_open_count = multiprocessing_utils.get_context().Value("i", 0)
    self.open_count = multiprocessing_utils.get_context().Value("i", 0)
    self.close_count = multiprocessing_utils.get_context().Value("i", 0)
    self.read_size = multiprocessing_utils.get_context().Value("i", 0)
    self.reads = multiprocessing_utils.get_context().Queue()
    self.writes = multiprocessing_utils.get_context().Queue()
    self._baudrate = baudrate
    self._exit_flag = multiprocessing_utils.get_context().Event()
    self._fail_open = fail_open
    self._fail_read = fail_read
    self._generate_lines = generate_lines
    self._generate_raw_log_lines = generate_raw_log_lines
    self._properties = {}
    self._failure_message = failure_message
    self._transport_open = multiprocessing_utils.get_context().Event()
    self._write_read_func = write_read_func
    self._properties[transport_properties.OPEN_ON_START] = open_on_start
    # Note: if using read_only_if_raw_data_queue_enabled flag, your test must
    # call bind_raw_data_enabled_method().
    self._read_only_if_raw_data_queue_enabled = read_only_if_raw_data_queue_enabled
    self._raw_data_queue_enabled_method = None

  def __del__(self):
    self.close()

  def clear_open(self):
    # Marks the transport closed; guarded because close() may already have
    # deleted the attribute during teardown.
    if hasattr(self, "_transport_open"):
      self._transport_open.clear()

  def close(self):
    """Releases resources used by the class."""
    try:
      if hasattr(self, "_exit_flag"):
        self._exit_flag.set()
      if hasattr(self, "_generator"):
        self._generator.join()
      if hasattr(self, "close_count"):
        self.close_count.value += 1
      if hasattr(self, "_transport_open"):
        self.clear_open()
    except IOError:
      # Test probably failed and canceled the manager Event objects
      pass
    finally:
      # Always manually delete any multiprocess manager attributes so python's
      # garbage collector properly runs.
      attrs = [
          "_raw_data_queue_enabled_method", "bytes_per_second", "is_open_count",
          "open_count", "close_count", "read_size", "reads", "writes",
          "_properties", "_transport_open"
      ]
      for attr in attrs:
        if hasattr(self, attr):
          delattr(self, attr)
      # Always make "_exit_flag" last attribute to delete
      if hasattr(self, "_exit_flag"):
        delattr(self, "_exit_flag")

  def bind_raw_data_enabled_method(self, transport_process):
    """Add a reference to raw_data_enabled() method of transport_process.

    Args:
      transport_process (TransportProcess): the transport process using
        this fake transport. Required in order to be able to read only
        when the raw data queue is enabled to avoid race conditions.
    """
    self._raw_data_queue_enabled_method = transport_process.raw_data_enabled

  def is_open(self):
    # Returns False after teardown (attributes deleted) or manager errors.
    result = False
    try:
      if hasattr(self, "is_open_count"):
        self.is_open_count.value += 1
      result = self._transport_open.is_set()
    except IOError:
      # Test probably failed and canceled the manager Event objects
      pass
    return result

  def get_all_properties(self):
    return copy.deepcopy(self._properties)

  def get_property(self, key, value=None):
    # `value` acts as the default when `key` is absent.
    if hasattr(self, "_properties"):
      if key in self._properties:
        return self._properties[key]
    return value

  def get_property_list(self):
    return list(self._properties.keys())

  def set_property(self, key, value):
    self._properties[key] = value

  def open(self):
    try:
      self.open_count.value += 1
      self.set_open()
    except IOError:
      # Test probably failed and canceled the manager Event objects
      pass
    # Configured failure is raised after the bookkeeping above.
    if self._fail_open:
      raise Exception(self._failure_message)

  def set_open(self):
    self._transport_open.set()
    # Optionally start a background process that produces fake read data at
    # roughly baudrate/10 bytes per second.
    if self._generate_lines:
      self._generator = multiprocessing_utils.get_context().Process(
          target=_produce_data,
          args=(self._baudrate / 10, self.bytes_per_second, self._exit_flag,
                self._generate_raw_log_lines, self.reads))
      self.daemon = True
      self._generator.start()

  def _should_read(self):
    # True unless reads are gated on the raw-data queue and that queue is
    # disabled (or the gating method was never bound).
    return (not self._read_only_if_raw_data_queue_enabled or
            (self._read_only_if_raw_data_queue_enabled and
             self._raw_data_queue_enabled_method is not None and
             self._raw_data_queue_enabled_method()))

  def read(self, size=1, timeout=None):
    """Reads from mock read queue or raises an error if fail_read is True."""
    try:
      self.read_size.value = size
    except IOError:
      # Test probably failed and canceled the manager Event objects
      pass
    if self._fail_read:
      raise Exception(self._failure_message)
    if self._should_read():
      return switchboard_process.get_message(self.reads, timeout=timeout)
    else:
      return None

  def write(self, data, timeout=None):
    self.writes.put(data, timeout=timeout)
    # Optionally echo canned responses for the written data into the read
    # queue, imitating a device that answers commands.
    if self._write_read_func:
      responses = self._write_read_func(data)
      for response in responses:
        self.reads.put(response)

  def test_method(self, raise_error: bool = False) -> str:
    """A transport method which raises an error if raise_error=True."""
    del self  # Unused by the mock implementation
    if raise_error:
      raise RuntimeError("Something failed.")
    return "Some return"
| StarcoderdataPython |
3264407 | <reponame>djbsmith/dmu_products
"""Script to generate the HELP homogenised SPIRE maps."""
from datetime import datetime
from itertools import product
import numpy as np
from astropy.io import fits
from astropy.table import Table
VERSION = "0.9"
spire_maps = Table.read("spire_maps.fits")
all_obsids = Table.read("spire_obsids.fits")
def get_band(filename):
    """Return the SPIRE band ("250", "350" or "500") encoded in a file name.

    The band is recognised either by the wavelength digits in the name or by
    the instrument array short names (PSW/PMW/PLW, case-insensitive).

    Raises:
        ValueError: if no band can be identified in the file name.
    """
    if "250" in filename or "psw" in filename.lower():
        return "250"
    if "350" in filename or "pmw" in filename.lower():
        return "350"
    if "500" in filename or "plw" in filename.lower():
        return "500"
    # Bug fix: the f-string had no placeholder, so the offending file name
    # was never reported.  Interpolate it into the error message.
    raise ValueError(f"Can't identify the band in '{filename}'.")
# HerMES: homogenise every HerMES SPIRE map listed in spire_maps.
hermes_maps = spire_maps[spire_maps['survey'] == "HerMES"]
for row in hermes_maps:
    field = row['field']
    filename = row['filename']
    if "hers-helms-xmm" not in filename:
        obsids = sorted(list(all_obsids['ObsID'][
            all_obsids['field'] == field
        ]))
    else:
        # The Herschel-Stripe-82 mosaics also include XMM-LSS observations.
        obsids = sorted(list(all_obsids['ObsID'][
            (
                (all_obsids['field'] == field) |
                (all_obsids['field'] == 'XMM-LSS')
            ) & (all_obsids['not_in_hs82'] == 0)
        ]))
    assert len(obsids) > 0
    # Bug fix: the f-string had no placeholder and literally opened
    # "../dmu19_HerMES/data/(unknown)"; interpolate the map file name.
    orig_hdu_list = fits.open(f"../dmu19_HerMES/data/{filename}")
    image_hdu = orig_hdu_list[1]
    assert image_hdu.header['EXTNAME'] == "image"
    image_hdu.header['EXTNAME'] = "IMAGE"
    image_hdu.header['BUNIT'] = "Jy / beam"
    if filename == 'hers-helms-xmm_itermap_20160623_PSW.fits':
        # FIXME The Herschel-Strip-82 SPIRE250 map is corrupted; we can only
        # access the image data.
        error_hdu = fits.ImageHDU()
        error_hdu.header['EXTNAME'] = "ERROR"
        error_hdu.header.add_comment("The error map is not available.")
        exposure_hdu = fits.ImageHDU()
        exposure_hdu.header['EXTNAME'] = "EXPOSURE"
        exposure_hdu.header.add_comment("The exposure map is not available.")
        mask_hdu = fits.ImageHDU()
        mask_hdu.header['EXTNAME'] = "MASK"
        mask_hdu.header.add_comment("The mask map is not available.")
    else:
        error_hdu = orig_hdu_list[2]
        assert error_hdu.header['EXTNAME'] == "error"
        error_hdu.header['EXTNAME'] = "ERROR"
        error_hdu.header['BUNIT'] = "Jy / beam"
        exposure_hdu = orig_hdu_list[3]
        assert exposure_hdu.header['EXTNAME'] == "exposure"
        exposure_hdu.header['EXTNAME'] = "EXPOSURE"
        exposure_hdu.header['BUNIT'] = "s"
        mask_hdu = orig_hdu_list[4]
        if "hers-helms-xmm" not in filename and "SSDF" not in filename:
            assert mask_hdu.header['EXTNAME'] == "flag"
        else:
            assert mask_hdu.header['EXTNAME'] == "mask"
            # The Herschel-Stripe-82 and SSDF masks are 0 for bad and 1 for
            # good while we use the reverse for HELP
            good_mask = mask_hdu.data == 1
            bad_mask = mask_hdu.data == 0
            mask_hdu.data[good_mask] = 0
            mask_hdu.data[bad_mask] = 1
        mask_hdu.header['EXTNAME'] = "MASK"
    nebfilt_map_name = filename.replace(".fits", "_nebfiltered.fits")
    nebfilt_map = fits.open(
        f"../dmu19_nebular_filtered_maps/data/{nebfilt_map_name}")
    nebfilt_hdu = fits.ImageHDU(header=nebfilt_map[0].header,
                                data=nebfilt_map[0].data)
    nebfilt_hdu.header['EXTNAME'] = "NEBFILT"
    nebfilt_hdu.header['BUNIT'] = "Jy / beam"
    primary_hdu = fits.PrimaryHDU()
    primary_hdu.header.append((
        "CREATOR", "Herschel Extragalactic Legacy Project"
    ))
    primary_hdu.header.append((
        "TIMESYS", "UTC", "All dates are in UTC time"
    ))
    primary_hdu.header.append((
        "DATE", datetime.now().replace(microsecond=0).isoformat(),
        "Date of file creation"
    ))
    primary_hdu.header.append((
        "VERSION", VERSION, "HELP product version"
    ))
    primary_hdu.header.append((
        "TELESCOP", "Herschel", "Name of the telescope"
    ))
    primary_hdu.header.append((
        "INSTRUME", "SPIRE", "Name of the instrument"
    ))
    primary_hdu.header.append((
        "FILTER", f"SPIRE-{get_band(filename)}", "Name of the filter"
    ))
    primary_hdu.header.append((
        "FIELD", field, "Name of the HELP field"
    ))
    for idx, obs_id in enumerate(obsids):
        keyword = "OBSID" + str(idx).zfill(3)
        primary_hdu.header.append((keyword, obs_id))
    if "hers-helms-xmm" in filename:
        primary_hdu.header.add_comment(
            "These maps also contain some observations on the XMM-LSS field.")
    hdu_list = fits.HDUList([primary_hdu, image_hdu, nebfilt_hdu, error_hdu,
                             exposure_hdu, mask_hdu])
    hdu_list.writeto(f"data/{field}_SPIRE{get_band(filename)}_v{VERSION}.fits",
                     checksum=True)
    print(f"{field} / {get_band(filename)} processed...")
# AKARI-NEP: same homogenisation as HerMES, but these maps store coverage
# instead of exposure and ship no mask extension.
akarinep_maps = spire_maps[spire_maps['survey'] == "AKARI-NEP"]
for row in akarinep_maps:
    field = row['field']
    filename = row['filename']
    obsids = sorted(list(all_obsids['ObsID'][all_obsids['field'] == field]))
    assert len(obsids) > 0
    # Bug fix: the f-string had no placeholder ("(unknown)"); interpolate the
    # map file name.
    orig_hdu_list = fits.open(f"../dmu19_AKARI-NEP/data/{filename}")
    image_hdu = orig_hdu_list[1]
    assert image_hdu.header['EXTNAME'] == "image"
    image_hdu.header['EXTNAME'] = "IMAGE"
    image_hdu.header['BUNIT'] = "Jy / beam"
    error_hdu = orig_hdu_list[2]
    assert error_hdu.header['EXTNAME'] == "error"
    error_hdu.header['EXTNAME'] = "ERROR"
    error_hdu.header['BUNIT'] = "Jy / beam"
    # The maps contain the coverage and where observed in normal mode.
    exposure_hdu = orig_hdu_list[3]
    assert exposure_hdu.header['EXTNAME'] == "coverage"
    exposure_hdu.data *= 1 / 18.6  # Conversion of coverage to exposure
    exposure_hdu.header['EXTNAME'] = "EXPOSURE"
    exposure_hdu.header['BUNIT'] = "s"
    exposure_hdu.header['QTTY____'] = "s"
    # The maps do not contain mask maps.
    mask_hdu = fits.ImageHDU()
    mask_hdu.header['EXTNAME'] = "MASK"
    mask_hdu.header.add_comment("The mask map is not available.")
    nebfilt_map_name = filename.replace(".fits", "_nebfiltered.fits")
    nebfilt_map = fits.open(
        f"../dmu19_nebular_filtered_maps/data/AKARI-NEP_{nebfilt_map_name}")
    nebfilt_hdu = fits.ImageHDU(header=nebfilt_map[0].header,
                                data=nebfilt_map[0].data)
    nebfilt_hdu.header['EXTNAME'] = "NEBFILT"
    nebfilt_hdu.header['BUNIT'] = "Jy / beam"
    primary_hdu = fits.PrimaryHDU()
    primary_hdu.header.append((
        "CREATOR", "Herschel Extragalactic Legacy Project"
    ))
    primary_hdu.header.append((
        "TIMESYS", "UTC", "All dates are in UTC time"
    ))
    primary_hdu.header.append((
        "DATE", datetime.now().replace(microsecond=0).isoformat(),
        "Date of file creation"
    ))
    primary_hdu.header.append((
        "VERSION", VERSION, "HELP product version"
    ))
    primary_hdu.header.append((
        "TELESCOP", "Herschel", "Name of the telescope"
    ))
    primary_hdu.header.append((
        "INSTRUME", "SPIRE", "Name of the instrument"
    ))
    primary_hdu.header.append((
        "FILTER", f"SPIRE-{get_band(filename)}", "Name of the filter"
    ))
    primary_hdu.header.append((
        "FIELD", field, "Name of the HELP field"
    ))
    for idx, obs_id in enumerate(obsids):
        keyword = "OBSID" + str(idx).zfill(3)
        primary_hdu.header.append((keyword, obs_id))
    hdu_list = fits.HDUList([primary_hdu, image_hdu, nebfilt_hdu, error_hdu,
                             exposure_hdu, mask_hdu])
    hdu_list.writeto(f"data/{field}_SPIRE{get_band(filename)}_v{VERSION}.fits",
                     checksum=True)
    print(f"{field} / {get_band(filename)} processed...")
# SPIRE-NEP: as above; note the different extension order (mask before
# coverage/error) in these calibration maps.
spirenep_maps = spire_maps[spire_maps['survey'] == "SPIRE-NEP"]
for row in spirenep_maps:
    field = row['field']
    filename = row['filename']
    obsids = sorted(list(all_obsids['ObsID'][all_obsids['field'] == field]))
    assert len(obsids) > 0
    # Bug fix: the f-string had no placeholder ("(unknown)"); interpolate the
    # map file name.
    orig_hdu_list = fits.open(
        f"../dmu19_SPIRE-NEP-calibration/data/{filename}")
    image_hdu = orig_hdu_list[1]
    assert image_hdu.header['EXTNAME'] == "image"
    image_hdu.header['EXTNAME'] = "IMAGE"
    image_hdu.header['BUNIT'] = "Jy / beam"
    mask_hdu = orig_hdu_list[2]
    assert mask_hdu.header['EXTNAME'] == "flag"
    mask_hdu.header['EXTNAME'] = "MASK"
    # The maps contain the coverage and where observed in normal mode.
    exposure_hdu = orig_hdu_list[3]
    assert exposure_hdu.header['EXTNAME'] == "coverage"
    exposure_hdu.data *= 1 / 18.6  # Conversion of coverage to exposure
    exposure_hdu.header['EXTNAME'] = "EXPOSURE"
    exposure_hdu.header['BUNIT'] = "s"
    exposure_hdu.header['QTTY____'] = "s"
    error_hdu = orig_hdu_list[4]
    assert error_hdu.header['EXTNAME'] == "error"
    error_hdu.header['EXTNAME'] = "ERROR"
    error_hdu.header['BUNIT'] = "Jy / beam"
    nebfilt_map_name = filename.replace(".fits", "_nebfiltered.fits")
    nebfilt_map = fits.open(
        f"../dmu19_nebular_filtered_maps/data/SPIRE-NEP_{nebfilt_map_name}")
    nebfilt_hdu = fits.ImageHDU(header=nebfilt_map[0].header,
                                data=nebfilt_map[0].data)
    nebfilt_hdu.header['EXTNAME'] = "NEBFILT"
    nebfilt_hdu.header['BUNIT'] = "Jy / beam"
    primary_hdu = fits.PrimaryHDU()
    primary_hdu.header.append((
        "CREATOR", "Herschel Extragalactic Legacy Project"
    ))
    primary_hdu.header.append((
        "TIMESYS", "UTC", "All dates are in UTC time"
    ))
    primary_hdu.header.append((
        "DATE", datetime.now().replace(microsecond=0).isoformat(),
        "Date of file creation"
    ))
    primary_hdu.header.append((
        "VERSION", VERSION, "HELP product version"
    ))
    primary_hdu.header.append((
        "TELESCOP", "Herschel", "Name of the telescope"
    ))
    primary_hdu.header.append((
        "INSTRUME", "SPIRE", "Name of the instrument"
    ))
    primary_hdu.header.append((
        "FILTER", f"SPIRE-{get_band(filename)}", "Name of the filter"
    ))
    primary_hdu.header.append((
        "FIELD", field, "Name of the HELP field"
    ))
    for idx, obs_id in enumerate(obsids):
        keyword = "OBSID" + str(idx).zfill(3)
        primary_hdu.header.append((keyword, obs_id))
    hdu_list = fits.HDUList([primary_hdu, image_hdu, nebfilt_hdu, error_hdu,
                             exposure_hdu, mask_hdu])
    hdu_list.writeto(f"data/{field}_SPIRE{get_band(filename)}_v{VERSION}.fits",
                     checksum=True)
    print(f"{field} / {get_band(filename)} processed...")
# H-ATLAS: maps are distributed as separate RAW/MASK/SIGMA/BACKSUB files per
# field and band, so each extension is opened individually.
hatlas_field_basenames = {
    "GAMA-09": "HATLAS_GAMA9_DR1_",
    "GAMA-12": "HATLAS_GAMA12_DR1_",
    "GAMA-15": "HATLAS_GAMA15_DR1_",
    "HATLAS-NGP": "HATLAS_NGP_DR2_",
    "HATLAS-SGP": "HATLAS_SGP_DR2_",
}
# Coverage map file per "<field>_<band>" key.
hatlas_coverage_maps = {
    "GAMA-12_500": "GAMA12-PLWmap-mosaic-20141007_coverage.fits",
    "GAMA-12_350": "GAMA12-PMWmap-mosaic-20141007_coverage.fits",
    "GAMA-12_250": "GAMA12-PSWmap-mosaic-20141007_coverage.fits",
    "GAMA-15_500": "GAMA15-PLWmap-mosaic-20130218_coverage.fits",
    "GAMA-15_350": "GAMA15-PMWmap-mosaic-20130218_coverage.fits",
    "GAMA-15_250": "GAMA15-PSWmap-mosaic-20130218_coverage.fits",
    "GAMA-09_500": "GAMA9-PLWmap-mosaic-20130218_coverage.fits",
    "GAMA-09_350": "GAMA9-PMWmap-mosaic-20130218_coverage.fits",
    "GAMA-09_250": "GAMA9-PSWmap-mosaic-20130218_coverage.fits",
    "HATLAS-NGP_500": "NGP-PLWmap-mosaic_MS-20131121-full_coverage.fits",
    "HATLAS-NGP_350": "NGP-PMWmap-mosaic_MS-20131121-full_coverage.fits",
    "HATLAS-NGP_250": "NGP-PSWmap-mosaic_MS-20131121-full_coverage.fits",
    "HATLAS-SGP_500": "SGP-PLWmap-mosaic_MS-full_coverage.fits",
    "HATLAS-SGP_350": "SGP-PMWmap-mosaic_MS-full_coverage.fits",
    "HATLAS-SGP_250": "SGP-PSWmap-mosaic_MS-full_coverage.fits",
}
for field, band in product(hatlas_field_basenames, ["250", "350", "500"]):
    basename = hatlas_field_basenames[field]
    obsids = sorted(list(all_obsids['ObsID'][all_obsids['field'] == field]))
    assert len(obsids) > 0
    image_map = fits.open(f"../dmu19_HATLAS/data/{basename}RAW{band}.FITS")
    image_hdu = fits.ImageHDU(header=image_map[0].header,
                              data=image_map[0].data)
    image_hdu.data -= np.nanmean(image_hdu.data)  # Background substration
    image_hdu.header['EXTNAME'] = "IMAGE"
    image_hdu.header['BUNIT'] = "Jy / beam"
    mask_map = fits.open(f"../dmu19_HATLAS/data/{basename}MASK{band}.FITS")
    mask_hdu = fits.ImageHDU(header=mask_map[0].header,
                             data=mask_map[0].data)
    # H-ATLAS mask is 0 for bad and 1 for good while we use the reverse for
    # HELP
    good_mask = mask_hdu.data == 1
    bad_mask = mask_hdu.data == 0
    mask_hdu.data[good_mask] = 0
    mask_hdu.data[bad_mask] = 1
    mask_hdu.header['EXTNAME'] = "MASK"
    # The maps contain the coverage and where observed in parallel mode.
    coverage_map_name = hatlas_coverage_maps[f"{field}_{band}"]
    exposure_map = fits.open(
        f"../dmu19_HATLAS/data/coverages/{coverage_map_name}")
    exposure_hdu = exposure_map[1]
    assert exposure_hdu.header['EXTNAME'] == "coverage"
    exposure_hdu.data *= 1 / 10.  # Conversion of coverage to exposure
    exposure_hdu.header['EXTNAME'] = "EXPOSURE"
    exposure_hdu.header['BUNIT'] = "s"
    exposure_hdu.header['QTTY____'] = "s"
    error_map = fits.open(f"../dmu19_HATLAS/data/{basename}SIGMA{band}.FITS")
    error_hdu = fits.ImageHDU(header=error_map[0].header,
                              data=error_map[0].data)
    error_hdu.header['EXTNAME'] = "ERROR"
    error_hdu.header['BUNIT'] = "Jy / beam"
    nebfilt_map = fits.open(f"../dmu19_HATLAS/data/{basename}BACKSUB{band}.FITS")
    nebfilt_hdu = fits.ImageHDU(header=nebfilt_map[0].header,
                                data=nebfilt_map[0].data)
    nebfilt_hdu.header['EXTNAME'] = "NEBFILT"
    nebfilt_hdu.header['BUNIT'] = "Jy / beam"
    primary_hdu = fits.PrimaryHDU()
    primary_hdu.header.append((
        "CREATOR", "Herschel Extragalactic Legacy Project"
    ))
    primary_hdu.header.append((
        "TIMESYS", "UTC", "All dates are in UTC time"
    ))
    primary_hdu.header.append((
        "DATE", datetime.now().replace(microsecond=0).isoformat(),
        "Date of file creation"
    ))
    primary_hdu.header.append((
        "VERSION", VERSION, "HELP product version"
    ))
    primary_hdu.header.append((
        "TELESCOP", "Herschel", "Name of the telescope"
    ))
    primary_hdu.header.append((
        "INSTRUME", "SPIRE", "Name of the instrument"
    ))
    primary_hdu.header.append((
        "FILTER", f"SPIRE-{band}", "Name of the filter"
    ))
    primary_hdu.header.append((
        "FIELD", field, "Name of the HELP field"
    ))
    for idx, obs_id in enumerate(obsids):
        keyword = "OBSID" + str(idx).zfill(3)
        primary_hdu.header.append((keyword, obs_id))
    hdu_list = fits.HDUList([primary_hdu, image_hdu, nebfilt_hdu, error_hdu,
                             exposure_hdu, mask_hdu])
    hdu_list.writeto(f"data/{field}_SPIRE{band}_v{VERSION}.fits",
                     checksum=True)
    print(f"{field} / {band} processed...")
| StarcoderdataPython |
3336236 | <reponame>cuauv/software
#!/usr/bin/env python3
''' Run this script to increase laod on a system (useful for stress-testing cpu heating)
Credits to Stackoverflow for the multiprocessing code:
http://stackoverflow.com/questions/1408356/keyboard-interrupts-with-pythons-multiprocessing-pool
'''
import multiprocessing
import sys
class KeyboardInterruptError(Exception):
    # Re-raised in place of KeyboardInterrupt inside pool workers (see the
    # StackOverflow link in the module docstring).
    pass

def load_function(x):
    """Busy-loop forever to load one CPU core; `x` is ignored."""
    try:
        while True:
            pass
    except KeyboardInterrupt:
        raise KeyboardInterruptError()
def spawn_processes(n):
    """Spawn `n` worker processes, each running an infinite busy loop.

    Blocks until interrupted (Ctrl-C) or an error occurs, then tears the
    pool down and waits for the workers to exit.
    """
    p = multiprocessing.Pool(processes=n)
    try:
        print('starting the pool map')
        # Bug fix: the map previously always submitted 10 tasks, so asking
        # for more than 10 processes left the extra workers idle.  Submit one
        # busy-loop task per requested process instead.
        p.map(load_function, range(n))
        p.close()
        print('pool map complete')
    except KeyboardInterrupt:
        print('got ^C while pool mapping, terminating the pool')
        p.terminate()
        print('pool is terminated')
    except Exception as e:
        print('got exception: "{}", terminating the pool'.format(e))
        p.terminate()
        print('pool is terminated')
    finally:
        print('joining pool processes')
        p.join()
        print('join complete')
    print('All processes stopped')
if __name__ == '__main__':
    # Default to one busy loop per CPU core; argv[1] overrides the count.
    number_of_infinite_loops = int(sys.argv[1]) if len(sys.argv) > 1 else multiprocessing.cpu_count()
    print("Starting cpu test with {} processes...".format(number_of_infinite_loops))
    spawn_processes(number_of_infinite_loops)
| StarcoderdataPython |
1760458 | from enum import Enum
# 2806
class Aplicacao(Enum):
    """Application traffic profiles used by the simulation.

    Each member is a tuple (id, nome, vazao, mu, beta, gamma, alpha);
    ``vazao`` is the application's throughput demand.  The exact units and
    the meaning of mu/beta/gamma/alpha are not stated in this file --
    TODO confirm against the code that consumes this enum.
    """
    E_HEALTH_1 = (1, 'Monitoramento de Saúde', 1.0, 0.3, 30.0, 1.15, 1.0)
    E_HEALTH_2 = (2, 'Telemedicina', 25.0, 1.0, 30.0, 1.15, 0.4)
    E_HEALTH_3 = (3, 'Navegação Web', 2.0, 1.0, 30.0, 1.15, 0.5)
    E_LEARNING_4 = (4, 'EaD', 13.9, 1.0, 30.0, 1.15, 0.16)
    E_LEARNING_5 = (5, 'Navegação Web', 2.0, 1.0, 30.0, 1.15, 0.16)
    E_GOV_6 = (6, 'Monitoramento de Vias', 8.0, 1.0, 30.0, 1.15, 1.0)
    E_GOV_7 = (7, 'Navegação Web', 2.0, 1.0, 30.0, 1.15, 0.16)
    E_FINANCE_8 = (8, 'Caixas Eletrônicos', 2.0, 1.0, 30.0, 1.15, 0.16)
    E_FINANCE_9 = (9, 'Aplicações Financeiras', 2.0, 1.0, 30.0, 1.15, 0.16)
    IOT_10 = (10, 'Smart water measurement', 0.1, 1.0, 15.0, 0.55, 1.0)
    IOT_11 = (11, 'Smart electricity measurement', 0.1, 1.0, 15.0, 0.55, 1.0)
    IOT_12 = (12, 'GPS Tracking', 0.1, 1.0, 15.0, 0.55, 1.0)
    # Alternative parameter set kept (disabled) by the original author:
    '''
    E_HEALTH_1 = (1, 'Monitoramento de Saúde', 1.0, 0.3, 15.0, 0.55, 1.0)
    E_HEALTH_2 = (2, 'Telemedicina', 25.0, 1.0, 15.0, 0.55, 0.4)
    E_HEALTH_3 = (3, 'Navegação Web', 2.0, 1.0, 15.0, 0.55, 0.5)
    E_LEARNING_4 = (4, 'EaD', 13.9, 1.0, 15.0, 0.55, 0.16)
    E_LEARNING_5 = (5, 'Navegação Web', 2.0, 1.0, 15.0, 0.55, 0.16)
    E_GOV_6 = (6, 'Monitoramento de Vias', 8.0, 1.0, 15.0, 0.55, 1.0)
    E_GOV_7 = (7, 'Navegação Web', 2.0, 1.0, 15.0, 0.55, 0.16)
    E_FINANCE_8 = (8, 'Caixas Eletrônicos', 2.0, 1.0, 10.0, 0.55, 0.16)
    E_FINANCE_9 = (9, 'Aplicações Financeiras', 2.0, 1.0, 10.0, 0.55, 0.16)
    IOT_10 = (10, 'Smart water measurement', 0.1, 1.0, 15.0, 0.55, 1.0)
    IOT_11 = (11, 'Smart electricity measurement', 0.1, 1.0, 15.0, 0.55, 1.0)
    IOT_12 = (12, 'GPS Tracking', 0.1, 1.0, 15.0, 0.45, 1.0)
    '''
    def __init__(self, id_, nome, vazao, mu, beta, gamma, alpha):
        # Unpack the member tuple into named attributes for convenient access.
        self.id = id_
        self.nome = nome
        self.vazao = vazao
        self.mu = mu
        self.beta = beta
        self.gamma = gamma
        self.alpha = alpha
69951 | <filename>ModernArchitecturesFromScratch/basic_operations_01.py<gh_stars>0
# AUTOGENERATED! DO NOT EDIT! File to edit: basic_operations.ipynb (unless otherwise specified).
__all__ = ['MNIST_URL', 'Path', 'set_trace', 'datasets', 'pickle', 'gzip', 'math', 'torch', 'tensor', 'random', 'pdb',
'show_doc', 'is_equal', 'near', 'test_near', 'test_near_zero', 'normalize', 'get_mnist', 'get_stats',
'show_im', 'matmul']
# Cell
#hide
from pathlib import Path
from IPython.core.debugger import set_trace
from fastai import datasets
import pickle, gzip, math, torch, matplotlib as mpl
import matplotlib.pyplot as plt
from nbdev.showdoc import show_doc
#the only torch import we will have in the project
from torch import tensor
import random
import pdb
MNIST_URL='http://deeplearning.net/data/mnist/mnist.pkl'
# Cell
def is_equal(a,b):
    "Test for equality between `a` and `b`"
    # Include both values in the failure message so a failing check is
    # diagnosable (the bare `assert(a==b)` reported nothing).
    assert a == b, f"{a!r} != {b!r}"
# Cell
#hide
def near(a,b):
    """Check whether tensors `a` and `b` agree to within a small tolerance."""
    close_enough = torch.allclose(a, b, rtol=1e-3, atol=1e-5)
    return close_enough
# Cell
def test_near(a,b):
    """Report whether tensors `a` and `b` are near within a small tolerance."""
    print("good" if near(a, b) else "not near")
# Cell
def test_near_zero(data, tol=1e-3):
"Tests if tensor values are near zero under given `tol`"
assert data.abs() < tol; print(f'Near zero: {data}')
# Cell
def normalize(datasets, mean=None, std=None):
    """Standardise `datasets` with `mean`/`std`, defaulting to its own stats."""
    mean = datasets.mean() if mean is None else mean
    std = datasets.std() if std is None else std
    return (datasets - mean) / std
# Cell
def get_mnist():
    "Helper function to load `normalized` train and validation MNIST datasets"
    # Downloads (or reuses a cached copy of) the gzipped MNIST pickle.
    path = datasets.download_data(MNIST_URL, ext='.gz')
    with gzip.open(path, 'rb') as f:
        ((x_train, y_train), (x_valid, y_valid), _) = pickle.load(f, encoding='latin-1')
    xt,yt,xv,yv = map(tensor, (x_train, y_train, x_valid, y_valid))
    # The validation set is normalized with the *training* mean/std so both
    # splits share the same scaling.
    return normalize(xt).float(), yt.float(), normalize(xv, xt.mean(), xt.std()).float(), yv.float()
# Cell
def get_stats(data):
    """Print mean and standard deviation of given `data`"""
    mean, std = data.mean(), data.std()
    print(f'Mean: {mean}')
    print(f'Std: {std}')
# Cell
#hide
mpl.rcParams['image.cmap'] = 'gray'
# Cell
def show_im(image, size=28):
    "Displays 'image' or random 'image' from set if multiple given of given 'size'"
    im_size = image.flatten().shape[0]
    # More elements than one size*size image: pick one image at random from
    # the stack (non-deterministic by design).
    if im_size > size**2:
        image = image[random.randint(0,math.floor((im_size-1)/size**2))]
    plt.imshow(image.view(size,size))
# Cell
def matmul(a, b):
    """Multiply matrices `a` and `b`, one output row per iteration."""
    n_rows, inner = a.shape
    b_rows, n_cols = b.shape
    assert inner == b_rows
    out = torch.zeros(n_rows, n_cols)
    # Broadcast each row of `a` against all of `b`, then reduce over the
    # shared dimension.
    for i, row in enumerate(a):
        out[i] = (row.unsqueeze(-1) * b).sum(dim=0)
    return out
3381551 | <filename>src/mOps/core/number_generators.py
import random
# PEP 8 (E731): use `def` instead of assigning lambdas -- the functions get
# proper names in tracebacks and can carry docstrings.  Signatures, defaults
# and behaviour are unchanged.
def rangeGenerator(start=0, size=10, skip=1):
    """Yield integers from `start` up to (excluding) `size`, stepping by `skip`."""
    return (x for x in range(start, size, skip))

def randomGenerator(size=10, min=10, max=100):
    """Yield `size` distinct random integers drawn from [`min`, `max`)."""
    return (x for x in random.sample(range(min, max), size))
def isPrime(n):
    """Return True when `n` is a prime number.

    Values below 2 and non-integers (n % 1 > 0) are never prime.
    """
    if (n <= 1 or n % 1 > 0):
        return False
    # Bug fix: range's end is exclusive, so the old `range(2, n // 2)` never
    # tried n // 2 itself and e.g. isPrime(4) returned True.  Include it.
    for i in range(2, n // 2 + 1):
        if (n % i == 0):
            return False
    return True
def isEven(n):
    """True when `n` is evenly divisible by 2."""
    return n % 2 == 0

def isOdd(n):
    """True when `n` leaves remainder 1 modulo 2."""
    return n % 2 == 1

def isWhole(n):
    """True when `n` equals its integer truncation (i.e. has no fraction)."""
    return int(n) == n

def isPositiveInt(n):
    """True when `n` is a whole number strictly greater than zero."""
    return isPositiveInt_impl(n) if False else (isWhole(n) and n > 0)

def isIn(n,s):
    """True when `n` is a member of the container `s`."""
    return n in s

def isNotIn(n,s):
    """True when `n` is not a member of the container `s`."""
    return n not in s
4825156 | <filename>affordable_water/settings/testing.py
import os
# pylint:disable=unused-wildcard-import
from affordable_water.settings.base import * # noqa: F401
# Django settings overrides for the test environment.
SECRET_KEY = os.getenv('SECRET_KEY')

DEBUG = False
# Bug fix: this setting was misspelled "DEBUG_PROPAGRATE_EXCEPTIONS", so
# Django never saw it.  With the correct name, unhandled exceptions propagate
# to the test client instead of being rendered as 500 responses.
DEBUG_PROPAGATE_EXCEPTIONS = True

ALLOWED_HOSTS = [
    '127.0.0.1',
    'localhost',
    'testserver'
]

# HTTPS redirection and secure cookies are disabled because the test server
# speaks plain HTTP.
SECURE_SSL_REDIRECT = False
CSRF_COOKIE_SECURE = False
SESSION_COOKIE_SECURE = False

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.template.context_processors.i18n',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
            'debug': False
        },
    },
]
1694946 | <reponame>aolney/dgm_latent_bow
"""The matching model for measuring semantic similarity
based on:
https://github.com/airalcorn2/Deep-Semantic-Similarity-Model/blob/master/deep_semantic_similarity_keras.py
and the MSR paper:
A Latent Semantic Model with Convolutional-Pooling Structure for Information
Retrieval
<NAME>, Columbia University
<EMAIL>
APR 13RD 2019
"""
import numpy as np
import tensorflow as tf
| StarcoderdataPython |
148696 | <reponame>CyberZHG/keras-embed-sim
from .embeddings import *
__version__ = '0.9.0'
| StarcoderdataPython |
3370898 | #!/usr/local/CyberCP/bin/python
import socket
import sys
sys.path.append('/usr/local/CyberCP')
from plogical.CyberCPLogFileWriter import CyberCPLogFileWriter as logging
import argparse
from plogical.mailUtilities import mailUtilities
class cacheClient:
    # Path watched by the CyberPanel cleaner; writing a command string here
    # requests a cache purge.
    cleaningPath = '/home/cyberpanel/purgeCache'

    @staticmethod
    def handleCachePurgeRequest(command):
        """Queue `command` for the cache cleaner by writing it to cleaningPath.

        Errors are logged rather than raised (best-effort behaviour,
        unchanged from the original).
        """
        try:
            mailUtilities.checkHome()
            # Fix: the handle was only closed on the success path and leaked
            # when write() raised; a context manager guarantees closure.
            with open(cacheClient.cleaningPath, 'w') as writeToFile:
                writeToFile.write(command)
        except BaseException as msg:
            logging.writeToFile(str(msg) + ' [cacheClient.handleCachePurgeRequest]')
def main():
    """CLI entry point: queue an hourly or monthly cache cleanup request."""
    parser = argparse.ArgumentParser(description='CyberPanel Email Policy Cache Cleaner')
    parser.add_argument('function', help='Specific a function to call!')
    args = parser.parse_args()
    # Each supported function name maps to a command string understood by
    # the cleaner daemon that watches the purge file; unknown names are
    # silently ignored.
    if args.function == "hourlyCleanup":
        command = 'cyberpanelCleaner hourlyCleanup'
        cacheClient.handleCachePurgeRequest(command)
    elif args.function == 'monthlyCleanup':
        command = 'cyberpanelCleaner monthlyCleanup'
        cacheClient.handleCachePurgeRequest(command)
if __name__ == "__main__":
main() | StarcoderdataPython |
7923 | <reponame>python-itb/knn-from-scratch
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 13 18:52:28 2018
@author: amajidsinar
"""
from sklearn import datasets
import matplotlib.pyplot as plt
import numpy as np
plt.style.use('seaborn-white')
# Load the iris dataset: 150 flowers, 4 measurements each, 3 classes.
iris = datasets.load_iris()
dataset = iris.data
# only take 0th and 1th column for X
data_known = iris.data[:,:2]
# y
label_known = iris.target
# the hard part
# so matplotlib does not readily support labeling based on class
# but we know that one of the feature of plt is that a plt call would give those set of number
# the same color
category = np.unique(label_known)
# One scatter call per class so each class gets its own colour and legend entry.
for i in category:
    plt.scatter(data_known[label_known==i][:,0],data_known[label_known==i][:,1],label=i)
# Unknown class of a data
data_unknown = np.array([[5.7,3.3],[5.6,3.4],[6.4,3],[8.2,2.2]])
plt.scatter(data_unknown[:,0],data_unknown[:,1], label='?')
plt.legend()
#-------------
# Euclidean Distance
# Broadcasting: (n_known, 2) - (n_unknown, 1, 2) -> (n_unknown, n_known, 2)
diff = data_known - data_unknown.reshape(data_unknown.shape[0],1,data_unknown.shape[1])
# Squared Euclidean distance; sqrt is skipped because it does not change
# the argsort ordering below.
distance = (diff**2).sum(2)
#return sorted index of distance
dist_index = np.argsort(distance)
# Neighbour labels, nearest first, one row per unknown point.
label = label_known[dist_index]
#for k in [1,2,3,4,5,6,7,8,9,10]:
#keep the rank
k = 10
# Keep only the k nearest neighbours per unknown sample.
label = label[:,:k]
label_predict = []
# Majority vote among the k nearest labels for each unknown point.
for i in range(data_unknown.shape[0]):
    values,counts = np.unique(label[i], return_counts=True)
    ind = np.argmax(counts)
    label_predict.append(values[ind])
1757601 | # generated from catkin/cmake/template/pkg.context.pc.in
CATKIN_PACKAGE_PREFIX = ""
PROJECT_PKG_CONFIG_INCLUDE_DIRS = "/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/urg_node/include;/xavier_ssd/TrekBot/TrekBot2_WS/src/urg_node/include".split(';') if "/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/urg_node/include;/xavier_ssd/TrekBot/TrekBot2_WS/src/urg_node/include" != "" else []
PROJECT_CATKIN_DEPENDS = "dynamic_reconfigure;laser_proc;message_runtime;nodelet;rosconsole;roscpp;sensor_msgs;std_msgs;std_srvs;urg_c".replace(';', ' ')
PKG_CONFIG_LIBRARIES_WITH_PREFIX = "-lurg_c_wrapper;-lurg_node_driver".split(';') if "-lurg_c_wrapper;-lurg_node_driver" != "" else []
PROJECT_NAME = "urg_node"
PROJECT_SPACE_DIR = "/xavier_ssd/TrekBot/TrekBot2_WS/devel/.private/urg_node"
PROJECT_VERSION = "0.1.11"
| StarcoderdataPython |
3221213 | <gh_stars>0
import time
import tensorflow as tf
from model import evaluate
from model import srgan
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.losses import BinaryCrossentropy
from tensorflow.keras.losses import MeanAbsoluteError
from tensorflow.keras.losses import MeanSquaredError
from tensorflow.keras.metrics import Mean
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.optimizers.schedules import PiecewiseConstantDecay
class Trainer:
    """Generic checkpointed training loop for super-resolution models.

    Subclasses choose the loss function and learning-rate schedule; this
    base class owns the Adam optimizer, rolling checkpoints, and the
    train/evaluate cycle. Note: throughout this class `lr`/`hr` are the
    low-/high-resolution image batches, not a learning rate.
    """
    def __init__(self,
                 model,
                 loss,
                 learning_rate,
                 checkpoint_dir='./ckpt/edsr'):
        """Build optimizer + checkpoint machinery and restore the latest state.

        Parameters
        ----------
        model : the model to train (tracked inside the checkpoint)
        loss : callable applied as loss(hr, sr)
        learning_rate : float or schedule passed to Adam
        checkpoint_dir : directory holding up to 3 rolling checkpoints
        """
        self.now = None  # wall-clock reference used to time evaluation intervals
        self.loss = loss
        # step and best psnr are checkpointed so training can resume exactly.
        self.checkpoint = tf.train.Checkpoint(step=tf.Variable(0),
                                              psnr=tf.Variable(-1.0),
                                              optimizer=Adam(learning_rate),
                                              model=model)
        self.checkpoint_manager = tf.train.CheckpointManager(checkpoint=self.checkpoint,
                                                             directory=checkpoint_dir,
                                                             max_to_keep=3)
        self.restore()
    @property
    def model(self):
        """The model instance tracked by the checkpoint."""
        return self.checkpoint.model
    def train(self, train_dataset, valid_dataset, steps, evaluate_every=1000, save_best_only=False):
        """Train until `steps` *total* steps (resumes from the checkpointed step).

        Every `evaluate_every` steps the mean loss and validation PSNR are
        printed; a checkpoint is saved unless `save_best_only` is set and
        PSNR did not improve.
        """
        loss_mean = Mean()
        ckpt_mgr = self.checkpoint_manager
        ckpt = self.checkpoint
        self.now = time.perf_counter()
        # take() the *remaining* steps so resumed runs stop at `steps` total.
        for lr, hr in train_dataset.take(steps - ckpt.step.numpy()):
            ckpt.step.assign_add(1)
            step = ckpt.step.numpy()
            loss = self.train_step(lr, hr)
            loss_mean(loss)
            if step % evaluate_every == 0:
                loss_value = loss_mean.result()
                loss_mean.reset_states()
                # Compute PSNR on validation dataset
                psnr_value = self.evaluate(valid_dataset)
                duration = time.perf_counter() - self.now
                print(f'{step}/{steps}: loss = {loss_value.numpy():.3f}, PSNR = {psnr_value.numpy():3f} ({duration:.2f}s)')
                if save_best_only and psnr_value <= ckpt.psnr:
                    self.now = time.perf_counter()
                    # skip saving checkpoint, no PSNR improvement
                    continue
                ckpt.psnr = psnr_value
                ckpt_mgr.save()
                self.now = time.perf_counter()
    @tf.function
    def train_step(self, lr, hr):
        """Run one optimization step; returns the scalar loss value."""
        with tf.GradientTape() as tape:
            lr = tf.cast(lr, tf.float32)
            hr = tf.cast(hr, tf.float32)
            sr = self.checkpoint.model(lr, training=True)
            loss_value = self.loss(hr, sr)
        gradients = tape.gradient(loss_value, self.checkpoint.model.trainable_variables)
        self.checkpoint.optimizer.apply_gradients(zip(gradients, self.checkpoint.model.trainable_variables))
        return loss_value
    def evaluate(self, dataset):
        """Return the model's PSNR over `dataset` (delegates to model.evaluate)."""
        return evaluate(self.checkpoint.model, dataset)
    def restore(self):
        """Restore the newest checkpoint, if any exists in the directory."""
        if self.checkpoint_manager.latest_checkpoint:
            self.checkpoint.restore(self.checkpoint_manager.latest_checkpoint)
            print(f'Model restored from checkpoint at step {self.checkpoint.step.numpy()}.')
class EdsrTrainer(Trainer):
    """Trainer preset for EDSR: L1 loss with a step-decayed learning rate."""
    def __init__(self, model, checkpoint_dir,
                 learning_rate=PiecewiseConstantDecay(boundaries=[200000], values=[1e-4, 5e-5])):
        super().__init__(model,
                         loss=MeanAbsoluteError(),
                         learning_rate=learning_rate,
                         checkpoint_dir=checkpoint_dir)
    def train(self, train_dataset, valid_dataset, steps=300000, evaluate_every=1000, save_best_only=True):
        """Run the generic loop with EDSR defaults (300k steps, keep best PSNR)."""
        super().train(train_dataset, valid_dataset,
                      steps=steps,
                      evaluate_every=evaluate_every,
                      save_best_only=save_best_only)
class RcanTrainer(Trainer):
    """Trainer preset for RCAN: same L1 loss and schedule as EDSR."""
    def __init__(self, model, checkpoint_dir,
                 learning_rate=PiecewiseConstantDecay(boundaries=[200000], values=[1e-4, 5e-5])):
        super().__init__(model,
                         loss=MeanAbsoluteError(),
                         learning_rate=learning_rate,
                         checkpoint_dir=checkpoint_dir)
    def train(self, train_dataset, valid_dataset, steps=300000, evaluate_every=1000, save_best_only=True):
        """Run the generic loop with RCAN defaults (300k steps, keep best PSNR)."""
        super().train(train_dataset, valid_dataset,
                      steps=steps,
                      evaluate_every=evaluate_every,
                      save_best_only=save_best_only)
class WdsrTrainer(Trainer):
    """Trainer preset for WDSR: L1 loss with a higher step-decayed learning rate."""
    def __init__(self, model, checkpoint_dir,
                 learning_rate=PiecewiseConstantDecay(boundaries=[200000], values=[1e-3, 5e-4])):
        super().__init__(model,
                         loss=MeanAbsoluteError(),
                         learning_rate=learning_rate,
                         checkpoint_dir=checkpoint_dir)
    def train(self, train_dataset, valid_dataset, steps=300000, evaluate_every=1000, save_best_only=True):
        """Run the generic loop with WDSR defaults (300k steps, keep best PSNR)."""
        super().train(train_dataset, valid_dataset,
                      steps=steps,
                      evaluate_every=evaluate_every,
                      save_best_only=save_best_only)
class SrganGeneratorTrainer(Trainer):
    """MSE pre-training of the SRGAN generator before adversarial fine-tuning."""
    def __init__(self, model, checkpoint_dir,
                 learning_rate=1e-4):
        super().__init__(model,
                         loss=MeanSquaredError(),
                         learning_rate=learning_rate,
                         checkpoint_dir=checkpoint_dir)
    def train(self, train_dataset, valid_dataset, steps=1000000, evaluate_every=1000, save_best_only=True):
        """Run the generic loop with pre-training defaults (1M steps, keep best PSNR)."""
        super().train(train_dataset, valid_dataset,
                      steps=steps,
                      evaluate_every=evaluate_every,
                      save_best_only=save_best_only)
class SrganTrainer:
    """Adversarial SRGAN training: generator vs. discriminator with a VGG
    perceptual (content) loss. `lr`/`hr`/`sr` denote low-res, high-res and
    super-resolved image batches respectively.
    """
    #
    # TODO: model and optimizer checkpoints
    #
    def __init__(self,
                 generator,
                 discriminator,
                 content_loss='VGG54',
                 learning_rate=PiecewiseConstantDecay(boundaries=[100000], values=[1e-4, 1e-5])):
        """Set up both models, their optimizers and the loss functions.

        content_loss selects which truncated VGG network supplies the
        perceptual features; only 'VGG22' and 'VGG54' are supported.
        """
        if content_loss == 'VGG22':
            self.vgg = srgan.vgg_22()
        elif content_loss == 'VGG54':
            self.vgg = srgan.vgg_54()
        else:
            raise ValueError("content_loss must be either 'VGG22' or 'VGG54'")
        self.content_loss = content_loss
        self.generator = generator
        self.discriminator = discriminator
        # Generator and discriminator each get their own optimizer instance.
        self.generator_optimizer = Adam(learning_rate=learning_rate)
        self.discriminator_optimizer = Adam(learning_rate=learning_rate)
        self.binary_cross_entropy = BinaryCrossentropy(from_logits=False)
        self.mean_squared_error = MeanSquaredError()
    def train(self, train_dataset, steps=200000):
        """Run `steps` adversarial updates, printing running means every 50 steps."""
        pls_metric = Mean()
        dls_metric = Mean()
        step = 0
        for lr, hr in train_dataset.take(steps):
            step += 1
            pl, dl = self.train_step(lr, hr)
            pls_metric(pl)
            dls_metric(dl)
            if step % 50 == 0:
                print(f'{step}/{steps}, perceptual loss = {pls_metric.result():.4f}, discriminator loss = {dls_metric.result():.4f}')
                pls_metric.reset_states()
                dls_metric.reset_states()
    @tf.function
    def train_step(self, lr, hr):
        """One combined generator + discriminator update.

        Returns (perceptual_loss, discriminator_loss). The perceptual loss is
        content loss plus a small (0.001) adversarial term.
        """
        with tf.GradientTape() as gen_tape, tf.GradientTape() as disc_tape:
            lr = tf.cast(lr, tf.float32)
            hr = tf.cast(hr, tf.float32)
            sr = self.generator(lr, training=True)
            hr_output = self.discriminator(hr, training=True)
            sr_output = self.discriminator(sr, training=True)
            con_loss = self._content_loss(hr, sr)
            gen_loss = self._generator_loss(sr_output)
            perc_loss = con_loss + 0.001 * gen_loss
            disc_loss = self._discriminator_loss(hr_output, sr_output)
        gradients_of_generator = gen_tape.gradient(perc_loss, self.generator.trainable_variables)
        gradients_of_discriminator = disc_tape.gradient(disc_loss, self.discriminator.trainable_variables)
        self.generator_optimizer.apply_gradients(zip(gradients_of_generator, self.generator.trainable_variables))
        self.discriminator_optimizer.apply_gradients(zip(gradients_of_discriminator, self.discriminator.trainable_variables))
        return perc_loss, disc_loss
    @tf.function
    def _content_loss(self, hr, sr):
        """MSE between VGG feature maps of the real and generated images.

        The 1/12.75 rescaling matches the SRGAN reference implementation.
        """
        sr = preprocess_input(sr)
        hr = preprocess_input(hr)
        sr_features = self.vgg(sr) / 12.75
        hr_features = self.vgg(hr) / 12.75
        return self.mean_squared_error(hr_features, sr_features)
    def _generator_loss(self, sr_out):
        """Adversarial loss: generator wants SR outputs classified as real (1)."""
        return self.binary_cross_entropy(tf.ones_like(sr_out), sr_out)
    def _discriminator_loss(self, hr_out, sr_out):
        """Discriminator loss: real images toward 1, generated images toward 0."""
        hr_loss = self.binary_cross_entropy(tf.ones_like(hr_out), hr_out)
        sr_loss = self.binary_cross_entropy(tf.zeros_like(sr_out), sr_out)
        return hr_loss + sr_loss
| StarcoderdataPython |
# Total cross-sectional area under a piecewise-linear height profile:
# the second input line holds N+1 heights, the third holds the N widths.
N = int(input())
heights = [int(a) for a in input().split()]
widths = [int(a) for a in input().split()]
total = 0
# Each segment is a trapezoid: a rectangle up to the lower height plus a
# right triangle covering the height difference.
for (left, right), width in zip(zip(heights, heights[1:]), widths[:N]):
    total += min(left, right) * width
    total += (abs(left - right) * width) / 2
print(total)
119778 | <reponame>tombiasz/django-pointer
from django.contrib.gis.db import models
class PointOfInterest(models.Model):
    """A named geographic point with creation/update timestamps."""
    name = models.CharField(max_length=255)
    # SRID 4326 is WGS 84 longitude/latitude, the GPS coordinate system.
    point = models.PointField(srid=4326)
    created_at = models.DateTimeField(auto_now_add=True)  # set once on insert
    updated_at = models.DateTimeField(auto_now=True)  # refreshed on every save()
    def __str__(self):
        # Human-readable representation used by the admin and shell.
        return self.name
    class Meta:
        ordering = ['created_at']  # oldest first by default
        verbose_name = 'point of interest'
        verbose_name_plural = 'points of interest'
3388811 | <gh_stars>1-10
from configparser import ConfigParser
from croniter import croniter
from datetime import datetime, timedelta
import datetime
import sys
import subprocess
import pandas as pd
import logging
import os
from pathlib import Path
pd.options.mode.chained_assignment = None
pd.options.display.float_format = '{:.0f}'.format
pd.set_option('display.max_colwidth', None)
import numpy as np
from pandas import json_normalize
import ast
import os
import json
import pytz
'''Reading the Config file from command line argument'''
parser = ConfigParser()
pd.set_option('display.max_columns', None)
config_file = sys.argv[1]
parser.read(config_file)
all_apps = parser.get('yarn_apps', 'appname')
'''Printing the variables '''
rm_url_path_dev = parser.get('paths', 'rm_url_path_dev')
print("rm_url_path_dev is " + str(rm_url_path_dev))
rm_url_path_prod = parser.get('paths', 'rm_url_path_prod')
print("rm_url_path_prod is " + str(rm_url_path_prod))
app_path = parser.get('paths', 'app_path')
print("app_path is " + str(app_path))
data_path = parser.get('paths', 'data_path')
print("data_path is " + str(data_path))
html_path = parser.get('paths', 'html_path')
print("html_path is " + str(html_path))
logfile_path = parser.get('paths', 'logfile_path')
print("logfile_path is " + str(logfile_path))
mail_from = parser.get('mail', 'from')
print("mail_from is " + str(mail_from))
mail_to = parser.get('mail', 'to')
print("mail_to is " + str(mail_to))
mail_cc = parser.get('mail', 'cc')
print("mail_cc is " + str(mail_cc))
mail_msg = parser.get('mail', 'msg')
print("mail_msg is " + str(mail_msg))
tm_zone = parser.get('misc', 'tmzone')
print("tm_zone is " + str(tm_zone))
''' getting the current hostname '''
# Cluster selection by hostname substring: 'tstr' hosts are dev, 'oser' prod.
dev_string = 'tstr'
prod_string = 'oser'
if dev_string in os.uname().nodename:
    print("Working with Dev cluster")
    rm_url_path = rm_url_path_dev
    cluster = 'DEV'
elif prod_string in os.uname().nodename:
    print("Working with Prod Cluster")
    rm_url_path = rm_url_path_prod
    cluster = 'PROD'
# NOTE(review): on a host matching neither pattern, rm_url_path/cluster are
# never defined and the script fails later with a NameError.
''' Logging : Gets or creates a logger '''
logger = logging.getLogger(__name__)
''' setting the log level '''
logger.setLevel(logging.INFO)
''' defining file handler and setting the formatter '''
file_handler = logging.FileHandler(logfile_path, 'w+')
formatter = logging.Formatter('%(asctime)s : %(levelname)s : %(name)s : %(message)s')
logging.basicConfig(filemode='w')
file_handler.setFormatter(formatter)
''' adding file handler to logger '''
logger.addHandler(file_handler)
'''Creating DF for apps to be tracked from config file'''
# The config value is a Python-literal list of (name, cron, user, file, type) tuples.
list_apps = ast.literal_eval(all_apps)
logger.info(str(("List of Apps are : ", list_apps)))
df_apps = pd.DataFrame(list_apps, columns=['app_name', 'app_schedule', 'app_user', 'app_submit_file', 'app_type'])
logger.info(str(("rm url path is : ", rm_url_path)))
''' creating the string for searching yarn apps '''
# 'Application-Id' is prepended so grep also keeps the header line.
list_appnames = df_apps['app_name'].to_list()
strapp = 'Application-Id'
list_appnames1 = [strapp] + list_appnames
grepstrng = '|'.join(list_appnames1)
print(grepstrng)
''' preparing the bash commands for yarn '''
yarn_cmd = ['yarn', 'application', '-list', '-appStates', 'RUNNING' + ' ' + 'FINISHED' + ' ' + 'KILLED']
tail_cmd = ['tail', '-n', '+2']
grep_cmd = ['grep', '-P', grepstrng]
df = pd.DataFrame()
# Pipeline: yarn application -list | tail -n +2 | grep -P <tracked apps>.
o1 = subprocess.run(yarn_cmd, stdout=subprocess.PIPE)
o2 = subprocess.run(tail_cmd, input=o1.stdout, stdout=subprocess.PIPE)
o3 = subprocess.run(grep_cmd, input=o2.stdout, stdout=subprocess.PIPE)
# NOTE(review): both branches import the same thing; the version check is vestigial.
if sys.version_info[0] < 3:
    from io import StringIO
else:
    from io import StringIO
b = StringIO(o3.stdout.decode('utf-8'))
''' creating a dataframe from the StringIO Object '''
df = pd.read_csv(b, sep="\t", skipinitialspace=True)
app_ids = df['Application-Id'].tolist()
data_out = []
# One ResourceManager REST call per application id; each returns a JSON
# document with an 'app' object that is flattened into one-row frames.
# NOTE(review): loop variable 'id' shadows the builtin.
for id in app_ids:
    print(id)
    proc = subprocess.run(["GET",
                           rm_url_path + id],
                          stdout=subprocess.PIPE, encoding='utf-8')
    proc_out = proc.stdout
    print(proc_out)
    df2 = pd.DataFrame([json.loads(proc_out)])
    df3 = json_normalize(df2['app'])
    data_out.append(df3)
if len(data_out) == 0:
    print("No data .. looks like no apps running on cluster that we are tracking ")
    sys.exit()
data_out = pd.concat(data_out)
data_out = data_out.drop(
    ['preemptedResourceMB', 'preemptedResourceVCores', 'numNonAMContainerPreempted',
     'numAMContainerPreempted', 'amNodeLabelExpression'], axis=1)
# Fix column order, then rename everything to snake_case.
data_out = data_out[
    ['name', 'id', 'user', 'state', 'finalStatus', 'startedTime', 'finishedTime', 'elapsedTime', 'queue', 'progress',
     'trackingUI', 'trackingUrl', 'diagnostics', 'clusterId', 'applicationType', 'applicationTags', 'priority',
     'amContainerLogs', 'amHostHttpAddress', 'allocatedMB', 'allocatedVCores', 'runningContainers', 'memorySeconds',
     'vcoreSeconds', 'queueUsagePercentage', 'clusterUsagePercentage', 'logAggregationStatus', 'unmanagedApplication']]
data_out.columns = ['app_name', 'app_id', 'user', 'state', 'final_status', 'start_time', 'finish_time', 'elapsed_time',
                    'queue', 'progress', 'trackingUI', 'tracking_url', 'diagnostics', 'cluster_id', 'application_type',
                    'application_tags', 'priority', 'am_container_log', 'am_host_http_address', 'allocated_mb',
                    'allocated_vcores', 'running_containers', 'memory_seconds', 'vcor_seconds', 'queue_usage_percen',
                    'cluster_usage_percent', 'log_aggregation_status', 'unmanaged_application']
# Attach the tracked-apps metadata (schedule, user, type) to each yarn row.
data_sched = pd.merge(data_out, df_apps, how='left', on='app_name')
logger.info(str(("Apps data from config file is : ", df_apps)))
# logger.info(str(("Data from Yarn before join is : ",data_out)))
app_start_times = data_sched.loc[data_sched['app_schedule'] == "* * * * *"]['start_time'].tolist()
def get_dt(t):
    """Derive display time columns for one application row.

    Parameters
    ----------
    t : pandas.Series
        Row with epoch-millisecond 'start_time'/'finish_time', a cron
        'app_schedule' string, and 'elapsed_time' in milliseconds.

    Returns
    -------
    tuple
        (start_time_dt, finish_time_dt, sched_time_prev, sched_time_next,
        total_run_time); finish_time_dt is the literal string "Not Finished"
        while the app is still running.
    """
    to = pytz.timezone(tm_zone)  # target timezone from the config file
    # Epoch values arrive in milliseconds; the first 10 digits are the seconds.
    start_time_dt = datetime.datetime.fromtimestamp(int(str(t['start_time'])[0:10]))
    # NOTE(review): fromtimestamp() yields *local* time, which is then
    # re-labelled as UTC before converting to `to` — confirm this double
    # shift is intentional (same pattern used for every timestamp below).
    start_time_dt = start_time_dt.replace(tzinfo=pytz.utc).astimezone(to)
    finish_time_dt = "Not Finished" if t['finish_time'] == 0 else datetime.datetime.fromtimestamp(
        int(str(t['finish_time'])[0:10])).replace(tzinfo=pytz.utc).astimezone(to)
    # Previous/next cron fire times bracketing the actual start time.
    sched_time_prev = datetime.datetime.fromtimestamp(
        croniter(t['app_schedule'], start_time_dt + timedelta(hours=0)).get_prev()).replace(tzinfo=pytz.utc).astimezone(
        to)
    sched_time_next = datetime.datetime.fromtimestamp(
        croniter(t['app_schedule'], start_time_dt + timedelta(hours=0)).get_next()).replace(tzinfo=pytz.utc).astimezone(
        to)
    # Convert elapsed milliseconds into a "Xd:Xh:Xm:Xs" string.
    days, minutes = divmod(t['elapsed_time'] / 1000, 60 * 60 * 24)
    hours, remainder = divmod(minutes, 60 * 60)
    minutes, seconds = divmod(remainder, 60)
    total_run_time = '{:0.0f}d:{:0.0f}h:{:0.0f}m:{:0.0f}s'.format(days, hours, minutes, seconds)
    return start_time_dt, finish_time_dt, sched_time_prev, sched_time_next, total_run_time
''' function that calculates the complex statuses in single column eagle_check '''
def eagle_stat(s):
    """Classify one application row into a single status string.

    The returned string drives the colour coding of the HTML report.

    Parameters
    ----------
    s : mapping (e.g. pandas.Series)
        Row with at least 'app_type' ('batch'/'streaming'), 'state',
        'final_status', 'finish_time_dt' (datetime or "Not Finished"),
        'start_time_dt', 'sched_time_prev' and 'sched_time_next'.

    Returns
    -------
    str
        One of the recognised status strings, or
        'Eagle check not available' when no rule matches.
    """
    # (An unused pytz timezone lookup previously ran here; removed as dead code.)
    # Seconds between the cron-scheduled fire time and the actual start.
    start_duration_chk = abs(s['sched_time_prev'] - s['start_time_dt']).total_seconds()
    # Batch apps: running (short vs long start delay) or a finished outcome,
    # all constrained to the start lying inside the scheduled window.
    if (s['app_type'] == "batch") and (s['finish_time_dt'] == "Not Finished") and \
            (s['state'] == "RUNNING") and (s['final_status'] == "UNDEFINED") and \
            (1 <= start_duration_chk <= 600) and \
            (s['sched_time_prev'] < s['start_time_dt'] < s['sched_time_next']):
        eagle_check = 'Batch App RUNNING on schedule,sd lt 10min'
    elif (s['app_type'] == "batch") and (s['finish_time_dt'] == "Not Finished") and \
            (s['state'] == "RUNNING") and (s['final_status'] == "UNDEFINED") and \
            (start_duration_chk > 600) and \
            (s['sched_time_prev'] < s['start_time_dt'] < s['sched_time_next']):
        eagle_check = 'Batch App RUNNING on schedule,sd mt 10min'
    elif (s['app_type'] == "batch") and (s['finish_time_dt'] != "Not Finished") and \
            (s['state'] == "FINISHED") and (s['final_status'] == "SUCCEEDED") and \
            (s['sched_time_prev'] < s['start_time_dt'] < s['sched_time_next']):
        eagle_check = 'Batch App ' + s['final_status'] + ' on schedule'
    elif (s['app_type'] == "batch") and (s['finish_time_dt'] != "Not Finished") and \
            (s['state'] == "FINISHED") and (s['final_status'] == "FAILED") and \
            (s['sched_time_prev'] < s['start_time_dt'] < s['sched_time_next']):
        eagle_check = 'Batch App ' + s['final_status']
    elif (s['app_type'] == "batch") and (s['finish_time_dt'] != "Not Finished") and \
            (s['state'] == "KILLED") and (s['final_status'] == "KILLED") and \
            (s['sched_time_prev'] < s['start_time_dt'] < s['sched_time_next']):
        eagle_check = 'Batch App ' + s['final_status']
    # Streaming apps have no schedule window; only their state matters.
    elif (s['app_type'] == "streaming") and (s['finish_time_dt'] == "Not Finished") and \
            (s['state'] == "RUNNING") and (s['final_status'] == "UNDEFINED"):
        eagle_check = 'Streaming App RUNNING'
    elif (s['app_type'] == "streaming") and (s['finish_time_dt'] != "Not Finished") and \
            (s['state'] == "KILLED") and (s['final_status'] == "KILLED"):
        eagle_check = 'Streaming App KILLED'
    elif (s['app_type'] == "streaming") and (s['finish_time_dt'] != "Not Finished") and \
            (s['state'] == "FINISHED") and (s['final_status'] == "FAILED"):
        eagle_check = 'Streaming App FAILED'
    else:
        eagle_check = 'Eagle check not available'
    return eagle_check
# Fan the tuple returned by get_dt out into five new columns, then classify.
data_sched["start_time_dt"], data_sched["finish_time_dt"], data_sched["sched_time_prev"], data_sched["sched_time_next"], \
data_sched["total_run_time"] = zip(*data_sched.apply(get_dt, axis=1))
data_sched["eagle_check"] = data_sched.apply(eagle_stat, axis=1)
data_sched = data_sched.sort_values(by='app_name')
''' filtering the dataframe for columns of our interest '''
data_sched_f = data_sched.loc[:,
               ['app_name', 'app_id', 'user', 'trackingUI', 'tracking_url', 'app_schedule', 'app_submit_file',
                'app_type', 'start_time_dt',
                'finish_time_dt', 'sched_time_prev', 'sched_time_next', 'total_run_time', 'eagle_check']]
''' Finding the apps that we are tracking but are not running '''
''' Streaming Apps running '''
df_strm_app_running = data_sched_f.loc[data_sched_f['eagle_check'] == 'Streaming App RUNNING']
df_strm_app_running.drop(['sched_time_prev', 'sched_time_next'], axis=1, inplace=True)
''' Streaming Apps finished '''
df_strm_app_finished = data_sched_f.loc[
    (data_sched_f['eagle_check'] == 'Streaming App FAILED') | (data_sched_f['eagle_check'] == 'Streaming App KILLED')]
''' Streaming Apps finshed get only the most recent record for each Finished App '''
# Rank by numeric finish time so only the newest record per (app, status) survives.
df_strm_app_finished["finish_time_dt_num"] = pd.to_datetime(df_strm_app_finished["finish_time_dt"]).astype(
    int) / 10 ** 9
df_strm_app_finished["rank"] = df_strm_app_finished.groupby(['app_name', 'eagle_check'])['finish_time_dt_num'].rank(
    method="first",
    ascending=False)
logger.info(str(("Streaming apps finished are : ", df_strm_app_finished)))
df_strm_app_finished = df_strm_app_finished.loc[df_strm_app_finished['rank'] == 1.0].drop(
    ['finish_time_dt_num', 'rank', 'sched_time_prev', 'sched_time_next'], axis=1)
''' Batch Apps running '''
# NOTE(review): pattern 'Batch App Running' never matches the (uppercase)
# 'Batch App RUNNING ...' statuses emitted by eagle_stat — verify intent.
df_batch_app_running = data_sched_f[data_sched_f['eagle_check'].str.contains('Batch App Running') == True]
''' Batch Apps Finished '''
df_batch_app_finished = data_sched_f[
    data_sched_f['eagle_check'].str.contains('Batch App SUCCEEDED') == True].sort_values(['app_name', 'start_time_dt'],
                                                                                         ascending=[True, True])
df_strm_app_running_list = df_strm_app_running.app_name.unique().tolist() # All streaming app names from yarn that we are tracking as a list
list_strm_appnames = df_apps.loc[df_apps['app_type'] == 'streaming'][
    'app_name'].unique().tolist() # Streaming Apps from our list of apps to be tracked
strm_app_not_run = list(set(list_strm_appnames) - set(df_strm_app_running_list)) # Streaming Apps that are not running
logger.info(str(("Streaming apps not running are : ", strm_app_not_run)))
strm_app_failed = df_strm_app_finished.loc[df_strm_app_finished['eagle_check'] == 'Streaming App FAILED'][
    'app_name'].unique().tolist()
logger.info(str(("Streaming apps failed are : ", strm_app_failed)))
# Restart candidates: tracked streaming apps that are not running AND failed.
df_strm_app_norun = df_apps[df_apps['app_name'].isin(strm_app_not_run)]
df_strm_app_torestart = df_strm_app_norun[df_strm_app_norun['app_name'].isin(strm_app_failed)]
df_strm_app_torestart = df_strm_app_torestart.loc[:, ['app_name', 'app_user', 'app_submit_file', 'app_type']]
logger.info(str(("Streaming apps to be restarted are : ", df_strm_app_torestart)))
strm_app_torestart_gpd = df_strm_app_torestart.groupby('app_user')['app_name'].apply(list).reset_index(name='apps')
''' creating the html file tp be written as report '''
html = "Streaming Apps Running are :" + "\n\n\n" + df_strm_app_running.to_html() + "<br><br>" + "Streaming Apps Finished Failed or Killed are :" + "\n\n\n" + df_strm_app_finished.to_html() + "<br><br>" + "Streaming Apps to be Restarted are :" + "\n" + df_strm_app_torestart.to_html() + "<br><br>" + "Batch Apps Running are :" + "\n\n\n" + df_batch_app_running.to_html() + "<br><br>" + "Batch Apps Finished are :" + df_batch_app_finished.to_html()
''' Adding the colored labels to the html file '''
# Write, re-read, inject bgcolor attributes on status cells, then overwrite.
text_file_w = open(html_path, "w")
text_file_w.write(html)
text_file_w.close()
text_file_r = open(html_path, "rt")
tfr = text_file_r.read()
tfr = tfr.replace('<td>Streaming App RUNNING', '<td bgcolor="dodgerblue"<td>Streaming App RUNNING')
tfr = tfr.replace('<td>Streaming App FAILED', '<td bgcolor="red"<td>Streaming App FAILED')
tfr = tfr.replace('<td>Streaming App KILLED', '<td bgcolor="red"<td>Streaming App KILLED')
tfr = tfr.replace('<td>Batch App SUCCEEDED on schedule', '<td bgcolor="limegreen"<td>Batch App SUCCEEDED on schedule')
tfr = tfr.replace('<td>Batch App RUNNING on schedule,sd lt 10min',
                  '<td bgcolor="yellow"<td>Batch App RUNNING on schedule,sd lt 10min')
tfr = tfr.replace('<td>Batch App RUNNING on schedule,sd mt 10min',
                  '<td bgcolor="yellow"<td>Batch App RUNNING on schedule,sd mt 10min')
tfr = tfr.replace('<td>Batch App KILLED', '<td bgcolor="red"<td>Batch App KILLED')
tfr = tfr.replace('<td>Batch App FAILED', '<td bgcolor="red"<td>Batch App FAILED')
text_file_r.close()
text_file_w = open(html_path, "wt")
''' overwrite the input file with the resulting data '''
text_file_w.write(tfr)
''' close the file '''
text_file_w.close()
''' Function to send email '''
import subprocess
from email.message import EmailMessage
def sendEmail(from_addr, to_addrs, cc_addrs, msg_subject):
    """Build an HTML e-mail from the report file and pipe it to sendmail."""
    with open(html_path) as report:
        body = report.read()
    msg = EmailMessage()
    msg.set_content(body, 'html')
    for header, value in (('From', from_addr), ('To', to_addrs),
                          ('Cc', cc_addrs), ('Subject', msg_subject)):
        msg[header] = value
    # -t: take recipients from the headers; -oi: a lone '.' line is not EOF.
    subprocess.run(["/usr/sbin/sendmail", "-t", "-oi"], input=msg.as_bytes())
''' notification email is sent only if the dt_to_be_restarted df is not empty '''
if not df_strm_app_torestart.empty:
    logger.info("Looks like we might have to restart few apps ")
    logger.info(str(("List of Apps to be restarted are : ", df_strm_app_torestart)))
    sendEmail(mail_from, mail_to, mail_cc, mail_msg + " " + cluster)
data_path=Path(data_path)
for i, g in strm_app_torestart_gpd.groupby('app_user'): # one action file per owning user
    file_name = f'{i}.config' # the file is named after the user
    file_path = data_path / "action" / file_name
    logger.info(str(("creating & writing the file : ",file_path)))
    with open(file_path, 'w') as fp: # open the file
        #fp.write(str(g.apps.replace(regex='(d)', value='apps='))) # write content to file
        h=g.apps # the app names to restart for this user
        fp.write(str(h))
        #fp.write(str(g.apps).replace('\n.*', '')) # write content to file
        logger.info(str(("Writing the config file : ",file_name)))
        fp.close() # NOTE(review): redundant inside 'with' — the context manager closes fp
    os.chmod(file_path, 0o777)
| StarcoderdataPython |
21106 | <gh_stars>0
import logging
import os
def initLogger() -> object:
    """Configure the root logger and return the ``logging`` module.

    The level defaults to INFO and is lowered to DEBUG when the
    ``APP_ENV`` environment variable is set to ``'dev'``.
    """
    # A single environ.get() replaces the nested membership + equality checks.
    if os.environ.get('APP_ENV') == 'dev':
        logger_level = logging.DEBUG
    else:
        logger_level = logging.INFO
    logging.basicConfig(level=logger_level,
                        format='%(asctime)s %(levelname)s:'
                               '%(name)s:%(message)s')
    return logging
| StarcoderdataPython |
1779874 | <reponame>rraddi/iphas-dr2
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Constants used in the IPHAS Data Release modules."""
import os
from astropy.io import fits
DEBUGMODE = False
# What is the data release version name?
VERSION = 'iphas-dr2-rc6'
# Where are the CASU pipeline-produced images and detection tables?
RAWDATADIR = '/car-data/gb/iphas'
# Where to write the output data products?
DESTINATION = '/car-data/gb/'+VERSION
# Where can we write a large amount of temporary files?
TMPDIR = '/home/gb/tmp'
# Use a different configuration for development machines:
HOSTNAME = os.uname()[1]
if HOSTNAME == 'uhppc11.herts.ac.uk':
DEBUGMODE = True
RAWDATADIR = '/run/media/gb/0133d764-0bfe-4007-a9cc-a7b1f61c4d1d/iphas'
DESTINATION = '/home/gb/tmp/'+VERSION
if HOSTNAME == 'gvm':
DEBUGMODE = True
RAWDATADIR = '/media/uh/run/media/gb/0133d764-0bfe-4007-a9cc-a7b1f61c4d1d/iphas'
DESTINATION = '/home/gb/tmp/'+VERSION
# Where to store processing logs?
LOGDIR = os.path.join(DESTINATION, 'log')
# Make sure destination and logging dir exist
if not os.path.exists(LOGDIR):
os.makedirs(LOGDIR)
PACKAGEDIR = os.path.dirname(os.path.abspath(__file__))
LIBDIR = os.path.join(PACKAGEDIR, 'lib')
CALIBDIR = os.path.join(DESTINATION, 'calibration')
PATH_BANDMERGED = os.path.join(DESTINATION, 'bandmerged')
PATH_BANDMERGED_CALIBRATED = os.path.join(DESTINATION, 'bandmerged-calibrated')
PATH_SEAMED = os.path.join(DESTINATION, 'seamed')
PATH_CONCATENATED = os.path.join(DESTINATION, 'concatenated')
PATH_IMAGES = os.path.join(DESTINATION, 'images')
# Where is the IPHAS quality control table?
IPHASQC = fits.getdata('/home/gb/dev/iphas-qc/qcdata/iphas-qc.fits', 1)
IPHASQC_COND_RELEASE = (IPHASQC['is_best'] & (IPHASQC['qflag'] != 'D'))
# How to execute stilts?
STILTS = 'nice java -Xmx2000M -XX:+UseConcMarkSweepGC -jar {0}'.format(
os.path.join(LIBDIR, 'stilts.jar'))
# How to execute funpack?
FUNPACK = '/home/gb/bin/cfitsio3310/bin/funpack'
# Fields within this radius will be considered to overlap
FIELD_MAXDIST = 0.8 # degrees
# Width of the Galactic Plane strip to process
STRIPWIDTH = 5 # degrees galactic longitude
# Detections within this radius will be considered identical
MATCHING_DISTANCE = 1.0 # 0.5 # arcsec
#INT/WFC CCD pixel scale
PXSCALE = 0.333 # arcsec/pix
# Filter names
BANDS = ['r', 'i', 'ha']
# Which extensions to expect in the fits catalogues?
EXTENSIONS = [1, 2, 3, 4]
# Which are the possible filenames of the confidence maps?
CONF_NAMES = {'Halpha': ['Ha_conf.fits', 'Ha_conf.fit',
'Halpha_conf.fit',
'ha_conf.fits', 'ha_conf.fit',
'h_conf.fits', 'h_conf.fit',
'Halpha:197_iphas_aug2003_cpm.fit',
'Halpha:197_iphas_sep2003_cpm.fit',
'Halpha:197_iphas_oct2003_cpm.fit',
'Halpha:197_iphas_nov2003_cpm.fit',
'Halpha:197_nov2003b_cpm.fit',
'Halpha:197_dec2003_cpm.fit',
'Halpha:197_jun2004_cpm.fit',
'Halpha:197_iphas_jul2004a_cpm.fit',
'Halpha:197_iphas_jul2004_cpm.fit',
'Halpha:197_iphas_aug2004a_cpm.fit',
'Halpha:197_iphas_aug2004b_cpm.fit',
'Halpha:197_iphas_dec2004b_cpm.fit'],
'r': ['r_conf.fit', 'r_conf.fits',
'r:214_iphas_aug2003_cpm.fit',
'r:214_dec2003_cpm.fit',
'r:214_iphas_nov2003_cpm.fit',
'r:214_nov2003b_cpm.fit',
'r:214_iphas_sep2003_cpm.fit',
'r:214_iphas_aug2004a_cpm.fit',
'r:214_iphas_aug2004b_cpm.fit',
'r:214_iphas_jul2004a_cpm.fit',
'r:214_iphas_jul2004_cpm.fit',
'r:214_jun2004_cpm.fit'],
'i': ['i_conf.fit', 'i_conf.fits',
'i:215_iphas_aug2003_cpm.fit',
'i:215_dec2003_cpm.fit',
'i:215_iphas_nov2003_cpm.fit',
'i:215_nov2003b_cpm.fit',
'i:215_iphas_sep2003_cpm.fit',
'i:215_iphas_aug2004a_cpm.fit',
'i:215_iphas_aug2004b_cpm.fit',
'i:215_iphas_jul2004a_cpm.fit',
'i:215_iphas_jul2004_cpm.fit',
'i:215_jun2004_cpm.fit']}
| StarcoderdataPython |
3381501 | <filename>viz/viz_helpers.py
import csv
import numpy as np
import pandas as pd
import torch
from torchvision.utils import make_grid
from torchvision import transforms
from utils.datasets import get_background
from PIL import Image, ImageDraw, ImageFont
def reorder_img(orig_img, reorder, by_row=True, img_size=(3, 32, 32), padding=2):
    """Return a copy of an image grid with its rows (or columns) permuted.

    Parameters
    ----------
    orig_img : torch.Tensor
        Image grid of shape (channels, height, width), as produced by
        torchvision's ``make_grid``.
    reorder : list of ints
        ``reorder[new]`` is the old row/column index to place at ``new``.
    by_row : bool
        Permute rows when True, otherwise permute columns.
    img_size : tuple of ints
        (channels, cell_height, cell_width) of one grid cell.
    padding : int
        Padding used between cells in ``make_grid``.
    """
    _, cell_h, cell_w = img_size
    # Stride between consecutive cells and extent of one cell along the
    # permuted axis; padding pixels stay zero in the output.
    stride = (padding + cell_h) if by_row else (padding + cell_w)
    extent = cell_h if by_row else cell_w
    out = torch.zeros(orig_img.size())

    def span(idx):
        # Pixel range occupied by grid cell `idx` along the permuted axis.
        start = idx * stride + padding
        return slice(start, start + extent)

    for new_idx, old_idx in enumerate(reorder):
        if by_row:
            out[:, span(new_idx), :] = orig_img[:, span(old_idx), :]
        else:
            out[:, :, span(new_idx)] = orig_img[:, :, span(old_idx)]
    return out
def read_loss_from_file(log_file_path, loss_to_fetch="kl_loss_"):
    """Read the per-dimension losses logged at the final training epoch.

    Parameters
    ----------
    log_file_path : str
        Full path and file name for the log file, for example
        'experiments/custom/losses.log'. The CSV must contain the columns
        ``Epoch``, ``Loss`` and ``Value``.
    loss_to_fetch : str
        Prefix of the loss names to select (e.g. ``"kl_loss_"``); the text
        after the prefix must be the integer latent-dimension index.

    Returns
    -------
    list
        The ``Value`` entries for the matching losses at the last epoch,
        ordered by latent-dimension index.
    """
    EPOCH = "Epoch"
    LOSS = "Loss"
    logs = pd.read_csv(log_file_path)
    # .copy() so the index assignment below operates on an independent frame
    # instead of a view of `logs` (avoids SettingWithCopy behaviour).
    df_last_epoch_loss = logs[logs.loc[:, EPOCH] == logs.loc[:, EPOCH].max()].copy()
    df_last_epoch_loss = df_last_epoch_loss.loc[df_last_epoch_loss.loc[:, LOSS].str.startswith(loss_to_fetch), :].copy()
    # Strip the prefix by length rather than str.replace(): replace() would
    # also rewrite later occurrences of the prefix and treats the pattern as
    # a regex in older pandas versions.
    df_last_epoch_loss.loc[:, LOSS] = df_last_epoch_loss.loc[:, LOSS].str[len(loss_to_fetch):].astype(int)
    df_last_epoch_loss = df_last_epoch_loss.sort_values(LOSS).loc[:, "Value"]
    return list(df_last_epoch_loss)
def add_labels(label_name, input_image, num_rows, sorted_list, dataset):
    """ Adds the label next to the relevant row as in an image. This is used to reproduce
    figure 2 of Burgress et al.

    Parameters
    ----------
    label_name : str
        The name of the labels to add, for sample 'KL' or 'C'.
    input_image : PIL.Image.Image
        The image to which to add the labels
    num_rows : int
        The number of rows of images to display
    sorted_list : list
        The list of sorted values (one per latent dimension) to print.
    dataset : str
        The dataset name.  NOTE(review): currently unused — confirm with
        callers whether it can be dropped.
    """
    all_traversal_im = input_image
    # Horizontal stretch factor: widen the canvas so a text column fits to
    # the right of the grid; taller grids need less relative widening.
    if num_rows == 7:
        mult_x = 1.5
    elif num_rows == 8:
        mult_x = 1.3
    else:
        # Fixed: mult_x was only defined for 7/8/9 and any other value
        # raised UnboundLocalError; default to the num_rows == 9 factor.
        mult_x = 1.2
    new_width = int(mult_x * all_traversal_im.width)
    new_size = (new_width, all_traversal_im.height)
    traversal_images_with_text = Image.new("RGB", new_size, color='white')
    traversal_images_with_text.paste(all_traversal_im, (0, 0))

    # Draw text beside each row: first 'orig'/'recon' for the top two rows,
    # then "label = value" for each latent dimension.
    fraction_x = 1 / mult_x + 0.005
    text_list = ['orig', 'recon']
    # NOTE(review): hard-coded font path only exists inside a Pillow source
    # checkout — consider bundling a font or falling back to the default.
    fnt = ImageFont.truetype('Pillow/Tests/fonts/FreeMono.ttf', 20)
    draw = ImageDraw.Draw(traversal_images_with_text)
    for i in range(0, 2):
        draw.text(xy=(int(fraction_x * traversal_images_with_text.width),
                      int((i / (len(sorted_list) + 2) + \
                           1 / (2 * (len(sorted_list) + 2))) * all_traversal_im.height)),
                  text=text_list[i],
                  fill=(0, 0, 0),
                  font=fnt)

    for latent_idx, latent_dim in enumerate(sorted_list):
        draw.text(xy=(int(fraction_x * traversal_images_with_text.width),
                      int(((latent_idx + 2) / (len(sorted_list) + 2) + \
                           1 / (2 * (len(sorted_list) + 2))) * all_traversal_im.height)),
                  text=label_name + " = %7.4f" % (latent_dim),
                  fill=(0, 0, 0),
                  font=fnt)
    return traversal_images_with_text
def upsample(input_data, scale_factor, is_torch_input=False, colour_flag=False):
    """Nearest-neighbour upsampling of a batch of images.

    Every pixel becomes a ``scale_factor x scale_factor`` block.  Only
    channel 0 is filled when ``colour_flag`` is False, channels 0-2 when it
    is True; any remaining output channels stay zero (preserving the
    original behaviour).

    Parameters
    ----------
    input_data : np.ndarray or torch.Tensor
        Shape (batch, channels, height, width).
    scale_factor : int
        Integer upsampling factor.
    is_torch_input : bool
        Ignored, kept for backward compatibility.  (The original function
        also overwrote it immediately.)  The input type is detected
        automatically and the output type matches the input.
    colour_flag : bool
        If True copy channels 0, 1 and 2; otherwise only channel 0.

    Returns
    -------
    np.ndarray or torch.Tensor
        Upsampled data; a torch.Tensor iff the input was one.
    """
    was_torch = isinstance(input_data, torch.Tensor)
    if was_torch:
        input_data = input_data.detach().numpy()

    batch, channels, height, width = input_data.shape
    new_array = np.zeros((batch, channels,
                          height * scale_factor, width * scale_factor))

    def _expand(plane):
        # Vectorised nearest-neighbour expansion along both spatial axes
        # (replaces the original per-pixel Python loops).
        return np.repeat(np.repeat(plane, scale_factor, axis=1),
                         scale_factor, axis=2)

    new_array[:, 0] = _expand(input_data[:, 0])
    if colour_flag:
        new_array[:, 1] = _expand(input_data[:, 1])
        new_array[:, 2] = _expand(input_data[:, 2])

    if was_torch:
        return torch.from_numpy(new_array)
    return new_array
def make_grid_img(tensor, **kwargs):
    """Converts a tensor to a grid of images that can be read by imageio.

    Notes
    -----
    * adapted from https://github.com/pytorch/vision/blob/master/torchvision/utils.py

    Parameters
    ----------
    tensor (torch.Tensor or list): 4D mini-batch Tensor of shape (B x C x H x W)
        or a list of images all of the same size.
    kwargs:
        Additional arguments forwarded to `make_grid`.
    """
    grid = make_grid(tensor, **kwargs)
    # Scale [0, 1] floats to [0, 255] bytes (rounding via +0.5) and move the
    # channel axis last, HWC, as imageio expects.
    scaled = grid.mul_(255).add_(0.5).clamp_(0, 255)
    hwc = scaled.permute(1, 2, 0).to('cpu', torch.uint8)
    return hwc.numpy()
def get_image_list(image_file_name_list):
    """Open every file in *image_file_name_list* and return the PIL images."""
    return [Image.open(file_name) for file_name in image_file_name_list]
| StarcoderdataPython |
3256950 | #PyJ2D - Copyright (C) 2011 <NAME> <https://gatc.ca/>
#Released under the MIT License <https://opensource.org/licenses/MIT>
from java.awt.image import BufferedImage
from pyj2d.surface import Surface
__docformat__ = 'restructuredtext'
__doc__ = 'Surface pixel manipulation'
_initialized = False
def _init():
    """
    Initialize surfarray module.

    Lazily imports the JNumeric-backed ``numeric`` module into this module's
    globals and flips the module-level ``_initialized`` flag.  Raises
    ImportError when JNumeric is unavailable.
    """
    global numeric, _initialized
    from pyj2d.numeric import numeric
    if not numeric:
        raise ImportError("JNumeric module is required.")
    _initialized = True
def array2d(surface):
    """
    Return data array of the Surface argument.
    Array consists of pixel data arranged by [x,y] in integer color format.
    JNumeric required as specified in numeric module.
    """
    if not _initialized:
        _init()
    # Pull the whole pixel block as packed ARGB ints into a flat buffer...
    data = numeric.zeros((surface.width*surface.height), 'i')
    data = surface.getRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
    # ...then fold it into a 2D array.
    # NOTE(review): getRGB fills in scanline (row-major) order; reshaping to
    # (width, height) matches this module's [x, y] convention — confirm.
    array = numeric.reshape(data, (surface.width,surface.height))
    return array
def array3d(surface):
    """
    Return data array of the Surface argument.
    Array consists of pixel data arranged by [x,y] in RGB format.
    JNumeric required as specified in numeric module.
    """
    if not _initialized:
        _init()
    data = surface.getRGB(0, 0, surface.width, surface.height, None, 0, surface.width)
    # Unpack each packed ARGB int into an (r, g, b) triple; alpha (bits
    # 24-31) is discarded.
    data = numeric.array([(dat>>16 & 0xff, dat>>8 & 0xff, dat & 0xff) for dat in data])
    array = numeric.reshape(data, (surface.width,surface.height,3))
    return array
def array_alpha(surface):
    """
    Return data array of the Surface argument.
    Array consists of pixel data arranged by [x,y] of pixel alpha value.
    JNumeric required as specified in numeric module.
    """
    if not _initialized:
        _init()
    data = surface.getRGB(0, 0, surface.width, surface.height, None, 0, surface.width)
    # Keep only the alpha byte (bits 24-31) of each packed ARGB int.
    # NOTE(review): stored as Int8 — alpha values above 127 may appear
    # negative when read back; confirm consumers expect that.
    data = numeric.array([dat>>24 & 0xff for dat in data], numeric.Int8)
    array = numeric.reshape(data, (surface.width,surface.height))
    return array
def make_surface(array):
    """
    Generates image pixels from array data.
    Argument array containing image data, arranged [x, y] (2D packed ints
    or 3D RGB as accepted by blit_array).
    Return Surface generated from array.
    JNumeric required as specified in numeric module.
    """
    if not _initialized:
        _init()
    # Surface dimensions follow the array's first two axes (x, y).
    surface = Surface((array.shape[0],array.shape[1]))
    blit_array(surface, array)
    return surface
def blit_array(surface, array):
    """
    Generates image pixels from a JNumeric array.
    Arguments include destination Surface and array of integer colors:
    either a 2D [x, y] array of packed color ints or a 3D [x, y, rgb]
    array of channel values.
    JNumeric required as specified in numeric module.
    """
    if not _initialized:
        _init()
    if len(array.shape) == 2:
        # 2D array of packed color ints arranged [x, y]: transpose to
        # scanline (row-major) order and flatten for setRGB.
        data = numeric.transpose(array, (1,0))
        data = numeric.ravel(data)
    else:
        # 3D [x, y, rgb] array: pack the channels into 0xRRGGBB ints first.
        data = array[:,:,0]*0x10000 | array[:,:,1]*0x100 | array[:,:,2]
        data = numeric.transpose(data, (1,0))
        data = numeric.ravel(data)
    if not surface.getColorModel().hasAlpha():
        surface.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
    else:
        # Destination has an alpha channel: write the pixels into a
        # temporary opaque RGB surface and draw that onto the destination
        # so the image pipeline handles the alpha conversion.
        surf = Surface((surface.width,surface.height), BufferedImage.TYPE_INT_RGB)
        surf.setRGB(0, 0, surface.width, surface.height, data, 0, surface.width)
        g2d = surface.createGraphics()
        g2d.drawImage(surf, 0, 0, None)
        g2d.dispose()
    return None
use_arraytype = lambda *arg: None
| StarcoderdataPython |
1795842 | <gh_stars>10-100
# -*- coding: utf-8 -*-
from __future__ import division
from collections import namedtuple
import math
# Lightweight immutable vector records.
Vector3 = namedtuple('Vector3', ['x', 'y', 'z'])
Vector2 = namedtuple('Vector2', ['x', 'y'])


def crossProduct(a, b):
    """Return the unit-length cross product of 3D vectors a and b.

    Returns Vector3(0, 0, 0) when the product is (near) zero length, i.e.
    when a and b are parallel or one of them is the zero vector.
    """
    cx = a.y * b.z - a.z * b.y
    cy = a.z * b.x - a.x * b.z
    cz = a.x * b.y - a.y * b.x
    mag = math.sqrt(cx * cx + cy * cy + cz * cz)
    if mag < 1e-8:
        return Vector3(0, 0, 0)
    return Vector3(cx / mag, cy / mag, cz / mag)
# end def
def normalizeV3(v):
    """Return v scaled to unit length, or Vector3(0, 0, 0) if v is (near) zero."""
    mag = math.sqrt(v.x * v.x + v.y * v.y + v.z * v.z)
    if mag < 1e-8:
        return Vector3(0, 0, 0)
    return Vector3(v.x / mag, v.y / mag, v.z / mag)
# end def
def normalizeV2(v):
    """Return *v* scaled to unit length as an (x, y) tuple.

    Returns (0.0, 0.0) for a (near) zero-length input.  Fixes two defects:
    the degenerate case previously returned a 3-component Vector3, which
    breaks two-way unpacking by callers of this 2D helper, and the non-zero
    branch mixed the unpacked ``x`` with attribute access ``v.y``, so plain
    tuples without named fields would fail.
    """
    x, y = v
    mag = math.sqrt(x**2 + y**2)
    if mag < 1e-8:
        return (0.0, 0.0)
    return (x / mag, y / mag)
# end def
def normalToPlane(v1, v2, v3):
    """ Calculate the unit normal of the plane defined by vertices
    v1, v2 and v3 (computed as the normalized cross product of the two
    edges leaving v2).
    """
    edge_a = Vector3(v3.x - v2.x, v3.y - v2.y, v3.z - v2.z)
    edge_b = Vector3(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z)
    return crossProduct(edge_a, edge_b)
# end def
def applyMatrix3(m, v):
    """Multiply a flat row-major 3x3 matrix m (length 9) by vector v."""
    return Vector3(
        m[0] * v.x + m[1] * v.y + m[2] * v.z,
        m[3] * v.x + m[4] * v.y + m[5] * v.z,
        m[6] * v.x + m[7] * v.y + m[8] * v.z,
    )
# end def


def applyMatrix4(m, v):
    """Multiply the upper 3x4 of a flat row-major 4x4 matrix m by v,
    treating v as a point (w == 1) so the fourth column is a translation."""
    return Vector3(
        m[0] * v.x + m[1] * v.y + m[2] * v.z + m[3],
        m[4] * v.x + m[5] * v.y + m[6] * v.z + m[7],
        m[8] * v.x + m[9] * v.y + m[10] * v.z + m[11],
    )
# end def
def v3SetX(v, x):
    """Return a copy of v with its x component replaced (vectors are immutable)."""
    return Vector3(x, v.y, v.z)


def v3SetY(v, y):
    """Return a copy of v with its y component replaced."""
    return Vector3(v.x, y, v.z)


def v3SetZ(v, z):
    """Return a copy of v with its z component replaced."""
    return Vector3(v.x, v.y, z)
def addVectors(v1, v2):
    """Component-wise v1 + v2."""
    return Vector3(v1.x + v2.x, v1.y + v2.y, v1.z + v2.z)


def subVectors(v1, v2):
    """Component-wise v1 - v2."""
    return Vector3(v1.x - v2.x, v1.y - v2.y, v1.z - v2.z)


def multiplyScalar(v, s):
    """Component-wise v * s for scalar s."""
    return Vector3(v.x * s, v.y * s, v.z * s)
def v2DistanceAndAngle(a, b):
    """Return (distance, angle) of the displacement from point a to point b.

    The angle is in radians, measured from the positive x axis (atan2).
    """
    dx, dy = b[0] - a[0], b[1] - a[1]
    return math.sqrt(dx * dx + dy * dy), math.atan2(dy, dx)
def v2dot(a, b):
    """Return the 2D dot product of a and b."""
    ax, ay = a[0], a[1]
    bx, by = b[0], b[1]
    return ax * bx + ay * by
def v2AngleBetween(a, b):
    """Return the angle in radians between 2D vectors a and b.

    NOTE(review): a zero-length input still misbehaves (normalizeV2 yields a
    zero vector, producing 0/0 below) — decide on a contract for that case.
    """
    a = normalizeV2(a)
    b = normalizeV2(b)
    # Fixed several bugs in the original: it unpacked `xa, xa = a` and
    # `xb, yb = a` (b was never read), discarded the dot product, and then
    # referenced the undefined names `ya` and `num`.
    num = v2dot(a, b)
    xa, ya = a
    xb, yb = b
    maga = math.sqrt(xa**2 + ya**2)
    magb = math.sqrt(xb**2 + yb**2)
    # Clamp to acos' valid domain to guard against float round-off.
    cos_theta = max(-1.0, min(1.0, num / (maga * magb)))
    return math.acos(cos_theta)
# end def
| StarcoderdataPython |
4825874 | from __future__ import annotations
from typing import Any, Optional
from datastax.linkedlists.doubly_linked_list import DoublyLinkedList
from datastax.linkedlists.private_lists import doubly_circular_llist
class DoublyCircularList(doubly_circular_llist.DoublyCircularList,
                         DoublyLinkedList):
    """Doubly linked list whose ends are stitched together: after every
    mutation ``head.prev`` points at the tail and ``tail.next`` at the head.
    """

    def _construct(self, array: Optional[list[Any]]) -> DoublyCircularList:
        # Populate from `array`; skipped when the array is empty or its
        # first element is None (base-class sentinel convention).
        if array and array[0] is not None:
            for item in array:
                self.append(item)
        return self

    def append(self, data: Any) -> None:
        """Append at the tail via the base class, then re-close the circle."""
        super().append(data)
        self.head.prev, self.tail.next = self.tail, self.head

    def insert(self, data: Any) -> None:
        """Insert via the base class, then re-close the circle."""
        super().insert(data)
        self.head.prev, self.tail.next = self.tail, self.head
| StarcoderdataPython |
3219978 | <filename>sched_slack_bot/utils/find_block_value.py
import logging
from enum import Enum
from typing import Optional, Union, List
from sched_slack_bot.utils.slack_typing_stubs import SlackState
logger = logging.getLogger(__name__)
class SlackValueContainerType(Enum):
    """Maps a Slack block-element ``type`` string to the key under which
    that element stores its submitted value in the view-state payload.
    """
    # Annotation only (no assignment): declares that members' .value is a
    # str — the payload key to read.
    value: str

    plain_text_input = "value"
    multi_users_select = "selected_users"
    conversations_select = "selected_conversation"
    static_select = "selected_option"
    datepicker = "selected_date"
def find_block_value(state: SlackState, block_id: str) -> Optional[Union[str, List[str]]]:
    """Extract the submitted value for *block_id* from a Slack view state.

    Returns None when the block is absent or holds no elements.  For
    static_select elements the selected option's nested "value" field is
    returned; for every other element type the raw container entry is
    (which may be a string or a list of strings, e.g. selected_users).
    """
    block_state = state["values"].get(block_id)
    # Covers both a missing block and a block with no action entries.
    if not block_state:
        return None

    action_id = next(iter(block_state))
    container = block_state[action_id]
    container_kind = SlackValueContainerType[container["type"]]

    logger.debug(container)
    logger.debug(container_kind.value)

    if container_kind == SlackValueContainerType.static_select:
        # static_select nests the chosen option one level deeper.
        return container[container_kind.value]["value"]
    return container[container_kind.value]
| StarcoderdataPython |
113039 | <reponame>GabrieleMaurina/withcd
import setuptools
# Read the long description; explicit encoding so the build does not depend
# on the host's default locale.
with open('README.md', 'r', encoding='utf-8') as fh:
    long_description = fh.read()

setuptools.setup(
    name='withcd',
    version='1.0.2',
    author='<NAME>',
    author_email='<EMAIL>',
    description='Change working directory utility compatible with with statements',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/GabrieleMaurina/withcd',
    # Fixed typo: the keyword is 'license'; 'licence' is silently ignored by
    # setuptools, leaving the license metadata unset.
    license='MIT',
    py_modules=['withcd'],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: OS Independent'
    ],
    python_requires='>=3.8',
)
| StarcoderdataPython |
1753178 | <reponame>j-bernardi/bayesian-label-smoothing<gh_stars>1-10
GLOBAL_TYPE = 'float32'
| StarcoderdataPython |
3332609 | """
Class to retrieve some meta information about dataset based on name and subset.
"""
from dataclasses import dataclass
from typing import Optional, List
@dataclass
class DatasetInfo:
    """
    Class containing meta information about dataset: identity (name/subset),
    text columns, split names, and task arity (classification vs regression
    vs pretraining).
    """
    name: str
    subset: Optional[str]
    text_columns: List[str]
    validation_set_names: List[str]
    test_set_names: List[str]
    sentence_segmentation: bool
    num_clf_classes: int  # Number of classification labels (0 => not a classification task)
    num_regr: int  # Number of regression labels (0 => not a regression task)

    @property
    def is_classification(self) -> bool:
        """
        :return: True if classification task else False
        """
        return self.num_clf_classes > 0

    @property
    def is_regression(self) -> bool:
        """
        :return: True if regression task else False

        NOTE(review): defined as "not classification", so this is also True
        for pretraining datasets (0 clf classes, 0 regr labels) — kept as-is
        for backward compatibility; confirm callers only use it downstream.
        """
        return not self.is_classification

    @property
    def is_downstream(self) -> bool:
        """
        :return: True if downstream task else False
        """
        return (self.num_regr > 0) or (self.num_clf_classes > 0)

    @property
    def is_pretraining(self) -> bool:
        """
        :return: True if pretraining task else False
        """
        return not self.is_downstream

    def __post_init__(self):
        # Fixed: the hook was named `__post_init` (missing trailing
        # underscores), so dataclasses never called it and the single-task
        # validation below was dead code.
        if self.is_downstream:
            assert (self.num_clf_classes == 0) or (self.num_regr == 0), "Only single task are allowed"
# noinspection SpellCheckingInspection
_dataset_infos: List[DatasetInfo] = [
# glue
DatasetInfo(name="glue", subset="cola", num_clf_classes=2, num_regr=0,
text_columns=["sentence"], sentence_segmentation=False,
validation_set_names=["validation"], test_set_names=["test"]),
DatasetInfo(name="glue", subset="sst2", num_clf_classes=2, num_regr=0,
text_columns=["sentence"], sentence_segmentation=False,
validation_set_names=["validation"], test_set_names=["test"]),
DatasetInfo(name="glue", subset="mrpc", num_clf_classes=2, num_regr=0,
text_columns=["sentence1", "sentence2"], sentence_segmentation=False,
validation_set_names=["validation"], test_set_names=["test"]),
DatasetInfo(name="glue", subset="qqp", num_clf_classes=2, num_regr=0,
text_columns=["question1", "question2"], sentence_segmentation=False,
validation_set_names=["validation"], test_set_names=["test"]),
DatasetInfo(name="glue", subset="stsb", num_clf_classes=0, num_regr=1,
text_columns=["sentence1", "sentence2"], sentence_segmentation=False,
validation_set_names=["validation"], test_set_names=["test"]),
DatasetInfo(name="glue", subset="mnli", num_clf_classes=3, num_regr=0,
text_columns=["premise", "hypothesis"], sentence_segmentation=False,
validation_set_names=["validation_matched", "validation_mismatched"],
test_set_names=["test_matched", "test_mismatched"]),
DatasetInfo(name="glue", subset="qnli", num_clf_classes=2, num_regr=0,
text_columns=["question", "sentence"], sentence_segmentation=False,
validation_set_names=["validation"], test_set_names=["test"]),
DatasetInfo(name="glue", subset="rte", num_clf_classes=2, num_regr=0,
text_columns=["sentence1", "sentence2"], sentence_segmentation=False,
validation_set_names=["validation"], test_set_names=["test"]),
DatasetInfo(name="glue", subset="wnli", num_clf_classes=2, num_regr=0,
text_columns=["sentence1", "sentence2"], sentence_segmentation=False,
validation_set_names=["validation"], test_set_names=["test"]),
# IMDB
DatasetInfo(name="imdb", subset=None, num_clf_classes=2, num_regr=0,
text_columns=["text"], sentence_segmentation=True,
validation_set_names=["test"], test_set_names=[]),
# Wikipedia
DatasetInfo(name="wikipedia", subset="20200501.en",
num_clf_classes=0, num_regr=0,
text_columns=["text"], sentence_segmentation=False,
validation_set_names=[], test_set_names=[]),
# BookCorpus
DatasetInfo(name="bookcorpus", subset=None,
num_clf_classes=0, num_regr=0,
text_columns=["text"], sentence_segmentation=False,
validation_set_names=[], test_set_names=[]),
# OWT
DatasetInfo(name="openwebtext", subset=None,
num_clf_classes=0, num_regr=0,
text_columns=["text"], sentence_segmentation=False,
validation_set_names=[], test_set_names=[]),
]
def get_dataset_info(dataset_name: str, dataset_subset: str) -> DatasetInfo:
    """
    Return the DatasetInfo based on name and subset.
    Asserts that exactly one registered dataset matches.
    :param dataset_name:
    :param dataset_subset:
    :return:
    """
    matches = [
        info for info in _dataset_infos
        if info.name == dataset_name and (
            (info.subset is None and dataset_subset is None)
            or info.subset == dataset_subset
        )
    ]
    assert len(matches) == 1, matches
    return matches[0]
| StarcoderdataPython |
2829 | #!/usr/bin/python
import unittest
import json
import sys
import os
import string
sys.path.append(os.path.join(os.path.dirname(__file__),
os.pardir,
os.pardir))
from nysa.cbuilder import sdb_component as sdbc
from nysa.cbuilder import sdb_object_model as som
from nysa.cbuilder.som_rom_parser import parse_rom_image
from nysa.cbuilder.som_rom_generator import generate_rom_image
from nysa.cbuilder.sdb import SDBInfo
from nysa.cbuilder.sdb import SDBWarning
from nysa.cbuilder.sdb import SDBError
from nysa.common.status import StatusLevel
from nysa.common.status import Status
class Test (unittest.TestCase):
    """Unit test SDB Tree.

    Each test builds (or loads) an SDB ROM image and checks that a
    parse -> regenerate round trip reproduces it exactly.

    Fixed: `test_generate_one_sub_bus_with_url` was defined three times, so
    unittest only discovered and ran the last definition; the duplicates are
    now renamed so all three round-trip scenarios execute.
    """

    def setUp(self):
        pass

    '''
    def test_simple_rom(self):
        rom_in = ROM1
        som = parse_rom_image(rom_in)
        rom_out = generate_rom_image(som)
        rom_out = sdbc.convert_rom_to_32bit_buffer(rom_out)
        self.assertEqual(rom_in, rom_out)
    '''

    def test_full_dionysus_read(self):
        """Smoke test: read the SDB from real Dionysus hardware; returns
        silently when the platform support or a board is unavailable."""
        from nysa.host.platform_scanner import PlatformScanner
        pscanner = PlatformScanner()
        platform_dict = pscanner.get_platforms()
        platform_names = platform_dict.keys()

        if "dionysus" not in platform_names:
            # Platform support not installed; nothing to test.
            return

        s = Status()
        platform_instance = platform_dict["dionysus"](s)
        platforms = platform_instance.scan()

        if len(platforms) == 0:
            # No board attached.
            return

        dionysus = platforms[platforms.keys()[0]]
        s.set_level("fatal")
        s.Verbose("Read SDB")
        dionysus.read_sdb()

    def test_full_bus(self):
        """Round-trip a peripheral bus and a memory bus, two devices each."""
        sm = som.SOM()
        sm.initialize_root()
        root = sm.get_root()
        peripheral = sm.insert_bus()
        peripheral.set_name("peripheral")
        memory = sm.insert_bus()
        memory.set_name("memory")
        d1 = sdbc.create_device_record(name = "device 1", size = 0x100)
        d2 = sdbc.create_device_record(name = "device 2", size = 0x100)
        m1 = sdbc.create_device_record(name = "memory 1", size = 0x10000)
        m2 = sdbc.create_device_record(name = "memory 2", size = 0x20000)
        peripheral.set_child_spacing(0x0010000000)
        root.set_child_spacing      (0x0100000000)
        sm.insert_component(peripheral, d1)
        sm.insert_component(peripheral, d2)
        sm.insert_component(memory, m1)
        sm.insert_component(memory, m2)

        rom = generate_rom_image(sm)
        rom_in = sdbc.convert_rom_to_32bit_buffer(rom)

        sm = parse_rom_image(rom_in)
        rom_out = generate_rom_image(sm)
        rom_out = sdbc.convert_rom_to_32bit_buffer(rom_out)
        self.assertEqual(rom_in, rom_out)

    def test_full_bus_with_integration(self):
        """Round-trip the same layout plus an integration record."""
        sm = som.SOM()
        sm.initialize_root()
        root = sm.get_root()
        peripheral = sm.insert_bus()
        peripheral.set_name("peripheral")
        memory = sm.insert_bus()
        memory.set_name("memory")
        d1 = sdbc.create_device_record(name = "device 1", size = 0x100)
        d2 = sdbc.create_device_record(name = "device 2", size = 0x100)
        m1 = sdbc.create_device_record(name = "memory 1", size = 0x10000)
        m2 = sdbc.create_device_record(name = "memory 2", size = 0x20000)
        intr = sdbc.create_integration_record("Integration Data",
                                              vendor_id = 0x800BEAF15DEADC03,
                                              device_id = 0x00000000)
        peripheral.set_child_spacing(0x0100000000)
        sm.insert_component(peripheral, intr)
        sm.insert_component(peripheral, d1)
        sm.insert_component(peripheral, d2)
        sm.insert_component(memory, m1)
        sm.insert_component(memory, m2)

        rom = generate_rom_image(sm)
        rom_in = sdbc.convert_rom_to_32bit_buffer(rom)

        sm = parse_rom_image(rom_in)
        rom_out = generate_rom_image(sm)
        rom_out = sdbc.convert_rom_to_32bit_buffer(rom_out)
        self.assertEqual(rom_in, rom_out)

    def test_generate_one_sub_bus_with_url(self):
        """Round-trip a layout that also carries a repo-URL record."""
        sm = som.SOM()
        sm.initialize_root()
        root = sm.get_root()
        peripheral = sm.insert_bus()
        peripheral.set_name("peripheral")
        memory = sm.insert_bus()
        memory.set_name("memory")
        d1 = sdbc.create_device_record(name = "device 1", size = 0x100)
        d2 = sdbc.create_device_record(name = "device 2", size = 0x100)
        m1 = sdbc.create_device_record(name = "memory 1", size = 0x10000)
        m2 = sdbc.create_device_record(name = "memory 2", size = 0x20000)
        intr = sdbc.create_integration_record("Integration Data",
                                              vendor_id = 0x800BEAF15DEADC03,
                                              device_id = 0x00000000)
        url = sdbc.create_repo_url_record("http://www.geocities.com")
        sm.insert_component(root, url)
        peripheral.set_child_spacing(0x0100000000)
        sm.insert_component(peripheral, intr)
        sm.insert_component(peripheral, d1)
        sm.insert_component(peripheral, d2)
        sm.insert_component(memory, m1)
        sm.insert_component(memory, m2)

        rom = generate_rom_image(sm)
        rom_in = sdbc.convert_rom_to_32bit_buffer(rom)

        sm = parse_rom_image(rom_in)
        rom_out = generate_rom_image(sm)
        rom_out = sdbc.convert_rom_to_32bit_buffer(rom_out)
        self.assertEqual(rom_in, rom_out)

    def test_generate_one_sub_bus_with_url_and_synthesis(self):
        """Round-trip a layout carrying both URL and synthesis records.
        (Renamed: previously shadowed by a duplicate method name.)"""
        sm = som.SOM()
        sm.initialize_root()
        root = sm.get_root()
        peripheral = sm.insert_bus()
        peripheral.set_name("peripheral")
        memory = sm.insert_bus()
        memory.set_name("memory")
        d1 = sdbc.create_device_record(name = "device 1", size = 0x100)
        d2 = sdbc.create_device_record(name = "device 2", size = 0x100)
        m1 = sdbc.create_device_record(name = "memory 1", size = 0x10000)
        m2 = sdbc.create_device_record(name = "memory 2", size = 0x20000)
        intr = sdbc.create_integration_record("Integration Data",
                                              vendor_id = 0x800BEAF15DEADC03,
                                              device_id = 0x00000000)
        url = sdbc.create_repo_url_record("http://www.geocities.com")
        synthesis = sdbc.create_synthesis_record("Synthesis Name", 123, "cool tool", 1.0, "jeff")
        sm.insert_component(root, url)
        sm.insert_component(root, synthesis)
        peripheral.set_child_spacing(0x0100000000)
        sm.insert_component(peripheral, intr)
        sm.insert_component(peripheral, d1)
        sm.insert_component(peripheral, d2)
        sm.insert_component(memory, m1)
        sm.insert_component(memory, m2)

        rom = generate_rom_image(sm)
        rom_in = sdbc.convert_rom_to_32bit_buffer(rom)

        sm = parse_rom_image(rom_in)
        rom_out = generate_rom_image(sm)
        rom_out = sdbc.convert_rom_to_32bit_buffer(rom_out)
        self.assertEqual(rom_in, rom_out)

    def test_parse_and_regenerate_romd(self):
        """Round-trip the captured ROMD image.
        (Renamed: previously shadowed the two methods above.)"""
        rom_in = ROMD

        sm = parse_rom_image(rom_in)
        rom_out = generate_rom_image(sm)
        rom_out = sdbc.convert_rom_to_32bit_buffer(rom_out)
        print_sdb_rom(rom_out)
        self.assertEqual(rom_in, rom_out)
def compare_roms(rom_in, rom_out):
    # Debug helper (Python 2 prints): dump two ROM images side by side,
    # one 4-word line at a time, flagging any line that differs with "!=".
    if len(rom_in) != len(rom_out):
        print "Length of rom is not equal!"
        return

    rom_in = rom_in.splitlines()
    rom_out = rom_out.splitlines()

    for i in range (0, len(rom_in), 4):
        if (i % 16 == 0):
            # Each SDB record is 16 x 32-bit words: word 0 may hold the
            # interconnect magic and word 15's low byte is the type tag.
            magic = "0x%s" % (rom_in[i].lower())
            last_val = int(rom_in[i + 15], 16) & 0xFF
            print ""
            if (magic == hex(sdbc.SDB_INTERCONNECT_MAGIC) and last_val == 0):
                print "Interconnect"
            elif last_val == 0x01:
                print "Device"
            elif last_val == 0x02:
                print "Bridge"
            elif last_val == 0x80:
                print "Integration"
            elif last_val == 0x81:
                print "URL"
            elif last_val == 0x82:
                print "Synthesis"
            elif last_val == 0xFF:
                print "Empty"
            else:
                print "???"
        if rom_in[i] == rom_out[i] and rom_in[i + 1] == rom_out[i + 1] and rom_in[i + 2] == rom_out[i + 2] and rom_in[i + 3] == rom_out[i + 3]:
            print "%s %s : %s %s" % (rom_in[i], rom_in[i + 1], rom_in[i + 2], rom_in[i + 3])
        else:
            print "%s %s : %s %s != %s %s : %s %s" % (rom_in[i], rom_in[i + 1], rom_in[i + 2], rom_in[i + 3], rom_out[i], rom_out[i + 1], rom_out[i + 2], rom_out[i + 3])
def print_sdb_rom(rom):
    # Debug helper (Python 2 prints): dump a ROM image four words per line,
    # labelling each 16-word SDB record with its decoded type.
    rom = rom.splitlines()
    print "ROM"
    for i in range (0, len(rom), 4):
        if (i % 16 == 0):
            # Word 0 may hold the interconnect magic; word 15's low byte is
            # the record-type tag.
            magic = "0x%s" % (rom[i].lower())
            last_val = int(rom[i + 15], 16) & 0xFF
            print ""
            if (magic == hex(sdbc.SDB_INTERCONNECT_MAGIC) and last_val == 0):
                print "Interconnect"
            elif last_val == 0x01:
                print "Device"
            elif last_val == 0x02:
                print "Bridge"
            elif last_val == 0x80:
                print "Integration"
            elif last_val == 0x81:
                print "URL"
            elif last_val == 0x82:
                print "Synthesis"
            elif last_val == 0xFF:
                print "Empty"
            else:
                print "???"
        print "%s %s : %s %s" % (rom[i], rom[i + 1], rom[i + 2], rom[i + 3])
ROM1 = "5344422D\n"\
"00010100\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000100\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0105\n"\
"746F7000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000207\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000100\n"\
"80000000\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"140F0105\n"\
"64657669\n"\
"63652031\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"000000FF"
ROM2 = "5344422D\n"\
"00020100\n"\
"00000000\n"\
"00000000\n"\
"03000000\n"\
"00000000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0105\n"\
"746F7000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000020\n"\
"00000000\n"\
"00000000\n"\
"00000100\n"\
"00000000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0105\n"\
"70657269\n"\
"70686572\n"\
"616C0000\n"\
"00000000\n"\
"00000002\n"\
"00000000\n"\
"00000040\n"\
"00000100\n"\
"00000000\n"\
"00000200\n"\
"00030000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0105\n"\
"6D656D6F\n"\
"72790000\n"\
"00000000\n"\
"00000000\n"\
"00000002\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"000000FF\n"\
"5344422D\n"\
"00020100\n"\
"00000000\n"\
"00000000\n"\
"00000100\n"\
"00000000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0105\n"\
"70657269\n"\
"70686572\n"\
"616C0000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000207\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000100\n"\
"80000000\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"140F0105\n"\
"64657669\n"\
"63652031\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"00000000\n"\
"00000207\n"\
"00000001\n"\
"00000000\n"\
"00000003\n"\
"00000100\n"\
"80000000\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"140F0105\n"\
"64657669\n"\
"63652032\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"000000FF\n"\
"5344422D\n"\
"00020100\n"\
"00000100\n"\
"00000000\n"\
"00000200\n"\
"00030000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0105\n"\
"6D656D6F\n"\
"72790000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000207\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00010000\n"\
"80000000\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"140F0105\n"\
"6D656D6F\n"\
"72792031\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"00000000\n"\
"00000207\n"\
"00000000\n"\
"00010000\n"\
"00000000\n"\
"00030000\n"\
"80000000\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"140F0105\n"\
"6D656D6F\n"\
"72792032\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"000000FF"
ROMD = "5344422D\n"\
"00020100\n"\
"00000000\n"\
"00000000\n"\
"00000002\n"\
"00000000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0106\n"\
"746F7000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000020\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"20000000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0106\n"\
"70657269\n"\
"70686572\n"\
"616C0000\n"\
"00000000\n"\
"00000002\n"\
"00000000\n"\
"00000040\n"\
"00000001\n"\
"00000000\n"\
"00000001\n"\
"00800000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0106\n"\
"6D656D6F\n"\
"72790000\n"\
"00000000\n"\
"00000000\n"\
"00000002\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"000000FF\n"\
"5344422D\n"\
"00020100\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"20000000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0106\n"\
"70657269\n"\
"70686572\n"\
"616C0000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000207\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000340\n"\
"80000000\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"140F0106\n"\
"53444200\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"00000101\n"\
"00000207\n"\
"00000000\n"\
"10000000\n"\
"00000000\n"\
"10000008\n"\
"80000000\n"\
"0000C594\n"\
"00000000\n"\
"00000001\n"\
"140F0107\n"\
"77625F67\n"\
"70696F00\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"000000FF\n"\
"5344422D\n"\
"00010100\n"\
"00000001\n"\
"00000000\n"\
"00000001\n"\
"00800000\n"\
"80000000\n"\
"0000C594\n"\
"00000001\n"\
"00000001\n"\
"140F0106\n"\
"6D656D6F\n"\
"72790000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000502\n"\
"00000207\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00800000\n"\
"80000000\n"\
"0000C594\n"\
"00000000\n"\
"00000001\n"\
"140F0107\n"\
"77625F73\n"\
"6472616D\n"\
"00000000\n"\
"00000000\n"\
"00000001\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"00000000\n"\
"000000FF"
| StarcoderdataPython |
3215675 | # Copyright 2011 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For PickledScheduler.
"""
import datetime
from oslo_serialization import jsonutils
import six
from manila.scheduler import scheduler_options
from manila import test
class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
    """Test double for SchedulerOptions.

    Overrides the clock and filesystem hooks so tests can dictate the
    current time, the option file's mtime and its contents, and records
    (via ``file_was_loaded``) whether the file was actually re-read.
    """
    def __init__(self, last_checked, now, file_old, file_now, data, filedata):
        super(FakeSchedulerOptions, self).__init__()
        # Change internals ...
        self.last_modified = file_old
        self.last_checked = last_checked
        self.data = data

        # For overrides ...
        self._time_now = now
        self._file_now = file_now
        self._file_data = six.b(filedata)

        self.file_was_loaded = False  # set True when _get_file_handle runs

    def _get_file_timestamp(self, filename):
        # Pretend the option file's mtime is the injected value.
        return self._file_now

    def _get_file_handle(self, filename):
        # Serve the injected file contents from memory and record the read.
        self.file_was_loaded = True
        if six.PY2:
            import StringIO
            return StringIO.StringIO(self._file_data)
        else:
            import io
            return io.BytesIO(self._file_data)

    def _get_time_now(self):
        # Pretend "now" is the injected time.
        return self._time_now
class SchedulerOptionsTestCase(test.TestCase):
    """Exercise SchedulerOptions.get_configuration() caching behaviour
    through FakeSchedulerOptions with injected clock/file state.
    """
    def test_get_configuration_first_time_no_flag(self):
        # No filename given: nothing is loaded, empty config returned.
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)

        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)

        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual({}, fake.get_configuration())
        self.assertFalse(fake.file_was_loaded)

    def test_get_configuration_first_time_empty_file(self):
        # First load of an empty file: file is read but yields no config.
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)

        jdata = ""

        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual({}, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)

    def test_get_configuration_first_time_happy_day(self):
        # First load of a populated file: file is read and parsed.
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)

        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)

        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)

    def test_get_configuration_second_time_no_change(self):
        # File mtime unchanged since the cache was filled: no reload.
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)

        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)

        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    data, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertFalse(fake.file_was_loaded)

    def test_get_configuration_second_time_too_fast(self):
        # Checked again within the throttle interval: stale data is kept
        # even though the file has changed on disk.
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2011, 1, 1, 1, 1, 2)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)

        old_data = dict(a=1, b=2, c=3)
        data = dict(a=11, b=12, c=13)
        jdata = jsonutils.dumps(data)

        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    old_data, jdata)
        self.assertEqual(old_data, fake.get_configuration('foo.json'))
        self.assertFalse(fake.file_was_loaded)

    def test_get_configuration_second_time_change(self):
        # Enough time elapsed and mtime changed: file is reloaded.
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)

        old_data = dict(a=1, b=2, c=3)
        data = dict(a=11, b=12, c=13)
        jdata = jsonutils.dumps(data)

        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    old_data, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)
| StarcoderdataPython |
1706630 | <gh_stars>0
# coding: utf-8
"""
Metacore IoT Object Storage API
Metacore Object Storage - IOT Core Services # noqa: E501
OpenAPI spec version: 1.1
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class PreferencesDashboards(object):
    """NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Attribute name -> declared swagger type.
    swagger_types = {
        'name': 'str',
        'uid': 'str',
        'widgets': 'list[PreferencesWidgets]'
    }
    # Attribute name -> JSON key in the API payload.
    attribute_map = {
        'name': 'name',
        'uid': 'uid',
        'widgets': 'widgets'
    }
    def __init__(self, name=None, uid=None, widgets=None):  # noqa: E501
        """PreferencesDashboards - a model defined in Swagger"""  # noqa: E501
        self._name = None
        self._uid = None
        self._widgets = None
        self.discriminator = None
        # name and uid are required (setters reject None); widgets is optional.
        self.name = name
        self.uid = uid
        if widgets is not None:
            self.widgets = widgets
    @property
    def name(self):
        """Gets the name of this PreferencesDashboards.  # noqa: E501
        :return: The name of this PreferencesDashboards.  # noqa: E501
        :rtype: str
        """
        return self._name
    @name.setter
    def name(self, name):
        """Sets the name of this PreferencesDashboards.
        :param name: The name of this PreferencesDashboards.  # noqa: E501
        :type: str
        """
        if name is None:
            raise ValueError("Invalid value for `name`, must not be `None`")  # noqa: E501
        self._name = name
    @property
    def uid(self):
        """Gets the uid of this PreferencesDashboards.  # noqa: E501
        :return: The uid of this PreferencesDashboards.  # noqa: E501
        :rtype: str
        """
        return self._uid
    @uid.setter
    def uid(self, uid):
        """Sets the uid of this PreferencesDashboards.
        :param uid: The uid of this PreferencesDashboards.  # noqa: E501
        :type: str
        """
        if uid is None:
            raise ValueError("Invalid value for `uid`, must not be `None`")  # noqa: E501
        self._uid = uid
    @property
    def widgets(self):
        """Gets the widgets of this PreferencesDashboards.  # noqa: E501
        :return: The widgets of this PreferencesDashboards.  # noqa: E501
        :rtype: list[PreferencesWidgets]
        """
        return self._widgets
    @widgets.setter
    def widgets(self, widgets):
        """Sets the widgets of this PreferencesDashboards.
        :param widgets: The widgets of this PreferencesDashboards.  # noqa: E501
        :type: list[PreferencesWidgets]
        """
        self._widgets = widgets
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialize nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        if issubclass(PreferencesDashboards, dict):
            for key, value in self.items():
                result[key] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, PreferencesDashboards):
            return False
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| StarcoderdataPython |
84936 | <filename>auxiliaries.py<gh_stars>10-100
"""
Contains utility functions to compute standard DML metrics such as Recall, NMI or F1.
Also some other QOL stuff that is helpful and the main Data-Logger class.
"""
"""============================================================================================================="""
######## LIBRARIES #####################
import warnings
warnings.filterwarnings("ignore")
import numpy as np, os, sys, pandas as pd, csv
import torch, torch.nn as nn
from torch.utils.data import Dataset
from PIL import Image
from torchvision import transforms
import matplotlib.pyplot as plt
from tqdm import tqdm
import random
import faiss
from sklearn import metrics
from sklearn import cluster
import losses as losses
import datetime
import pickle as pkl
"""============================================================================================================="""
################# ACQUIRE NUMBER OF WEIGHTS #################
def gimme_params(model):
    """Return the total number of trainable parameters in *model*."""
    trainable = (p for p in model.parameters() if p.requires_grad)
    return sum(np.prod(p.size()) for p in trainable)
################# SAVE TRAINING PARAMETERS IN NICE STRING #################
def gimme_save_string(opt):
    """Render every attribute of *opt* as a human-readable multi-line string.

    Dict-valued attributes are expanded one sub-key per line; every entry is
    separated by a blank line.
    """
    pieces = []
    attributes = vars(opt)
    for attr_name in attributes:
        attr_value = attributes[attr_name]
        pieces.append(str(attr_name))
        if isinstance(attr_value, dict):
            for sub_name, sub_value in attr_value.items():
                pieces.append('\n\t' + str(sub_name) + ': ' + str(sub_value))
        else:
            pieces.append('\n\t' + str(attr_value))
        pieces.append('\n\n')
    return ''.join(pieces)
def f1_score(model_generated_cluster_labels, target_labels, feature_coll, computed_centroids):
    """Compute the clustering F1 measure between k-means clusters and ground-truth labels.

    Each cluster is renamed after the sample closest to its centroid, then
    pairwise true/false positives/negatives between predicted and true
    groupings are counted combinatorially.
    """
    from scipy.special import comb
    # Distance of every sample to the centroid of its assigned cluster.
    d = np.zeros(len(feature_coll))
    for i in range(len(feature_coll)):
        d[i] = np.linalg.norm(feature_coll[i,:] - computed_centroids[model_generated_cluster_labels[i],:])
    # Relabel each cluster by the index of its most central member.
    labels_pred = np.zeros(len(feature_coll))
    for i in np.unique(model_generated_cluster_labels):
        index = np.where(model_generated_cluster_labels == i)[0]
        ind = np.argmin(d[index])
        cid = index[ind]
        labels_pred[index] = cid
    N = len(target_labels)
    # cluster n_labels
    avail_labels = np.unique(target_labels)
    n_labels = len(avail_labels)
    # count the number of objects in each cluster
    count_cluster = np.zeros(n_labels)
    for i in range(n_labels):
        count_cluster[i] = len(np.where(target_labels == avail_labels[i])[0])
    # build a mapping from item_id to item index
    keys = np.unique(labels_pred)
    num_item = len(keys)
    values = range(num_item)
    item_map = dict()
    for i in range(len(keys)):
        item_map.update([(keys[i], values[i])])
    # count the number of objects of each item
    count_item = np.zeros(num_item)
    for i in range(N):
        index = item_map[labels_pred[i]]
        count_item[index] = count_item[index] + 1
    # compute True Positive (TP) plus False Positive (FP)
    tp_fp = 0
    for k in range(n_labels):
        if count_cluster[k] > 1:
            tp_fp = tp_fp + comb(count_cluster[k], 2)
    # compute True Positive (TP)
    tp = 0
    for k in range(n_labels):
        member = np.where(target_labels == avail_labels[k])[0]
        member_ids = labels_pred[member]
        count = np.zeros(num_item)
        for j in range(len(member)):
            index = item_map[member_ids[j]]
            count[index] = count[index] + 1
        for i in range(num_item):
            if count[i] > 1:
                tp = tp + comb(count[i], 2)
    # False Positive (FP)
    fp = tp_fp - tp
    # compute False Negative (FN)
    count = 0
    for j in range(num_item):
        if count_item[j] > 1:
            count = count + comb(count_item[j], 2)
    fn = count - tp
    # compute F measure (beta = 1 -> harmonic mean of precision and recall)
    P = tp / (tp + fp)
    R = tp / (tp + fn)
    beta = 1
    F = (beta*beta + 1) * P * R / (beta*beta * P + R)
    return F
"""============================================================================================================="""
def eval_metrics_one_dataset(model, test_dataloader, device, k_vals=[1,2,4,8], spliteval=True, evaltypes=['Class'], epoch=0, opt=None):
    """Extract embeddings for a test set and compute NMI, Recall@k and F1 per eval type.

    NOTE(review): `spliteval`, `epoch` and `opt` are accepted but unused here —
    presumably kept for interface compatibility with other eval helpers; confirm
    against callers. `k_vals` is a mutable default but is only read.
    Returns (dict: evaltype -> {'F1','NMI','Recall@k','Features'}, target_labels).
    """
    torch.cuda.empty_cache()
    _ = model.eval()
    n_classes = len(test_dataloader.dataset.avail_classes)
    feature_colls = {evaltype:[] for evaltype in evaltypes}
    ### For all test images, extract features
    with torch.no_grad():
        target_labels, feature_coll = [],[]
        final_iter = tqdm(test_dataloader, desc='Computing {} Set(s) of Evaluation Metrics...'.format(len(evaltypes)))
        image_paths= [x[0] for x in test_dataloader.dataset.image_list]
        for idx,inp in enumerate(final_iter):
            input_img,target = inp[-1], inp[0]
            target_labels.extend(target.numpy().tolist())
            out = model(input_img.to(device))
            for evaltype in evaltypes:
                # 'Combined-<w1>-<w2>' eval types fuse the Class and Aux heads
                # with the given weights and re-normalize.
                if 'Combined' in evaltype:
                    weights = [float(x) for x in evaltype.split('-')[1:]]
                    feature_colls[evaltype].extend(torch.nn.functional.normalize(torch.cat([weights[0]*out['Class'],weights[1]*out['Aux']], dim=-1), dim=-1).cpu().detach().numpy().tolist())
                else:
                    if isinstance(out, dict):
                        feature_colls[evaltype].extend(out[evaltype].cpu().detach().numpy().tolist())
                    else:
                        feature_colls[evaltype].extend(out.cpu().detach().numpy().tolist())
        target_labels = np.hstack(target_labels).reshape(-1,1)
    computed_metrics = {evaltype:{} for evaltype in evaltypes}
    for evaltype in evaltypes:
        feature_coll  = np.vstack(feature_colls[evaltype]).astype('float32')
        torch.cuda.empty_cache()
        ### Set CPU Cluster index
        cpu_cluster_index = faiss.IndexFlatL2(feature_coll.shape[-1])
        kmeans            = faiss.Clustering(feature_coll.shape[-1], n_classes)
        kmeans.niter = 20
        kmeans.min_points_per_centroid = 1
        kmeans.max_points_per_centroid = 1000000000
        ### Train Kmeans
        kmeans.train(feature_coll, cpu_cluster_index)
        computed_centroids = faiss.vector_float_to_array(kmeans.centroids).reshape(n_classes, feature_coll.shape[-1])
        ### Assign feature points to clusters
        faiss_search_index = faiss.IndexFlatL2(computed_centroids.shape[-1])
        faiss_search_index.add(computed_centroids)
        _, model_generated_cluster_labels = faiss_search_index.search(feature_coll, 1)
        ### Compute NMI
        NMI = metrics.cluster.normalized_mutual_info_score(model_generated_cluster_labels.reshape(-1), target_labels.reshape(-1))
        ### Recover max(k_vals) nearest neighbours to use for recall computation
        faiss_search_index  = faiss.IndexFlatL2(feature_coll.shape[-1])
        faiss_search_index.add(feature_coll)
        # +1 because each sample retrieves itself as its own nearest neighbour.
        _, k_closest_points = faiss_search_index.search(feature_coll, int(np.max(k_vals)+1))
        k_closest_classes   = target_labels.reshape(-1)[k_closest_points[:,1:]]
        ### Compute Recall
        recall_all_k = []
        for k in k_vals:
            recall_at_k = np.sum([1 for target, recalled_predictions in zip(target_labels, k_closest_classes) if target in recalled_predictions[:k]])/len(target_labels)
            recall_all_k.append(recall_at_k)
        ### Compute F1 Score
        F1 = f1_score(model_generated_cluster_labels, target_labels, feature_coll, computed_centroids)
        computed_metrics[evaltype] = {'F1':F1, 'NMI':NMI, 'Recall@k':recall_all_k, 'Features':feature_coll}
    return computed_metrics, target_labels
"""============================================================================================================="""
####### RECOVER CLOSEST EXAMPLE IMAGES #######
def recover_closest_one_dataset(feature_matrix_all, image_paths, save_path, n_image_samples=10, n_closest=3):
    """Save a grid of random query images next to their nearest neighbours.

    Each row shows one random query (red bar) followed by its *n_closest*
    retrieved images (green bars), found via exact L2 search in faiss.
    """
    image_paths = np.array([x[0] for x in image_paths])
    sample_idxs = np.random.choice(np.arange(len(feature_matrix_all)), n_image_samples)
    faiss_search_index = faiss.IndexFlatL2(feature_matrix_all.shape[-1])
    faiss_search_index.add(feature_matrix_all)
    # n_closest+1 because each query retrieves itself first.
    _, closest_feature_idxs = faiss_search_index.search(feature_matrix_all, n_closest+1)
    sample_paths = image_paths[closest_feature_idxs][sample_idxs]
    f,axes = plt.subplots(n_image_samples, n_closest+1)
    for i,(ax,plot_path) in enumerate(zip(axes.reshape(-1), sample_paths.reshape(-1))):
        ax.imshow(np.array(Image.open(plot_path)))
        ax.set_xticks([])
        ax.set_yticks([])
        # First image in each row is the query (red); the rest are retrievals (green).
        if i%(n_closest+1):
            ax.axvline(x=0, color='g', linewidth=13)
        else:
            ax.axvline(x=0, color='r', linewidth=13)
    f.set_size_inches(10,20)
    f.tight_layout()
    f.savefig(save_path)
    plt.close()
"""============================================================================================================="""
################## SET NETWORK TRAINING CHECKPOINT #####################
def set_checkpoint(model, opt, progress_saver, savepath, aux=None):
    """Serialize the model weights plus run metadata into a single checkpoint file."""
    checkpoint = {
        'state_dict': model.state_dict(),
        'opt': opt,
        'progress': progress_saver,
        'aux': aux,
    }
    torch.save(checkpoint, savepath)
"""============================================================================================================="""
################## WRITE TO CSV FILE #####################
class CSV_Writer():
    """Appends tabular log data to one CSV file per logging group."""
    def __init__(self, save_path):
        """*save_path* is the filename prefix; each group gets '<prefix>_<group>.csv'."""
        self.save_path = save_path
        self.written = []            # groups whose header row was already emitted
        self.n_written_lines = {}    # group -> number of data rows written so far
    def log(self, group, segments, content):
        """Append the rows in *content* (header *segments* on first call) to the group's CSV."""
        self.n_written_lines.setdefault(group, 0)
        target_file = self.save_path + '_' + group + '.csv'
        with open(target_file, "a") as csv_file:
            writer = csv.writer(csv_file, delimiter=",")
            if group not in self.written:
                writer.writerow(segments)
            for row in content:
                writer.writerow(row)
                self.n_written_lines[group] += 1
        self.written.append(group)
################## PLOT SUMMARY IMAGE #####################
class InfoPlotter():
    """Renders the logged metric histories of one sub-logger as an SVG line plot."""
    def __init__(self, save_path, title='Training Log', figsize=(25,19)):
        self.save_path = save_path
        self.title = title
        self.figsize = figsize
        # One line color per plotted metric, assigned in logging order.
        self.colors = ['r','g','b','y','m','c','orange','darkgreen','lightblue']
    def make_plot(self, base_title, title_append, sub_plots, sub_plots_data):
        """Plot every metric series against epochs (or sample index) and save to SVG.

        The figure title summarizes each metric by its best value so far:
        min for anything containing 'loss', max otherwise.
        """
        sub_plots = list(sub_plots)
        if 'epochs' not in sub_plots:
            x_data = range(len(sub_plots_data[0]))
        else:
            x_data = range(sub_plots_data[np.where(np.array(sub_plots)=='epochs')[0][0]][-1]+1)
        self.ov_title = [(sub_plot,sub_plot_data) for sub_plot, sub_plot_data in zip(sub_plots,sub_plots_data) if sub_plot.lower() not in ['epoch','epochs','time']]
        self.ov_title = [(x[0],np.max(x[1])) if 'loss' not in x[0].lower() else (x[0],np.min(x[1])) for x in self.ov_title]
        self.ov_title = title_append +': '+ '  |  '.join('{0}: {1:.4f}'.format(x[0],x[1]) for x in self.ov_title)
        # Drop the epoch counter itself from the plotted series.
        sub_plots_data = [x for x,y in zip(sub_plots_data, sub_plots) if y.lower() not in ['epochs']]
        sub_plots = [x for x in sub_plots if x.lower() not in ['epochs']]
        plt.style.use('ggplot')
        f,ax = plt.subplots(1)
        ax.set_title(self.ov_title, fontsize=22)
        for i,(data, title) in enumerate(zip(sub_plots_data, sub_plots)):
            ax.plot(x_data, data, '-{}'.format(self.colors[i]), linewidth=1.7, label=base_title+' '+title)
        ax.tick_params(axis='both', which='major', labelsize=18)
        ax.tick_params(axis='both', which='minor', labelsize=18)
        ax.legend(loc=2, prop={'size': 16})
        f.set_size_inches(self.figsize[0], self.figsize[1])
        f.savefig(self.save_path+'_'+title_append+'.svg')
        plt.close()
################## GENERATE LOGGING FOLDER/FILES #######################
def set_logging(opt):
    """Create a unique checkpoint folder for this run and persist its parameters.

    The folder is '<save_path>/<savename>', or a dataset/arch/timestamp name
    when opt.savename is empty. If the candidate already exists, '_<counter>'
    suffixes are tried until a free name is found. Side effects: the folder is
    created, opt.save_path is repointed at it, and the run parameters are
    written there as Parameter_Info.txt and hypa.pkl.
    """
    checkfolder = opt.save_path+'/'+opt.savename
    if opt.savename == '':
        date = datetime.datetime.now()
        time_string = '{}-{}-{}-{}-{}-{}'.format(date.year, date.month, date.day, date.hour, date.minute, date.second)
        checkfolder = opt.save_path+'/{}_{}_'.format(opt.dataset.upper(), opt.arch.upper())+time_string
    # BUG FIX: the collision loop previously rebuilt the candidate from
    # opt.savename, so timestamped runs (savename == '') degenerated to
    # '<save_path>/_<counter>'. Derive suffixed candidates from the base name.
    base_checkfolder = checkfolder
    counter = 1
    while os.path.exists(checkfolder):
        checkfolder = base_checkfolder+'_'+str(counter)
        counter += 1
    os.makedirs(checkfolder)
    opt.save_path = checkfolder
    with open(opt.save_path+'/Parameter_Info.txt','w') as f:
        f.write(gimme_save_string(opt))
    pkl.dump(opt,open(opt.save_path+"/hypa.pkl","wb"))
class Progress_Saver():
    """Nested accumulator for logged metric values: group -> segment -> history.

    Each segment entry keeps its full value history ('content') plus the index
    up to which values have already been flushed to disk ('saved_idx').
    """
    def __init__(self):
        self.groups = {}
    def log(self, segment, content, group=None):
        """Append *content* to the history of *segment* (grouped under *group*, default: segment)."""
        group_key = segment if group is None else group
        group_store = self.groups.setdefault(group_key, {})
        entry = group_store.setdefault(segment, {'content': [], 'saved_idx': 0})
        entry['content'].append(content)
class LOGGER():
    """Aggregates all run logging: per-sub-logger CSV files, progression plots,
    and (optionally) Weights & Biases mirroring.

    One CSV_Writer / InfoPlotter / Progress_Saver triple is kept per sub-logger
    (e.g. 'train', 'val'). Metric values accumulate in the Progress_Saver
    instances and are flushed to disk via update().
    """
    def __init__(self, opt, sub_loggers=None, prefix=None, start_new=True, log_to_wandb=False):
        """
        opt:          run-parameter namespace; opt.save_path is the logging root
                      (repointed by set_logging when start_new is True).
        sub_loggers:  names of the independent logging streams.
                      (Fixed: was a mutable default argument `[]`.)
        prefix:       optional filename prefix for produced artifacts.
        start_new:    create a fresh checkpoint folder via set_logging().
        log_to_wandb: mirror flushed metrics to wandb on update().
        """
        sub_loggers = [] if sub_loggers is None else sub_loggers
        self.prop = opt
        self.prefix = '{}_'.format(prefix) if prefix is not None else ''
        self.sub_loggers = sub_loggers
        ### Make Logging Directories
        if start_new: set_logging(opt)
        ### Set Graph and CSV writer
        self.csv_writer, self.graph_writer, self.progress_saver = {},{},{}
        for sub_logger in sub_loggers:
            csv_savepath = opt.save_path+'/CSV_Logs'
            if not os.path.exists(csv_savepath): os.makedirs(csv_savepath)
            self.csv_writer[sub_logger] = CSV_Writer(csv_savepath+'/Data_{}{}'.format(self.prefix, sub_logger))
            prgs_savepath = opt.save_path+'/Progression_Plots'
            if not os.path.exists(prgs_savepath): os.makedirs(prgs_savepath)
            self.graph_writer[sub_logger] = InfoPlotter(prgs_savepath+'/Graph_{}{}'.format(self.prefix, sub_logger))
            self.progress_saver[sub_logger] = Progress_Saver()
        ### WandB Init
        self.save_path = opt.save_path
        self.log_to_wandb = log_to_wandb
    def update(self, *sub_loggers, all=False):
        """Flush all newly logged values of the given sub-loggers to CSV, plots and wandb."""
        wandb_content = []
        if all: sub_loggers = self.sub_loggers
        for sub_logger in list(sub_loggers):
            for group in self.progress_saver[sub_logger].groups.keys():
                pgs = self.progress_saver[sub_logger].groups[group]
                segments = pgs.keys()
                per_seg_saved_idxs = [pgs[segment]['saved_idx'] for segment in segments]
                per_seg_contents = [pgs[segment]['content'][idx:] for segment,idx in zip(segments, per_seg_saved_idxs)]
                per_seg_contents_all = [pgs[segment]['content'] for segment,idx in zip(segments, per_seg_saved_idxs)]
                # Advance the saved-index so already-flushed values are not re-written.
                for content,segment in zip(per_seg_contents, segments):
                    self.progress_saver[sub_logger].groups[group][segment]['saved_idx'] += len(content)
                tupled_seg_content = [list(seg_content_slice) for seg_content_slice in zip(*per_seg_contents)]
                self.csv_writer[sub_logger].log(group, segments, tupled_seg_content)
                if 'epoch' not in group.lower():
                    self.graph_writer[sub_logger].make_plot(sub_logger, group, segments, per_seg_contents_all)
                for i,segment in enumerate(segments):
                    if 'epoch' not in segment:
                        if group == segment:
                            name = sub_logger+': '+group.title()
                        else:
                            name = sub_logger+': '+group.title()+': '+segment.title()
                        wandb_content.append((name,per_seg_contents[i]))
        if self.log_to_wandb:
            import wandb
            commit=False
            for i,item in enumerate(wandb_content):
                # Only commit the wandb step on the final logged item.
                if i==len(wandb_content)-1: commit=True
                if isinstance(item[1], list):
                    for j,sub_item in enumerate(item[1]):
                        wandb.log({item[0]:sub_item}, commit=commit)
                else:
                    # BUG FIX: previously logged the metric *name* as its own
                    # value ({name: name}); log the actual content instead.
                    wandb.log({item[0]:item[1]}, commit=commit)
"""================================================================================================="""
### Container to use with latent space separation
def run_kmeans(features, n_cluster):
    """Cluster *features* (n_samples x dim, float32) into *n_cluster* groups via faiss k-means.

    Returns the (n_samples, 1) array of assigned cluster indices.
    """
    n_samples, dim = features.shape
    kmeans = faiss.Kmeans(dim, n_cluster)
    # 20 iterations; centroid population bounds effectively disabled.
    kmeans.n_iter, kmeans.min_points_per_centroid, kmeans.max_points_per_centroid = 20,5,1000000000
    kmeans.train(features)
    _, cluster_assignments = kmeans.index.search(features,1)
    return cluster_assignments
"""================================================================================================="""
### Adjust Parameters for different loss classes
def adjust_pars(loss_pars, opt, ix=0, mode='class'):
    """Copy the *mode*-prefixed hyperparameters from *opt* onto *loss_pars*.

    For every shared parameter name p, sets loss_pars.p = opt.<mode>_p, and
    also mirrors the learning rate. (*ix* is accepted for interface
    compatibility but unused here.)
    """
    shared_pars = ['nu', 'beta', 'beta_lr', 'beta_constant', 'embed_dim', 'margin', 'loss', 'sampling', 'num_classes', 'proxy_lr']
    source, target = vars(opt), vars(loss_pars)
    for par in shared_pars:
        target[par] = source['{}_{}'.format(mode, par)]
    loss_pars.lr = opt.lr
"""============================================================================================================="""
### Generate Network Graph
def save_graph(opt, model):
    """Render the model's autograd graph for a dummy 224x224 input as an SVG.

    Walks grad_fn.next_functions recursively via graphviz; nodes are colored
    by operation type. The SVG is written under '<save_path>/Network_Graphs'.
    """
    inp = torch.randn((1,3,224,224)).to(opt.device)
    network_output = model(inp)
    if isinstance(network_output, dict): network_output = network_output['Class']
    from graphviz import Digraph
    def make_dot(var, savename, params=None):
        """
        Generate a symbolic representation of the network graph.
        """
        # NOTE(review): `Variable` below is not imported anywhere in this file;
        # this branch would raise NameError if params were ever passed — confirm.
        if params is not None:
            assert all(isinstance(p, Variable) for p in params.values())
            param_map = {id(v): k for k, v in params.items()}
        node_attr = dict(style='filled',
                         shape='box',
                         align='left',
                         fontsize='6',
                         ranksep='0.1',
                         height='0.6',
                         width='1')
        dot = Digraph(node_attr=node_attr, format='svg', graph_attr=dict(size="40,10", rankdir='LR', rank='same'))
        seen = set()
        def size_to_str(size):
            return '('+(', ').join(['%d' % v for v in size])+')'
        def add_nodes(var):
            # Strip framework suffixes from grad_fn class names for display.
            replacements = ['Backward', 'Th', 'Cudnn']
            color_assigns = {'Convolution':'orange',
                             'ConvolutionTranspose': 'lightblue',
                             'Add': 'red',
                             'Cat': 'green',
                             'Softmax': 'yellow',
                             'Sigmoid': 'yellow',
                             'Copys': 'yellow'}
            if var not in seen:
                op1 = torch.is_tensor(var)
                op2 = not torch.is_tensor(var) and str(type(var).__name__)!='AccumulateGrad'
                text = str(type(var).__name__)
                for rep in replacements:
                    text = text.replace(rep, '')
                color = color_assigns[text] if text in color_assigns.keys() else 'gray'
                if 'Pool' in text: color = 'lightblue'
                if op1 or op2:
                    if hasattr(var, 'next_functions'):
                        count = 0
                        # Append the sizes of directly-attached parameters to the label.
                        for i, u in enumerate(var.next_functions):
                            if str(type(u[0]).__name__)=='AccumulateGrad':
                                if count==0: attr_text = '\nParameter Sizes:\n'
                                attr_text += size_to_str(u[0].variable.size())
                                count += 1
                                attr_text += ' '
                        if count>0: text += attr_text
                if op1:
                    dot.node(str(id(var)), size_to_str(var.size()), fillcolor='orange')
                if op2:
                    dot.node(str(id(var)), text, fillcolor=color)
                seen.add(var)
                # Recurse into parent operations and saved tensors.
                if op1 or op2:
                    if hasattr(var, 'next_functions'):
                        for u in var.next_functions:
                            if u[0] is not None:
                                if str(type(u[0]).__name__)!='AccumulateGrad':
                                    dot.edge(str(id(u[0])), str(id(var)))
                                    add_nodes(u[0])
                    if hasattr(var, 'saved_tensors'):
                        for t in var.saved_tensors:
                            dot.edge(str(id(t)), str(id(var)))
                            add_nodes(t)
        add_nodes(var.grad_fn)
        dot.save(savename)
        return dot
    if not os.path.exists(opt.save_path):
        raise Exception('No save folder {} available!'.format(opt.save_path))
    viz_graph = make_dot(network_output, opt.save_path+"/Network_Graphs"+"/{}_network_graph".format(opt.arch))
    viz_graph.format = 'svg'
    viz_graph.render()
    torch.cuda.empty_cache()
    # print('Done.')
    # if view: viz_graph.view()
| StarcoderdataPython |
119513 | '''
Program: string_processor.py
Demo of method chaining in Python.
By: <NAME> -
http://jugad2.blogspot.in/p/about-vasudev-ram.html
Copyright 2016 <NAME>
'''
import copy
class StringProcessor(object):
    '''Wraps a string and exposes chainable transformation methods.

    Every mutator returns self so calls can be chained fluently; dup()
    hands back an independent deep copy for branching chains.
    '''
    def __init__(self, st):
        '''Store the string to be processed.'''
        self._st = st
    def _apply(self, transform):
        '''Replace the wrapped string with transform(string); returns self.'''
        self._st = transform(self._st)
        return self
    def lowercase(self):
        '''Convert all characters to lowercase; returns self.'''
        return self._apply(str.lower)
    def uppercase(self):
        '''Convert all characters to uppercase; returns self.'''
        return self._apply(str.upper)
    def capitalize(self):
        '''Capitalize the first character, lowering the rest; returns self.'''
        return self._apply(str.capitalize)
    def delspace(self):
        '''Remove every space character; returns self.'''
        return self._apply(lambda s: s.replace(' ', ''))
    def rep(self):
        '''Return the current string value.'''
        return self._st
    def dup(self):
        '''Return an independent deep copy of this processor.'''
        return copy.deepcopy(self)
# NOTE: this file is Python 2 (print statements); kept as-is.
def process_string(s):
    # Demonstrate each chained transformation on *s*, printing the results.
    print
    sp = StringProcessor(s)
    print 'Original:', sp.rep()
    # dup() keeps the original processor untouched between demonstrations.
    print 'After uppercase:', sp.dup().uppercase().rep()
    print 'After lowercase:', sp.dup().lowercase().rep()
    print 'After uppercase then capitalize:', sp.dup().uppercase().\
        capitalize().rep()
    print 'After delspace:', sp.dup().delspace().rep()
def main():
    # Run the method-chaining demo on two sample strings.
    print "Demo of method chaining in Python:"
    # Use extra spaces between words to show effect of delspace.
    process_string('hOWz  It     GoInG?')
    process_string('The   QUIck brOWn fOx')
main()
| StarcoderdataPython |
165774 | <reponame>GuyPaulHadad/IML.HUJI<gh_stars>0
import IMLearn.learners.regressors.linear_regression
from IMLearn.learners.regressors import PolynomialFitting
from IMLearn.utils import split_train_test
import numpy as np
import pandas as pd
import plotly.express as px
import plotly.io as pio
import math
import plotly.graph_objects as go
import plotly as pt
pio.templates.default = "simple_white"
TEST_PERCENT = 0.25  # fraction of samples held out as the test set
FINAL_DEG = 5        # polynomial degree selected for the final model (Q5)
def clean_data(temp_data: pd.DataFrame) -> pd.DataFrame:
    """Sanitize the raw city-temperature table and return it.

    Converts the parsed 'DayOfYear' dates to day-of-year integers, drops rows
    with non-positive Year/Month/Day, drops implausible temperatures
    (below -50), and drops rows containing NaNs.
    """
    temp_data['DayOfYear'] = temp_data['DayOfYear'].apply(lambda ts: ts.dayofyear)
    for column in ('Year', 'Month', 'Day'):
        invalid_rows = temp_data.index[temp_data[column] <= 0]
        temp_data = temp_data.drop(invalid_rows)
    temp_data = temp_data.drop(temp_data.index[temp_data["Temp"] < -50])
    return temp_data.dropna(axis=0).reset_index(drop=True)
def load_data(filename: str) -> pd.DataFrame:
    """
    Load city daily temperature dataset and preprocess data.
    Parameters
    ----------
    filename: str
        Path to house prices dataset
    Returns
    -------
    Design matrix and response vector (Temp)
    """
    # parse_dates merges the CSV's 'Date' column into a datetime 'DayOfYear'
    # column, which clean_data then converts to day-of-year integers.
    daily_temp_df = pd.read_csv(filename, parse_dates={'DayOfYear': ['Date']})
    daily_temp_df = clean_data(daily_temp_df)
    return daily_temp_df
def q2fig(city_info_df: pd.DataFrame):
    """Q2: plot Israeli daily temperatures vs day-of-year, plus per-month std bars."""
    israel_temp_df = city_info_df[city_info_df['Country'] == "Israel"].reset_index()
    fig1 = px.scatter(israel_temp_df, x='DayOfYear', y='Temp', color='Year',
                      color_discrete_sequence=px.colors.qualitative.Vivid,
                      title="Temperature as a function of The Day of the year",
                      labels={"DayOfYear": "Day of the Year", "Temp": "Temperature"})
    fig1.update_layout(title={'x': 0.5})
    fig1.show()
    # Per-month standard deviation of the daily temperatures.
    months_temp_sd_df = pd.DataFrame()
    months_temp_sd_df['Month_STD'] = (israel_temp_df.groupby('Month').agg(func=np.std)['Temp'])
    """
    for month in range(1,13):
        index_col = israel_temp_df.index[israel_temp_df["Month"]==month]
        israel_temp_df.loc[index_col,'Month_STD'] = month_std[month]
    """
    months_temp_sd_df['Month'] = pd.Series(np.linspace(0, 12, 13))
    fig2 = px.bar(months_temp_sd_df, x='Month', y='Month_STD', title="Each month's standard deviation for daily "
                                                                     "temperatures ",
                  labels={'Month_STD': "Month's Standard Deviation"})
    fig2.update_layout(title={'x': 0.5}).show()
def q3fig(city_info_df: pd.DataFrame):
    """Q3: per-country monthly temperature mean with std error bars."""
    mn = city_info_df.groupby(['Country', 'Month'])['Temp'].mean().reset_index().rename(columns={'Temp': 'Temp_Mean'})
    sd = city_info_df.groupby(['Country', 'Month'])['Temp'].agg(func=np.std).reset_index().rename(
        columns={'Temp': 'Temp_SD'})
    # Both frames share the same (Country, Month) grouping/order, so the
    # std column can be attached positionally.
    mn["Temp_SD"] = sd['Temp_SD']
    fig = px.line(mn, x='Month', y='Temp_Mean', error_y='Temp_SD', color='Country',
                  title='Temperature mean as a function of the month')
    fig.update_layout(title={'x': 0.5}).show()
def q4fig(city_temp_df: pd.DataFrame):
    """Q4: test loss of polynomial fits of degree 1..10 on Israeli data (bar chart)."""
    isr_temp_df = city_temp_df[city_temp_df['Country'] == "Israel"].reset_index()
    israel_df_doy = isr_temp_df.pop('DayOfYear')
    temp_df = isr_temp_df.pop('Temp')
    train, test = split_training_and_test(israel_df_doy, temp_df, TEST_PERCENT)
    results = []
    for i in range(1, 11):
        results.append(round(fit_and_calc_loss(train, test, i), 3))
    print("Test Err:" + str(results))
    ls = np.linspace(1, 10, 10)
    poly_loss_df = pd.DataFrame({"Degree": ls, "Loss": results})
    fig = px.bar(poly_loss_df, x='Degree', y='Loss', title="Loss as a function of polynomial degree k ")
    fig.update_layout(title={'x': 0.5}).show()
def split_training_and_test(data_x, pred_y, test_percent):
    """Shuffle the samples and split them into train/test partitions.

    Returns ((train_x, train_y), (test_x, test_y)), where the test set holds
    round(test_percent * n) samples and labels stay aligned with features.
    """
    data_x = data_x.sample(frac=1)
    # Align labels with the shuffled feature order.
    pred_y = pred_y.reindex_like(data_x)
    n_test = round(test_percent * len(pred_y))
    # BUG FIX: when n_test == 0 the old negative-index slicing ([:-0] / [-0:])
    # produced an EMPTY train set and a FULL test set; split at a positive
    # boundary instead so test_percent == 0 yields an empty test set.
    split_idx = len(pred_y) - n_test
    return ((data_x[:split_idx], pred_y[:split_idx]),
            (data_x[split_idx:], pred_y[split_idx:]))
def mult_coeff(coeffs, num):
    """Evaluate the polynomial with coefficients *coeffs* (ascending degree) at *num*.

    BUG FIX: the original overwrote the accumulator every iteration
    (`poly = num ** i * coeffs[i]` instead of `poly +=`), so it returned only
    the highest-degree term.
    """
    return sum(c * num ** i for i, c in enumerate(coeffs))
def fit_and_calc_loss(train_data, test_data, k) -> float:
    """Fit a degree-*k* polynomial on (features, labels) in *train_data*
    and return its loss on *test_data*.

    Both arguments are (features_series, labels_series) pairs.
    """
    train_features = train_data[0]
    train_true_y = train_data[1]
    pf = PolynomialFitting(k)
    pf.fit(train_features.to_numpy(), train_true_y.to_numpy())
    # test_feature = test_data[0].apply(lambda x: mult_coeff(pf.coefs_,x))
    return pf._loss(test_data[0].to_numpy(), test_data[1].to_numpy())
if __name__ == '__main__':
    np.random.seed(0)
    # Question 1 - Load and preprocessing of city temperature dataset
    city_temp_df = load_data("C:/Users/guyha/Desktop/uniCourse/Year 2/Semester B/IML/datasets/City_Temperature.csv")
    # Question 2 - Exploring data for specific country
    # q2fig(city_temp_df)
    # Question 3 - Exploring differences between countries
    # q3fig(city_temp_df)
    # Question 4 - Fitting model for different values of `k`
    # q4fig(city_temp_df)
    # Question 5 - Evaluating fitted model on different countries
    # Fit once on ALL Israeli data using the degree selected in Q4 ...
    israel_df = city_temp_df[city_temp_df['Country'] == "Israel"].reset_index()
    israel_df_doy = israel_df.pop('DayOfYear')
    israel_temp_df = israel_df.pop('Temp')
    pf = PolynomialFitting(FINAL_DEG)
    pf._fit(israel_df_doy, israel_temp_df)
    # ... then evaluate the fitted model on every other country.
    jordan_df = city_temp_df[city_temp_df['Country'] == "Jordan"].reset_index()
    jordan_df_doy = jordan_df.pop('DayOfYear')
    jordan_temp_df = jordan_df.pop('Temp')
    south_africa_df = city_temp_df[city_temp_df['Country'] == "South Africa"].reset_index()
    south_africa_df_doy = south_africa_df.pop('DayOfYear')
    south_africa_temp_df = south_africa_df.pop('Temp')
    nether_df = city_temp_df[city_temp_df['Country'] == "The Netherlands"].reset_index()
    nether_df_doy = nether_df.pop('DayOfYear')
    nether_temp_df = nether_df.pop('Temp')
    country_temp = [jordan_temp_df, south_africa_temp_df, nether_temp_df]
    country_doy = [jordan_df_doy, south_africa_df_doy, nether_df_doy]
    results = []
    for i in range(3):
        results.append(pf._loss(country_doy[i], country_temp[i]))
    err_country_df = pd.DataFrame({'Country': ["Jordan", "South Africa", "The Netherlands"], "Model Error": results})
    fig = px.bar(err_country_df, x='Country', y='Model Error', title="Model Error for each country")
    fig.update_layout(title={'x': 0.5}).show()
| StarcoderdataPython |
3365436 | import json
from notifications.models import Notification
from rest_framework.generics import GenericAPIView, ListAPIView
from rest_framework import exceptions
from rest_framework.permissions import IsAuthenticated
from rest_framework.response import Response
from rest_framework import status
from authors.apps.notify.helper import Notifier
from authors.apps.notify.models import MailList
from authors.apps.notify.serializers import (CreateMailListSerializer,
FetchMailListSerializer,
)
from django.contrib.sites.shortcuts import get_current_site
# Helper response format method
def serialize_queryset(queryset):
    """Convert an iterable of notification objects into plain dicts for JSON responses.

    Nested recipient/actor objects are flattened to their usernames.
    """
    return [
        {
            'id': notification.id,
            'description': notification.description,
            'verb': notification.verb,
            'unread': notification.unread,
            'emailed': notification.emailed,
            'recipient': notification.recipient.username,
            'actor': notification.actor.username,
            'timestamp': notification.timestamp,
            'data': notification.data,
        }
        for notification in queryset
    ]
# Create your views here.
class FetchMailListView(ListAPIView):
    """
    Fetch all mail list objects from the mail list table
    """
    # Read-only listing endpoint; requires an authenticated user.
    queryset = MailList.objects.all()
    serializer_class = FetchMailListSerializer
    permission_classes = (IsAuthenticated,)
class FetchUpdateMailList(GenericAPIView):
    """Fetch or create a mail list object for the logged in user."""
    serializer_class = CreateMailListSerializer
    permission_classes = (IsAuthenticated,)
    def get(self, request, *args, **kwargs):
        """Return the user's mail list entry, creating it if absent."""
        mail_list_obj = MailList.objects.get_or_create(user=request.user)[0]
        serialized_mail_list_obj = FetchMailListSerializer(mail_list_obj)
        return Response(serialized_mail_list_obj.data,
                        status=status.HTTP_200_OK)
    def put(self, request, *args, **kwargs):
        """
        Update notification subscription status for a logged in user
        by changing the email or push notification status to false.
        opt-in and out of notifications
        """
        mail_list_obj = MailList.objects.get_or_create(user=request.user)[0]
        data = request.data
        # Partial update: only the supplied subscription flags are changed.
        serialized_data = self.serializer_class(
            instance=mail_list_obj, data=data, context=request, partial=True
        )
        serialized_data.is_valid(raise_exception=True)
        serialized_data.save()
        return Response(serialized_data.data, status=status.HTTP_200_OK)
class FetchAllNotifications(ListAPIView):
    """Return every notification (read and unread) for the logged in user."""
    permission_classes = (IsAuthenticated,)
    def get(self, request, *args, **kwargs):
        user_notifications = Notifier.fetch_all_notifications(
            user=request.user)
        # Friendly message instead of an empty list when nothing exists.
        if user_notifications.count() == 0:
            return Response(dict(
                message='You currently dont have any notifications')
            )
        data = serialize_queryset(user_notifications)
        return Response(
            {'count': user_notifications.count(),
             'notifications': data})
class FetchAllUnReadNotifications(ListAPIView):
    """Return only the unread notifications for the logged in user."""
    permission_classes = (IsAuthenticated,)
    def get(self, request, *args, **kwargs):
        unread_notifications = Notifier.fetch_unread_notifications(
            user=request.user)
        if unread_notifications.count() == 0:
            return Response(dict(
                message='You currently dont have any unread notifications')
            )
        return Response(
            {'count': unread_notifications.count(),
             'notifications': serialize_queryset(unread_notifications)})
class FetchAllReadNotifications(ListAPIView):
    """Return only the already-read notifications for the logged in user."""
    permission_classes = (IsAuthenticated,)
    def get(self, request, *args, **kwargs):
        read_notifications = Notifier.fetch_read_notifications(
            user=request.user)
        if read_notifications.count() == 0:
            return Response(dict(
                message='You currently dont have any read notifications')
            )
        return Response(
            {'count': read_notifications.count(),
             'notifications': serialize_queryset(read_notifications)})
class MarkAllNotificationsAsRead(GenericAPIView):
    """Mark every unread notification of the logged in user as read.

    NOTE(review): this mutates state on a GET request; a POST/PUT would be
    more conventional — confirm before changing, clients may rely on it.
    """
    permission_classes = (IsAuthenticated,)
    def get(self, request, *args, **kwargs):
        user_notifications = Notifier.fetch_unread_notifications(
            user=request.user)
        if user_notifications.count() == 0:
            return Response(dict(
                message='You currently dont have any notifications')
            )
        user_notifications.mark_all_as_read()
        return Response(
            dict(message='All notifications marked as read')
        )
| StarcoderdataPython |
1694728 | <filename>pyaxis/pyaxis.py
"""Pcaxis Parser module parses px files into dataframes.
This module obtains a pandas DataFrame of tabular data from a PC-Axis
file or URL. Reads data and metadata from PC-Axis [1]_ into a dataframe and
dictionary, and returns a dictionary containing both structures.
Example:
from pyaxis import pyaxis
px = pyaxis.parse(self.base_path + 'px/2184.px', encoding='ISO-8859-2')
.. [1] https://www.scb.se/en/services/statistical-programs-for-px-files/
..todo::
meta_split: "NOTE" attribute can be multiple, but only the last one
is added to the dictionary.
"""
import itertools
import logging
import re
from numpy import nan
from pandas import DataFrame, Series
import requests
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def uri_type(uri):
    """Classify *uri* as a remote URL or a local file path.

    Args:
        uri (str): pc-axis file name or URL

    Returns:
        str: 'URL' when *uri* matches an http/https/ftp/ftps URL, else 'FILE'.

    .. Regex debugging:
        https://pythex.org/
    """
    # django url validation regex:
    regex = re.compile(r'^(?:http|ftp)s?://'  # http:// or https://
                       # domain...
                       r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
                       r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|'
                       r'localhost|'  # localhost...
                       r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})'  # ...or ip
                       r'(?::\d+)?'  # optional port
                       r'(?:/?|[/?]\S+)$', re.IGNORECASE)
    return 'URL' if re.match(regex, uri) else 'FILE'
def read(uri, encoding, timeout=10):
    """Read a text file from file system or URL.

    Args:
        uri (str): file name or URL
        encoding (str): charset encoding
        timeout (int): request timeout; optional

    Returns:
        raw_pcaxis (str): file contents.

    Raises:
        requests.exceptions.* on network failures (logged, then re-raised);
        OSError for unreadable local files.
    """
    raw_pcaxis = ''
    if uri_type(uri) == 'URL':
        try:
            # stream=True defers the body download until .text is accessed.
            response = requests.get(uri, stream=True, timeout=timeout)
            response.raise_for_status()
            # Force the declared charset instead of requests' guess.
            response.encoding = encoding
            raw_pcaxis = response.text
            response.close()
        except requests.exceptions.ConnectTimeout as connect_timeout:
            logger.error('ConnectionTimeout = %s', str(connect_timeout))
            raise
        except requests.exceptions.ConnectionError as connection_error:
            logger.error('ConnectionError = %s', str(connection_error))
            raise
        except requests.exceptions.HTTPError as http_error:
            logger.error('HTTPError = %s',
                         str(http_error.response.status_code) + ' ' +
                         http_error.response.reason)
            raise
        except requests.exceptions.InvalidURL as url_error:
            # NOTE(review): string-concatenating status_code (an int) would
            # raise TypeError here — confirm and switch to %s formatting.
            logger.error('URLError = ' + url_error.response.status_code + ' ' +
                         url_error.response.reason)
            raise
        except Exception:
            import traceback
            logger.error('Generic exception: %s', traceback.format_exc())
            raise
    else:  # file parsing
        file_object = open(uri, encoding=encoding)
        raw_pcaxis = file_object.read()
        file_object.close()
    return raw_pcaxis
def metadata_extract(pc_axis):
    r"""Extract metadata and data from pc-axis file contents.

    Args:
        pc_axis (str): pc_axis file contents.

    Returns:
        metadata_attributes (list of string): each item conforms to an\
        ATTRIBUTE=VALUES pattern.
        data (string): data values.
    """
    # Newlines carry no meaning in PC-Axis; flatten everything to one line.
    flattened = pc_axis.replace('\n', ' ').replace('\r', ' ')
    # Everything before the DATA= keyword is metadata; the rest is data.
    metadata, data = flattened.split('DATA=')
    # meta: list of strings that conforms to pattern ATTRIBUTE=VALUES
    metadata_attributes = split_ignore_quotation_marks(metadata,
                                                       ';', final=True)
    # Strip all semicolons and surrounding blanks from the data section.
    data = data.replace(';', '').strip()
    # Clean each attribute of surrounding blanks and trailing semicolons.
    metadata_attributes = [item.strip().rstrip(';')
                           for item in metadata_attributes]
    return metadata_attributes, data
def split_ignore_quotation_marks(string_input, separator, final=False):
    """Split *string_input* on *separator*, ignoring separators inside quotes.

    Args:
        string_input (string): metadata element
        separator (string): character to split on (e.g. '=' or ';')
        final (bool): True when the separator also terminates the string, so
            the fragment after the last separator is discarded.

    Returns:
        list: the split fragments; an empty list when no unquoted separator
        was found.
    """
    # Fix: the original ended with an unreachable ``return string_input``
    # after ``return result`` — the dead statement has been removed and the
    # quote-state bookkeeping simplified to a single toggle.
    inside_quotes = False
    result = []
    index_from = 0
    for index, element in enumerate(string_input):
        if element == '"':
            # Toggle quoted state; separators inside quotes are ignored.
            inside_quotes = not inside_quotes
        if element == separator and not inside_quotes:
            result.append(string_input[index_from:index])
            index_from = index + 1
    if result and not final:
        # Keep the remainder after the last separator as the final fragment.
        result.append(string_input[index_from:])
    return result
def metadata_split_to_dict(metadata_elements):
    """Split the list of metadata elements into a multi-valued keys dict.

    Args:
        metadata_elements (list of string): pairs ATTRIBUTE=VALUES

    Returns:
        metadata (dictionary): {'attribute1': ['value1', 'value2', ... ], ...}
    """
    metadata = {}
    for element in metadata_elements:
        name, values = split_ignore_quotation_marks(element, '=', final=False)
        # Drop quotes and the blanks that sometimes pad parenthesised names.
        name = name.replace('"', '').replace('( ', '(').replace(' )', ')')
        # Split values delimited by double quotes into a list, stripping
        # leading and trailing blanks from each value.
        metadata[name] = re.findall('"[ ]*(.+?)[ ]*"+?', values)
    return metadata
def get_dimensions(metadata):
    """Read STUB and HEADING values from metadata dictionary.

    Args:
        metadata: dictionary of metadata

    Returns:
        dimension_names (list)
        dimension_members (list)
    """
    dimension_names = []
    dimension_members = []
    # STUB dimensions come first, then HEADING dimensions; the members of
    # each dimension live under the corresponding VALUES(<name>) key.
    for section in ('STUB', 'HEADING'):
        for dimension in metadata.get(section, []):
            dimension_names.append(dimension)
            dimension_members.append(
                list(metadata['VALUES(' + dimension + ')']))
    return dimension_names, dimension_members
def get_codes(metadata):
    """Read dimension codes and their dimension names from metadata dictionary.

    Args:
        metadata: dictionary of metadata

    Returns:
        dimensions_with_codes(list)
        dimension_codes(list)
    """
    dimensions_with_codes = []
    dimension_codes = []
    # Not every STUB/HEADING dimension necessarily has CODES; only the ones
    # with a CODES(<name>) entry are recorded, keeping both lists parallel.
    for section in ('STUB', 'HEADING'):
        for dimension in metadata.get(section, []):
            code_key = 'CODES(' + dimension + ')'
            if code_key in metadata:
                dimensions_with_codes.append(dimension)
                dimension_codes.append(list(metadata[code_key]))
    return dimensions_with_codes, dimension_codes
def build_dataframe(dimension_names, dimension_members, data_values,
                    null_values, sd_values):
    """Build a dataframe from dimensions and data.

    Adds the cartesian product of dimension members plus the series of data.

    Args:
        dimension_names (list of string)
        dimension_members (list of string)
        data_values(Series): pandas series with the data values column.
        null_values(str): regex with the pattern for the null values in the px
            file. Defaults to '.'.
        sd_values(str): regex with the pattern for the statistical disclosured
            values in the px file. Defaults to '..'.

    Returns:
        df (pandas dataframe)
    """
    # One row per combination of dimension members (cartesian product).
    rows = list(itertools.product(*dimension_members))
    frame = DataFrame(data=rows, columns=dimension_names)
    frame['DATA'] = data_values
    # Null markers become empty strings; disclosure markers become NaN.
    frame = frame.replace({'DATA': {null_values: ''}}, regex=True)
    return frame.replace({'DATA': {sd_values: nan}}, regex=True)
def parse(uri, encoding, timeout=10,
          null_values=r'^"\."$', sd_values=r'"\.\."'):
    """Extract metadata and data sections from pc-axis.

    Args:
        uri (str): file name or URL
        encoding (str): charset encoding
        timeout (int): request timeout in seconds; optional
        null_values(str): regex with the pattern for the null values in the px
            file. Defaults to '.'.
        sd_values(str): regex with the pattern for the statistical disclosured
            values in the px file. Defaults to '..'.

    Returns:
        pc_axis_dict (dictionary): dictionary of metadata and pandas df.
            METADATA: dictionary of metadata
            DATA: pandas dataframe
    """
    # get file content or URL stream
    try:
        pc_axis = read(uri, encoding, timeout)
    except ValueError:
        import traceback
        logger.error('Generic exception: %s', traceback.format_exc())
        raise
    # metadata and data extraction and cleaning
    metadata_elements, raw_data = metadata_extract(pc_axis)
    # stores raw metadata into a dictionary
    metadata = metadata_split_to_dict(metadata_elements)
    # explode raw data into a Series of values, which can contain nulls or sd
    # (statistical disclosure)
    data_values = Series(raw_data.split())
    # extract dimension names and members from
    # 'meta_dict' STUB and HEADING keys
    dimension_names, dimension_members = get_dimensions(metadata)
    # build a dataframe
    df = build_dataframe(
        dimension_names,
        dimension_members,
        data_values,
        null_values=null_values,
        sd_values=sd_values)
    # dictionary of metadata and data (pandas dataframe)
    parsed_pc_axis = {
        'METADATA': metadata,
        'DATA': df
    }
    return parsed_pc_axis
| StarcoderdataPython |
107856 | <reponame>haydenshively/Tezos-Prediction
from training.train_cnn_timeseries import main as train_cnn_timeseries
from training.train_lstm import main as train_lstm
from testing.test_cnn_timeseries import main as test_cnn_timeseries
from testing.test_lstm import main as test_lstm
if __name__ == '__main__':
    # Training runs are disabled; uncomment to retrain the models from scratch.
    # train_cnn_timeseries('dataset/train')
    # train_lstm('dataset/train')
    # Evaluate both pretrained models on the held-out test split.
    cnn_results = test_cnn_timeseries('models/cnn_timeseries_16_40_5_1.h5', 'dataset/test')
    lstm_results = test_lstm('models/lstm_16_40_5.h5', 'dataset/test')
    # The first two entries of each result tuple are (mse, mae).
    print('CNN Results \t|\t\tmse: {},\t\tmae: {}'.format(*cnn_results[:2]))
    print('LSTM Results\t|\t\tmse: {},\t\tmae: {}'.format(*lstm_results[:2]))
| StarcoderdataPython |
184106 | """
Algorithm to use Obspy's metadata to pull response and other metadata. Returns an apporpiate
inventory class.
A.V. Newman Mon Jul 26 15:26:35 EDT 2021
"""
from obspy.clients.fdsn import Client as fdsnClient
from obspy import UTCDateTime
def get_respinv(network, eloc, etime, rads, chan):
    """Query an FDSN web service for station metadata with instrument response.

    Args:
        network: FDSN network code(s) passed straight to the client.
        eloc: (latitude, longitude, depth) of the event; depth is unpacked
            but currently unused.
        etime: event origin time (UTCDateTime); stations must have matching
            time series in the day preceding this time.
        rads: (min_radius, max_radius) search annulus in degrees around the
            event location.
        chan: channel code pattern (e.g. 'BH?').

    Returns:
        An Obspy Inventory object with response-level station metadata.
    """
    # Fix: removed the unused local ``now = UTCDateTime()`` that served no
    # purpose in the query.
    fclient = fdsnClient()
    elat, elon, edep = eloc
    minrad, maxrad = rads
    inventory = fclient.get_stations(network=network,
                                     latitude=elat, longitude=elon,
                                     minradius=minrad, maxradius=maxrad,
                                     starttime=etime - 86400, endtime=etime,
                                     channel=chan,
                                     # location="00",
                                     matchtimeseries=True,
                                     # filename="test.txt", format="text",  # cannot be used with response
                                     # filename="test.xml", format="xml",
                                     level="response"
                                     )
    return inventory  # this is an Obspy type
| StarcoderdataPython |
3385101 | import pytest
from Modules.device_module import *
class TestDeviceModule:
    """Unit tests for dm_json_check() input validation in Modules.device_module."""
    def test_device_module_no_input(self):
        # An empty string is not valid JSON at all.
        with pytest.raises(json.decoder.JSONDecodeError):
            dm_json_check("")
    def test_device_module_incomplete_input(self):
        # Missing the required 'glucometer' key.
        with pytest.raises(AttributeError):
            json_str = '''{
                "patientname": "Jack",
                "temperature": "36.5",
                "systolicbloodpressure": "110",
                "diastolicbloodpressure": "70",
                "pulse": "95",
                "oximeter": "98"
            }'''
            dm_json_check(json_str)
    def test_device_module_not_number(self):
        # A non-numeric measurement value must be rejected.
        with pytest.raises(ValueError):
            json_str = '''{
                "patientname": "Jack",
                "temperature": "36.5",
                "systolicbloodpressure": "110",
                "diastolicbloodpressure": "70",
                "pulse": "95",
                "oximeter": "98",
                "glucometer": "hello"
            }'''
            dm_json_check(json_str)
    def test_device_module_negative_number(self):
        # Negative physiological readings are out of range.
        with pytest.raises(ValueError):
            json_str = '''{
                "patientname": "Jack",
                "temperature": "36.5",
                "systolicbloodpressure": "110",
                "diastolicbloodpressure": "-70",
                "pulse": "95",
                "oximeter": "98",
                "glucometer": "82"
            }'''
            dm_json_check(json_str)
1724820 | <gh_stars>1-10
import gym, gym_envs
from stable_baselines.common.vec_env import DummyVecEnv, VecNormalize
from stable_baselines.common.env_checker import check_env
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import os
import time
import argparse
def main():
    """Run a random-action baseline in a gym env and log rewards/walltime to CSV."""
    parser = argparse.ArgumentParser()
    parser.add_argument('--env', help='environment ID', type=str, default='CartPole-v1')
    parser.add_argument('-f', '--folder', help='Log folder', type=str, default='trained_agents')
    parser.add_argument('-n', '--n-timesteps', help='number of timesteps', default=1000,)
    parser.add_argument('--nb-seeds', help='Number of seeds to evaluate', type=int, default=0)
    # get arguments
    args = parser.parse_args()
    env_id = args.env
    log_path = args.folder
    nb_timesteps = int(args.n_timesteps)
    nb_seeds = int(args.nb_seeds)
    os.makedirs(log_path, exist_ok=True)
    env = gym.make(env_id)
    ## LEARNING CURVE
    # NOTE(review): with the default --nb-seeds 0 the loop below never runs,
    # so `timesteps` is undefined at the pd.Series(timesteps) line and
    # pd.concat([]) raises — confirm the intended default.
    walltime_seed = []
    all_rewards = []
    ep_reward = 0
    start_time = time.time()
    for seed in range(nb_seeds):
        obs = env.reset()
        rewards = []
        timesteps = []
        for t in range(nb_timesteps):
            # Random policy: sample uniformly from the action space.
            action = env.action_space.sample()
            obs, reward, done, info = env.step(action)
            ep_reward += reward
            if done:
                rewards.append(ep_reward)
                timesteps.append(t+1)
                ep_reward = 0
                obs = env.reset()
        # walltime
        # NOTE(review): measured from the global start, not per seed — the
        # recorded walltimes are cumulative; verify this is intended.
        end_time = time.time()
        walltime = end_time - start_time
        walltime_seed.append(walltime)
        # reward
        df_rewards = pd.Series(rewards, name="seed_"+str(seed))
        all_rewards.append(df_rewards)
    env.close()
    ## walltime
    print(walltime_seed)
    mean_walltime = np.mean(walltime_seed)
    std_walltime = np.std(walltime_seed)
    # # convert to min
    # mean_walltime /= 60
    # std_walltime /= 60
    d_walltime = {"mean_walltime": mean_walltime, "std_walltime": std_walltime}
    df_walltime = pd.DataFrame(d_walltime, index=[0])
    df_walltime.to_csv(log_path+"/walltime.csv", index=False)
    ## reward
    # print(all_rewards)
    # One column per seed; only the last seed's episode end-times are kept.
    all_rewards_df = pd.concat(all_rewards, axis=1)
    all_rewards_df['timesteps'] = pd.Series(timesteps)
    print(all_rewards_df)
    all_rewards_df.to_csv(log_path+"/all_rewards.csv", index=False)
if __name__ == '__main__':
main() | StarcoderdataPython |
# Using strip() to drop surrounding whitespace from a string (equivalent to trim()).
# Read each CSV line and print the name/age pair.
# Fix: the file handle is now closed even on error via a context manager.
with open('pessoas.csv') as arquivo:
    for linha in arquivo:
        # * unpacks the split fields into the two format placeholders.
        print('Nome: {}, Idade: {}'.format(*linha.strip().split(',')))
5751 | <filename>qiskit_metal/qlibrary/lumped/cap_n_interdigital.py
# -*- coding: utf-8 -*-
# This code is part of Qiskit.
#
# (C) Copyright IBM 2017, 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
from qiskit_metal import draw, Dict
from qiskit_metal.qlibrary.core import QComponent
import numpy as np
class CapNInterdigital(QComponent):
    """Generates a two pin (+) structure comprised of a north CPW transmission
    line, and a south transmission line, coupled together via a finger
    capacitor. Such a structure can be used, as an example, for generating CPW
    resonators. (0,0) represents the center position of the component. Setting
    finger length to 0 gives a simple gap capacitor. The width of the gap
    capacitor is found via
    (cap_width * finger_count + cap_gap * (finger_count - 1)).
    Inherits QComponent class.
    ::

        (0,0)     N
          +       ^
          |       |
          |
          |
        --|-----|--
        |  |  |  | |
        |-----|-----|
              |
              |
              |
              |
              +

    Options:
        * north_width: '10um' -- The width of the 'north' portion of the CPW transmission line
        * north_gap: '6um' -- The dielectric gap of the 'north' portion of the CPW transmission line
        * south_width: '10um' -- The width of the 'south' portion of the CPW transmission line
        * south_gap: '6um' -- The dielectric gap of the 'south' portion of the CPW transmission line
          (also for the capacitor gap to ground)
        * cap_width: '10um' -- The width of the finger capacitor metal (and islands)
        * cap_gap: '6um' -- The width of dielectric for the capacitive coupling/fingers
        * cap_gap_ground: '6um' -- Width of the dielectric between the capacitor and ground
        * finger_length: '20um' -- The depth of the finger islands of the capacitor
        * finger_count: '5' -- Number of fingers in the capacitor
        * cap_distance: '50um' -- Distance of the north point of the capacitor from the north pin
        * pos_x/_y: '0um' -- The x/y position of the north pin
        * rotation: '0' -- The direction of the transmission line. 0 degrees is -y, following a
          counter-clockwise rotation (eg. 90 is +x)
        * chip: 'main' -- The chip the capacitor should be on.
        * layer: '1' -- Layer the capacitor is on.
    """
    component_metadata = Dict(short_name='cpw',
                              _qgeometry_table_poly='True',
                              _qgeometry_table_path='True')
    """Component metadata"""
    #Currently setting the primary CPW length based on the coupling_length
    #May want it to be it's own value that the user can control?
    default_options = Dict(north_width='10um',
                           north_gap='6um',
                           south_width='10um',
                           south_gap='6um',
                           cap_width='10um',
                           cap_gap='6um',
                           cap_gap_ground='6um',
                           finger_length='20um',
                           finger_count='5',
                           cap_distance='50um',
                           pos_x='0um',
                           pos_y='0um',
                           orientation='0',
                           chip='main',
                           layer='1')
    """Default connector options"""
    def make(self):
        """Build the component."""
        p = self.p
        N = int(p.finger_count)
        #Finger Capacitor
        # Bounding box of the whole capacitor: N fingers of cap_width
        # separated by cap_gap, tall enough for both rails plus the fingers.
        cap_box = draw.rectangle(N * p.cap_width + (N - 1) * p.cap_gap,
                                 p.cap_gap + 2 * p.cap_width + p.finger_length,
                                 0, 0)
        # Serpentine centerline of the dielectric cut; buffering it by
        # cap_gap/2 carves the interdigitated gap out of the box.
        make_cut_list = []
        make_cut_list.append([0, (p.finger_length) / 2])
        make_cut_list.append([(p.cap_width) + (p.cap_gap / 2),
                              (p.finger_length) / 2])
        flip = -1
        for i in range(1, N):
            make_cut_list.append([
                i * (p.cap_width) + (2 * i - 1) * (p.cap_gap / 2),
                flip * (p.finger_length) / 2
            ])
            make_cut_list.append([
                (i + 1) * (p.cap_width) + (2 * i + 1) * (p.cap_gap / 2),
                flip * (p.finger_length) / 2
            ])
            flip = flip * -1
        cap_cut = draw.LineString(make_cut_list).buffer(p.cap_gap / 2,
                                                        cap_style=2,
                                                        join_style=2)
        # Center the cut horizontally on the capacitor box.
        cap_cut = draw.translate(cap_cut,
                                 -(N * p.cap_width + (N - 1) * p.cap_gap) / 2,
                                 0)
        cap_body = draw.subtract(cap_box, cap_cut)
        # Shift the capacitor body down so its top sits cap_distance below
        # the north pin at (0, 0).
        cap_body = draw.translate(
            cap_body, 0, -p.cap_distance -
            (p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)
        # Ground-plane etch pocket surrounding the capacitor.
        cap_etch = draw.rectangle(
            N * p.cap_width + (N - 1) * p.cap_gap + 2 * p.cap_gap_ground,
            p.cap_gap + 2 * p.cap_width + p.finger_length +
            2 * p.cap_gap_ground, 0, -p.cap_distance -
            (p.cap_gap + 2 * p.cap_width + p.finger_length) / 2)
        #CPW
        north_cpw = draw.LineString([[0, 0], [0, -p.cap_distance]])
        south_cpw = draw.LineString(
            [[
                0, -p.cap_distance -
                (p.cap_gap + 2 * p.cap_width + p.finger_length)
            ],
             [
                 0, -2 * p.cap_distance -
                 (p.cap_gap + 2 * p.cap_width + p.finger_length)
             ]])
        #Rotate and Translate
        c_items = [north_cpw, south_cpw, cap_body, cap_etch]
        c_items = draw.rotate(c_items, p.orientation, origin=(0, 0))
        c_items = draw.translate(c_items, p.pos_x, p.pos_y)
        [north_cpw, south_cpw, cap_body, cap_etch] = c_items
        #Add to qgeometry tables
        self.add_qgeometry('path', {'north_cpw': north_cpw},
                           width=p.north_width,
                           layer=p.layer)
        self.add_qgeometry('path', {'north_cpw_sub': north_cpw},
                           width=p.north_width + 2 * p.north_gap,
                           layer=p.layer,
                           subtract=True)
        self.add_qgeometry('path', {'south_cpw': south_cpw},
                           width=p.south_width,
                           layer=p.layer)
        self.add_qgeometry('path', {'south_cpw_sub': south_cpw},
                           width=p.south_width + 2 * p.south_gap,
                           layer=p.layer,
                           subtract=True)
        self.add_qgeometry('poly', {'cap_body': cap_body}, layer=p.layer)
        self.add_qgeometry('poly', {'cap_etch': cap_etch},
                           layer=p.layer,
                           subtract=True)
        #Add pins
        # North pin points outward (reversed coords); south pin points outward
        # in the opposite direction.
        north_pin_list = north_cpw.coords
        south_pin_list = south_cpw.coords
        self.add_pin('north_end',
                     points=np.array(north_pin_list[::-1]),
                     width=p.north_width,
                     input_as_norm=True)
        self.add_pin('south_end',
                     points=np.array(south_pin_list),
                     width=p.south_width,
                     input_as_norm=True)
# Package version, exposed both as a tuple and as a dotted string.
version = (1, 2, 0)
version_string = ".".join(map(str, version))
| StarcoderdataPython |
3390078 | from django import forms
from .models import InstaUser,Test
class InstaForm(forms.ModelForm):
    """ModelForm for creating an InstaUser with a caption text and image."""
    # Explicit caption field so it renders with a placeholder widget.
    text = forms.CharField(widget=forms.TextInput(attrs={'placeholder':'Enter your text'}))
    class Meta:
        model = InstaUser
        fields = ['username','password','text','image']
        widgets = {
            'username':forms.TextInput(attrs={'placeholder':'Enter your Username'}),
            'password':forms.PasswordInput(attrs={'placeholder':'Enter your Password'})
        }
class TestForm(forms.ModelForm):
    """ModelForm exposing every field of the Test model."""
    class Meta:
        model = Test
        fields = "__all__"
118368 | import re
from functools import cache
def lmap(f, it):
    """Apply *f* to every element of *it* and materialize the result as a list."""
    return [f(element) for element in it]
def ints(it):
    """Convert every element of *it* to int, returning a list."""
    return [int(element) for element in it]
@cache
def F(d, s):
    """Lanternfish population: fish descended from one fish with timer *s* after *d* days."""
    # Day index at which this fish spawns; negative means it never does.
    spawn_day = d - s - 1
    if spawn_day < 0:
        return 1
    # Parent resets to 6, child starts at 8.
    return F(spawn_day, 6) + F(spawn_day, 8)
def solve(input):
    """Advent-of-Code day 6 part 2: total lanternfish after 256 days for the puzzle input."""
    timers = ints(re.findall(r"-?\d+", input))
    return sum(F(256, timer) for timer in timers)
| StarcoderdataPython |
176882 | #!/usr/bin/python3
class ComplexThing:
    """A named container pairing a label with arbitrary data."""

    def __init__(self, name, data):
        self.name = name
        self.data = data

    def __str__(self):
        return f"{self.name}: {self.data}"
3372953 | import functools
import json
from amcp_pylib.core.syntax import Scanner, Parser, CommandGroup
def command_syntax(syntax_rules: str):
    """Decorator factory: parse an AMCP syntax rule string and bind keyword
    arguments of the decorated command function to the syntax-tree variables.

    The decorated function receives a ready-to-send Command instance instead
    of its raw arguments.
    """
    # Parse the syntax rules once, at decoration time.
    scanner = Scanner(syntax_rules)
    parser = Parser(scanner)
    result_tree = parser.parse()
    command_syntax_tree = result_tree  # copy.deepcopy(result_tree)
    # NOTE(review): the tree (and its variables) is shared across calls —
    # confirm concurrent use is safe, deepcopy is commented out above.
    command_variables = command_syntax_tree.get_variables()
    def decorator_command_syntax(function):
        @functools.wraps(function)
        def wrapper_command_syntax(*args, **kwargs):
            # check provided positional arguments
            if len(args):
                raise RuntimeError(
                    "Command functions do not accept any positional arguments. "
                    "Provided positional arguments: {}".format(args)
                )
            # validate and use provided keyword arguments
            for arg_name in kwargs:
                try:
                    # get provided argument value
                    arg_value = kwargs[arg_name]
                    # try to convert dict and list values to JSON
                    if isinstance(arg_value, dict) or isinstance(arg_value, list):
                        arg_value = json.dumps(arg_value)
                    # normalize argument value (escape backslashes/quotes)
                    arg_value = Command.normalize_parameter(arg_value)
                    # set value to corresponding syntax-defined variable
                    command_variables[arg_name].set_value(arg_value)
                except KeyError:
                    raise RuntimeError(
                        "Command '{command_name}' does not accept any parameter named '{arg_identifier}'.".format(
                            command_name=syntax_rules.split(None, 1)[0], arg_identifier=arg_name
                        )
                    )
            command = Command(command_syntax_tree)
            return function(command)
        return wrapper_command_syntax
    return decorator_command_syntax
class Command:
    """
    Represents sendable AMCP protocol command.
    """
    # Every serialized command ends with CRLF.
    TERMINATOR = "\r\n"

    # Resulting command string sent to server.
    command: str = None

    def __init__(self, command_structure: "CommandGroup"):
        """ Initializes Command class instance. """
        self.command_structure = command_structure
        self.command = str(command_structure)

    def __str__(self) -> str:
        """ Converts command to string. """
        return Command.normalize_command(self.command) + Command.TERMINATOR

    def __bytes__(self) -> bytes:
        """ Converts command to string and then to bytes using UTF-8 encoding. """
        return str(self).encode("UTF-8")

    @staticmethod
    def normalize_parameter(value):
        """ Normalizes parameter values by escaping backslashes and quotes. """
        if isinstance(value, str):
            # transform \ to \\ first, then " to \" (order matters).
            value = value.replace("\\", "\\\\").replace('"', "\\\"")
        return value

    @staticmethod
    def normalize_command(command: str) -> str:
        """ Normalizes resulting command format. """
        return command.strip()
| StarcoderdataPython |
126229 | '''
Une application minimaliste qui switche a intervalle de temps regulier entre deux pages Internet.
L'idée de l'application vient de cette discussion :
http://www.developpez.net/forums/d1255957/autres-langages/python-zope/general-python/jongler-entre-onglets-navigateur-web
'''
import sys
from PyQt4 import QtCore, QtGui, QtWebKit
class Browser(QtGui.QMainWindow):
    """Main window that cycles between browser tabs at a fixed interval."""
    def __init__(self, parent=None):
        super(Browser, self).__init__(parent)
        # Two pages in two tabs as the central widget
        page1 = QtWebKit.QWebView()
        page1.load(QtCore.QUrl("http://www.developpez.com/"));
        page2 = QtWebKit.QWebView()
        page2.load(QtCore.QUrl("http://www.google.com/"));
        self.tabs = QtGui.QTabWidget()
        self.tabs.addTab(page1, "Developpez")
        self.tabs.addTab(page2, "Google")
        self.setCentralWidget( self.tabs )
        # Create the tab-switching timer (interval in milliseconds)
        self.timer = QtCore.QTimer();
        self.timer.timeout.connect(self.switchTab)
        self.timerInterval = 3000
        self.timer.start( self.timerInterval )
        # Menu bar
        # "Application" menu
        menuApp = self.menuBar().addMenu("&Application");
        self.actionQuit = QtGui.QAction("&Quitter", self)
        self.actionQuit.triggered.connect(QtGui.qApp.quit) # magic reference to the running application
        menuApp.addAction(self.actionQuit);
        # "Tabs" menu
        menuTabs = self.menuBar().addMenu("&Onglets");
        self.actionAdd = QtGui.QAction("&Ajouter", self)
        self.actionAdd.triggered.connect(self.addTab)
        menuTabs.addAction(self.actionAdd);
        self.actionRemove = QtGui.QAction("&Enlever", self)
        self.actionRemove.triggered.connect(self.removeTab)
        menuTabs.addAction(self.actionRemove);
        # "Timer" menu
        menuTimer = self.menuBar().addMenu("&Timer");
        self.actionStop = QtGui.QAction("&Stopper", self)
        self.actionStop.triggered.connect(self.stopTimer)
        menuTimer.addAction(self.actionStop);
        self.actionRestart = QtGui.QAction("&Relancer", self)
        self.actionRestart.triggered.connect(self.restartTimer)
        menuTimer.addAction(self.actionRestart);
        self.actionChangeTimerInterval = QtGui.QAction("&Periodicite", self)
        self.actionChangeTimerInterval.triggered.connect(self.changeTimerInterval)
        menuTimer.addAction(self.actionChangeTimerInterval);
    def changeTimerInterval(self):
        """Prompt the user for a new timer period (ms) and apply it."""
        results = QtGui.QInputDialog.getInt(self, "Periodicite du timer", "Periode", self.timerInterval, 1, 2147483647, 100)
        if results[1] == True:
            self.timerInterval = int( results[0] )
            self.timer.setInterval( self.timerInterval )
    def stopTimer(self):
        """Pause the automatic tab switching."""
        self.timer.stop()
    def restartTimer(self):
        """Resume automatic tab switching with the stored interval."""
        self.timer.start( self.timerInterval )
    def switchTab(self):
        """Advance to the next tab, wrapping around to the first."""
        newindex = self.tabs.currentIndex() + 1
        if newindex >= self.tabs.count() :
            self.tabs.setCurrentIndex(0)
        else :
            self.tabs.setCurrentIndex(newindex)
    def addTab(self):
        """Ask for a URL and a title, then open them in a new tab."""
        resultsUrl = QtGui.QInputDialog.getText(self, "Entrer l'URL souhaitee", "URL")
        resultsTitle = QtGui.QInputDialog.getText(self, "Entrer le titre de l'onglet", "Titre")
        # TODO: more robust invalid-URL handling; see
        # http://www.riverbankcomputing.co.uk/static/Docs/PyQt4/html/qnetworkaccessmanager.html
        # http://www.riverbankcomputing.co.uk/static/Docs/PyQt4/html/qnetworkreply.html#NetworkError-enum
        if (resultsUrl[1] == True) & (resultsTitle[1] == True):
            page = QtWebKit.QWebView()
            page.load(QtCore.QUrl.fromUserInput( resultsUrl[0] ))
            self.tabs.addTab(page, resultsTitle[0])
    def removeTab(self):
        """Ask for a 1-based tab number and remove that tab."""
        results = QtGui.QInputDialog.getInt(self, "Numero de l'onglet a retirer", "Numero", 1, 1, self.tabs.count(), 1)
        if results[1] == True:
            self.tabs.removeTab(results[0]-1)
            # does not delete the widget, but presumably Python's GC reclaims it quickly
    def main(self):
        """Show the window (entry point used by the __main__ block)."""
        self.show()
if __name__=='__main__':
app = QtGui.QApplication(sys.argv)
nav = Browser()
nav.setWindowTitle("Monitoring")
nav.main()
sys.exit( app.exec_() ) | StarcoderdataPython |
3261709 | <gh_stars>1-10
# Generated by Django 2.2 on 2019-10-08 06:26
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Auto-generated by Django 2.2: replaces the Food model with
    Ingredient/Recipe and rewires Meal and NutritionalInformation to Recipe.

    NOTE(review): auto-generated migration — once applied, changes belong in
    a new migration, not edits here.
    """
    dependencies = [
        ('utils', '0002_auto_20190824_2234'),
        ('meals', '0002_auto_20190824_2234'),
    ]
    operations = [
        migrations.CreateModel(
            name='Ingredient',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('description', models.TextField(blank=True)),
                ('slug', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('shelf_life', models.CharField(choices=[('LO', 'Low'), ('MD', 'Medium'), ('HI', 'High')], default='HI', max_length=2)),
                ('availability', models.CharField(choices=[('LC', 'Locally'), ('EX', 'Exotic')], default='LC', max_length=2)),
            ],
            options={
                'db_table': 'ingredients',
            },
        ),
        migrations.CreateModel(
            name='Recipe',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('is_active', models.BooleanField(default=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('modified_at', models.DateTimeField(auto_now=True)),
                ('name', models.CharField(max_length=100, unique=True)),
                ('description', models.TextField(blank=True)),
                ('slug', models.CharField(blank=True, max_length=100, null=True, unique=True)),
                ('calories', models.PositiveIntegerField(default=0)),
                ('ingredients', models.ManyToManyField(blank=True, related_name='recipes', to='meals.Ingredient')),
                ('tags', models.ManyToManyField(blank=True, related_name='tagged_recipes', to='utils.Tag')),
            ],
            options={
                'db_table': 'recipes',
            },
        ),
        # Drop the old Food model and its references.
        migrations.RemoveField(
            model_name='meal',
            name='foods',
        ),
        migrations.RemoveField(
            model_name='nutritionalinformation',
            name='food',
        ),
        migrations.DeleteModel(
            name='Food',
        ),
        # Re-point Meal and NutritionalInformation at Recipe.
        migrations.AddField(
            model_name='meal',
            name='recipes',
            field=models.ManyToManyField(related_name='recipe_meals', to='meals.Recipe'),
        ),
        migrations.AddField(
            model_name='nutritionalinformation',
            name='recipe',
            field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.PROTECT, to='meals.Recipe'),
            preserve_default=False,
        ),
    ]
| StarcoderdataPython |
3381328 | <gh_stars>1-10
import torch.nn as nn
import torch.nn.functional as F
class FocalLoss(nn.Module):
    """Binary focal loss computed on raw logits.

    With ``gamma == 0`` this reduces exactly to binary cross-entropy with
    logits; larger ``gamma`` down-weights well-classified examples.

    Parameters
    ----------
    gamma : float
        Focusing exponent applied via the (1 - p_t)**gamma modulating factor.
    reduction : str
        'mean', 'sum', or 'none' (any other value also returns the
        unreduced elementwise loss).
    """

    def __init__(self, gamma=2, reduction='mean'):
        super().__init__()
        self.gamma = gamma
        self.reduction = reduction

    def forward(self, logit, target):
        """Return the focal loss of `logit` against binary `target`."""
        target = target.float()
        # Numerically stable BCE-with-logits:
        # max(x, 0) - x*t + log(exp(-max) + exp(-x - max)), avoiding overflow.
        max_val = (-logit).clamp(min=0)
        loss = logit - logit * target + max_val + \
            ((-max_val).exp() + (-logit - max_val).exp()).log()
        # log-probability of the *incorrect* class; exp(gamma * invprobs)
        # is the (1 - p_t)**gamma focal modulating factor.
        invprobs = F.logsigmoid(-logit * (target * 2.0 - 1.0))
        loss = (invprobs * self.gamma).exp() * loss
        if self.reduction == 'mean':
            return loss.mean()
        elif self.reduction == 'sum':
            return loss.sum()
        # Fix: any other reduction value (e.g. 'none') previously fell off
        # the end and returned None; return the elementwise loss instead.
        return loss
73487 | <gh_stars>0
# -*- coding: utf-8 -*-
from ._common import *
# URL-safe substitution tables for the base64 alphabet: '+/=' <-> '_~-'.
# encode_tk2/decode_tk2 below apply these before/after reversing the bytes.
encode_translation = bytes.maketrans(b'+/=', b'_~-')
decode_translation = bytes.maketrans(b'_~-', b'+/=')
def encode_tk2(s):
    """Encode *s* into the reversed, URL-safe base64 "tk2" wire format."""
    encoded = base64.b64encode(s.encode()).translate(encode_translation)
    buf = bytearray(encoded)
    buf.reverse()
    return buf.decode()
def decode_tk2(s):
    """Invert encode_tk2: reverse *s*, undo the URL-safe mapping, b64-decode."""
    raw = s.encode() if not isinstance(s, bytes) else s
    buf = bytearray(raw)
    buf.reverse()
    decoded = base64.b64decode(buf.translate(decode_translation))
    return decoded.decode()
def generate_tk2(did):
    """Build the tk2 token for device id *did*, stamped with client time (s)."""
    payload = 'did={}|pno=1030|ver=0.3.0301|clit={}'.format(did, int(time.time()))
    return encode_tk2(payload)
class Hunantv(VideoExtractor):
    """Extractor for mgtv.com (芒果TV / HunanTV) web videos."""

    name = '芒果TV (HunanTV)'

    # Map of the site's Chinese quality labels to internal stream-type codes.
    profile_2_types = {
        '复刻版': 'BD',
        '蓝光': 'BD',
        '超清': 'TD',
        '高清': 'HD',
        '标清': 'SD'
    }

    def prepare(self):
        """Resolve the video id, query the mgtv player API, and return a
        VideoInfo populated with the available m3u8 streams.
        """
        info = VideoInfo(self.name)
        self.install_cookie()
        add_header('Referer', self.url)

        # Try progressively harder ways to extract the numeric video id
        # from the URL, falling back to scraping the page HTML.
        if self.url and not self.vid:
            self.vid = match1(self.url, 'com/[bl]/\d+/(\d+).html')
            if self.vid is None:
                self.vid = match1(self.url, 'com/s/(\d+).html')
            if self.vid is None:
                html = get_content(self.url)
                if match1(self.url, 'com/h/(\d+).html'):
                    # "/h/" pages embed the id in a JS blob; evaluate it.
                    from ykdl.util.jsengine import JSEngine
                    assert JSEngine, 'No JS Interpreter found!!!'
                    js_ctx = JSEngine()
                    js = match1(html, '<script>window.__NUXT__=(.+);</script>')
                    data = str(js_ctx.eval(js))
                    self.vid = match1(data, "PartId': '(\d+)'")
                else:
                    self.vid = match1(html, 'window.location = "/b/\d+/(\d+).html"',
                                      r'routePath:"\\u002Fl\\u002F\d+\\u002F(\d+).html"',
                                      'vid[=:]\D?(\d+)')
        assert self.vid, 'can not find video!!!'

        # First API call: fetch title/description and the pm2 token
        # required by the getSource endpoint.
        did = get_random_uuid()
        tk2 = generate_tk2(did)
        params = {
            'tk2': tk2,
            'video_id': self.vid,
            'type': 'pch5'
        }
        data = get_response('https://pcweb.api.mgtv.com/player/video',
                            params=params).json()
        assert data['code'] == 200, ('[failed] code: {}, msg: {}'
                                     .format(data['code'], data['msg']))
        assert data['data'], '[Failed] Video info not found.'
        data = data['data']

        info.title = data['info']['title'] + ' ' + data['info']['desc']
        params['pm2'] = data['atc']['pm2']

        # Second API call: list the available stream qualities.
        data = get_response('https://pcweb.api.mgtv.com/player/getSource',
                            params=params).json()
        assert data['code'] == 200, ('[failed] code: {}, msg: {}'
                                     .format(data['code'], data['msg']))
        assert data['data'], '[Failed] Video source not found.'
        data = data['data']

        domain = data['stream_domain'][0]

        # Resolve each quality's relative URL into the final m3u8 URL.
        for lstream in data['stream']:
            lurl = lstream['url']
            if lurl:
                url = get_response(domain + lurl,
                                   params={'did': did}).json()['info']
                video_profile = lstream['name']
                stream = self.profile_2_types[video_profile]
                info.streams[stream] = {
                    'container': 'm3u8',
                    'video_profile': video_profile,
                    'src' : [url]
                }
                info.stream_types.append(stream)

        info.extra['referer'] = self.url
        return info
# Module-level extractor instance (presumably registered/dispatched by the
# importing framework -- confirm against ykdl's extractor loading).
site = Hunantv()
| StarcoderdataPython |
4824755 | from bot.common.threads.thread_builder import (
BaseThread,
ThreadKeys,
BaseStep,
StepKeys,
Step,
)
from bot.config import read_file
class ReportStep(BaseStep):
    """Send the guild-specific Airtable link used to report contributions."""

    # NOTE(review): this step reuses the USER_DISPLAY_CONFIRM key rather than
    # a dedicated report key -- confirm that is intentional.
    name = StepKeys.USER_DISPLAY_CONFIRM.value

    def __init__(self, guild_id):
        # Guild id used to look up that guild's Airtable link in the config.
        self.guild_id = guild_id

    async def send(self, message, user_id):
        """Post the report-form link to the channel *message* came from.

        Returns a tuple of (sent discord message, None) -- no follow-up
        metadata for this step.
        """
        channel = message.channel
        airtableLinks = read_file()
        airtableLink = airtableLinks.get(str(self.guild_id))
        # NOTE(review): "Report you contributions" in the user-facing string
        # looks like a typo for "your"; left untouched here since it is
        # runtime text.
        sent_message = await channel.send(
            f"Woohoo! Nice job! Community contributions are what keeps"
            " your community thriving 🌞. "
            f"Report you contributions via the form 👉 {airtableLink}",
        )
        return sent_message, None
class Report(BaseThread):
    """Single-step thread that points users at the contribution report form."""

    name = ThreadKeys.REPORT.value

    async def get_steps(self):
        """Build the one-step chain rooted at a ReportStep for this guild."""
        first_step = ReportStep(guild_id=self.guild_id)
        return Step(current=first_step).build()
| StarcoderdataPython |
175847 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 13 14:10:46 2019
@author: gui
"""
import sys, pygame
import numpy as np
from pygame.locals import *
import pygame.freetype
# Window size in pixels; the 4x4 board occupies a 400x400 region with a
# `scale`-pixel margin on each side (see the blit offsets in the main loop).
w = 600
h = 600
scale = 100  # pixel size of one tile
WHITE = (255, 255, 255)
BLUE = (0, 0, 255)
# Current and best score for this session (updated via update_score()).
score = max_score = 0
pygame.init()
screen = pygame.display.set_mode((w,h))
pygame.display.set_caption('2048 - Genérico')
#pygame.freetype.init()
# Font sizes chosen so tile numbers of increasing width still fit a cell.
GAME_FONT_BIG = pygame.freetype.SysFont('arial', 80)
GAME_FONT = pygame.freetype.SysFont('arial', 48)
GAME_FONT_SMALL = pygame.freetype.SysFont('arial', 36)
GAME_FONT_SMALLEST = pygame.freetype.SysFont('arial', 24)
# The board: 0 means empty, otherwise the tile's value (2, 4, 8, ...).
grid = np.zeros((4, 4))
def end_game(grid):
    """Show the game-over prompt; 'y' resets score and board, 'n' quits.

    Returns the (possibly reset) grid. Any other key re-prompts recursively.
    """
    GAME_FONT_BIG.render_to(screen, (110, 150), 'Game over', BLUE)
    GAME_FONT_BIG.render_to(screen, (18, 380), 'Continue? (y / n)', BLUE)
    pygame.display.update()
    pygame.event.clear()
    event = pygame.event.wait()
    if event.type == KEYDOWN and event.key == K_y:
        # Restart: zero the score and hand back a fresh empty board.
        global score
        score = 0
        grid = np.zeros((4, 4))
        return grid
    elif event.type == KEYDOWN and event.key == K_n:
        sys.exit()
    else:
        # Unrecognized key: ask again until the player answers y or n.
        return end_game(grid)
def win_game(grid):
    """Show the 'You win' prompt; 'y' keeps playing with the same board,
    'n' quits.

    Returns the unchanged grid when the player continues. Any other key
    re-prompts recursively.
    """
    GAME_FONT_BIG.render_to(screen, (110, 150), 'You win!!!', BLUE)
    GAME_FONT_BIG.render_to(screen, (18, 380), 'Continue? (y / n)', BLUE)
    pygame.display.update()
    pygame.event.clear()
    event = pygame.event.wait()
    if event.type == KEYDOWN and event.key == K_y:
        return grid
    elif event.type == KEYDOWN and event.key == K_n:
        sys.exit()
    else:
        # Bug fix: this previously fell through to end_game(), showing the
        # "Game over" dialog (and offering a board/score reset) from the
        # win screen. Re-prompt the win dialog instead.
        return win_game(grid)
def make_new_rect(grid):
    """Spawn a new tile (2 with p=.8, 4 with p=.2) on a random empty cell.

    Also doubles as the end-of-move check: a full board opens the
    game-over dialog and a 2048 tile opens the win dialog. Retries
    recursively when the randomly chosen cell is occupied.
    """
    pos = (np.random.randint(0, 4),
           np.random.randint(0, 4))
    if np.count_nonzero(grid) == 16:
        return end_game(grid)
    # NOTE(review): this only detects a win while there is *exactly one*
    # 2048 tile on the board (two of them sum to 4096, not 2048) --
    # confirm whether np.any(grid == 2048) was intended.
    if sum(grid[grid == 2048]) == 2048:
        return win_game(grid)
    elif grid[pos] == 0:
        grid[pos] = np.random.choice([2, 4], p = [.8, .2])
        return grid
    else:
        # Chosen cell occupied: retry with a fresh random position.
        return make_new_rect(grid)
def move_up(grid):
    """Slide/merge all tiles upward in place, then spawn a new tile.

    For each column, every tile is bubbled toward row 0, merging into its
    upper neighbour whenever the values match or the neighbour is empty.
    update_score() credits the score only on real merges (equal values).
    """
    cols = grid.shape[1]
    for col in range(cols):
        rows = grid.shape[0]
        for row in range(1, rows):
            for inc in range(row):
                # use inc to roll the lines
                if grid[row - inc, col] == grid[row - inc - 1, col] or grid[row - inc - 1, col] == 0:
                    update_score(grid[row - inc, col], grid[row - inc - 1, col])
                    grid[row - inc - 1, col] += grid[row - inc, col]
                    grid[row - inc, col] = 0
    return make_new_rect(grid)
def move_down(grid):
    """Slide/merge all tiles downward in place, then spawn a new tile.

    Mirror image of move_up(): each tile is bubbled toward the last row,
    merging whenever the values match or the neighbour cell is empty.
    """
    cols = grid.shape[1]
    for col in range(cols):
        rows = grid.shape[0]
        for row in range(rows - 2, -1, -1):
            for inc in range(rows - row - 1):
                # use inc to roll the lines
                if grid[row + inc, col] == grid[row + inc + 1, col] or grid[row + inc + 1, col] == 0:
                    update_score(grid[row + inc, col], grid[row + inc + 1, col])
                    grid[row + inc + 1, col] += grid[row + inc, col]
                    grid[row + inc, col] = 0
    return make_new_rect(grid)
def move_right(grid):
    """Slide/merge all tiles to the right in place, then spawn a new tile.

    Mirror image of move_left(): each tile is bubbled toward the last
    column, merging whenever the values match or the neighbour is empty.
    """
    rows = grid.shape[0]
    for row in range(rows):
        cols = grid.shape[1]
        for col in range(cols - 2, -1, -1):
            # Consistency fix: the original built `incs = np.arange(cols - col - 1)`
            # with a dead `if len(incs) == 0: incs = [0]` fallback that can
            # never fire inside this loop (cols - col - 1 >= 1 here); use a
            # plain range() exactly like move_down/move_left.
            for inc in range(cols - col - 1):
                # use inc to roll the lines
                if grid[row, col + inc] == grid[row, col + inc + 1] or grid[row, col + inc + 1] == 0:
                    update_score(grid[row, col + inc], grid[row, col + inc + 1])
                    grid[row, col + inc + 1] += grid[row, col + inc]
                    grid[row, col + inc] = 0
    return make_new_rect(grid)
def move_left(grid):
    """Slide/merge all tiles to the left in place, then spawn a new tile.

    For each row, every tile is bubbled toward column 0, merging into its
    left neighbour whenever the values match or the neighbour is empty.
    """
    rows = grid.shape[0]
    for row in range(rows):
        cols = grid.shape[1]
        for col in range(1, cols):
            for inc in range(col):
                # use inc to roll the lines
                if grid[row, col - inc] == grid[row, col - inc - 1] or grid[row, col - inc - 1] == 0:
                    update_score(grid[row, col - inc], grid[row, col - inc - 1])
                    grid[row, col - inc - 1] += grid[row, col - inc]
                    grid[row, col - inc] = 0
    return make_new_rect(grid)
def update_score(next, previous):
    """Add the merged tile value to the global score when a merge occurs.

    Called for every candidate move step; it only counts when the two
    cells hold equal values, i.e. an actual merge (moving into an empty
    cell passes previous == 0 != next and scores nothing).

    NOTE(review): the parameter name `next` shadows the builtin next();
    harmless here, but worth renaming if the signature is ever revisited.
    """
    global score
    if previous == next:
        score += int(previous + next)
    return None
clock = pygame.time.Clock()
# Surface((width, height), flags=0, depth=0, masks=None) -> Surface
# Reusable white square blitted behind every non-empty tile.
rect_skin = pygame.Surface((scale, scale))
rect_skin.fill(WHITE)
# NOTE(review): the .format(score) here is a no-op -- the string has no
# placeholder; presumably left over from an earlier message.
GAME_FONT_BIG.render_to(screen, (55, 250), 'Press any key'.format(score), BLUE)
grid = make_new_rect(grid)
pygame.display.update()
pygame.event.clear()
event = pygame.event.wait()  # wait for the "press any key" acknowledgement
while True:
    max_score = max(max_score, score)
    clock.tick(50)  # cap the loop at 50 FPS
    screen.fill((0,0,0))
    GAME_FONT_SMALLEST.render_to(screen, (380, 20), 'Score: {}'.format(score), WHITE)
    GAME_FONT_SMALLEST.render_to(screen, (380, 50), 'Max Score: {}'.format(max_score), WHITE)
    # Draw every non-empty tile; font size shrinks as the number widens.
    for n_row, row in enumerate(grid):
        for n_col, value in enumerate(row):
            if grid[n_row, n_col] != 0:
                # +scale offsets account for the board's margin.
                x = n_col * scale + scale
                y = n_row * scale + scale
                screen.blit(rect_skin, (x, y))
                if value < 99:
                    GAME_FONT.render_to(screen, (x + scale // 3, y + scale // 3),
                                        str(int(value)),
                                        (0, 0, 0))
                elif value < 1999:
                    GAME_FONT_SMALL.render_to(screen, (x + scale // 4, y + scale // 2.5),
                                              str(int(value)),
                                              (0, 0, 0))
                else:
                    GAME_FONT_SMALL.render_to(screen, (x + scale // 6, y + scale // 2.5),
                                              str(int(value)),
                                              (0, 0, 0))
    #line(Surface, color, start_pos, end_pos, width=1) -> Rect
    # Green grid lines over the 400x400 board region.
    for line in range(100, 501, 100):
        pygame.draw.line(screen, (0, 200, 0), (100, line), (500, line), (2))
        pygame.draw.line(screen, (0, 200, 0), (line, 100), (line, 500), (2))
    pygame.display.update()
    pygame.event.clear()
    event = pygame.event.wait()  # block until the next input event
    if event.type == pygame.QUIT:
        sys.exit()
    elif event.type == KEYDOWN and event.key == K_ESCAPE:
        sys.exit()
    elif event.type == KEYUP and event.key == K_DOWN:
        grid = move_down(grid)
    elif event.type == KEYUP and event.key == K_UP:
        grid = move_up(grid)
    elif event.type == KEYUP and event.key == K_RIGHT:
        grid = move_right(grid)
    elif event.type == KEYUP and event.key == K_LEFT:
        grid = move_left(grid)
181568 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Copyright (c) 2019 the HERA Project
# Licensed under the MIT License
"""Class and algorithms to compute per Antenna metrics."""
import numpy as np
from copy import deepcopy
import os
import re
from .version import hera_qm_version_str
from . import utils, metrics_io
def get_ant_metrics_dict():
    """Return {metric name: description} for all ant_metrics products.

    Simple function used by hera_mc to populate its table of metrics
    and their descriptions.

    Returns
    -------
    metrics_dict : dict
        Dictionary with metric names as keys and descriptions as values.
    """
    legacy = ' LEGACY METRIC.'
    description_pairs = [
        ('ant_metrics_corr',
         'Median value of the corr_metric across all values '
         'including an antenna.'),
        ('ant_metrics_corrXPol',
         'Max difference between same-pol and cross-pol corr values '),
        ('ant_metrics_meanVij',
         'Mean of the absolute value of all visibilities associated '
         'with an antenna.' + legacy),
        ('ant_metrics_meanVijXPol',
         'Ratio of mean cross-pol visibilities to mean same-pol '
         'visibilities: (Vxy+Vyx)/(Vxx+Vyy).' + legacy),
        ('ant_metrics_mod_z_scores_meanVij',
         'Modified z-score of the mean of the absolute value of all '
         'visibilities associated with an antenna.' + legacy),
        ('ant_metrics_mod_z_scores_meanVijXPol',
         'Modified z-score of the ratio of mean cross-pol visibilities '
         'to mean same-pol visibilities: (Vxy+Vyx)/(Vxx+Vyy).' + legacy),
        ('ant_metrics_crossed_ants',
         'Antennas deemed to be cross-polarized by hera_qm.ant_metrics.'),
        ('ant_metrics_removal_iteration',
         'hera_qm.ant_metrics iteration number in which the antenna '
         'was removed.'),
        ('ant_metrics_xants',
         'Antennas deemed bad by hera_qm.ant_metrics.'),
        ('ant_metrics_dead_ants',
         'Antennas deemed to be dead by hera_qm.ant_metrics.'),
    ]
    return dict(description_pairs)
def calc_corr_stats(data_sum, data_diff=None, flags=None, time_alg=np.nanmean, freq_alg=np.nanmean):
    """Reduce each baseline's even/odd cross-correlation to a scalar.

    Parameters
    ----------
    data_sum : dictionary or hera_cal DataContainer
        Maps baseline keys e.g. (0, 1, 'ee') to (Ntimes, Nfreqs) arrays;
        the even+odd output from the correlator.
    data_diff : dictionary or hera_cal DataContainer, optional
        The even-odd correlator output with the same keys/shapes. If not
        provided, data_sum is split into interleaving timesteps instead.
    flags : dictionary or hera_cal DataContainer, optional
        Boolean arrays (same keys/shapes as data_sum) marking samples to
        exclude from the calculation.
    time_alg : function, optional
        Reduces a 2D or 1D array to a single number; use a "nan" variant
        so flagged samples are ignored.
    freq_alg : function, optional
        Reduces along the frequency axis (axis kwarg). If identical to
        time_alg, a single global reduction is performed instead.

    Returns
    -------
    corr_stats : dictionary
        Maps baseline keys to correlation amplitudes: ~1 means strongly
        correlated, 0 means no correlation (or a totally dead antenna).
    """
    corr_stats = {}
    for bl, vis_sum in data_sum.items():
        # Mask non-finite (and flagged) samples with NaN so the NaN-aware
        # reductions skip them.
        vis = np.where(np.isfinite(vis_sum), vis_sum, np.nan)
        if flags is not None:
            vis[flags[bl]] = np.nan
        # A sum file that is mostly zeros indicates a totally dead antenna.
        if np.nanmedian(np.abs(vis)) == 0:
            corr_stats[bl] = 0
            continue
        if data_diff is not None:
            diff = np.where(np.isfinite(data_diff[bl]), data_diff[bl], np.nan)
            even = (vis + diff) / 2
            odd = (vis - diff) / 2
        else:
            # No diff data: interleave integrations, dropping the last one
            # when there is an odd number of them.
            n_usable = (vis.shape[0] // 2) * 2
            even = vis[0:n_usable:2, :]
            odd = vis[1:n_usable:2, :]
        # Normalize so every channel carries equal weight (limits RFI impact).
        even = even / np.abs(even)
        odd = odd / np.abs(odd)
        product = even * np.conj(odd)
        if time_alg == freq_alg:
            # Same reducer for both axes: collapse the 2D array in one go.
            corr_stats[bl] = np.abs(time_alg(product))
        else:
            corr_stats[bl] = np.abs(time_alg(freq_alg(product, axis=1)))
    return corr_stats
def corr_metrics(corr_stats, xants=[], pols=None):
    """Calculate all antennas' mean correlation values.

    Parameters
    ----------
    corr_stats : dictionary
        Dictionary mapping baseline tuple e.g. (0, 1, 'ee') to
        correlation metric averaged over time and frequency.
    xants : list of ints or tuples, optional
        Antenna numbers or tuples e.g. (1, 'Jee') to exclude from metrics.
    pols : list of str, optional
        List of visibility polarizations (e.g. ['ee','en','ne','nn']).
        Default None means all visibility polarizations are used.

    Returns
    -------
    per_ant_mean_corr_metrics : dict
        Dictionary indexed by (ant, antpol) of the mean correlation value
        associated with an antenna. Very small or very large numbers are
        probably bad antennas.
    """
    from hera_cal.utils import split_pol, split_bl
    # figure out which antennas match pols and are not in xants
    # NOTE: antpols is only bound when pols is not None; the short-circuit
    # `(pols is None) or (ant[1] in antpols)` below protects against the
    # unbound case -- keep that ordering intact.
    if pols is not None:
        antpols = set([ap for bl in corr_stats for ap in split_pol(bl[2])
                       if ((pols is None) or (bl[2] in pols))])
    ants = set()
    for bl in corr_stats:
        for ant in split_bl(bl):
            if (ant not in xants) and (ant[0] not in xants):
                if (pols is None) or (ant[1] in antpols):
                    ants.add(ant)
    # assign correlation metrics to each antenna in the baseline
    per_ant_corrs = {ant: [] for ant in ants}
    for bl, corr_mean in corr_stats.items():
        if bl[0] == bl[1]:
            continue  # ignore autocorrelations
        if (pols is None) or (bl[2] in pols):
            # both antennas must be unflagged for the baseline to count
            if split_bl(bl)[0] in ants and split_bl(bl)[1] in ants:
                for ant in split_bl(bl):
                    per_ant_corrs[ant].append(corr_mean)
    per_ant_mean_corr_metrics = {ant: np.nanmean(per_ant_corrs[ant]) for ant in ants}
    return per_ant_mean_corr_metrics
def corr_cross_pol_metrics(corr_stats, xants=[]):
    """Calculate the differences in corr_stats between polarizations.

    For typical usage corr_stats is a measure of per-baseline average
    correlation as calculated by the calc_corr_stats method. The four
    polarization combinations are xx-xy, yy-xy, xx-yx, and yy-yx. An
    antenna is considered cross-polarized if all four of these metrics
    are less than zero.

    Parameters
    ----------
    corr_stats : dictionary
        Dictionary mapping baseline tuple e.g. (0, 1, 'ee') to
        its average corr_metric value.
    xants : list of integers or tuples of antennas to exclude, optional

    Returns
    -------
    per_ant_corr_cross_pol_metrics : dict
        Dictionary indexed by keys (ant, antpol). Contains the max value
        over the four polarization combinations of the average (over
        baselines) difference in correlation metrics (xx-xy, xx-yx,
        yy-xy, yy-yx).
    """
    # NOTE: reverse_bl is imported but not referenced below -- presumably
    # a leftover; left in place since removing it is a code change.
    from hera_cal.utils import split_pol, split_bl, reverse_bl
    from hera_cal.datacontainer import DataContainer
    # cast corr_stats as DataContainer to abstract away polarization/conjugation
    corr_stats_dc = DataContainer(corr_stats)
    # figure out pols in corr_stats and make sure they are sensible
    cross_pols = [pol for pol in corr_stats_dc.pols() if split_pol(pol)[0] != split_pol(pol)[1]]
    same_pols = [pol for pol in corr_stats_dc.pols() if split_pol(pol)[0] == split_pol(pol)[1]]
    if (len(corr_stats_dc.pols()) != 4) or (len(same_pols) != 2):
        raise ValueError('There must be precisely two "cross" visbility polarizations '
                         'and two "same" polarizations but we have instead '
                         f'{cross_pols} and {same_pols}')
    # get ants, antnums, and antpols (excluding anything in xants)
    ants = set()
    for bl in corr_stats:
        for ant in split_bl(bl):
            if (ant not in xants) and (ant[0] not in xants):
                ants.add(ant)
    antnums = set([ant[0] for ant in ants])
    antpols = set([ant[1] for ant in ants])
    # If an antenna is not touched, data is missing and hence set this metric to nan.
    per_ant_corr_cross_pol_metrics = {ant: np.nan for ant in ants}
    # Iterate through all antennas
    for a1 in antnums:
        # check if any pols of this ant are flagged
        if (a1 in xants) or np.any([(a1, ap) in xants for ap in antpols]):
            continue
        # one accumulator per (same-pol, cross-pol) combination
        diffs = [[], [], [], []]
        for a2 in antnums:
            # check if any pols of this ant are flagged
            if (a2 in xants) or np.any([(a2, ap) in xants for ap in antpols]):
                continue
            # this loops over all the combinations of same and cross-pols
            # technically, this is a double-count, but the average takes that out
            for i, (sp, cp) in enumerate([(sp, cp) for sp in same_pols for cp in cross_pols]):
                if ((a1, a2, sp) in corr_stats_dc) and ((a1, a2, cp) in corr_stats_dc):
                    diffs[i].append(corr_stats_dc[(a1, a2, sp)] - corr_stats_dc[(a1, a2, cp)])
        # assign same metric to both antpols
        for ap in antpols:
            per_ant_corr_cross_pol_metrics[(a1, ap)] = np.nanmax([np.nanmean(d) for d in diffs])
    return per_ant_corr_cross_pol_metrics
def load_antenna_metrics(filename):
    """Load cut decisions and metrics from an HDF5 file into a python dict.

    Loading is handled via hera_qm.metrics_io.load_metric_file.

    Parameters
    ----------
    filename : str
        Full path to the filename of the metric to load. Must be either
        HDF5 (recommended) or JSON (deprecated in future) file type.

    Returns
    -------
    metrics : dict
        Dictionary of metrics stored in the input file.
    """
    return metrics_io.load_metric_file(filename)
#######################################################################
# High level functionality for HERA
#######################################################################
class AntennaMetrics():
    """Container for holding data and meta-data for ant metrics calculations.

    This class creates an object for holding relevant visibility data and
    metadata, and provides interfaces to two antenna metrics: one for
    identifying dead / not correlating antennas and the other for
    identifying cross-polarized antennas. These metrics can be used
    iteratively to identify bad antennas. The object handles all storage
    of metrics, and supports writing metrics to an HDF5 filetype. The
    analysis functions are designed to work on raw data from one or more
    observations with all four polarizations.
    """

    def __init__(self, sum_files, diff_files=None, apriori_xants=[], Nbls_per_load=None):
        """Initialize an AntennaMetrics object and load correlation stats.

        Parameters
        ----------
        sum_files : str or list of str
            Path to file or files of raw sum data to calculate antenna
            metrics on.
        diff_files : str or list of str, optional
            Path to file or files of raw diff data. If not provided,
            even/odd correlations are inferred by interleaving the sum
            data. Assumed to match sum_files in metadata; flags are ORed
            with the sum-file flags.
        apriori_xants : list of integers or tuples, optional
            Antenna numbers or antpol tuples e.g. (0, 'Jee') to exclude
            apriori. Included in self.xants but not self.dead_ants or
            self.crossed_ants when writing results to disk.
        Nbls_per_load : integer, optional
            Number of baselines to load simultaneously (memory vs. speed
            trade-off). Default None loads all baselines at once.

        Attributes
        ----------
        hd_sum, hd_diff : HERAData
            HERAData objects for the sum and diff files (hd_diff is None
            when no diff files are given).
        ants, antnums, antpols : sets
            Antenna-polarization tuples, antenna numbers, and antpol
            strings (typically {'Jee', 'Jnn'}) present in the data.
        bls : list
            Baselines common to all input files.
        datafile_list_sum, datafile_list_diff : list of str or None
            Input filenames that went into this calculation.
        corr_stats : dict
            Per-baseline correlation amplitudes from calc_corr_stats().
        version_str, history : str
            hera_qm version and history appended when writing files.
        """
        # Instantiate HERAData object and figure out baselines
        from hera_cal.io import HERAData
        if isinstance(sum_files, str):
            sum_files = [sum_files]
        if isinstance(diff_files, str):
            diff_files = [diff_files]
        if (diff_files is not None) and (len(diff_files) != len(sum_files)):
            raise ValueError(f'The number of sum files ({len(sum_files)}) does not match the number of diff files ({len(diff_files)}).')
        self.datafile_list_sum = sum_files
        self.hd_sum = HERAData(sum_files)
        if diff_files is None or len(diff_files) == 0:
            self.datafile_list_diff = None
            self.hd_diff = None
        else:
            self.datafile_list_diff = diff_files
            self.hd_diff = HERAData(diff_files)
        if len(self.hd_sum.filepaths) > 1:
            # only load baselines present in all files
            self.bls = sorted(set.intersection(*[set(bls) for bls in self.hd_sum.bls.values()]))
        else:
            self.bls = self.hd_sum.bls
        # Figure out polarizations in the data:
        from hera_cal.utils import split_bl, comply_pol, split_pol
        self.pols = set([bl[2] for bl in self.bls])
        self.cross_pols = [pol for pol in self.pols if split_pol(pol)[0] != split_pol(pol)[1]]
        self.same_pols = [pol for pol in self.pols if split_pol(pol)[0] == split_pol(pol)[1]]
        # Figure out which antennas are in the data
        self.split_bl = split_bl  # cached to avoid re-importing later
        self.ants = set([ant for bl in self.bls for ant in split_bl(bl)])
        self.antnums = set([ant[0] for ant in self.ants])
        self.antpols = set([ant[1] for ant in self.ants])
        # Parse apriori_xants: bare ints expand to all antpols; tuples are
        # validated against the antpols actually present in the data.
        if not (isinstance(apriori_xants, list) or isinstance(apriori_xants, np.ndarray)):
            raise ValueError('apriori_xants must be a list or numpy array.')
        self.apriori_xants = set([])
        for ant in apriori_xants:
            if isinstance(ant, int):
                for ap in self.antpols:
                    self.apriori_xants.add((ant, ap))
            elif isinstance(ant, tuple):
                if (len(ant) != 2) or (comply_pol(ant[1]) not in self.antpols):
                    raise ValueError(f'{ant} is not a valid entry in apriori_xants.')
                self.apriori_xants.add((ant[0], comply_pol(ant[1])))
            else:
                raise ValueError(f'{ant} is not a valid entry in apriori_xants.')
        # Set up metadata and summary stats
        self.version_str = hera_qm_version_str
        self.history = ''
        self._reset_summary_stats()
        # Load and summarize data
        self._load_corr_stats(Nbls_per_load=Nbls_per_load)

    def _reset_summary_stats(self):
        """Reset all the internal summary statistics back to empty."""
        self.xants, self.crossed_ants, self.dead_ants = [], [], []
        self.iter = 0
        self.removal_iteration = {}
        self.all_metrics = {}
        self.final_metrics = {}
        # A priori exclusions count as removed "before" iteration 0.
        for ant in self.apriori_xants:
            self.xants.append(ant)
            self.removal_iteration[ant] = -1

    def _load_corr_stats(self, Nbls_per_load=None):
        """Loop through groups of baselines to calculate self.corr_stats
        using calc_corr_stats().
        """
        if Nbls_per_load is None:
            bl_load_groups = [self.bls]
        else:
            bl_load_groups = [self.bls[i:i + Nbls_per_load]
                              for i in range(0, len(self.bls), Nbls_per_load)]
        # loop through baseline load groups, computing corr_stats
        self.corr_stats = {}
        for blg in bl_load_groups:
            data_sum, flags, _ = self.hd_sum.read(bls=blg, axis='blt')
            data_diff = None
            if self.hd_diff is not None:
                data_diff, flags_diff, _ = self.hd_diff.read(bls=blg, axis='blt')
                # OR sum- and diff-file flags so either one flags a sample
                for bl in flags:
                    flags[bl] |= flags_diff[bl]
            self.corr_stats.update(calc_corr_stats(data_sum, data_diff=data_diff, flags=flags))

    def _find_totally_dead_ants(self, verbose=False):
        """Flag antennas whose median correlation coefficient is 0.0.

        These antennas are marked as dead. They do not appear in recorded
        antenna metrics or zscores. Their removal iteration is -1 (i.e.
        before iterative flagging).
        """
        # assign corr_stats to antennas
        corr_stats_by_ant = {ant: [] for ant in self.ants}
        for bl in self.corr_stats:
            for ant in self.split_bl(bl):
                corr_stats_by_ant[ant].append(self.corr_stats[bl])
        # remove antennas that are totally dead or all-NaN
        for ant, corrs in corr_stats_by_ant.items():
            med = np.nanmedian(corrs)
            # ~ is numpy's elementwise NOT on the boolean from isfinite
            if ~np.isfinite(med) or (med == 0):
                self.xants.append(ant)
                self.dead_ants.append(ant)
                self.removal_iteration[ant] = -1
                if verbose:
                    print(f'Antenna {ant} appears totally dead and is removed.')

    def _run_all_metrics(self):
        """Compute and record both metrics for the current iteration."""
        # Compute all raw metrics
        metNames = []
        metVals = []
        metNames.append('corr')
        metVals.append(corr_metrics(self.corr_stats, xants=self.xants, pols=self.same_pols))
        metNames.append('corrXPol')
        metVals.append(corr_cross_pol_metrics(self.corr_stats, xants=self.xants))
        # Save all metrics; final_metrics keeps the most recent value per key
        metrics = {}
        for metric, metName in zip(metVals, metNames):
            metrics[metName] = metric
            for key in metric:
                if metName in self.final_metrics:
                    self.final_metrics[metName][key] = metric[key]
                else:
                    self.final_metrics[metName] = {key: metric[key]}
        self.all_metrics.update({self.iter: metrics})

    def iterative_antenna_metrics_and_flagging(self, crossCut=0, deadCut=0.4, verbose=False):
        """Run corr metric and crosspol metrics and store results in self.

        One antenna (the single worst offender) is removed per iteration,
        then both metrics are recomputed, until no antenna falls below
        either cut.

        Parameters
        ----------
        crossCut : float, optional
            Cut in cross-pol correlation metric below which to flag
            antennas as cross-polarized. Default is 0.
        deadCut : float, optional
            Cut in correlation metric below which antennas are most
            likely dead / not correlating. Default is 0.4.
        verbose : bool, optional
            If True, print each flagging decision.
        """
        self._reset_summary_stats()
        self._find_totally_dead_ants(verbose=verbose)
        self.crossCut, self.deadCut = crossCut, deadCut
        # iteratively remove antennas, removing only the worst antenna
        for iteration in range(len(self.antpols) * len(self.ants)):
            self.iter = iteration
            self._run_all_metrics()
            worstDeadCutDiff = 1
            worstCrossCutDiff = 1
            # Find most likely dead/crossed antenna
            deadMetrics = {ant: metric for ant, metric in self.all_metrics[iteration]['corr'].items() if np.isfinite(metric)}
            crossMetrics = {ant: np.max(metric) for ant, metric in self.all_metrics[iteration]['corrXPol'].items() if np.isfinite(metric)}
            if (len(deadMetrics) == 0) or (len(crossMetrics) == 0):
                break  # no unflagged antennas remain
            worstDeadAnt = min(deadMetrics, key=deadMetrics.get)
            worstDeadCutDiff = np.abs(deadMetrics[worstDeadAnt]) - deadCut
            worstCrossAnt = min(crossMetrics, key=crossMetrics.get)
            worstCrossCutDiff = crossMetrics[worstCrossAnt] - crossCut
            # Find the single worst antenna, remove it, log it, and run again
            if (worstCrossCutDiff <= worstDeadCutDiff) and (worstCrossCutDiff < 0):
                for antpol in self.antpols:  # if crossed remove both polarizations
                    crossed_ant = (worstCrossAnt[0], antpol)
                    self.xants.append(crossed_ant)
                    self.crossed_ants.append(crossed_ant)
                    self.removal_iteration[crossed_ant] = iteration
                    if verbose:
                        print(f'On iteration {iteration} we flag {crossed_ant} with cross-pol corr metric of {crossMetrics[worstCrossAnt]}.')
            elif (worstDeadCutDiff < worstCrossCutDiff) and (worstDeadCutDiff < 0):
                dead_ants = set([worstDeadAnt])
                for dead_ant in dead_ants:
                    self.xants.append(dead_ant)
                    self.dead_ants.append(dead_ant)
                    self.removal_iteration[dead_ant] = iteration
                    if verbose:
                        print(f'On iteration {iteration} we flag {dead_ant} with corr metric z of {deadMetrics[worstDeadAnt]}.')
            else:
                # no antenna below either cut: converged
                break

    def save_antenna_metrics(self, filename, overwrite=False):
        """Output all meta-metrics and cut decisions to an HDF5 file.

        Saves all cut decisions and meta-metrics in an HDF5 file that can
        be loaded back into a dictionary using
        hera_qm.ant_metrics.load_antenna_metrics().

        Parameters
        ----------
        filename : str
            The file into which metrics will be written.
        overwrite : bool, optional
            Whether to overwrite an existing file. Default is False.
        """
        out_dict = {'xants': self.xants}
        out_dict['crossed_ants'] = self.crossed_ants
        out_dict['dead_ants'] = self.dead_ants
        out_dict['final_metrics'] = self.final_metrics
        out_dict['all_metrics'] = self.all_metrics
        out_dict['removal_iteration'] = self.removal_iteration
        out_dict['cross_pol_cut'] = self.crossCut
        out_dict['dead_ant_cut'] = self.deadCut
        out_dict['datafile_list_sum'] = self.datafile_list_sum
        out_dict['datafile_list_diff'] = self.datafile_list_diff
        out_dict['history'] = self.history
        metrics_io.write_metric_file(filename, out_dict, overwrite=overwrite)
def ant_metrics_run(sum_files, diff_files=None, apriori_xants=[], a_priori_xants_yaml=None,
                    crossCut=0.0, deadCut=0.4, metrics_path='', extension='.ant_metrics.hdf5',
                    overwrite=False, Nbls_per_load=None, history='', verbose=True):
    """Run a series of ant_metrics tests on a given set of input files.

    Note
    ----
    The function takes a file or list of files and options. It runs ant
    metrics once on all files together but then saves identical HDF5
    results for each input file.

    Parameters
    ----------
    sum_files : str or list of str
        Path to file or files of raw sum data to calculate antenna metrics on.
    diff_files : str or list of str
        Path to file or files of raw diff data to calculate antenna metrics on.
    apriori_xants : list of integers or tuples, optional
        Antenna numbers or antpol tuples e.g. (0, 'Jee') to exclude apriori.
        Included in xants, but not dead_ants or crossed_ants on disk.
    a_priori_xants_yaml : string, optional
        Path to a priori flagging YAML with antenna flagging information.
        See hera_qm.metrics_io.read_a_priori_ant_flags() for details.
        Frequency and time flags in the YAML are ignored.
    crossCut : float, optional
        Cut in cross-pol correlation metric below which to flag antennas
        as cross-polarized. Default is 0.
    deadCut : float, optional
        Cut in correlation metric below which antennas are most likely
        dead / not correlating. Default is 0.4.
    metrics_path : str, optional
        Full path to directory to store output metrics. Default is the
        same directory as the input data files.
    extension : str, optional
        File extension to add to output files. Default is ant_metrics.hdf5.
    overwrite : bool, optional
        Whether to overwrite existing ant_metrics files. Default is False.
    Nbls_per_load : integer, optional
        Number of baselines to load simultaneously. Default None loads all.
    history : str, optional
        History to add to metrics. Default is an empty string.
    verbose : bool, optional
        If True, print statements during iterative flagging. Default True.
    """
    # load a priori exants from YAML and append to apriori_xants
    if a_priori_xants_yaml is not None:
        apaf = metrics_io.read_a_priori_ant_flags(a_priori_xants_yaml)
        apriori_xants = list(set(list(apriori_xants) + apaf))
    # run ant metrics once over all files together
    am = AntennaMetrics(sum_files, diff_files, apriori_xants=apriori_xants, Nbls_per_load=Nbls_per_load)
    am.iterative_antenna_metrics_and_flagging(crossCut=crossCut, deadCut=deadCut, verbose=verbose)
    am.history = am.history + history
    # write one identical metrics file per input sum file
    for file in am.datafile_list_sum:
        metrics_basename = utils.strip_extension(os.path.basename(file)) + extension
        if metrics_path == '':
            # default path is same directory as file
            # NOTE(review): metrics_path is latched here, so every file
            # after the first is written to the *first* file's directory --
            # confirm that is intended when inputs span directories.
            metrics_path = os.path.dirname(os.path.abspath(file))
        outfile = os.path.join(metrics_path, metrics_basename)
        if verbose:
            print(f'Now saving results to {outfile}')
        am.save_antenna_metrics(outfile, overwrite=overwrite)
| StarcoderdataPython |
1638644 | <gh_stars>0
from django.test import TestCase
from .models import Location, Category, Image
import datetime as dt
# Create your tests here.
class LocationTestClass(TestCase):
    """
    Tests for the Location model and its helper methods.
    """

    def setUp(self):
        # Fresh, unsaved Location instance for each test.
        self.loc = Location()

    def test_instance(self):
        # The fixture object is a Location instance.
        self.assertTrue(isinstance(self.loc, Location))

    def test_save_method(self):
        """save_location() persists the instance to the database."""
        self.loc.save_location()
        locations = Location.objects.all()
        self.assertTrue(len(locations) > 0)

    def test_delete_method(self):
        """delete_location() removes a previously saved instance."""
        # Use the model's own save helper for consistency with the other
        # tests (previously this called the raw Django save()).
        self.loc.save_location()
        self.loc.delete_location()
        locations = Location.objects.all()
        self.assertTrue(len(locations) == 0)

    def test_update_method(self):
        """
        A location's details can be updated.
        """
        self.loc.save_location()
        # NOTE(review): update() is called with no fields, so this only
        # checks that the call does not raise.
        Location.objects.filter().update()
        locations = Location.objects.get()
        self.assertTrue(locations)
class CategoryTestClass(TestCase):
    """
    Tests for the Category model and its helper methods.
    """

    # Set up method
    def setUp(self):
        # Fresh, unsaved Category instance for each test.
        self.cat = Category()

    # Testing instance
    def test_instance(self):
        self.assertTrue(isinstance(self.cat, Category))

    def test_save_method(self):
        """
        Function to test that category is being saved
        """
        self.cat.save_category()
        categories = Category.objects.all()
        self.assertTrue(len(categories) > 0)

    def test_delete_method(self):
        """
        Function to test that a category can be deleted
        """
        # NOTE(review): no assertion follows the delete; consider
        # verifying that Category.objects.count() returns 0 afterwards.
        self.cat.save_category()
        self.cat.delete_category()

    def test_update_method(self):
        """
        Function to test that a category's details can be updated
        """
        self.cat.save_category()
        # NOTE(review): update() is called with no fields, so this only
        # checks that the call does not raise.
        new_cat = Category.objects.filter().update()
        categories = Category.objects.get()
        self.assertTrue(categories)
class ImageTestClass(TestCase):
    """
    Tests for the Image model and its query helpers.
    """

    def setUp(self):
        # Create and save a location for the image to belong to.
        self.loc = Location()
        self.loc.save_location()
        # Create and save a category.
        self.cat = Category()
        self.cat.save_category()
        # Create a new (unsaved) image attached to the location.
        self.image = Image(location=self.loc)

    def test_instance(self):
        self.assertTrue(isinstance(self.image, Image))

    def test_save_method(self):
        """
        An image and its details can be saved.
        """
        self.image.save_image()
        images = Image.objects.all()
        self.assertTrue(len(images) > 0)

    def test_delete_method(self):
        """
        An image can be deleted.
        """
        # NOTE(review): no assertion follows the delete; consider
        # verifying that Image.objects.count() returns 0 afterwards.
        self.image.save_image()
        self.image.delete_image()

    def test_update_method(self):
        """
        An image's details can be updated.
        """
        self.image.save_image()
        Image.objects.filter().update()
        images = Image.objects.get()
        self.assertTrue(images)

    def test_get_image_by_id(self):
        """
        An image can be fetched by its id.
        """
        self.image.save_image()
        # NOTE(review): assertTrue's second argument is a failure message,
        # not a comparison value; assertEqual may be intended here.
        my_img = self.image.get_image_by_id(self.image.id)
        image = Image.objects.get(id=self.image.id)
        self.assertTrue(my_img, image)

    def test_filter_by_location(self):
        """
        Images can be filtered by their location.
        """
        self.image.save_image()
        my_img = self.image.filter_by_location(self.image.location)
        image = Image.objects.filter(location=self.image.location)
        self.assertTrue(my_img, image)

    # def test_filter_by_category_name(self):
    #     """
    #     Function to test if you can get an image by its category name
    #     """
    #     self.image.save_image()
    #     this_img = self.image.filter_by_category(self.image.Category)
    #     images = Image.search_image('my')
    #     self.assertTrue(len(images)>0)

    def test_get_photos_today(self):
        # NOTE(review): the assertion is disabled, so this only checks
        # that todays_photos() runs without raising.
        today_photos = Image.todays_photos()
        # self.assertTrue(len(today_photos)>0)

    def test_get_photos_by_date(self):
        """
        Photos can be looked up by an arbitrary date.
        """
        test_date = '2017-03-17'
        date = dt.datetime.strptime(test_date, '%Y-%m-%d').date()
        photos_by_date = Image.days_photos(date)
        # Fixed: stray dataset-marker text previously fused to this line
        # would have raised a NameError at runtime.
        self.assertTrue(len(photos_by_date) == 0)
1636004 | <gh_stars>10-100
#!/usr/bin/env python2.7
# Copyright 2018 The Fuchsia Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import frontend
import os
import tempfile
import unittest
class TestFrontend(unittest.TestCase):
    """Unit tests for frontend.load_metadata (Python 2 test module)."""

    def test_load_metadata_success(self):
        # Valid JSON metadata is parsed into the expected structure.
        tf = tempfile.NamedTemporaryFile()
        # NOTE(review): writing a str to a NamedTemporaryFile opened in the
        # default binary mode only works under Python 2 (see shebang).
        tf.write(
            '{"parts": [{"meta": "tools/x64/zbi-meta.json", '
            '"type": "host_tool"}, {"meta": "tools/x64/zxdb-meta.json", '
            '"type": "host_tool"}, {"meta": "tools/zbi-meta.json", '
            '"type": "host_tool"}], "arch": {"host": "x86_64-linux-gn", '
            '"target": ["arm64", "x64"]}, "id": "0.20200313.2.1", '
            '"schema_version": "1"}')
        tf.flush()
        metadata = frontend.load_metadata(tf.name)
        self.assertEqual(3, len(metadata['parts']))
        self.assertEqual('x86_64-linux-gn', metadata['arch']['host'])
        self.assertEqual('0.20200313.2.1', metadata['id'])

    def test_load_metadata_fail(self):
        # Both unparsable content and a missing file yield a falsy result.
        tf = tempfile.NamedTemporaryFile()
        tf.write('invalid json')
        self.assertFalse(frontend.load_metadata(tf.name))
        tf.close()  # temp file is removed when closed
        self.assertFalse(frontend.load_metadata(tf.name))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1781469 | <filename>analytics/analyzer.py
import os
from typing import Tuple, List
import numpy as np
import pandas as pd
from utils.plotting import create_chart, plot_different_metrics, plot_loss_comparison
from utils.typing import OptionAvgType, NetType
from positive_network.net_maker import get_trained_net_and_test_set as get_positive_net_and_test_set
from convex_network.net_maker import get_trained_net_and_test_set as get_convex_net_and_test_set
class Analyzer:
    """Collects MSE and loss-curve analytics for convex and positive
    option-pricing networks trained on datasets of increasing size.

    Results are accumulated in per-network / per-averaging-type lists and
    can be rendered as comparison charts.
    """

    def __init__(self, plot_metrics: bool, plot_cmp: bool, plot_loss: bool, dataset_sizes: List[int], print_steps: bool):
        # Chart / verbosity toggles.
        self.plot_metrics = plot_metrics
        self.print_steps = print_steps
        self.plot_cmp = plot_cmp
        self.plot_loss = plot_loss
        self.dataset_sizes = dataset_sizes
        # MSE accumulators, one entry per dataset size.
        self.convex_all_mse, self.convex_arithm_mse, self.convex_geom_mse = [], [], []
        self.positive_all_mse, self.positive_arithm_mse, self.positive_geom_mse = [], [], []
        # {dataset_size: {label: mean train-loss curve}}
        self.loss_cmp = {}

    def get_mse_and_loss(self, dataset: pd.DataFrame, dataset_size: int, net_type: NetType) -> Tuple[float,
                                                                                                     List[float],
                                                                                                     List[float]]:
        """Train a network on consecutive slices of *dataset* and return
        (mean MSE, mean train-loss curve, mean validation-loss curve).

        Small datasets (<= 1000 rows) are averaged over 5 independent
        runs to reduce noise; larger ones use a single run.
        """
        # The convex and positive branches were previously duplicated;
        # only the network factory differs.
        trainer = (get_convex_net_and_test_set if net_type == NetType.CONVEX
                   else get_positive_net_and_test_set)
        n_runs = 5 if dataset_size <= 1000 else 1
        mse, train_loss, val_loss = [], [], []
        for i in range(n_runs):
            net, x_test, y_test, curr_t_loss, curr_v_loss = trainer(
                dataset[i * dataset_size: (i + 1) * dataset_size],
                test_size=0.1, fixed_avg_type=None, analytics_mode=True)
            predict_price = net.predict(x_test).detach().numpy()
            mse.append(((y_test - predict_price) ** 2).mean())
            train_loss.append(curr_t_loss)
            val_loss.append(curr_v_loss)
        return sum(mse) / len(mse), \
            np.array(train_loss).mean(axis=0), \
            np.array(val_loss).mean(axis=0)

    def create_analytic_charts(self):
        """Run the full analysis over all dataset sizes and emit charts."""
        df_all = pd.read_csv('dataset_analytics.csv')
        df_all['numeric_avg_type'] = df_all.apply(
            lambda row: 1 if row.avg_type == OptionAvgType.ARITHMETIC.value else 0, axis=1)
        df_arithmetic = df_all[df_all['avg_type'] == OptionAvgType.ARITHMETIC.value]
        df_geometric = df_all[df_all['avg_type'] == OptionAvgType.GEOMETRIC.value]
        self.calc_convex_metrics(self.dataset_sizes,
                                 df_all,
                                 df_arithmetic,
                                 df_geometric)
        self.calc_positive_metrics(self.dataset_sizes,
                                   df_all,
                                   df_arithmetic,
                                   df_geometric)
        if self.plot_metrics:
            plot_different_metrics({
                'Convex all': self.convex_all_mse,
                'Convex arithm': self.convex_arithm_mse,
                'Convex geom': self.convex_geom_mse,
                'Positive all': self.positive_all_mse,
                'Positive arithm': self.positive_arithm_mse,
                'Positive geom': self.positive_geom_mse
            },
                dataset_sizes=self.dataset_sizes,
                strict_path=os.path.join('charts', 'mse_comp.png'),
                metric_name='MSE'
            )
        if self.plot_cmp:
            plot_loss_comparison(self.loss_cmp, os.path.join('charts', 'comparison'))

    def calc_positive_metrics(self, dataset_sizes, df_all, df_arithmetic, df_geometric):
        """Accumulate MSE / loss curves for the positive network."""
        for dataset_size in dataset_sizes:
            if dataset_size not in self.loss_cmp:
                self.loss_cmp[dataset_size] = {}
            # Arithmetic options
            if self.print_steps:
                print(f'Positive, arithm, df size: {dataset_size}')
            mean_mse, mean_train_loss, mean_val_loss = self.get_mse_and_loss(df_arithmetic, dataset_size, NetType.POSITIVE)
            self.positive_arithm_mse.append(mean_mse)
            if self.plot_loss:
                create_chart(mean_train_loss, mean_val_loss, '',
                             strict_path=os.path.join('charts', 'positive_net', 'arithm', f'{dataset_size}.png'))
            self.loss_cmp[dataset_size][f'Positive arithm {dataset_size}'] = mean_train_loss
            # Geometric options
            if self.print_steps:
                print(f'Positive, geom, df size: {dataset_size}')
            mean_mse, mean_train_loss, mean_val_loss = self.get_mse_and_loss(df_geometric, dataset_size, NetType.POSITIVE)
            self.positive_geom_mse.append(mean_mse)
            if self.plot_loss:
                create_chart(mean_train_loss, mean_val_loss, '',
                             strict_path=os.path.join('charts', 'positive_net', 'geom', f'{dataset_size}.png'))
            self.loss_cmp[dataset_size][f'Positive geom {dataset_size}'] = mean_train_loss
            # All options
            if self.print_steps:
                print(f'Positive, all, df size: {dataset_size}')
            mean_mse, mean_train_loss, mean_val_loss = self.get_mse_and_loss(df_all, dataset_size, NetType.POSITIVE)
            self.positive_all_mse.append(mean_mse)
            if self.plot_loss:
                create_chart(mean_train_loss, mean_val_loss, '',
                             strict_path=os.path.join('charts', 'positive_net', 'all', f'{dataset_size}.png'))
            self.loss_cmp[dataset_size][f'Positive all {dataset_size}'] = mean_train_loss

    def calc_convex_metrics(self, dataset_sizes, df_all, df_arithmetic, df_geometric):
        """Accumulate MSE / loss curves for the convex network."""
        for dataset_size in dataset_sizes:
            if dataset_size not in self.loss_cmp:
                self.loss_cmp[dataset_size] = {}
            # Arithmetic options
            if self.print_steps:
                print(f'Convex, arithm, df size: {dataset_size}')
            mean_mse, mean_train_loss, mean_val_loss = self.get_mse_and_loss(df_arithmetic, dataset_size, NetType.CONVEX)
            # BUG FIX: arithmetic MSE was previously appended to
            # convex_all_mse (and the all-options MSE to convex_arithm_mse),
            # swapping the 'Convex all' and 'Convex arithm' chart series.
            self.convex_arithm_mse.append(mean_mse)
            if self.plot_loss:
                create_chart(mean_train_loss, mean_val_loss, '',
                             strict_path=os.path.join('charts', 'convex_net', 'arithm', f'{dataset_size}.png'))
            self.loss_cmp[dataset_size][f'Convex arithm {dataset_size}'] = mean_train_loss
            # Geometric options
            if self.print_steps:
                print(f'Convex, geom, df size: {dataset_size}')
            mean_mse, mean_train_loss, mean_val_loss = self.get_mse_and_loss(df_geometric, dataset_size, NetType.CONVEX)
            self.convex_geom_mse.append(mean_mse)
            if self.plot_loss:
                create_chart(mean_train_loss, mean_val_loss, '',
                             strict_path=os.path.join('charts', 'convex_net', 'geom', f'{dataset_size}.png'))
            self.loss_cmp[dataset_size][f'Convex geom {dataset_size}'] = mean_train_loss
            # All options
            if self.print_steps:
                print(f'Convex, all, df size: {dataset_size}')
            mean_mse, mean_train_loss, mean_val_loss = self.get_mse_and_loss(df_all, dataset_size, NetType.CONVEX)
            self.convex_all_mse.append(mean_mse)
            if self.plot_loss:
                create_chart(mean_train_loss, mean_val_loss, '',
                             strict_path=os.path.join('charts', 'convex_net', 'all', f'{dataset_size}.png'))
            self.loss_cmp[dataset_size][f'Convex all {dataset_size}'] = mean_train_loss
if __name__ == '__main__':
    # Entry point: compare training-loss curves across dataset sizes.
    Analyzer(plot_metrics=False,
             plot_cmp=True,
             plot_loss=False,
             dataset_sizes=[30, 100, 500],
             print_steps=True).create_analytic_charts()
| StarcoderdataPython |
1633643 | <reponame>MaggieChege/New_App
import os
# You need to replace the next values with the appropriate values for your configuration
# Absolute path of the directory containing this config file.
basedir = os.path.abspath(os.path.dirname(__file__))
# Do not log every SQL statement.
SQLALCHEMY_ECHO = False
SQLALCHEMY_TRACK_MODIFICATIONS = True
# Replace username/password/database_na with your local PostgreSQL credentials.
SQLALCHEMY_DATABASE_URI = "postgresql://username:password@localhost/database_na"
#Clone the repo
#Set up the DATABASE_URI as stated above
#To create a virtual environment: $ python3.6 -m venv env
#To activate the virtual environment: $ source env/bin/activate
#Install requirements: $ pip3 install -r requirements.txt
# To start the server: $ python run.py
# To test the URL in Postman: $ http://127.0.0.1:5000/api/Hello
# Run migrations initialization, using the db init command as follows:
# $ python migrate.py db init
# $ python migrate.py db migrate
# To apply migrations to the DB: $ python migrate.py db upgrade
| StarcoderdataPython |
1707409 | <filename>app/exam/models.py<gh_stars>0
from django.db import models
from ckeditor.fields import RichTextField
from ckeditor_uploader.fields import RichTextUploadingField
from django.contrib.auth.models import User
from django.urls import reverse
from django.shortcuts import render, redirect
import datetime
from django.utils import timezone
from question.models import Question
from user.models import Profile
# Section to sort questions based on particular section
# i.e mcq, integer, database etc to create paper.
# Various section will contribute to be a question set, and rand field will help to position section.
class QuestionSection(models.Model):
    """A group of questions of one kind (e.g. MCQ, integer, database)
    that forms one section of a question paper."""
    title = models.CharField(max_length=100, null=True)
    # Describes how this section behaves: marking scheme and any
    # general instructions shown to the candidate.
    section_instructions = RichTextUploadingField(blank=True, null=True)
    # Used to prioritise/order sections within the question paper.
    rank = models.IntegerField(default=6, blank=True, null=True)
    # The questions belonging to this section.
    question = models.ManyToManyField(Question)

    def __str__(self):
        return f'{self.pk} | {self.title}'
class QuestionSet(models.Model):
    """A collection of sections making up one complete question paper."""
    title = models.CharField(max_length=100)
    note = models.CharField(max_length=100)
    created_date = models.DateTimeField(
        blank=True, null=True, auto_now_add=True)
    # Each question set consists of various sections.
    question_section = models.ManyToManyField(QuestionSection)

    def __str__(self):
        return f'{self.pk} | {self.title}'
class Exam(models.Model):
    """An exam built from one or more question sets."""
    created_date = models.DateTimeField(
        blank=True, null=True, auto_now_add=True)
    title = models.CharField(max_length=100)
    instructions = RichTextUploadingField(blank=True, null=True)
    # An exam can have multiple question sets; the first one is the default.
    question_set = models.ManyToManyField(QuestionSet)
    note = models.CharField(max_length=100)
    # When the exam is scheduled to take place.
    scheduled = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        return f'{self.pk} | {self.title}'
# After adding profiles to ExamSubscriber,
# particular exam will be available to users
class ExamSubscriber(models.Model):
    """Links an exam to the user profiles allowed to take it.

    Once profiles are added here, the exam becomes available to them.
    """
    exam = models.OneToOneField(Exam, on_delete=models.CASCADE, null=True)
    profile = models.ManyToManyField(Profile)

    def __str__(self):
        return f'{self.pk} | exam-id : {self.exam}'
# last added exam will be in live mode
# control which exam to conduct
# Exam id will be generated based on this data.
# one user can only give one exam at a time
# so in live exam, there is not two examSubscriber containing same profile
class SetExam(models.Model):
    """Marks which exam is currently live.

    The most recently added exam is the one conducted; exam ids are
    generated from this data. A user can take only one exam at a time,
    so no two live subscriber sets should share a profile.
    """
    exam = models.OneToOneField(Exam, on_delete=models.CASCADE, null=True)
    exam_subscriber = models.OneToOneField(ExamSubscriber, on_delete=models.CASCADE, null=True)

    def __str__(self):
        return f'{self.pk} | {self.exam}'
# Handle status of exam for a particular exam, user
class ExamStatus(models.Model):
    """Per-user progress of a live exam.

    This entry is cleared once the exam is completed.
    """
    exam = models.ForeignKey(Exam, on_delete=models.CASCADE, null=True)
    profile = models.OneToOneField(Profile, on_delete=models.CASCADE, null=True)
    is_submitted = models.BooleanField(default=False)
    total_questions = models.IntegerField(blank=True, null=True, default=0)
    completed_questions = models.ManyToManyField(Question)
    # TODO: remove this field -- remaining time should be derived from
    # start_time/end_time rather than stored here.
    time_left = models.IntegerField(blank=True, null=True, default=0)
    start_time = models.DateTimeField(blank=True, null=True)
    end_time = models.DateTimeField(blank=True, null=True)

    def __str__(self):
        # Fixed: stray dataset-marker text previously fused to this line
        # would have raised a NameError at runtime.
        return f'{self.pk} | {self.profile}'
29170 | # -*- coding: utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fixtures
import mock
from reno import create
from reno.tests import base
class TestPickFileName(base.TestCase):
    """Tests for reno's create._pick_note_file_name helper."""

    @mock.patch('os.path.exists')
    def test_not_random_enough(self, exists):
        # If every candidate filename already exists, the helper gives up.
        exists.return_value = True
        self.assertRaises(
            ValueError,
            create._pick_note_file_name,
            'somepath',
            'someslug',
        )

    @mock.patch('os.path.exists')
    def test_random_enough(self, exists):
        # With no collisions, the chosen name embeds the path and the slug.
        exists.return_value = False
        result = create._pick_note_file_name('somepath', 'someslug')
        self.assertIn('somepath', result)
        self.assertIn('someslug', result)
class TestCreate(base.TestCase):
    """Tests for note creation and editor invocation in reno.create."""

    def setUp(self):
        super(TestCreate, self).setUp()
        # Scratch directory, cleaned up automatically by the fixture.
        self.tmpdir = self.useFixture(fixtures.TempDir()).path

    def test_create_from_template(self):
        # The template text is written verbatim to the new note file.
        filename = create._pick_note_file_name(self.tmpdir, 'theslug')
        create._make_note_file(filename, 'i-am-a-template')
        with open(filename, 'r') as f:
            body = f.read()
        self.assertEqual('i-am-a-template', body)

    def test_edit(self):
        # $EDITOR set: the file is opened with that editor.
        self.useFixture(fixtures.EnvironmentVariable('EDITOR', 'myeditor'))
        with mock.patch('subprocess.call') as call_mock:
            self.assertTrue(create._edit_file('somepath'))
            call_mock.assert_called_once_with(['myeditor', 'somepath'])

    def test_edit_without_editor_env_var(self):
        # $EDITOR unset: editing is skipped entirely.
        self.useFixture(fixtures.EnvironmentVariable('EDITOR'))
        with mock.patch('subprocess.call') as call_mock:
            self.assertFalse(create._edit_file('somepath'))
            call_mock.assert_not_called()
| StarcoderdataPython |
3347479 | <reponame>ruchirtravadi/pykaldi
from kaldi.base import math as kaldi_math
from kaldi.matrix import Vector, Matrix
from kaldi.cudamatrix import (CuMatrix, CuVector,
approx_equal_cu_matrix, same_dim_cu_matrix, cuda_available)
import unittest
import numpy as np
class TestCuMatrix(unittest.TestCase):
    """Unit tests for kaldi.cudamatrix.CuMatrix construction, resizing,
    swapping, copying and comparison helpers."""

    def testNew(self):
        # Default-constructed matrix is empty.
        A = CuMatrix()
        self.assertIsNotNone(A)
        self.assertEqual(0, A.num_rows())
        self.assertEqual(0, A.num_cols())
        dim = A.dim()
        self.assertEqual(0, dim.rows)
        self.assertEqual(0, dim.cols)
        # Sized construction.
        A = CuMatrix.new_from_size(10, 10)
        self.assertIsNotNone(A)
        self.assertEqual(10, A.num_rows())
        self.assertEqual(10, A.num_cols())
        dim = A.dim()
        self.assertEqual(10, dim.rows)
        self.assertEqual(10, dim.cols)
        # Construction from a host Matrix and from another CuMatrix.
        A = CuMatrix.new_from_matrix(Matrix([[2, 3], [5, 7]]))
        self.assertIsNotNone(A)
        self.assertEqual(2, A.num_rows())
        self.assertEqual(2, A.num_cols())
        B = CuMatrix.new_from_other(A)
        self.assertIsNotNone(B)
        self.assertEqual(2, B.num_rows())
        self.assertEqual(2, B.num_cols())

    def testResize(self):
        A = CuMatrix()
        A.resize(10, 10)
        self.assertEqual(10, A.num_rows())
        self.assertEqual(10, A.num_cols())
        # A.resize(-1, -1) #This hard-crashes
        A.resize(0, 0)
        # TODO:
        # A = CuMatrix.new_from_matrix(Matrix.new([[1, 2], [3, 4], [5, 6]])) #A is 3x2
        # with self.assertRaises(Exception):
        #     A.resize(2, 2) #Try to resize to something invalid

    # FIXME:
    # Hard crashing...
    @unittest.skip("hard-crashes")
    def testSwap(self):
        for i in range(10):
            dim = (10 * i, 4 * i)
            M = Matrix(np.random.random(dim))
            A = CuMatrix.new_from_matrix(M)
            B = CuMatrix.new_from_size(A.num_rows(), A.num_cols())
            B.Swap(A)
            self.assertAlmostEqual(A.sum(), B.sum(), places = 4)  # Kaldi's precision is awful
            self.assertAlmostEqual(M.sum(), B.sum(), places = 4)  # Kaldi's precision is awful
            C = CuMatrix.new_from_size(M.shape[0], M.shape[1])
            C.SwapWithMatrix(M)
            self.assertAlmostEqual(B.sum(), C.sum(), places = 4)  # Kaldi's precision is awful

    def testcopy_from_mat(self):
        for i in range(10):
            rows, cols = 10*i, 5*i
            # Host -> device copy.
            A = Matrix(rows, cols)
            A.set_randn_()
            B = CuMatrix.new_from_size(*A.shape)
            B.copy_from_mat(A)
            self.assertAlmostEqual(A.sum(), B.sum(), places = 4)
            # Device -> device copy.
            A = CuMatrix.new_from_size(rows, cols)
            A.set_randn()
            B = CuMatrix.new_from_size(rows, cols)
            B.copy_from_cu_mat(A)
            self.assertAlmostEqual(A.sum(), B.sum(), places = 4)

    @unittest.skip("hard-crashes")
    def test__getitem(self):
        A = CuMatrix.new_from_matrix(Matrix.new(np.arange(10).reshape((5, 2))))
        self.assertEqual(0.0, A.__getitem(0, 0))
        self.assertEqual(1.0, A.__getitem(0, 1))
        self.assertEqual(2.0, A.__getitem(1, 0))
        self.assertEqual(3.0, A.__getitem(1, 1))
        self.assertEqual(4.0, A.__getitem(2, 0))
        # This should hard crash
        with self.assertRaises(IndexError):
            self.assertEqual(0.0, A.__getitem(0, 2))

    def testSameDim(self):
        A = CuMatrix()
        B = CuMatrix()
        self.assertTrue(same_dim_cu_matrix(A, B))
        A = CuMatrix.new_from_size(10, 10)
        B = CuMatrix.new_from_size(10, 9)
        self.assertFalse(same_dim_cu_matrix(A, B))

    @unittest.skip("FIXME")
    def testApproxEqual(self):
        A = CuMatrix()
        B = CuMatrix()
        self.assertTrue(approx_equal_cu_matrix(A, B))
        A.SetZero()
        B.SetZero()
        self.assertTrue(approx_equal_cu_matrix(A, B))
        B.set_randn()
        B.Scale(10.0)
        self.assertFalse(approx_equal_cu_matrix(A, B))
if __name__ == '__main__':
    if cuda_available():
        from kaldi.cudamatrix import CuDevice
        # Run the suite twice: once forced onto CPU ("no") and once on
        # GPU ("yes"), with debug stride mode enabled to surface bugs.
        for i in range(2):
            CuDevice.instantiate().set_debug_stride_mode(True)
            if i == 0:
                CuDevice.instantiate().select_gpu_id("no")
            else:
                CuDevice.instantiate().select_gpu_id("yes")
            unittest.main()
            CuDevice.instantiate().print_profile()
    else:
        unittest.main()
| StarcoderdataPython |
153416 | <reponame>SenHuang19/EnergyPlus-Volttron-Toolkit<filename>dashboard/src/zone_data.py
from flask import request
from flask_restful import Resource
import os
import json
import pytz
import sqlite3
import pandas as pd
from utils import *
from eplus_tmpl import EPlusTmpl
class ZoneData(Resource):
    """REST resource serving EnergyPlus zone names and per-zone
    temperature / cooling-setpoint time series."""

    def __init__(self):
        self.sim_folder_path = get_sim_folder_path()

    def get(self, resource_id):
        """
        Get zone data.

        :param resource_id: 1: all zones ---- 2: a specific zone
        :return: list of zone names, or list of per-timestep records
        """
        ret_val = []
        bldg = request.args.get('bldg')
        sim = request.args.get('sim')
        # Get all zones
        if resource_id == 1:
            ret_val = self.get_all_zones(bldg, sim)
        # Get zone data
        elif resource_id == 2:
            zone_name = request.args.get('zone')
            ret_val = self.get_data(bldg, sim, zone_name)
        return ret_val

    def get_all_zones(self, bldg, sim):
        """Return the zone names recorded in the simulation database."""
        ret_val = []
        sim_path = get_sim_file_path(bldg, sim)
        if os.path.isfile(sim_path):
            conn = None
            try:
                conn = sqlite3.connect(sim_path)
                c = conn.cursor()
                c.execute(EPlusTmpl.zones_tmpl)
                ret_val = [row[0] for row in c.fetchall()]
            except Exception as e:
                # Fixed: e.message does not exist in Python 3.
                # TODO: use proper logging instead of print.
                print(e)
            finally:
                # Guard against sqlite3.connect itself failing, in which
                # case conn was never bound (previously a NameError here).
                if conn is not None:
                    conn.close()
        return ret_val

    def get_data(self, bldg, sim, zone_name):
        """Return [{ts, temp, clg_sp, low_limit, high_limit}, ...] for one zone."""
        ret_val = []
        sim_path = get_sim_file_path(bldg, sim)
        if os.path.isfile(sim_path):
            conn = None
            try:
                conn = sqlite3.connect(sim_path)
                # Comfort band for the zone.
                low_limit, high_limit = get_tcc_comfort(bldg, sim, zone_name)
                # Query temperature and cooling-setpoint series into dfs.
                temp_point_query = EPlusTmpl.get_zone_temp_query(bldg, zone_name, 'temp')
                clg_sp_query = EPlusTmpl.get_zone_cooling_sp_query(bldg, zone_name, 'clg_sp')
                df_temp = get_sim_data(bldg, sim, temp_point_query)
                add_ts_col(df_temp)
                df = df_temp
                df_clg_sp = get_sim_data(bldg, sim, clg_sp_query)
                if df_clg_sp is not None and not df_clg_sp.empty:
                    add_ts_col(df_clg_sp)
                    df = pd.merge(df_temp, df_clg_sp, how='left', on='ts')
                else:
                    # Sentinel value when no cooling setpoint is available.
                    df['clg_sp'] = -9999
                # Keep only the columns the client needs.
                df = df[['ts', 'temp', 'clg_sp']]
                # Reformat ts column for JSON transport.
                df['ts'] = df['ts'].dt.strftime('%Y-%m-%d %H:%M:%S')
                # Attach the zone comfort band to every record.
                df['low_limit'] = low_limit
                df['high_limit'] = high_limit
                df = df.dropna()
                # Round-trip through JSON so flask-restful can serialize it.
                ret_val = json.loads(df.to_json(orient='records'))
            except Exception as e:
                # Fixed: previously printed e.message, which raises
                # AttributeError under Python 3.
                print(e)
            finally:
                if conn is not None:
                    conn.close()
        return ret_val
if __name__ == '__main__':
    # Ad-hoc manual check: fetch data for one known zone.
    p = ZoneData()
    #print(p.get_data('small_office', 'sim2', 'SOUTH PERIM SPC GS1'))
    print(p.get_data('building1_tcc_fd', 'simx', 'ZONE-VAV-143'))
| StarcoderdataPython |
4834899 | <reponame>zidingz/datasets<filename>tests/test_metadata_util.py
import re
import tempfile
import unittest
from dataclasses import asdict
from pathlib import Path
from datasets.utils.metadata import (
DatasetMetadata,
metadata_dict_from_readme,
tagset_validator,
validate_metadata_type,
yaml_block_from_readme,
)
def _dedent(string: str) -> str:
indent_level = min(re.search("^ +", t).end() if t.startswith(" ") else 0 for t in string.splitlines())
return "\n".join([line[indent_level:] for line in string.splitlines() if indent_level < len(line)])
# Sample README with a populated YAML front-matter block.
README_YAML = """\
---
languages:
- zh
- en
task_ids:
- sentiment-classification
---
# Begin of markdown
Some cool dataset card
"""

# Sample README whose YAML front-matter block is present but empty.
README_EMPTY_YAML = """\
---
---
# Begin of markdown
Some cool dataset card
"""

# Sample README with no YAML front-matter block at all.
README_NO_YAML = """\
# Begin of markdown
Some cool dataset card
"""
class TestMetadataUtils(unittest.TestCase):
    def test_validate_metadata_type(self):
        """validate_metadata_type rejects non-string list items, empty
        lists, and non-list values."""
        metadata_dict = {
            "tag": ["list", "of", "values"],
            "another tag": ["Another", {"list"}, ["of"], 0x646D46736457567A],
        }
        with self.assertRaises(TypeError):
            validate_metadata_type(metadata_dict)
        # An empty list is not a valid tag value.
        metadata_dict = {"tag1": []}
        with self.assertRaises(TypeError):
            validate_metadata_type(metadata_dict)
        # Neither is None.
        metadata_dict = {"tag1": None}
        with self.assertRaises(TypeError):
            validate_metadata_type(metadata_dict)
    def test_tagset_validator(self):
        """Exercises tagset_validator over valid, empty, unknown-tag and
        predicate-escaped inputs."""
        name = "test_tag"
        url = "https://dummy.hf.co"
        # All items registered -> passed through, no error.
        items = ["tag1", "tag2", "tag2", "tag3"]
        reference_values = ["tag1", "tag2", "tag3"]
        returned_values, error = tagset_validator(items=items, reference_values=reference_values, name=name, url=url)
        self.assertListEqual(returned_values, items)
        self.assertIsNone(error)
        # An empty item list is always valid.
        items = []
        reference_values = ["tag1", "tag2", "tag3"]
        items, error = tagset_validator(items=items, reference_values=reference_values, name=name, url=url)
        self.assertListEqual(items, [])
        self.assertIsNone(error)
        items = []
        reference_values = []
        returned_values, error = tagset_validator(items=items, reference_values=reference_values, name=name, url=url)
        self.assertListEqual(returned_values, [])
        self.assertIsNone(error)
        # An unknown tag empties the result and produces an error message.
        items = ["tag1", "tag2", "tag2", "tag3", "unknown tag"]
        reference_values = ["tag1", "tag2", "tag3"]
        returned_values, error = tagset_validator(items=items, reference_values=reference_values, name=name, url=url)
        self.assertListEqual(returned_values, [])
        self.assertEqual(error, f"{['unknown tag']} are not registered tags for '{name}', reference at {url}")

        def predicate_fn(string):
            # Items matching the predicate are exempt from validation.
            return "ignore" in string

        items = ["process me", "process me too", "ignore me"]
        reference_values = ["process me too"]
        returned_values, error = tagset_validator(
            items=items,
            reference_values=reference_values,
            name=name,
            url=url,
            escape_validation_predicate_fn=predicate_fn,
        )
        self.assertListEqual(returned_values, [])
        self.assertEqual(error, f"{['process me']} are not registered tags for '{name}', reference at {url}")
        items = ["process me", "process me too", "ignore me"]
        reference_values = ["process me too", "process me"]
        returned_values, error = tagset_validator(
            items=items,
            reference_values=reference_values,
            name=name,
            url=url,
            escape_validation_predicate_fn=predicate_fn,
        )
        self.assertListEqual(returned_values, items)
        self.assertIsNone(error)
        items = ["ignore me too", "ignore me"]
        reference_values = ["process me too"]
        returned_values, error = tagset_validator(
            items=items,
            reference_values=reference_values,
            name=name,
            url=url,
            escape_validation_predicate_fn=predicate_fn,
        )
        self.assertListEqual(returned_values, items)
        self.assertIsNone(error)
    def test_yaml_block_from_readme(self):
        """yaml_block_from_readme extracts the front-matter text, returns
        an empty string for an empty block, and None when absent."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = Path(tmp_dir) / "README.md"
            with open(path, "w+") as readme_file:
                readme_file.write(README_YAML)
            yaml_block = yaml_block_from_readme(path=path)
            self.assertEqual(
                yaml_block,
                _dedent(
                    """\
                    languages:
                    - zh
                    - en
                    task_ids:
                    - sentiment-classification
                    """
                ),
            )
            with open(path, "w+") as readme_file:
                readme_file.write(README_EMPTY_YAML)
            yaml_block = yaml_block_from_readme(path=path)
            self.assertEqual(
                yaml_block,
                _dedent(
                    """\
                    """
                ),
            )
            with open(path, "w+") as readme_file:
                readme_file.write(README_NO_YAML)
            yaml_block = yaml_block_from_readme(path=path)
            self.assertIsNone(yaml_block)
    def test_metadata_dict_from_readme(self):
        """metadata_dict_from_readme parses the YAML block into a dict,
        returns {} for an empty block and None when there is no block."""
        with tempfile.TemporaryDirectory() as tmp_dir:
            path = Path(tmp_dir) / "README.md"
            with open(path, "w+") as readme_file:
                readme_file.write(README_YAML)
            metadata_dict = metadata_dict_from_readme(path)
            self.assertDictEqual(metadata_dict, {"languages": ["zh", "en"], "task_ids": ["sentiment-classification"]})
            with open(path, "w+") as readme_file:
                readme_file.write(README_EMPTY_YAML)
            metadata_dict = metadata_dict_from_readme(path)
            self.assertDictEqual(metadata_dict, {})
            with open(path, "w+") as readme_file:
                readme_file.write(README_NO_YAML)
            metadata_dict = metadata_dict_from_readme(path)
            self.assertIsNone(metadata_dict)
    def test_from_yaml_string(self):
        """DatasetMetadata.from_yaml_string accepts well-formed metadata
        (with or without per-config mappings and paperswithcode_id), and
        validate() rejects unknown tags, missing fields, duplicate YAML
        keys/configs, and list-typed paperswithcode_id."""
        # A minimal, fully valid metadata block.
        valid_yaml_string = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        DatasetMetadata.from_yaml_string(valid_yaml_string)
        # Per-config language mappings are accepted.
        valid_yaml_string_with_configs = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
              en:
              - en
              fr:
              - fr
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        DatasetMetadata.from_yaml_string(valid_yaml_string_with_configs)
        # An unregistered tag value must fail validation.
        invalid_tag_yaml = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - some guys in Panama
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(invalid_tag_yaml)
            metadata.validate()
        # A missing required field (language_creators) must fail.
        missing_tag_yaml = _dedent(
            """\
            annotations_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(missing_tag_yaml)
            metadata.validate()
        # Duplicate top-level YAML keys must fail.
        duplicate_yaml_keys = _dedent(
            """\
            annotations_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            task_ids:
            - open-domain-qa
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(duplicate_yaml_keys)
            metadata.validate()
        # Duplicate config names inside a mapping must fail.
        valid_yaml_string_with_duplicate_configs = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
              en:
              - en
              en:
              - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(valid_yaml_string_with_duplicate_configs)
            metadata.validate()
        # paperswithcode_id as a plain string is accepted.
        valid_yaml_string_with_paperswithcode_id = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id: squad
            """
        )
        DatasetMetadata.from_yaml_string(valid_yaml_string_with_paperswithcode_id)
        # paperswithcode_id may also be null.
        valid_yaml_string_with_null_paperswithcode_id = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id: null
            """
        )
        DatasetMetadata.from_yaml_string(valid_yaml_string_with_null_paperswithcode_id)
        # ... but not a list.
        valid_yaml_string_with_list_paperswithcode_id = _dedent(
            """\
            annotations_creators:
            - found
            language_creators:
            - found
            languages:
            - en
            licenses:
            - unknown
            multilinguality:
            - monolingual
            pretty_name: Test Dataset
            size_categories:
            - 10K<n<100K
            source_datasets:
            - extended|other-yahoo-webscope-l6
            task_categories:
            - question-answering
            task_ids:
            - open-domain-qa
            paperswithcode_id:
            - squad
            """
        )
        with self.assertRaises(TypeError):
            metadata = DatasetMetadata.from_yaml_string(valid_yaml_string_with_list_paperswithcode_id)
            metadata.validate()
def test_get_metadata_by_config_name(self):
    """Metadata with per-config dicts should split correctly by config name.

    Covers three scenarios:
      1. multi-config YAML ("en"/"fr") -> each config gets its own
         languages/pretty_name, shared fields are copied;
      2. single-config YAML -> any requested config name falls back to the
         shared values;
      3. YAML whose per-config keys ("en"/"zh") do not cover the requested
         name ("fr") -> TypeError.
    """
    # NOTE(review): the indentation inside the YAML literals was reconstructed
    # from a whitespace-mangled source; verify the nesting of `languages:` and
    # `pretty_name:` against the original file.
    valid_yaml_with_multiple_configs = _dedent(
        """\
        annotations_creators:
        - found
        language_creators:
        - found
        languages:
          en:
          - en
          fr:
          - fr
        licenses:
        - unknown
        multilinguality:
        - monolingual
        pretty_name:
          en: English Test Dataset
          fr: French Test Dataset
        size_categories:
        - 10K<n<100K
        source_datasets:
        - extended|other-yahoo-webscope-l6
        task_categories:
        - question-answering
        task_ids:
        - open-domain-qa
        paperswithcode_id:
        - squad
        """
    )
    metadata = DatasetMetadata.from_yaml_string(valid_yaml_with_multiple_configs)
    # "en" config: per-config fields are narrowed, shared fields copied as-is.
    en_metadata = metadata.get_metadata_by_config_name("en")
    self.assertEqual(
        asdict(en_metadata),
        {
            "annotations_creators": ["found"],
            "language_creators": ["found"],
            "languages": ["en"],
            "licenses": ["unknown"],
            "multilinguality": ["monolingual"],
            "pretty_name": "English Test Dataset",
            "size_categories": ["10K<n<100K"],
            "source_datasets": ["extended|other-yahoo-webscope-l6"],
            "task_categories": ["question-answering"],
            "task_ids": ["open-domain-qa"],
            "paperswithcode_id": ["squad"],
        },
    )
    fr_metadata = metadata.get_metadata_by_config_name("fr")
    self.assertEqual(
        asdict(fr_metadata),
        {
            "annotations_creators": ["found"],
            "language_creators": ["found"],
            "languages": ["fr"],
            "licenses": ["unknown"],
            "multilinguality": ["monolingual"],
            "pretty_name": "French Test Dataset",
            "size_categories": ["10K<n<100K"],
            "source_datasets": ["extended|other-yahoo-webscope-l6"],
            "task_categories": ["question-answering"],
            "task_ids": ["open-domain-qa"],
            "paperswithcode_id": ["squad"],
        },
    )
    # Single-config metadata: any config name should resolve to the shared values.
    valid_yaml_with_single_configs = _dedent(
        """\
        annotations_creators:
        - found
        language_creators:
        - found
        languages:
        - en
        licenses:
        - unknown
        multilinguality:
        - monolingual
        pretty_name: Test Dataset
        size_categories:
        - 10K<n<100K
        source_datasets:
        - extended|other-yahoo-webscope-l6
        task_categories:
        - question-answering
        task_ids:
        - open-domain-qa
        paperswithcode_id:
        - squad
        """
    )
    metadata = DatasetMetadata.from_yaml_string(valid_yaml_with_single_configs)
    en_metadata = metadata.get_metadata_by_config_name("en")
    self.assertEqual(
        asdict(en_metadata),
        {
            "annotations_creators": ["found"],
            "language_creators": ["found"],
            "languages": ["en"],
            "licenses": ["unknown"],
            "multilinguality": ["monolingual"],
            "pretty_name": "Test Dataset",
            "size_categories": ["10K<n<100K"],
            "source_datasets": ["extended|other-yahoo-webscope-l6"],
            "task_categories": ["question-answering"],
            "task_ids": ["open-domain-qa"],
            "paperswithcode_id": ["squad"],
        },
    )
    fr_metadata = metadata.get_metadata_by_config_name("fr")
    self.assertEqual(
        asdict(fr_metadata),
        {
            "annotations_creators": ["found"],
            "language_creators": ["found"],
            "languages": ["en"],
            "licenses": ["unknown"],
            "multilinguality": ["monolingual"],
            "pretty_name": "Test Dataset",
            "size_categories": ["10K<n<100K"],
            "source_datasets": ["extended|other-yahoo-webscope-l6"],
            "task_categories": ["question-answering"],
            "task_ids": ["open-domain-qa"],
            "paperswithcode_id": ["squad"],
        },
    )
    # Multi-config metadata that only declares "en" and "zh": asking for "fr"
    # must raise.
    invalid_yaml_with_multiple_configs = _dedent(
        """\
        annotations_creators:
        - found
        language_creators:
        - found
        languages:
          en:
          - en
          zh:
          - zh
        licenses:
        - unknown
        multilinguality:
        - monolingual
        pretty_name: Test Dataset
        size_categories:
        - 10K<n<100K
        source_datasets:
        - extended|other-yahoo-webscope-l6
        task_categories:
        - question-answering
        task_ids:
        - open-domain-qa
        paperswithcode_id:
        - squad
        """
    )
    metadata = DatasetMetadata.from_yaml_string(invalid_yaml_with_multiple_configs)
    en_metadata = metadata.get_metadata_by_config_name("en")
    self.assertEqual(
        asdict(en_metadata),
        {
            "annotations_creators": ["found"],
            "language_creators": ["found"],
            "languages": ["en"],
            "licenses": ["unknown"],
            "multilinguality": ["monolingual"],
            "pretty_name": "Test Dataset",
            "size_categories": ["10K<n<100K"],
            "source_datasets": ["extended|other-yahoo-webscope-l6"],
            "task_categories": ["question-answering"],
            "task_ids": ["open-domain-qa"],
            "paperswithcode_id": ["squad"],
        },
    )
    zh_metadata = metadata.get_metadata_by_config_name("zh")
    self.assertEqual(
        asdict(zh_metadata),
        {
            "annotations_creators": ["found"],
            "language_creators": ["found"],
            "languages": ["zh"],
            "licenses": ["unknown"],
            "multilinguality": ["monolingual"],
            "pretty_name": "Test Dataset",
            "size_categories": ["10K<n<100K"],
            "source_datasets": ["extended|other-yahoo-webscope-l6"],
            "task_categories": ["question-answering"],
            "task_ids": ["open-domain-qa"],
            "paperswithcode_id": ["squad"],
        },
    )
    with self.assertRaises(TypeError):
        fr_metadata = metadata.get_metadata_by_config_name("fr")
| StarcoderdataPython |
3360752 | #!/usr/bin/env python
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from std_msgs.msg import Int32
from scipy.spatial import KDTree
import numpy as np
import time
import thread
import math
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50  # Number of waypoints we will publish. You can change this number
# Bug fix: this file is Python 2 (see `import thread`), so `1 / LOOKAHEAD_WPS`
# was integer division and evaluated to 0; a float literal keeps the intended
# value (and is identical under Python 3).
CONSTANT_DECEL = 1.0 / LOOKAHEAD_WPS  # Deceleration constant for smoother braking
PUBLISHING_RATE = 20  # Rate (Hz) of waypoint publishing
STOP_LINE_MARGIN = 4  # Distance in waypoints to pad in front of the stop line
MAX_DECL = 0.5  # Max deceleration used to shape the braking velocity ramp
MAX_TRAFFIC_WP_TIMEOUT = 10  # /traffic_waypoint timeout in 100 ms ticks (i.e. 1000 ms)
class WaypointUpdater(object):
    """Publishes the next LOOKAHEAD_WPS waypoints ahead of the ego vehicle.

    Subscribes to the car pose, the static base waypoints and the upcoming
    traffic-light stop line index, and republishes a short horizon of
    waypoints on /final_waypoints, ramping target velocities down when a
    red light is ahead.
    """

    def __init__(self):
        # Bug fix: '[waypoint_updater] waypoint_updater' is not a legal ROS
        # graph resource name (brackets and spaces are forbidden), which makes
        # rospy.init_node reject it; use a plain identifier.
        rospy.init_node('waypoint_updater', log_level=rospy.DEBUG)
        rospy.loginfo("Welcome to waypoint_updater")

        self.pose = None              # latest PoseStamped from /current_pose
        self.base_waypoints = None    # full static track (styx_msgs Lane)
        self.stopline_wp_idx = -99    # -99 = no /traffic_waypoint received yet
        self.waypoints_2d = None      # [[x, y], ...] used to build the KD-tree
        self.waypoint_tree = None     # KDTree over waypoints_2d
        self.thread_working = False   # guards the single publisher thread
        self.waypoint_delay_time = 0
        self.pose_delay_time = 0
        # Bug fix: last_pose_time was previously first assigned inside
        # pose_cb only; initialize it so it always exists.
        self.last_pose_time = 0
        # NOTE: attribute keeps the original 'timout' spelling used elsewhere.
        self.traffic_waypoint_timout = MAX_TRAFFIC_WP_TIMEOUT

        rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
        rospy.Subscriber('/base_waypoints', Lane, self.base_waypoints_cb)
        rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)

        self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)

        # NOTE: msg_timeout_detector() loops until shutdown, so the
        # rospy.spin() below is never actually reached; subscriber callbacks
        # still run on their own threads in the meantime.
        self.msg_timeout_detector()
        rospy.spin()
        # self.loop()

    def loop(self):
        """Alternative fixed-rate (5 Hz) publishing loop (currently unused)."""
        rate = rospy.Rate(5)
        while not rospy.is_shutdown():
            if self.pose and self.base_waypoints:
                self.publish_waypoints()
            rate.sleep()

    def msg_timeout_detector(self):
        """Decrement the /traffic_waypoint watchdog at 10 Hz until shutdown."""
        rate = rospy.Rate(10)
        while not rospy.is_shutdown():
            self.traffic_waypoint_timout -= 1
            if self.traffic_waypoint_timout <= 0:
                self.traffic_waypoint_timout = 0
                # rospy.logwarn("[waypoint_updater] traffic_waypoint topic timeout")
                # self.stopline_wp_idx = -99
            rate.sleep()

    def get_closest_waypoint_id(self):
        """Return the index of the closest waypoint *ahead* of the car.

        Returns -1 when the KD-tree has not been built yet (no base
        waypoints received).
        """
        x = self.pose.pose.position.x
        y = self.pose.pose.position.y
        if self.waypoint_tree is not None:
            closest_idx = self.waypoint_tree.query([x, y], 1)[1]
            closest_coord = self.waypoints_2d[closest_idx]
            prev_coord = self.waypoints_2d[closest_idx - 1]

            # If the closest waypoint lies behind the car (the car projects
            # past it along the track direction), advance to the next one.
            cl_vect = np.array(closest_coord)
            prev_vect = np.array(prev_coord)
            pos_vect = np.array([x, y])
            val = np.dot(cl_vect - prev_vect, pos_vect - cl_vect)
            if val > 0:
                closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
        else:
            closest_idx = -1
        return closest_idx

    def publish_waypoints(self):
        """Build and publish the final waypoint horizon, then release the worker flag."""
        final_wp = self.generate_wp()
        if final_wp is not None:
            self.final_waypoints_pub.publish(final_wp)
        else:
            rospy.logdebug("[waypoint_updater] No waypoint available")
        self.thread_working = False

    def generate_wp(self):
        """Return a Lane of up to LOOKAHEAD_WPS waypoints, or None when not ready."""
        lane = Lane()
        car_wp_idx = self.get_closest_waypoint_id()
        if car_wp_idx >= 0 and self.stopline_wp_idx != -99:
            farthest_wp_idx = car_wp_idx + LOOKAHEAD_WPS
            waypoints_range = self.base_waypoints.waypoints[car_wp_idx:farthest_wp_idx]
            if self.stopline_wp_idx == -1 or self.stopline_wp_idx >= farthest_wp_idx:
                # No red light within the horizon: publish as-is.
                lane.waypoints = waypoints_range
            else:
                lane.waypoints = self.decelerate_waypoints(waypoints_range, car_wp_idx)
            rospy.logdebug("[waypoint_updater] car_wp:%d, stopline_wp:%d", car_wp_idx, self.stopline_wp_idx)
            return lane
        else:
            return None

    def decelerate_waypoints(self, waypoints, car_wp_idx):
        """Return copies of `waypoints` with velocities ramped to 0 at the stop line."""
        decl_wp = []
        for i in range(len(waypoints)):
            p = Waypoint()
            p.pose = waypoints[i].pose
            # Stop a few waypoints before the stop line so the car nose stays behind it.
            stop_idx = max(self.stopline_wp_idx - car_wp_idx - STOP_LINE_MARGIN, 0)
            dist = self.distance(waypoints, i, stop_idx)
            # v = sqrt(2*a*d): constant-deceleration profile toward the stop point.
            vel = math.sqrt(2 * MAX_DECL * dist)
            if vel < 1.0:
                vel = 0
            p.twist.twist.linear.x = min(vel, waypoints[i].twist.twist.linear.x)
            decl_wp.append(p)
        return decl_wp

    def pose_cb(self, msg):
        """Store the latest pose and kick off one publishing worker thread."""
        current_time = time.time()
        # rospy.logdebug("[waypoint_updater] Pose update time:%.4f s", current_time - self.pose_delay_time)
        self.pose_delay_time = current_time
        self.pose = msg
        if not self.thread_working:
            if self.base_waypoints:
                self.thread_working = True
                self.waypoint_delay_time = time.time()
                thread.start_new_thread(self.publish_waypoints, ())
        self.last_pose_time = time.time()

    def base_waypoints_cb(self, waypoints):
        """Cache the static track and build the KD-tree once (topic is latched)."""
        rospy.loginfo("[waypoint_updater] Base waypoint Callback")
        self.base_waypoints = waypoints
        if not self.waypoints_2d:
            self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y]
                                 for waypoint in waypoints.waypoints]
            self.waypoint_tree = KDTree(self.waypoints_2d)

    def traffic_cb(self, msg):
        """Update the stop-line waypoint index and reset the watchdog."""
        if self.stopline_wp_idx != msg.data:
            rospy.logdebug("[waypoint_updater] stopline_wp_idx updated: %s.", msg.data)
            self.stopline_wp_idx = msg.data
        self.traffic_waypoint_timout = MAX_TRAFFIC_WP_TIMEOUT

    def obstacle_cb(self, msg):
        # TODO: Callback for /obstacle_waypoint message. We will implement it later
        pass

    def get_waypoint_velocity(self, waypoint):
        """Return the target linear velocity stored in a waypoint."""
        return waypoint.twist.twist.linear.x

    def set_waypoint_velocity(self, waypoints, waypoint, velocity):
        """Set the target linear velocity of waypoints[waypoint] in place."""
        waypoints[waypoint].twist.twist.linear.x = velocity

    def distance(self, waypoints, wp1, wp2):
        """Return the cumulative Euclidean path length from wp1 to wp2 (inclusive)."""
        dist = 0
        dl = lambda a, b: math.sqrt((a.x - b.x) ** 2 + (a.y - b.y) ** 2 + (a.z - b.z) ** 2)
        for i in range(wp1, wp2 + 1):
            dist += dl(waypoints[wp1].pose.pose.position, waypoints[i].pose.pose.position)
            wp1 = i
        return dist
if __name__ == '__main__':
    # Node entry point; ROSInterruptException is raised when ROS is shut
    # down while the node is starting up.
    try:
        WaypointUpdater()
    except rospy.ROSInterruptException:
        rospy.logerr('Could not start waypoint updater node.')
| StarcoderdataPython |
1621011 | # test2_pyganim.py - A pyganim test program.
#
# This program shows off a lot more of Pyganim features, and offers some interactivity.
#
# The animation images come from POW Studios, and are available under an Attribution-only license.
# Check them out, they're really nice.
# http://powstudios.com/
import pygame
from pygame.locals import *
import sys
import time
import pyganim
# Interactive pyganim demo: lightning bolts, scaled/rotated fire, a one-shot
# smoke puff, and a semi-transparent two-frame animation, with keyboard
# controls for play/pause/stop/reverse/seek.
pygame.init()

# set up the window
windowSurface = pygame.display.set_mode((640, 480), 0, 32)
pygame.display.set_caption('Pyganim Test 3')

# create the animation objects (10 frames, 0.1 s each)
boltAnim0 = pyganim.PygAnimation([('testimages/bolt_strike_0001.png', 0.1),
                                  ('testimages/bolt_strike_0002.png', 0.1),
                                  ('testimages/bolt_strike_0003.png', 0.1),
                                  ('testimages/bolt_strike_0004.png', 0.1),
                                  ('testimages/bolt_strike_0005.png', 0.1),
                                  ('testimages/bolt_strike_0006.png', 0.1),
                                  ('testimages/bolt_strike_0007.png', 0.1),
                                  ('testimages/bolt_strike_0008.png', 0.1),
                                  ('testimages/bolt_strike_0009.png', 0.1),
                                  ('testimages/bolt_strike_0010.png', 0.1)])

# create some copies of the bolt animation (two run at half/double speed)
boltAnim1, boltAnim2, boltAnim3, boltAnim4 = boltAnim0.getCopies(4)
boltAnim3.rate = 0.5
boltAnim4.rate = 2.0
bolts = [boltAnim0, boltAnim1, boltAnim2, boltAnim3, boltAnim4]

# supply a "start time" argument to play() so that the bolt animations are
# all in sync with each other.
rightNow = time.time()
for i in range(len(bolts)):
    if i == 2:
        continue  # we're not going to call play() on boltAnim2
    bolts[i].play(rightNow)  # make sure they all start in sync

# create the fire animation
fireAnim = pyganim.PygAnimation([('testimages/flame_a_0001.png', 0.1),
                                 ('testimages/flame_a_0002.png', 0.1),
                                 ('testimages/flame_a_0003.png', 0.1),
                                 ('testimages/flame_a_0004.png', 0.1),
                                 ('testimages/flame_a_0005.png', 0.1),
                                 ('testimages/flame_a_0006.png', 0.1)])
fireAnim2 = fireAnim.getCopy()
fireAnim3 = fireAnim.getCopy()
spinningFireAnim = fireAnim.getCopy()

# do some transformation on the other two fire animation objects
fireAnim2.smoothscale((200, 200))
fireAnim3.rotate(50)
fireAnim3.smoothscale((256, 360))
fireAnim.rate = 1.2  # make the smaller fire slightly faster

# start playing the fire animations
fireAnim.play()
fireAnim2.play()
fireAnim3.play()
spinningFireAnim.play()

# You can also use pygame.Surface objects in the constructor instead of filename strings.
smokeSurf1 = pygame.image.load('testimages/smoke_puff_0001.png')
smokeSurf2 = pygame.image.load('testimages/smoke_puff_0002.png')
smokeSurf3 = pygame.image.load('testimages/smoke_puff_0003.png')
smokeSurf4 = pygame.image.load('testimages/smoke_puff_0004.png')
smokeAnim = pyganim.PygAnimation([(smokeSurf1, 0.1),
                                  (smokeSurf2, 0.1),
                                  (smokeSurf3, 0.1),
                                  (smokeSurf4, 0.1),
                                  ('testimages/smoke_puff_0005.png', 0.1),
                                  ('testimages/smoke_puff_0006.png', 0.1),
                                  ('testimages/smoke_puff_0007.png', 0.1),
                                  ('testimages/smoke_puff_0008.png', 0.3),
                                  ('testimages/smoke_puff_0009.png', 0.3),
                                  ('testimages/smoke_puff_0010.png', 0.3)], loop=False)
smokeAnim.play()  # start playing the smoke animation

# creating an animation object from an image that doesn't have transparent
# pixels so that the alpha values can be changed.
alAnim = pyganim.PygAnimation([('testimages/alsweigart1.jpg', 0.5),
                               ('testimages/alsweigart2.jpg', 0.5)])
alAnim.set_alpha(50)
alAnim.play()

BASICFONT = pygame.font.Font('freesansbold.ttf', 16)
WHITE = (255, 255, 255)
BGCOLOR = (100, 50, 50)
instructionSurf = BASICFONT.render('P to toggle Play/Pause, S to stop, R to reverse, LEFT/RIGHT to rewind/ff.', True, WHITE)
instructionRect = instructionSurf.get_rect()
instructionRect.topleft = (10, 128)
instructionSurf2 = BASICFONT.render('O to replay smoke. I to toggle fire visibility. Esc to quit.', True, WHITE)
instructionRect2 = instructionSurf2.get_rect()
instructionRect2.topleft = (10, 148)
instructionSurf3 = BASICFONT.render('Note the 3rd bolt doesn\'t play because play() wasn\'t called on it.', True, WHITE)
# Bug fix: this rect was taken from instructionSurf2, so the third line of
# text was blitted with the wrong surface size; use instructionSurf3.
instructionRect3 = instructionSurf3.get_rect()
instructionRect3.topleft = (10, 168)

mainClock = pygame.time.Clock()
spinAmt = 0
while True:
    windowSurface.fill(BGCOLOR)

    # keyboard handling: all controls operate on boltAnim0 except O/I
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == KEYDOWN:
            if event.key == K_ESCAPE:
                pygame.quit()
                sys.exit()
            if event.key == K_p:
                boltAnim0.togglePause()
            if event.key == K_s:
                boltAnim0.stop()
            if event.key == K_LEFT:
                boltAnim0.prevFrame()
            if event.key == K_RIGHT:
                boltAnim0.nextFrame()
            if event.key == K_r:
                boltAnim0.reverse()
            if event.key == K_o:
                smokeAnim.play()
            if event.key == K_i:
                fireAnim.visibility = not fireAnim.visibility
                fireAnim2.visibility = not fireAnim2.visibility
                fireAnim3.visibility = not fireAnim3.visibility

    # draw the animations to the screen
    for i in range(len(bolts)):
        bolts[i].blit(windowSurface, ((i * 133), 0))
    fireAnim3.blit(windowSurface, (30, 130))
    fireAnim2.blit(windowSurface, (116, 226))
    fireAnim.blit(windowSurface, (178, 278))
    smokeAnim.blit(windowSurface, (350, 250))

    # handle the spinning fire: re-rotate from scratch each frame
    spinAmt += 1
    spinningFireAnim.clearTransforms()
    spinningFireAnim.rotate(spinAmt)
    curSpinSurf = spinningFireAnim.getCurrentFrame()  # gets the current frame
    w, h = curSpinSurf.get_size()
    # technically, in the time span between the getCurrentFrame() call and
    # the following blit() call, enough time could have passed where it
    # has the width and height for the wrong frame. It's unlikely though.
    # But if you want to account for this, just use the blitFrameAtTime()
    # or blitFrameNum() methods instead of blit().
    spinningFireAnim.blit(windowSurface, (550 - int(w / 2), 350 - int(h / 2)))

    # draw the semitransparent "picture of Al" animation on top of the spinning fire
    alAnim.blit(windowSurface, (512, 352))

    # draw the instructional text
    windowSurface.blit(instructionSurf, instructionRect)
    windowSurface.blit(instructionSurf2, instructionRect2)
    windowSurface.blit(instructionSurf3, instructionRect3)

    pygame.display.update()
    mainClock.tick(30)  # Feel free to experiment with any FPS setting.
62351 | import numpy
from chainer import functions
from chainer import testing
@testing.parameterize(*(testing.product({
    'batchsize': [1, 5],
    'size': [10, 20],
    'dtype': [numpy.float32],
    'eps': [1e-5, 1e-1],
})))
@testing.inject_backend_tests(
    None,
    # CPU tests
    [
        {},
    ]
    # GPU tests
    + testing.product({
        'use_cuda': [True],
        'use_cudnn': ['never', 'always'],
        'cuda_device': [0, 1],
    })
    # ChainerX tests
    + [
        {'use_chainerx': True, 'chainerx_device': 'native:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:0'},
        {'use_chainerx': True, 'chainerx_device': 'cuda:1'},
    ]
)
class TestLayerNormalization(testing.FunctionTestCase):
    """Checks functions.layer_normalization against a NumPy reference."""

    def setUp(self):
        # Default tolerances; float16 (not currently parameterized) would use
        # the looser set for all three checks.
        tight = {'atol': 1e-4, 'rtol': 1e-3}
        loose = {'atol': 1e-3, 'rtol': 1e-2}
        self.check_forward_options = dict(tight)
        self.check_backward_options = dict(loose)
        self.check_double_backward_options = dict(loose)
        if self.dtype == numpy.float16:
            self.check_forward_options = dict(loose)
            self.check_backward_options = dict(loose)
            self.check_double_backward_options = dict(loose)

    def generate_inputs(self):
        """Draw a random input batch plus per-unit scale/shift parameters."""
        batch_shape = (self.batchsize, self.size)
        n_units = numpy.prod(batch_shape) // batch_shape[0]
        x = numpy.random.uniform(-1, 1, batch_shape).astype(self.dtype)
        gamma = numpy.random.uniform(-1, 1, n_units).astype(self.dtype)
        beta = numpy.random.uniform(-1, 1, n_units).astype(self.dtype)
        return x, gamma, beta

    def forward_expected(self, inputs):
        """NumPy oracle: normalize each row, then scale by gamma and shift by beta."""
        x, gamma, beta = inputs
        mu = x.mean(axis=1, keepdims=True)
        sigma = numpy.sqrt(numpy.square(x - mu).mean(axis=1, keepdims=True) + self.eps)
        y = gamma[numpy.newaxis] * (x - mu) / sigma + beta[numpy.newaxis]
        return y,

    def forward(self, inputs, device):
        x, gamma, beta = inputs
        return functions.layer_normalization(x, gamma, beta, eps=self.eps),
# Hand this module's tests to Chainer's pytest-based runner.
testing.run_module(__name__, __file__)
| StarcoderdataPython |
3270230 | <gh_stars>1-10
import boto3
from .client import logger, BatchSourceBase, BatchDestinationBase
class SQSClientBase():
    """Shared SQS connection setup for the batch source/destination classes.

    Resolves the queue URL once at construction time. AWS credentials come
    from the standard boto3 credential chain (env vars, config file, etc.).
    """

    def __init__(self, queue_name, region_name="us-east-1"):
        logger.info(f"Connecting to SQS queue with name '{queue_name}' in region '{region_name}'.")
        self.sqs = boto3.client("sqs", region_name=region_name)
        self.queue_name = queue_name
        # Raises if the queue does not exist — fail fast at construction.
        self.queue_url = self.sqs.get_queue_url(QueueName=queue_name)["QueueUrl"]
class SQSBatchSource(SQSClientBase, BatchSourceBase):
    """
    Retrieves batch files from AWS SQS.

    AWS credentials should be stored in a format compatible with boto3,
    such as environment variables or a credentials file. For more information, see:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html

    Each instance of this class should only have ONE consumer.
    """

    def __init__(self, queue_name, region_name="us-east-1"):
        super().__init__(queue_name, region_name)
        self.message = None  # last received (not yet deleted) SQS message

    def get_next_batch(self):
        """Return the body of the next queued batch, or None when none is available."""
        try:
            messages = self.sqs.receive_message(QueueUrl=self.queue_url, MaxNumberOfMessages=1)
            if "Messages" in messages:
                self.message = messages["Messages"][0]
                return self.message["Body"]
        except Exception as e:
            logger.error(f"Failed to receive batch from SQS: {e}")
        return None

    def mark_batch_as_complete(self):
        """Delete the last received message so SQS will not redeliver it."""
        # Bug fix: calling this before any batch was received (or twice in a
        # row) used to subscript None / reuse a stale receipt handle; guard
        # and clear the message after a successful delete.
        if self.message is None:
            logger.error("mark_batch_as_complete called with no pending message.")
            return
        try:
            message_receipt_handle = self.message["ReceiptHandle"]
            self.sqs.delete_message(QueueUrl=self.queue_url, ReceiptHandle=message_receipt_handle)
            self.message = None
        except Exception as e:
            logger.error(f"Failed to delete batch from SQS: {e}")
        return
class SQSBatchDestination(SQSClientBase, BatchDestinationBase):
    """
    Writes batch files to AWS SQS.

    AWS credentials should be stored in a format compatible with boto3,
    such as environment variables or a credentials file. For more information, see:
    https://boto3.amazonaws.com/v1/documentation/api/latest/guide/configuration.html
    """

    def __init__(self, queue_name, region_name="us-east-1"):
        super().__init__(queue_name, region_name)

    def publish_batch_results(self, results, target_file_name=None):
        # target_file_name is unused here; presumably kept for interface
        # parity with other BatchDestinationBase implementations — TODO
        # confirm against the base class.
        try:
            self.sqs.send_message(QueueUrl=self.queue_url, MessageBody=results)
        except Exception as e:
            logger.error(f"Failed to send batch to SQS: {e}")
        return
| StarcoderdataPython |
3339456 | """
Replay Buffer for Deep Reinforcement Learning
"""
from collections import deque
import random
import numpy as np
class ReplayBuffer:
    """Fixed-capacity FIFO buffer of (state, action, reward, next_state, done)
    transitions for off-policy RL; evicts the oldest entry once full."""

    def __init__(self, size_buffer, random_seed=None):
        """
        size_buffer -- maximum number of experiences kept.
        random_seed -- optional seed for both `random` and `numpy.random`.
        """
        # Bug fix: the original tested `if random_seed:`, which silently
        # skipped seeding when the seed was 0; compare against None instead.
        if random_seed is not None:
            random.seed(random_seed)
            np.random.seed(random_seed)
        self._size_bf = size_buffer
        self._length = 0
        self._buffer = deque()

    @property
    def buffer(self):
        return self._buffer

    def add(self, state, action, reward, state_next, done):
        """Append one experience, evicting the oldest when at capacity."""
        exp = (state, action, reward, state_next, done)
        if self._length < self._size_bf:
            self._buffer.append(exp)
            self._length += 1
        else:
            self._buffer.popleft()
            self._buffer.append(exp)

    def add_batch(self, batch_s, batch_a, batch_r, batch_sn, batch_d):
        """Append aligned sequences of experiences element-wise."""
        for i in range(len(batch_s)):
            # Bug fix: the original passed the whole `batch_d` sequence as
            # every step's `done` flag instead of indexing it.
            self.add(batch_s[i], batch_a[i], batch_r[i], batch_sn[i], batch_d[i])

    def add_samples(self, samples):
        """Append an iterable of (s, a, r, s', done) tuples."""
        for s, a, r, sn, d in samples:
            self.add(s, a, r, sn, d)

    def __len__(self):
        return self._length

    def sample_batch(self, size_batch):
        """Uniformly sample up to `size_batch` experiences as numpy arrays.

        Returns five arrays (states, actions, rewards, next states, dones).
        """
        if self._length < size_batch:
            batch = random.sample(self._buffer, self._length)
        else:
            batch = random.sample(self._buffer, size_batch)
        batch_s = np.array([d[0] for d in batch])
        batch_a = np.array([d[1] for d in batch])
        batch_r = np.array([d[2] for d in batch])
        batch_sn = np.array([d[3] for d in batch])
        batch_d = np.array([d[4] for d in batch])
        return batch_s, batch_a, batch_r, batch_sn, batch_d

    def clear(self):
        """Empty the buffer."""
        self._buffer.clear()
        # Bug fix: the length counter was not reset, which corrupted later
        # add() bookkeeping (popleft on an empty deque once "full" again).
        self._length = 0
| StarcoderdataPython |
1663698 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Mar 18 12:47:18 2022
A small investigation into the correlation of errors in the dD and d18O is performed
from the snow cores at EastGRIP.
@author: michaeltown
"""
import pandas as pd
import statsmodels.api as sm
import seaborn as sns
import numpy as np
import datetime as dt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from sklearn.metrics import mean_squared_error
import scipy.stats as stats
def plotResid(yTrue, yResid, xlabelR, ylabelR, titleR):
    """Scatter-plot residuals against true values with a dashed zero line.

    yTrue / yResid -- aligned arrays of true values and residuals.
    xlabelR / ylabelR / titleR -- axis labels and figure title.
    """
    plt.figure()
    zeroLine = yTrue * 0
    plt.plot(yTrue, yResid, color='blue', marker='o', alpha=0.5, ls='None')
    plt.plot(yTrue, zeroLine, 'k--')
    plt.xlabel(xlabelR)
    plt.ylabel(ylabelR)
    # Bug fix: `plt.grid;` and `plt.show;` were bare attribute references
    # (no-ops); they are now actual calls, as clearly intended.
    plt.grid()
    plt.title(titleR)
    plt.show()
def regressPipeline_dDd18O(x, y, ts, titleStr, xlab, ylab, xlim, ylim):
    """Fit an OLS line of y on x (training split only), plot it, return fit stats.

    x -- exogenous DataFrame already passed through sm.add_constant
         (column 0 is the constant, column 1 the predictor that is plotted).
    y -- endogenous series.
    ts -- training fraction handed to train_test_split (random_state fixed at 42
          for reproducibility).
    titleStr / xlab / ylab -- plot annotations.
    xlim / ylim -- numpy arrays giving the axis limits.

    Returns (slope, intercept, r_squared) of the training-set fit.
    """
    xTrain, xTest, yTrain, yTest = train_test_split(x, y, random_state=42, train_size=ts)
    results = sm.OLS(yTrain, xTrain).fit()
    # NOTE: the held-out split is not evaluated; the original computed test
    # residuals but never used them, so that dead code was removed.
    slopes = results.params[1]
    intercept = results.params.const
    r2score = results.rsquared

    # scatter + fitted line + slope-8 reference (equilibrium water line)
    plt.figure()
    plt.plot(x.iloc[:, 1], y, '.', color='blue', alpha=0.2)
    plt.plot(xlim, slopes * xlim + intercept, '--', color='red', alpha=0.5)
    plt.plot(xlim, 8 * xlim, '--', color='black', alpha=0.3)  # for equilibrium slope
    plt.title(titleStr)
    plt.xlim(xlim)
    plt.ylim(ylim)
    plt.ylabel(ylab)
    plt.xlabel(xlab)
    plt.grid()

    # choose an x position for the annotation text that stays inside the axes
    xval = xlim[0]
    if xval < 0:
        xval = xlim[1] * 1.3
    else:
        xval = xlim[1] * 0.8
    plt.text(xval, ylim[1] * 0.8, 'm = ' + str(np.round(slopes, 2)))
    plt.text(xval, ylim[1] * 0.7, 'b = ' + str(np.round(intercept, 2)))
    plt.text(xval, ylim[1] * 0.6, 'r\u00b2 = ' + str(np.round(r2score, 2)))

    # return the params
    return slopes, intercept, r2score
# useful stuff
# LaTeX-style symbols reused in axis labels
d18Osym = '$\delta^{18}$O'
dDsym = '$\delta$D'
pptsym = 'ppt' # '\textperthousand'

#******************************************
# main
#******************************************
# Load the 2016-2019 EastGRIP snow-core isotope dataset and drop incomplete rows.
fileLoc = '/home/michaeltown/work/projects/snowiso/data/EastGRIP/';
figureLoc ='/home/michaeltown/work/projects/snowiso/figures/EastGRIP/'
fileNameIso = 'eastGRIP_SCisoData_2016-2019.pkl'
df_iso = pd.read_pickle(fileLoc+fileNameIso);
df_iso.dropna(inplace = True)
testSize = 0.3;

# 1) Are the dD and d18O measurement uncertainties correlated with each other?
m, b, r2 = regressPipeline_dDd18O(sm.add_constant(df_iso.d18O_std), df_iso.dD_std, testSize, 'dDstd vs d18Ostd for all EastGRIP Snow Core data',
                                  d18Osym + ' std (ppt)', dDsym + ' std (ppt)', np.asarray([0, 0.15]), np.asarray([0, 1]))
plt.legend(['scatter', 'regression', 'eq water line'], loc = 'lower right')
plt.savefig(figureLoc+'errorCorrelation_dDstdVsd18Ostd_EastGRIP_2016-2019.jpg')

# 2) Does the d18O uncertainty depend on the d18O value itself?
m, b, r2 = regressPipeline_dDd18O(sm.add_constant(df_iso.d18O), df_iso.d18O_std, testSize, 'd18Ostd vs d18O for all EastGRIP Snow Core data',
                                  d18Osym + ' (ppt)', d18Osym + ' std (ppt)', np.asarray([-50, -20]), np.asarray([0, 0.15]))
plt.legend(['scatter', 'regression'], loc = 'lower right')
plt.savefig(figureLoc+'errorCorrelation_d18OstdVsd18O_EastGRIP_2016-2019.jpg')

# 3) Does the dD uncertainty depend on the dD value itself?
m, b, r2 = regressPipeline_dDd18O(sm.add_constant(df_iso.dD), df_iso.dD_std, testSize, 'dDstd vs dD for all EastGRIP Snow Core data',
                                  dDsym + ' (ppt)', dDsym + ' std (ppt)', np.asarray([-380, -150]), np.asarray([0, 1]))
plt.legend(['scatter', 'regression'], loc = 'lower right')
plt.savefig(figureLoc+'errorCorrelation_dDstdVsdD_EastGRIP_2016-2019.jpg')
| StarcoderdataPython |
3314952 | import numpy as np
import dircache
from sets import Set
import time
import tables as pt
import sys
import time
import os
from optparse import OptionParser
class TimestampsModel (pt.IsDescription):
    """PyTables row schema for the master list of trading-day timestamps."""
    timestamp = pt.Time64Col()  # epoch seconds of one trading day
#class TimestampsModel ends
class StrategyDataModel(pt.IsDescription):
    """PyTables row schema for one symbol's daily (adjusted) price bars."""
    symbol = pt.StringCol(30)    # 30 char string; Ticker
    exchange = pt.StringCol(10)  # 10 char string; NYSE, NASDAQ, etc.
    adj_high = pt.Float32Col()   # split/dividend-adjusted high
    adj_low = pt.Float32Col()
    adj_open = pt.Float32Col()
    adj_close = pt.Float32Col()
    close = pt.Float32Col()      # unadjusted close
    volume = pt.Float32Col()     # Changing from Int32Col()
    timestamp = pt.Time64Col()   # epoch seconds for the bar's date
    date = pt.Int32Col()         # presumably YYYYMMDD integer — TODO confirm
    interval = pt.Time64Col()    # bar length in seconds; presumably 86400
                                 # (see commented-out assignment in getData)
#class StrategyDataModel done
class StockPriceData:
def __init__(self):
    # per-symbol scratch list of parsed CSV rows (list of row-tuple lists)
    self.filt_list=[]
    # sorted list of trading-day timestamps (YYYYMMDD strings); presumably
    # populated by makeOrUpdateTimestampsFile before getData runs — TODO confirm
    self.timestamps=[]
def getSymbols(self, listOfPaths, fileExtensionToRemove):
    """Return, for each input folder, the list of ticker names found there.

    A ticker is any directory entry whose name contains the given extension
    (e.g. '.csv'); the extension and everything after it is stripped off.
    As with the original implementation, a folder or stray file whose name
    merely *contains* the extension is also picked up — callers are assumed
    not to "cheat" us with such names.
    """
    symbolsPerPath = []
    extension = str(fileExtensionToRemove)
    for folder in listOfPaths:
        entries = dircache.listdir(str(folder))
        # keep only names containing the extension, then cut at the extension
        tickers = [entry.partition(extension)[0]
                   for entry in entries
                   if str(entry).find(extension) > -1]
        symbolsPerPath.append(tickers)
    return symbolsPerPath
def getData(self, listOfListOfStocks, listOfInputPaths, startDate, endDate, listOfOutputPaths):
'''
@summary: This is where all the work happens
@attention: Assumption here is that past data never changes
@bug: The exchange is currently set pretty randomly
'''
#Finding no. of stocks
noOfStocks=0
for stock_list in listOfListOfStocks:
noOfStocks+= len (stock_list)
#for stock in stock_list:
#print str(stock)
print "No. of stocks: " + str(noOfStocks)
print "No. of timestamps: " + str(len(self.timestamps))
listIndex=-1
ctr=1;
for inputFileFolder in listOfInputPaths:
listIndex+=1
outputFileFolder= str(listOfOutputPaths[listIndex])
stocks_list= listOfListOfStocks[listIndex]
for i in range(0, len(stocks_list)): # - self.count_of_non_existent_stocks):
print str(stocks_list[i]) +" "+str(ctr)+" of "+ str(noOfStocks)+" "+ str(time.strftime("%H:%M:%S"))
ctr= ctr+1
beginTS= startDate
#Check if the file exists
if (os.path.exists(str(outputFileFolder) + str(stocks_list[i]+".h5"))):
#Checking the last timestamp in the hdf file
h5f=pt.openFile(outputFileFolder + str(stocks_list[i]+".h5"), mode = "a")
print "Updating " +str(outputFileFolder + str(stocks_list[i]+".h5"))
table= h5f.getNode('/StrategyData', 'StrategyData')
beginTS= int(time.strftime("%Y%m%d", time.gmtime(table[table.nrows-1]['timestamp']))) #+ 1 #POSSIBLE BUG?
if (str(beginTS) >= self.timestamps[len(self.timestamps)-1]): #if (os.path.getmtime(str(outputFileFolder)+str(stocks_list[i])+".h5") > os.path.getmtime(str(self.dirname+ "/"+ str(stocks_list[i]+".CSV")))):
#The hdf5 file for this stock has been modified after the CSV file was modified. Ergo- no changes need to be made to it now..
print str(stocks_list[i])+".h5 already is up to date. "+ str(time.strftime("%H:%M:%S"))
h5f.close()
continue
else:
#File is present but not upto date
beginTS= int(time.strftime("%Y%m%d", time.gmtime(table[table.nrows-1]['timestamp'])))
else:
#The only foreseeable reason why there might be an exception here is that the hdf file does not exist. So, creating it.
print "Creating file: " + str(outputFileFolder) + str(stocks_list[i]+".h5")+" "+ str(time.strftime("%H:%M:%S"))
h5f = pt.openFile(str(outputFileFolder) + str(stocks_list[i]+".h5"), mode = "w")
group = h5f.createGroup("/", 'StrategyData')
table = h5f.createTable(group, 'StrategyData', StrategyDataModel)
beginTS= startDate
#else done
f=open(str(inputFileFolder)+str(stocks_list[i]+str(".CSV")))
jk=f.readlines()
f.close()
jk.pop(0)
self.filt_list=list()
filt_list_temp=filter(lambda x: (int(x.split(',')[1])> int(beginTS)) ,jk) #Because we only want timestamps strictly greater than the last timestamp currently in the file.
filt_list_temp=filter(lambda x: (int(x.split(',')[1])<= int(endDate)) ,filt_list_temp)
filt_list_temp=map(lambda x:(x.split(',')[0],x.split(',')[1],x.split(',')[2],x.split(',')[3],x.split(',')[4],x.split(',')[5],x.split(',')[6],(x.split(',')[7]).strip()),filt_list_temp)
self.filt_list.append(filt_list_temp)
if (table.nrows > 0):
#we are appending to an old file and not creating a new file..
tsStartIndex= np.array(self.timestamps).searchsorted(beginTS) +1
else:
#creating a new file...
tsStartIndex =0
#if (table.nrows > 0) done
k = 0
for j in range(tsStartIndex, len(self.timestamps)):
if (k< len(self.filt_list[0])):
if((self.timestamps[j])< (self.filt_list[0][k][1])):
row=table.row
row['exchange'] = 'NYSE'
row['symbol'] = self.filt_list[0][k][0]
row['adj_open'] = np.NaN
row['adj_close'] = np.NaN
row['adj_high'] = np.NaN
row['adj_low'] = np.NaN
row['close'] = np.NaN
row['volume'] = np.NaN
parseddate = time.strptime(self.timestamps[j],'%Y%m%d')
# row['date'] = self.timestamps[j]
row['timestamp'] = time.mktime(parseddate)
row.append()
elif(self.timestamps[j]==self.filt_list[0][k][1]):
row=table.row
row['exchange'] = 'NASDAQ'
row['symbol'] = self.filt_list[0][k][0]
row['adj_open'] = float(self.filt_list[0][k][2])
row['adj_close'] = float(self.filt_list[0][k][5])
row['adj_high'] = float(self.filt_list[0][k][3])
row['adj_low'] = float(self.filt_list[0][k][4])
row['close'] = float(self.filt_list[0][k][7])
row['volume'] = int(self.filt_list[0][k][6])
parseddate = time.strptime(self.timestamps[j],'%Y%m%d')
# row['date'] = self.timestamps[j]
row['timestamp'] = time.mktime(parseddate)
row.append()
k=k+1
else:
print"###############Something has gone wrong. A stock had a timestamp which was not in the timestamp list..."
print "TS: " + str(self.timestamps[j]) + ", Stock: " + str (self.filt_list[0][k][1])
k=k+1
#should stop executing here? Naah
# sys.exit()
else:
row=table.row
row['exchange'] = 'NYSE'
row['symbol'] = stocks_list[i] #self.filt_list[0][len(self.filt_list[0])-1][0] ####NOTE. POSSIBLE BUG?
row['adj_open'] = np.NaN
row['adj_close'] = np.NaN
row['adj_high'] = np.NaN
row['adj_low'] = np.NaN
row['close'] = np.NaN
row['volume'] = np.NaN
parseddate = time.strptime(self.timestamps[j],'%Y%m%d')
# row['date'] = self.timestamps[j]
row['timestamp'] = time.mktime(parseddate)
# row['interval'] = 86400
row.append()
#for j in range(len(self.timestamps)) ends
table.flush()
h5f.close()
#for i in range(0, stocks.size) done
print "Writing data done. "+ str(time.strftime("%H:%M:%S"))
    def makeOrUpdateTimestampsFile(self, fileName, listOflistOfStocks, listOfInputPaths, startDate, endDate):
        '''
        Build (or extend) the HDF5 file that holds the union of all trading-day
        timestamps seen across every stock CSV between startDate and endDate.

        @param fileName: path to the timestamps *.h5 file (created if missing)
        @param listOflistOfStocks: one list of stock symbols per input path
        @param listOfInputPaths: directories containing the per-stock *.CSV files
        @param startDate, endDate: YYYYMMDD bounds (inclusive end, exclusive start
               when appending — see the strict '>' filter below)
        @bug: Formerly did not take care of DST
        @attention: fixed DST bug. No known DST problems now.
        '''
        DAY=86400
        if (os.path.exists(fileName)):
            # Appending: resume from the last timestamp already in the file.
            print "Updating timestamps"
            h5f = pt.openFile(str(fileName), mode = "a")
            table=h5f.getNode('/timestamps','timestamps')
            # Render the last stored epoch back to YYYYMMDD for string comparison.
            lastTSFromFile= str(time.strftime("%Y%m%d", time.gmtime(table[table.nrows-1]['timestamp'])))
            if (str(startDate)<= lastTSFromFile):
                # Move startDate forward so already-stored days are not re-added.
                startDate=str(time.strftime("%Y%m%d", time.gmtime(table[table.nrows-1]['timestamp']))) # TO FIX DST BUG
        else:
            # Fresh file: create the /timestamps group and table.
            print "Creating new timestamp file"
            h5f = pt.openFile(str(fileName), mode = "w")
            group = h5f.createGroup("/", 'timestamps')
            table = h5f.createTable(group, 'timestamps', TimestampsModel)
        print "start: " + str(startDate)+", end: "+ str(endDate)
        tslist=list()
        ctr=1
        if (str(startDate) <= str(endDate)):
            listIndex=-1
            for path in listOfInputPaths:
                listIndex+=1
                for stock in listOflistOfStocks[listIndex]:
                    # ctr only tracks progress; the stock symbol drives the file name.
                    ctr+=1
                    f=open(str(path)+str(stock+str(".CSV")))
                    j=f.readlines()
                    j.pop(0) #To remove the "header" row
                    f.close()
                    # Column 1 of each CSV row is the YYYYMMDD date. Keep rows
                    # strictly after startDate and up to (and including) endDate.
                    filt_list_temp=filter(lambda x: (int(x.split(',')[1])> int(startDate)) ,j) # To fix DST bug
                    filt_list_temp=filter(lambda x: (int(x.split(',')[1])<= int(endDate)) ,filt_list_temp)
                    if not (filt_list_temp):
                        print str(stock.split('.')[0]) + " didn't exist in this period\n"
                        # ENHANCEMENT: the stock could be removed from the stocklist here,
                        # but then NaN padding for co-existing stocks would be lost.
                    else:
                        # The stock traded in this period: collect its dates as epochs.
                        filt_list_temp=map(lambda x:(x.split(',')[1]),filt_list_temp)
                        filt_list_temp= map(lambda item:(time.mktime(time.strptime(item,'%Y%m%d'))), filt_list_temp)
                        for item in filt_list_temp:
                            # EAFP de-duplication: list.index raises when absent.
                            try:
                                tslist.index(int(item))
                            except:
                                tslist.append(int(item))
                    if (len(tslist)>0):
                        # Stop scanning more stocks once every possible trading
                        # day in the range is already covered.
                        if (self.continueChecking(tslist, startDate, endDate)== False):
                            break #All dates are covered..
                #for stock in stocks_list done
        tslist.sort() #this should all fit into memory
        for ts in tslist:
            row= table.row
            row['timestamp']= ts
            row.append()
        #for ts in tslist ends
        table.flush()
        h5f.close()
    #makeTimestampsFile ends
    def continueChecking(self, tsList, beginTS, endTS):
        '''
        @summary: Returns True while the scan for timestamps should continue.
        Checks whether every day in [beginTS, endTS] that is missing from tsList
        is a weekend; if so, no further timestamps can possibly be found and it
        returns False so the caller can stop early. The converter works without
        this function — it just saves time.
        @param tsList: sorted-ish list of epoch timestamps found so far
        @param beginTS, endTS: YYYYMMDD range bounds
        @bug: There is a Daylight savings time bug here too- but it won't adversely affect anything because DST always happens over the weekends! Though if the time change happens on a weekday sometime in the distant past/future this function may break.
        '''
        index=1
        DAY=86400
        # 1) Look for weekday-sized gaps *inside* the collected list.
        while (index < len(tsList)):
            if (int(tsList[index])- int(tsList[index -1]) > DAY):
                tempTS= tsList[index-1] + DAY
                while (tempTS< tsList[index]):
                    timeStruct= time.gmtime(tempTS)
                    # tm_wday: 5 = Saturday, 6 = Sunday.
                    if not ((timeStruct[6] == 5) or (timeStruct[6] == 6)):
                        # A weekday is missing — keep looking for more timestamps.
                        return True #if its not a Saturday or a Sunday then keep looking
                    tempTS+=DAY
                #while (tempTS< tsList[index]) ends
            index+=1
        #while ends
        # 2) Check the span from beginTS up to the first collected timestamp.
        tempTS=time.mktime(time.strptime(str(beginTS),'%Y%m%d'))
        while (int(tsList[0])- int(tempTS) > DAY):
            timeStruct= time.gmtime((tempTS))
            if not ((timeStruct[6] == 5) or (timeStruct[6] == 6)):
                return True
            #if not... ends
            tempTS+=DAY
        #while (tsList[0]- tempTS > DAY) ends
        # 3) Check the span from the last collected timestamp up to endTS.
        tempTS=time.mktime(time.strptime(str(endTS),'%Y%m%d'))
        while (int(tempTS)- int(tsList[len(tsList)-1]) > DAY):
            timeStruct= time.gmtime(tempTS)
            if not ((timeStruct[6] == 5) or (timeStruct[6] == 6)):
                return True
            #if not... ends
            tempTS+=DAY
        #while (tempTS- tsList[len(tsList)-1] > DAY) ends
        print "Smartly figured out that we don't need to continue"
        # Every gap is a weekend: no more timestamps can exist in the range.
        return False
    #continueChecking ends
def readTimestampsFromFile(self, fileName, beginTS, endTS):
h5f = pt.openFile(str(fileName), mode = "a")
fileIterator= h5f.root.timestamps.timestamps
tslist=[]
for row in fileIterator.iterrows():
temp= str(time.strftime("%Y%m%d", time.gmtime(row['timestamp'])))
if (temp>= str(beginTS)) and (temp <= str(endTS)):
tslist.append(temp)
if (temp > str(endTS)):
break
h5f.close()
self.timestamps=tslist
#readTimestampsFromFile ends
    def keepHDFFilesInSyncWithCSV(self, listOfInputPaths, listOfOutputPaths):
        '''
        @summary: This function removes HDF files that correspond to CSV files that existed in the past- but don't exist anymore. Possibly because the stock was delisted or something like that.
        @param listOfInputPaths: directories holding the source *.csv files
        @param listOfOutputPaths: parallel directories holding generated *.h5 files
        '''
        print "Removing HDF files for which there is no corresponding CSV file"
        listOfListOfHdfFiles=self.getSymbols(listOfOutputPaths, ".h5")
        listOfListOfCsvFiles=self.getSymbols(listOfInputPaths, ".csv") #I guess this isn't really necessary, we could just reuse the stock list or something
        #but let's just keep things "proper"
        # ctr indexes the CSV list that is parallel to the current HDF list.
        ctr=-1
        for listofHDFFiles in listOfListOfHdfFiles:
            ctr+=1
            for hdfFile in listofHDFFiles:
                # EAFP membership test: list.index raises ValueError when the
                # symbol has no CSV counterpart, so the except branch deletes it.
                # NOTE(review): the bare except also swallows unrelated errors.
                try:
                    #Check if the corresponding CSV file exists...
                    listOfListOfCsvFiles[ctr].index(hdfFile)
                except:
                    print "Removing "+str(listOfOutputPaths[ctr]) + str(hdfFile)+".h5"
                    os.remove(str(listOfOutputPaths[ctr]) + str(hdfFile)+".h5")
                #if ends
            #for hdfFile in listOfListOfHdfFiles ends
        #for listofHDFFiles in listOfListOfHdfFiles ends
        print "Done removing HDF files (if any)"
    #keepHDFFilesInSyncWithCSV done
if __name__ == "__main__":
    '''
    @attention: The HDF file containing the timestamps should not be in any of the output paths because, if it is, then it will be deleted at the end.
    '''
    # Driver: converts per-stock price CSV files into per-stock HDF5 files.
    # Expects one positional command-line argument: the end date as YYYYMMDD.
    print "Starting..."+ str(time.strftime("%H:%M:%S"))
    parser = OptionParser()
    args = parser.parse_args()[1]
    # NOTE(review): endDate arrives as a *string* while startDate below is an
    # int; the later `endDate<startDate` comparison relies on Python 2
    # cross-type ordering — confirm this is intended.
    endDate= args[0]
    print "End date is: " + str (endDate)
    #Date to start reading data Format: YYYYMMDD
    startDate = 19840101
    #Date to end reading data Format: YYYYMMDD
    #endDate = 20100831
    #The complete path to the file containing the list of timestamps. This should not be in the output folder because it will be removed by the keepHDFFilesInSyncWithCSV function!
    timestampsFile="C:\\generated data files\\timestamp files\\timestamps.h5"
    spd = StockPriceData()
    #Remember the '\\' at the end...
    listOfInputPaths= list()
    listOfInputPaths.append("C:\\Trading data text\\Stocks\\Delisted Securities\\US Recent\\")
    listOfInputPaths.append ("C:\\Trading data text\\Stocks\\US\\AMEX\\")
    listOfInputPaths.append ("C:\\Trading data text\\Stocks\\US\\Delisted Securities\\")
    listOfInputPaths.append ("C:\\Trading data text\\Stocks\\US\OTC\\")
    listOfInputPaths.append ("C:\\Trading data text\\Stocks\\US\\NASDAQ\\")
    listOfInputPaths.append ("C:\\Trading data text\\Stocks\\US\NYSE\\")
    listOfInputPaths.append ("C:\\Trading data text\\Stocks\\US\\NYSE Arca\\")
    # Output paths are parallel to input paths: index i of one maps to index i
    # of the other (checked below).
    listOfOutputPaths= list()
    listOfOutputPaths.append("C:\\generated data files\\one stock per file\\maintain folder structure\\Delisted_US_Recent\\")
    listOfOutputPaths.append("C:\\generated data files\\one stock per file\\maintain folder structure\\US_AMEX\\")
    listOfOutputPaths.append("C:\\generated data files\\one stock per file\\maintain folder structure\\US_Delisted\\")
    listOfOutputPaths.append("C:\\generated data files\\one stock per file\\maintain folder structure\OTC\\")
    listOfOutputPaths.append("C:\\generated data files\\one stock per file\\maintain folder structure\\US_NASDAQ\\")
    listOfOutputPaths.append("C:\\generated data files\\one stock per file\\maintain folder structure\\US_NYSE\\")
    listOfOutputPaths.append("C:\\generated data files\\one stock per file\\maintain folder structure\\US_NYSE Arca\\")
    #If the output paths don't exist, then create them...
    for path in listOfOutputPaths:
        if not (os.access(path, os.F_OK)):
            #Path does not exist, so create it
            os.makedirs(path)
    #done making all output paths!
    if (len(listOfInputPaths)!= len(listOfOutputPaths)):
        print "No. of input paths not equal to the number of output paths.. quitting"
        sys.exit("FAILURE")
    listOfListOfStocks=spd.getSymbols(listOfInputPaths, ".csv")
    if(endDate<startDate):
        print "Error: enddate earlier than startdate"
        sys.exit(0)
    # Pipeline: collect timestamps, load them, convert the data, prune stale HDF.
    spd.makeOrUpdateTimestampsFile(timestampsFile, listOfListOfStocks, listOfInputPaths, startDate, endDate)
    spd.readTimestampsFromFile(timestampsFile, startDate, endDate)
    spd.getData(listOfListOfStocks, listOfInputPaths, startDate, endDate, listOfOutputPaths)
    spd.keepHDFFilesInSyncWithCSV(listOfInputPaths, listOfOutputPaths)
    print "All Done. Conversion from CSV to HDF5 is complete."
| StarcoderdataPython |
3331234 | import re
########
# PART 1
def extra_space(dimensions):
    """Area of the smallest face of the box — the extra slack paper."""
    smallest, second_smallest = sorted(dimensions)[:2]
    return smallest * second_smallest


def needed_paper(dimensions):
    """Total wrapping paper for a box: full surface area plus the slack."""
    length, width, height = dimensions
    surface_area = 2 * (length * width + width * height + height * length)
    return surface_area + extra_space(dimensions)
########
# PART 2
def needed_ribbon(dimensions):
    """Ribbon length: perimeter of the smallest face plus the bow (volume)."""
    ordered = sorted(dimensions)
    bow = 1
    for side in ordered:
        bow *= side
    return 2 * (ordered[0] + ordered[1]) + bow
def main():
    # Sum the paper and ribbon needed for every present in the puzzle input
    # (one "LxWxH" line per present), then check against the accepted answers.
    with open('event2015/day02/input.txt') as f:
        totalPaper = 0;
        totalRibbon = 0;
        for line in f:
            # Parse "2x3x4" -> (2, 3, 4).
            dimensions = tuple(int(i) for i in re.match(r"(\d+)x(\d+)x(\d+)", line).groups())
            totalPaper += needed_paper(dimensions)
            totalRibbon += needed_ribbon(dimensions)
        #print("%sx%sx%s=%s" % (dimensions + (neededPaper(dimensions),)))
        print("Part 1 =", totalPaper)
        assert totalPaper == 1598415 # check with accepted answer
        print("Part 2 =", totalRibbon)
        assert totalRibbon == 3812909 # check with accepted answer

main()
| StarcoderdataPython |
1657614 | #!/usr/bin/env python3
import connexion
import logging
from swagger_server import encoder
def create_app():
    """Build and return the Connexion application wrapping the Swagger spec."""
    application = connexion.App(__name__, specification_dir='./swagger/')
    application.app.json_encoder = encoder.JSONEncoder
    application.add_api('swagger.yaml', arguments={'title': 'Optimization framework service'})
    return application
def main():
    """Run the optimization-framework service on port 8080."""
    create_app().run(port=8080)


if __name__ == '__main__':
    # Previously this called create_app() without .run(), so executing the
    # module built the app but never started the server.
    main()
1608127 | <gh_stars>10-100
def linear_search(values, search_for):
    """Return True if search_for occurs in values, else False.

    Sequential scan, O(n). Replaces the index/flag while-loop with an
    idiomatic early-return for-loop.
    """
    for candidate in values:
        if candidate == search_for:
            return True
    return False
# Quick demo: one hit, one miss (avoids shadowing with a one-letter name).
sample_values = [64, 34, 25, 12, 22, 11, 90]
print(linear_search(sample_values, 12))
print(linear_search(sample_values, 91))
| StarcoderdataPython |
1747467 | <filename>tests/test_job.py
from unittest.mock import patch
from digester.job import run
@patch('digester.job.get_recently_played')
@patch('digester.job.send_email')
def test_run(send_email, get_recently_played):
    """run() should fetch the recently played tracks and send the digest email."""
    # unittest.mock applies stacked @patch decorators bottom-up, so the first
    # parameter is the innermost patch (send_email), then get_recently_played.
    run()
    get_recently_played.assert_called()
    send_email.assert_called()
| StarcoderdataPython |
1784079 | <reponame>larsoner/genz-1<gh_stars>1-10
__version__ = '2.0.0.dev0+fa29bb7'
| StarcoderdataPython |
3291056 | import os
import pickle
import pandas as pd
from . import feature_selection
# Maps a per-genome PATRIC file extension to the column holding PGFam ids
# in that format.
PATRIC_FILE_EXTENSION_TO_PGFAM_COL = {'.txt' : 'pgfam', '.tab' : 'pgfam_id'}
# Column names expected in the labels *.csv file.
GENOME_ID = 'Genome ID'
LABEL = 'Label'
# Accepted label spellings; presumably HP = human pathogen, NHP = non-human
# pathogen — TODO confirm against project docs.
HP = 'HP'
NHP = 'NHP'
def read_merged_file(file_path):
    """
    Parse a merged *.fasta-style genomes file into a pd.Series.

    Lines starting with '>' carry genome ids; the lines following a header are
    that genome's PGFam ids. Each series entry is a genome's PGFam ids joined
    by single spaces, indexed by genome id in file order.
    """
    order = []
    pgfams_by_genome = {}
    current = ''
    with open(file_path) as handle:
        for raw_line in handle:
            if raw_line.startswith('>'):
                current = raw_line.strip()[1:]
                order.append(current)
            else:
                pgfams_by_genome.setdefault(current, []).append(raw_line.strip())
    joined = [' '.join(pgfams_by_genome[genome]) for genome in order]
    return pd.Series(joined, index=order, dtype="string")
def read_genome_file(file_entry, pgfam_col):
    """
    Read one tab-separated genome file and return its PGFam ids
    (column `pgfam_col`, NaNs dropped) joined by single spaces.
    """
    table = pd.read_csv(file_entry, usecols=[pgfam_col], sep='\t')
    pgfam_ids = table[pgfam_col].dropna()
    return ' '.join(pgfam_ids)
def read_files_in_dir(dir_path):
    """
    Read every genome *.txt/*.tab file in dir_path into a pd.Series indexed by
    genome id (the file name minus its extension); each value is the genome's
    space-joined PGFam ids.
    """
    genome_ids = []
    pgfam_strings = []
    with os.scandir(dir_path) as entries:
        for entry in entries:
            if not entry.is_file():
                continue
            for extension, pgfam_col in PATRIC_FILE_EXTENSION_TO_PGFAM_COL.items():
                if entry.name.endswith(extension):
                    genome_ids.append(entry.name.split(extension)[0])
                    pgfam_strings.append(read_genome_file(entry, pgfam_col))
                    break
    return pd.Series(pgfam_strings, index=genome_ids, dtype="string")
def read_genomes(path):
    """
    Dispatch on the input layout: a directory of per-genome *.txt/*.tab files
    or a single merged *.fasta file. Returns a pd.Series of genomes (or None
    when the path is neither a file nor a directory).
    """
    if os.path.isdir(path):
        return read_files_in_dir(path)
    if os.path.isfile(path):
        return read_merged_file(path)
def read_labels(path):
    """
    Read the labels csv (columns GENOME_ID, LABEL) and return a series of ints
    indexed by genome id: 1 for HP/'1', 0 for NHP/'0', -1 for anything else.
    """
    to_int = {HP: 1, NHP: 0, '1': 1, '0': 0}
    frame = pd.read_csv(path, dtype=str).set_index(GENOME_ID)
    return frame[LABEL].apply(lambda raw: to_int.get(raw.upper(), -1))
def load_model(model_path):
    """
    Load a pickled model from model_path.

    SECURITY NOTE: unpickling executes arbitrary code from the file — only
    call this on trusted, locally produced model files.
    """
    with open(model_path, 'rb') as handle:
        model = pickle.load(handle)
    return model
def load_model_str(data_str):
    """
    Loads existing model from data_str
    Parameters
    ----------
    data_str - pickled representation data of the model
    Returns
    ----------
    loaded model
    """
    # SECURITY NOTE: pickle.loads executes arbitrary code embedded in the
    # payload — only call this on trusted, locally produced data.
    return pickle.loads(data_str)
| StarcoderdataPython |
105123 | from django.core.management.base import BaseCommand, CommandError
from dashboard.models import Bin, Dataset
class Command(BaseCommand):
    """Delete all bins, or only one dataset's bins. For testing only!"""
    help = 'delete all bins'

    def add_arguments(self, parser):
        parser.add_argument('-ds', '--dataset', type=str, help='name of dataset')

    def handle(self, *args, **options):
        dataset_name = options.get('dataset')
        if dataset_name is None:
            # No dataset given: wipe every bin.
            Bin.objects.all().delete()
        else:
            dataset = Dataset.objects.get(name=dataset_name)
            dataset.bins.all().delete()
| StarcoderdataPython |
1797635 | from rest_framework import viewsets
from rest_framework.generics import ListAPIView
from django.shortcuts import get_object_or_404
from rest_framework.response import Response
from .models import Favourite, Category, Metadata
from .serializers import FavouriteSerializer, CategorySerializer, MetadataSerializer
class FavouriteViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Favourite objects."""
    queryset = Favourite.objects.all()
    serializer_class = FavouriteSerializer
class CategoryViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Category objects."""
    queryset = Category.objects.all()
    serializer_class = CategorySerializer
class CategoryFavouriteViewSet(ListAPIView):
    """List every favourite belonging to one category (pk from the URL)."""

    def get(self, request, pk):
        # 404 when the category does not exist.
        category = get_object_or_404(Category, pk=pk)
        queryset = category.favourites.all()
        # NOTE(review): overriding get() directly bypasses ListAPIView's
        # pagination/filtering machinery — presumably intentional; confirm.
        data = FavouriteSerializer(queryset, many=True).data
        return Response(data)
class MetadataViewSet(viewsets.ModelViewSet):
    """Full CRUD REST endpoints for Metadata objects."""
    queryset = Metadata.objects.all()
    serializer_class = MetadataSerializer
| StarcoderdataPython |
57247 | <gh_stars>0
# -*- coding:utf-8 -*-
__author__ = 'zhangzhibo'
__date__ = '202018/5/18 16:56'
| StarcoderdataPython |
1780984 | <reponame>anasf97/drug_learning
import drug_learning.two_dimensions.Input.fingerprints as fp
def sdf_to_fingerprint(input_file, fp_list, format_dict, urdkit_voc=None):
    """
    Fit each fingerprint class in fp_list on the input SDF file, transform it,
    and save the result in the formats described by format_dict. Falsy entries
    in fp_list are skipped; UnfoldedRDkitFP additionally takes a vocabulary.
    """
    for fingerprint_cls in fp_list:
        if not fingerprint_cls:
            continue
        if fingerprint_cls == fp.UnfoldedRDkitFP:
            fingerprint = fingerprint_cls(urdkit_voc)
        else:
            fingerprint = fingerprint_cls()
        fingerprint.fit(input_file)
        fingerprint.transform()
        fingerprint.save(**format_dict)
| StarcoderdataPython |
3272917 |
import sys
import subprocess as sp
import networkx as nx
import os
from itertools import combinations
import glob
from matplotlib import pyplot as plt
import numpy as np
from matplotlib_venn import venn3, venn3_circles
# Compare three predicted-peak bed files and draw a 3-way Venn diagram of
# their reciprocal full overlaps. Refactor: the six near-identical
# bedtools-intersect blocks are collapsed into one helper, and unused
# variables (file_combination, peak_count_dict, dict-initialized peak_count)
# are removed. All intermediate intersect*.bed files are still written with
# the same names and in the same order as before.
file_list = sorted(glob.glob('/home/fast2/onimaru/DeepGMAP-dev/data/predictions/quick_benchmark/bed_comp_50_es/*'))
node_list = []
peak_counts = {}
path_sep = os.path.sep

# Count the peaks (lines) in each bed file, keyed by sample name
# (file name without directory and extension).
for bed_path in file_list:
    with open(bed_path, 'r') as bed_file:
        n_peaks = len(bed_file.readlines())
    node = bed_path.split(path_sep)[-1].split('.')[0]
    peak_counts[node] = n_peaks
    node_list.append(node)


def _full_overlap_count(file_a, file_b, out_path):
    """Write `bedtools intersect -u -F 1.0 -f 1.0 -a file_a -b file_b` to
    out_path and return the number of fully-overlapping intervals found."""
    with open(out_path, 'w') as out:
        sp.check_call(["bedtools", "intersect", "-u", "-F", "1.0", "-f", "1.0",
                       "-a", str(file_a), "-b", str(file_b)], stdout=out)
    with open(out_path, 'r') as result:
        return len(result.readlines())


# Pairwise overlaps. The counts can differ depending on which file is -a,
# so take the smaller of the two directions (as the original code did).
AB = min(_full_overlap_count(file_list[0], file_list[1], './intersectAB.bed'),
         _full_overlap_count(file_list[1], file_list[0], './intersectAB_.bed'))
AC = min(_full_overlap_count(file_list[0], file_list[2], './intersectAC.bed'),
         _full_overlap_count(file_list[2], file_list[0], './intersectAC_.bed'))
BC = min(_full_overlap_count(file_list[2], file_list[1], './intersectBC.bed'),
         _full_overlap_count(file_list[1], file_list[2], './intersectBC_.bed'))
# Triple overlap: intersect the A-with-B result (first direction) with C.
ABC = min(_full_overlap_count('intersectAB.bed', file_list[2], './intersectABC.bed'),
          _full_overlap_count(file_list[2], 'intersectAB.bed', './intersectABC_.bed'))

# Inclusion–exclusion to get the exclusive region sizes for venn3.
Abc = peak_counts[node_list[0]] - AB - AC + ABC  # A only
ABc = AB - ABC                                   # A and B only
AbC = AC - ABC                                   # A and C only
aBc = peak_counts[node_list[1]] - AB - BC + ABC  # B only
aBC = BC - ABC                                   # B and C only
abC = peak_counts[node_list[2]] - AC - BC + ABC  # C only

plt.figure(figsize=(4, 4))
v = venn3(subsets=(Abc, aBc, ABc, abC, AbC, aBC, ABC),
          set_labels=(node_list[0], node_list[1], node_list[2]))
v.get_patch_by_id('100').set_alpha(1.0)
plt.title("Venn diagram")
plt.show()
| StarcoderdataPython |
154563 | from abc import abstractmethod, ABC
import torch
from dpm.distributions import (
Distribution, Normal, Data,
GumbelSoftmax, ConditionalModel,
Categorical
)
from dpm.distributions import MixtureModel
from dpm.train import train
from dpm.criterion import cross_entropy, ELBO
from torch.nn import Softmax, ModuleList
from functools import partial
import numpy as np
class GaussianMixtureModel(Distribution):
    """Mixture of n_components Gaussians with uniform weights, fitted by
    maximizing likelihood (cross entropy against the empirical data)."""

    def __init__(self, n_components=2, n_dims=1):
        super().__init__()
        self.n_components = n_components
        self.n_dims = n_dims
        components = [Normal(torch.randn(n_dims), torch.eye(n_dims))
                      for _ in range(n_components)]
        weights = [1.0 / n_components for _ in range(n_components)]
        self.model = MixtureModel(components, weights)

    def log_prob(self, value):
        """Mixture log-density, delegated to the underlying MixtureModel."""
        return self.model.log_prob(value)

    def sample(self, batch_size):
        """Draw batch_size samples from the mixture."""
        return self.model.sample(batch_size)

    def fit(self, x, **kwargs):
        """Fit the mixture to data x; returns the training statistics."""
        return train(Data(x), self.model, cross_entropy, **kwargs)

    def predict(self, x):
        """Hard-assign each point in x to its most likely component."""
        per_component = torch.stack([component.log_prob(x)
                                     for component in self.model.models])
        _, labels = per_component.max(dim=0)
        return labels
class VariationalCategorical(ConditionalModel):
    """Amortized categorical posterior q(z|x): an MLP maps the input to
    component probabilities and samples via a hard Gumbel-Softmax."""
    has_latents = True

    def __init__(self, conditional_kwargs=None):
        # Build the defaults fresh on every call; the previous `{}` default
        # argument is shared across calls in Python (mutable-default pitfall).
        preset_kwargs = {'input_dim': 1, 'hidden_sizes': [24, 24], 'activation': 'ReLU',
                         'output_shapes': [2], 'output_activations': [Softmax(dim=-1)],
                         'distribution': partial(GumbelSoftmax, temperature=1.0,
                                                 hard=True, learnable=False)}
        preset_kwargs.update(conditional_kwargs or {})
        super().__init__(**preset_kwargs)
class VariationalGaussianMixtureModel(Distribution):
    """Gaussian mixture trained variationally: an amortized Gumbel-Softmax
    posterior q(z|x) (VariationalCategorical) picks the component, a uniform
    Categorical prior p(z) regularizes it, and fitting maximizes the ELBO."""
    has_latents = True

    def __init__(self, n_components=2, n_dims=1, variational_kwargs=None, elbo_kwargs=None):
        super().__init__()
        # Copy the kwargs instead of mutating the caller's dict: the previous
        # `variational_kwargs={}` default was mutated in place via .update(),
        # leaking state across instances (mutable-default pitfall).
        variational_kwargs = dict(variational_kwargs or {})
        elbo_kwargs = dict(elbo_kwargs or {})
        self.n_components = n_components
        self.n_dims = n_dims
        self.normals = ModuleList([Normal(torch.randn(n_dims), torch.eye(n_dims))
                                   for _ in range(n_components)])
        variational_kwargs.update({'input_dim': n_dims,
                                   'output_shapes': [n_components]})
        self.variational_kwargs = variational_kwargs
        self.elbo_kwargs = elbo_kwargs
        self.categorical = VariationalCategorical(variational_kwargs)
        self.criterion = ELBO(self.categorical, **elbo_kwargs)
        # Uniform, non-learnable prior over components.
        self.prior = Categorical([1.0 / n_components for _ in range(n_components)],
                                 learnable=False)

    def log_prob(self, X, Z=None, n_iter=10):
        """Joint log p(X, Z); when Z is not given it is estimated by averaging
        n_iter samples from the variational posterior q(z|X)."""
        if Z is None:
            Z = self.categorical.sample(X)
            for _ in range(n_iter - 1):
                Z = Z + self.categorical.sample(X)
            Z = Z / n_iter
        latent_probs = self.prior.log_prob(Z)
        # (batch, n_components) component log-likelihoods, weighted by Z.
        log_probs = torch.stack([sub_model.log_prob(X)
                                 for sub_model in self.normals], dim=1)
        return (log_probs * Z).sum(dim=-1) + latent_probs

    def sample(self, batch_size):
        """Ancestral sampling: a component index from the prior, then a draw
        from that component's Gaussian."""
        indices = self.prior.sample(batch_size).view(-1).long()
        samples = torch.stack([sub_model.sample(batch_size)
                               for sub_model in self.normals])
        return samples[indices, np.arange(batch_size)]

    def fit(self, x, **kwargs):
        """Maximize the ELBO on data x; returns the training statistics."""
        data = Data(x)
        return train(data, self, self.criterion, **kwargs)

    def predict(self, x):
        """Hard-assign each point to its most likely component."""
        log_probs = torch.stack([sub_model.log_prob(x)
                                 for sub_model in self.normals])
        _, labels = log_probs.max(dim=0)
        return labels

    def parameters(self):
        """Model parameters excluding the variational network's — those are
        optimized through the ELBO criterion instead."""
        for name, param in self.named_parameters(recurse=True):
            if 'categorical' in name:
                continue
            yield param
# EOF
| StarcoderdataPython |
1660466 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def mergeKLists(self, lists):
        """
        Merge k sorted linked lists into one sorted list by folding
        mergeTwoLists over the inputs (O(k*N) total node visits).

        Removed the unused locals (head/new_list) the original created and
        never read.

        :type lists: List[ListNode]
        :rtype: ListNode
        """
        if not lists:
            return None
        merged = lists[0]
        for other in lists[1:]:
            merged = self.mergeTwoLists(merged, other)
        return merged

    def mergeTwoLists(self, l1, l2):
        """
        Merge two sorted linked lists; splices the remaining tail instead of
        looping over the leftover nodes.

        :type l1: ListNode
        :type l2: ListNode
        :rtype: ListNode
        """
        sentinel = tail = ListNode(0)
        while l1 and l2:
            if l1.val < l2.val:
                tail.next, l1 = l1, l1.next
            else:
                tail.next, l2 = l2, l2.next
            tail = tail.next
        # At most one list still has nodes; attach it wholesale.
        tail.next = l1 or l2
        return sentinel.next
| StarcoderdataPython |
1661850 | <filename>0001 Two Sum.py
# https://leetcode.com/problems/two-sum/
# brute force
# O(n^2) time | O(1) space
# Brute Force -- O(n^2) time | O(1) space
def twoSum(array, targetSum):
    """Return the first pair of *values* [x, y] (in scan order) with
    x + y == targetSum, or [] when no pair exists. O(n^2) time, O(1) space."""
    for left_index, first_num in enumerate(array[:-1]):
        for second_num in array[left_index + 1:]:
            if first_num + second_num == targetSum:
                return [first_num, second_num]
    return []
# Two-pass Hash Table
# O(n) time | O(n) space
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """Two-pass hash map: index every value, then look up each value's
        complement. O(n) time and space. Returns indices [i, j]."""
        index_of = {value: position for position, value in enumerate(nums)}
        for position, value in enumerate(nums):
            partner = index_of.get(target - value)
            if partner is not None and partner != position:
                return [position, partner]
# One-pass Hash Table
# O(n) time | O(n) space
class Solution:
    def twoSum(self, nums: List[int], target: int) -> List[int]:
        """One-pass hash map: while scanning, check whether the complement of
        the current value was already seen. O(n) time and space."""
        seen_positions = {}
        for index, value in enumerate(nums):
            needed = target - value
            if needed in seen_positions:
                return [seen_positions[needed], index]
            seen_positions[value] = index
1788384 | from __future__ import absolute_import
# flake8: noqa
# import apis into api package
from swagger_client.api.core_employees_v2_api import CoreEmployeesV2Api
from swagger_client.api.core_me_api import CoreMeApi
| StarcoderdataPython |
3258713 | # Copyright (c) Lawrence Livermore National Security, LLC and other VisIt
# Project developers. See the top-level LICENSE file for dates and other
# details. No copyright assignment is required to contribute to VisIt.
"""
file: __init__.py
author: <NAME> <<EMAIL>>
created: 3/28/2012
description:
Init for 'visit_flow_vpe'.
"""
from .visit_flow_vpe import *
| StarcoderdataPython |
3213451 | <reponame>jiangdou2015/blog
from django.shortcuts import render_to_response, get_object_or_404
from djpjax import pjax
from blogpost.models import Blogpost
def index(request):
    """Render the landing page with the five most recent blog posts."""
    latest_posts = Blogpost.objects.all()[:5]
    return render_to_response('index.html', {'posts': latest_posts})
@pjax(pjax_template="pjax.html", additional_templates={"#pjax-inner-content": "pjax_inner.html"})
def view_post(request, slug):
    """Render a single post looked up by slug (404 if missing); PJAX-aware."""
    post = get_object_or_404(Blogpost, slug=slug)
    return render_to_response('blogpost_detail.html', {'post': post})
| StarcoderdataPython |
78728 | <reponame>tbsschroeder/dbas
import dbas.handler.issue as ih
from dbas.database import DBDiscussionSession
from dbas.database.discussion_model import Issue, User, Language
from dbas.strings.translator import Translator
from dbas.tests.utils import construct_dummy_request, TestCaseWithConfig
class TestIssueDictByIssue(TestCaseWithConfig):
    """get_issue_dict_for should return a non-empty, error-free dict for a
    known issue from the fixtures."""

    def test_get_issue_dict_for(self):
        lang = 'en'
        # Issue uid 2 exists in the test database fixtures.
        issue_cat: Issue = DBDiscussionSession.query(Issue).get(2)
        response = ih.get_issue_dict_for(issue_cat,
                                         issue_cat.uid, lang)
        self.assertTrue(len(response) > 0)
        self.assertTrue(len(response['error']) == 0)
class IssueHandlerTests(TestCaseWithConfig):
    """Integration tests for dbas.handler.issue against the discussion DB fixtures."""

    def test_set_issue(self):
        # Creating a fresh issue as user_tobi should succeed; the issue is
        # deleted again at the end to keep the fixtures clean.
        db_lang = DBDiscussionSession.query(Language).filter_by(ui_locales='en').first()
        info = 'infoinfoinfo'
        long_info = 'long_infolong_infolong_info'
        title = 'titletitletitle'
        response = ih.set_issue(self.user_tobi, info, long_info, title, db_lang, False, False)
        # NOTE(review): len(...) >= 0 is always true — presumably > 0 was meant.
        self.assertTrue(len(response['issue']) >= 0)
        DBDiscussionSession.query(Issue).filter_by(title=title).delete()

    def test_prepare_json_of_issue(self):
        response = ih.prepare_json_of_issue(self.issue_town, self.user_anonymous)
        self.assertTrue(len(response) > 0)

    def test_get_number_of_arguments(self):
        # Issue 0 does not exist -> 0 arguments; issue 1 has some.
        response = ih.get_number_of_arguments(0)
        self.assertTrue(response == 0)
        response = ih.get_number_of_arguments(1)
        self.assertTrue(response > 0)

    def test_get_number_of_statements(self):
        # Same pattern: missing issue -> 0, existing issue -> positive count.
        response = ih.get_number_of_statements(0)
        self.assertTrue(response == 0)
        response = ih.get_number_of_statements(1)
        self.assertTrue(response > 0)

    def test_get_number_of_active_participation_users(self):
        # Issue 8 has no authors in the fixtures; issues 2 and 4 do.
        response = ih.get_number_of_authors(8)
        self.assertTrue(response == 0)
        response = ih.get_number_of_authors(2)
        self.assertTrue(response > 0)
        response = ih.get_number_of_authors(4)
        self.assertTrue(response > 0)

    def test_get_id_of_slug(self):
        # Looking an issue up by slug returns the same uid.
        queried_issue = ih.get_id_of_slug(self.issue_cat_or_dog.slug)
        self.assertEqual(queried_issue.uid, self.issue_cat_or_dog.uid)

    def test_get_issue_id(self):
        # The issue id may come from the matchdict, params, session or JSON body.
        request = construct_dummy_request(matchdict={'issue': 1})
        issue1 = ih.get_issue_id(request)
        self.assertEqual(issue1, 1)
        request = construct_dummy_request(params={'issue': 2})
        issue2 = ih.get_issue_id(request)
        self.assertEqual(issue2, 2)
        request = construct_dummy_request(session={'issue': 3})
        issue3 = ih.get_issue_id(request)
        self.assertEqual(issue3, 3)
        request = construct_dummy_request(json_body={'issue': 4})
        issue4 = ih.get_issue_id(request)
        self.assertEqual(issue4, 4)

    def test_get_title_for_slug(self):
        queried_title = ih.get_title_for_slug(self.issue_cat_or_dog.slug)
        self.assertEqual(queried_title, self.issue_cat_or_dog.title)

    def test_get_issues_overview(self):
        # Tobias owns issues in the fixtures, Christian does not.
        response = ih.get_issues_overview_for(self.user_tobi, 'http://test.url')
        self.assertIn('user', response)
        self.assertIn('other', response)
        self.assertTrue(len(response['user']) > 0)
        self.assertTrue(len(response['other']) == 0)
        response = ih.get_issues_overview_for(self.user_christian, 'http://test.url')
        self.assertIn('user', response)
        self.assertIn('other', response)
        self.assertTrue(len(response['user']) == 0)
        self.assertTrue(len(response['other']) > 0)

    def test_get_issues_overview_on_start(self):
        response = ih.get_issues_overview_on_start(self.user_tobi)
        self.assertIn('issues', response)
        self.assertIn('readable', response['issues'])
        self.assertIn('writable', response['issues'])
        self.assertIn('data', response)

    def test_set_discussions_properties(self):
        # Unknown property keys and non-admin users produce an error; the
        # issue author (Tobias) can toggle 'enable' back and forth.
        db_walter = DBDiscussionSession.query(User).filter_by(nickname='Walter').one_or_none()
        issue_slug = 'cat-or-dog'
        db_issue = DBDiscussionSession.query(Issue).filter_by(slug=issue_slug).one()
        translator = Translator('en')
        enable = True
        response = ih.set_discussions_properties(db_walter, db_issue, enable, 'somekeywhichdoesnotexist', translator)
        self.assertTrue(len(response['error']) > 0)
        db_christian = DBDiscussionSession.query(User).filter_by(nickname='Christian').one_or_none()
        response = ih.set_discussions_properties(db_christian, db_issue, enable, 'somekeywhichdoesnotexist', translator)
        self.assertTrue(len(response['error']) > 0)
        response = ih.set_discussions_properties(db_christian, db_issue, enable, 'somekeywhichdoesnotexist', translator)
        self.assertTrue(len(response['error']) > 0)
        db_tobias = DBDiscussionSession.query(User).filter_by(nickname='Tobias').one_or_none()
        response = ih.set_discussions_properties(db_tobias, db_issue, enable, 'enable', translator)
        self.assertTrue(len(response['error']) == 0)
        self.assertTrue(DBDiscussionSession.query(Issue).filter_by(slug=issue_slug).one().is_disabled is False)
        enable = False
        response = ih.set_discussions_properties(db_tobias, db_issue, enable, 'enable', translator)
        self.assertTrue(len(response['error']) == 0)
        self.assertTrue(DBDiscussionSession.query(Issue).filter_by(slug=issue_slug).one().is_disabled is True)
        enable = True
        response = ih.set_discussions_properties(db_tobias, db_issue, enable, 'enable', translator)
        self.assertTrue(len(response['error']) == 0)
        self.assertTrue(DBDiscussionSession.query(Issue).filter_by(slug=issue_slug).one().is_disabled is False)
| StarcoderdataPython |
1615664 | <filename>models/fs_networks.py
"""
Copyright (C) 2019 NVIDIA Corporation. All rights reserved.
Licensed under the CC BY-NC-SA 4.0 license (https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
"""
import torch
import torch.nn as nn
class InstanceNorm(nn.Module):
    """Parameter-free instance normalization over the spatial dims (H, W).

    Written without in-place ops so autograd graphs stay valid (see
    https://discuss.pytorch.org/t/.../836/3 referenced by the original).
    """

    def __init__(self, epsilon=1e-8):
        super(InstanceNorm, self).__init__()
        self.epsilon = epsilon

    def forward(self, x):
        # Center per (batch, channel), then scale by the reciprocal RMS.
        centered = x - torch.mean(x, (2, 3), True)
        inv_rms = torch.rsqrt(torch.mean(centered * centered, (2, 3), True) + self.epsilon)
        return centered * inv_rms
class ApplyStyle(nn.Module):
    """AdaIN-style affine modulation: map a latent vector to a per-channel
    scale and shift and apply them to a feature map.

    @ref: https://github.com/lernapparat/lernapparat/blob/master/style_gan/pytorch_style_gan.ipynb
    """

    def __init__(self, latent_size, channels):
        super(ApplyStyle, self).__init__()
        # Produces 2 * channels values: one scale and one shift per channel.
        self.linear = nn.Linear(latent_size, channels * 2)

    def forward(self, x, latent):
        style = self.linear(latent)                 # [batch, 2 * channels]
        style = style.view(-1, 2, x.size(1), 1, 1)  # [batch, 2, channels, 1, 1]
        # The scale is offset by 1 so a zero style output is the identity.
        # (Cleaned up: the original multiplied both halves by a no-op `* 1`
        # and carried a commented-out duplicate of this line.)
        return x * (style[:, 0] + 1.) + style[:, 1]
class ResnetBlock_Adain(nn.Module):
    """Residual block whose two convolutions are each followed by a
    parameter-free InstanceNorm and a latent-driven modulation (ApplyStyle).

    Output = x + style2(IN(conv2(act(style1(IN(conv1(x))))))), so with
    zero-initialized weights the block reduces to the identity mapping.
    """

    def __init__(self, dim, latent_size, padding_type, activation=nn.ReLU(True)):
        # NOTE(review): the default ``activation`` module is created once at
        # definition time and shared by every block using the default;
        # harmless for a stateless ReLU, but worth knowing.
        super(ResnetBlock_Adain, self).__init__()
        # Submodules are created in the same order as before, so state_dict
        # keys and seeded parameter initialization are unchanged.
        self.conv1 = self._build_conv(dim, padding_type)
        self.style1 = ApplyStyle(latent_size, dim)
        self.act1 = activation
        self.conv2 = self._build_conv(dim, padding_type)
        self.style2 = ApplyStyle(latent_size, dim)

    @staticmethod
    def _build_conv(dim, padding_type):
        """Build one pad -> 3x3 conv (dim -> dim) -> InstanceNorm branch.

        Factored out of __init__, which previously duplicated this logic
        verbatim for both convolutions.
        """
        layers = []
        p = 0  # implicit conv padding; only used for 'zero'
        if padding_type == 'reflect':
            layers += [nn.ReflectionPad2d(1)]
        elif padding_type == 'replicate':
            layers += [nn.ReplicationPad2d(1)]
        elif padding_type == 'zero':
            p = 1
        else:
            raise NotImplementedError('padding [%s] is not implemented' % padding_type)
        layers += [nn.Conv2d(dim, dim, kernel_size=3, padding=p), InstanceNorm()]
        return nn.Sequential(*layers)

    def forward(self, x, dlatents_in_slice):
        y = self.conv1(x)
        y = self.style1(y, dlatents_in_slice)
        y = self.act1(y)
        y = self.conv2(y)
        y = self.style2(y, dlatents_in_slice)
        return x + y  # residual connection
class Generator_Adain_Upsample(nn.Module):
    """Encoder / bottleneck / decoder generator whose bottleneck residual
    blocks are modulated by a latent vector (AdaIN via ResnetBlock_Adain).

    Structure: 7x7 stem -> 3 (4 when ``deep``) stride-2 downsampling convs
    -> ``n_blocks`` style-modulated residual blocks at 512 channels ->
    matching bilinear-upsample conv stages -> 7x7 output conv + Tanh,
    rescaled from [-1, 1] to [0, 1].

    NOTE(review): the up/down symmetry implies the input spatial size should
    be divisible by 8 (16 when ``deep``) to reproduce the input resolution —
    confirm against callers.
    """

    def __init__(self, input_nc, output_nc, latent_size, n_blocks=6, deep=False,
                 norm_layer=nn.BatchNorm2d,
                 padding_type='reflect'):
        # input_nc / output_nc: channel counts of the input / output images.
        # latent_size: dimensionality of the latent fed to each bottleneck block.
        # n_blocks: number of AdaIN residual blocks (must be >= 0).
        # deep: adds a fourth 512 -> 512 downsample/upsample pair.
        # padding_type: padding mode used inside the residual blocks.
        assert (n_blocks >= 0)
        super(Generator_Adain_Upsample, self).__init__()
        activation = nn.ReLU(True)
        self.deep = deep
        # Stem: reflection-padded 7x7 conv keeps the spatial size.
        self.first_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(input_nc, 64, kernel_size=7, padding=0),
                                         norm_layer(64), activation)
        ### downsample: each stage halves the spatial size and grows channels.
        self.down1 = nn.Sequential(nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
                                   norm_layer(128), activation)
        self.down2 = nn.Sequential(nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
                                   norm_layer(256), activation)
        self.down3 = nn.Sequential(nn.Conv2d(256, 512, kernel_size=3, stride=2, padding=1),
                                   norm_layer(512), activation)
        if self.deep:
            self.down4 = nn.Sequential(nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1),
                                       norm_layer(512), activation)
        ### resnet blocks: latent-modulated bottleneck at 512 channels.
        BN = []
        for i in range(n_blocks):
            BN += [
                ResnetBlock_Adain(512, latent_size=latent_size, padding_type=padding_type, activation=activation)]
        self.BottleNeck = nn.Sequential(*BN)
        # Decoder: bilinear upsample + 3x3 conv per stage, mirroring the
        # encoder. NOTE(review): nn.Upsample is used without align_corners;
        # the framework default applies — confirm this matches any reference
        # checkpoint before changing it.
        if self.deep:
            self.up4 = nn.Sequential(
                nn.Upsample(scale_factor=2, mode='bilinear'),
                nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(512), activation
            )
        self.up3 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(512, 256, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(256), activation
        )
        self.up2 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(256, 128, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(128), activation
        )
        self.up1 = nn.Sequential(
            nn.Upsample(scale_factor=2, mode='bilinear'),
            nn.Conv2d(128, 64, kernel_size=3, stride=1, padding=1),
            nn.BatchNorm2d(64), activation
        )
        # Output head: 7x7 conv back to output_nc channels, Tanh -> [-1, 1].
        self.last_layer = nn.Sequential(nn.ReflectionPad2d(3), nn.Conv2d(64, output_nc, kernel_size=7, padding=0),
                                        nn.Tanh())

    def forward(self, input, dlatents):
        """Encode ``input``, run the latent-modulated bottleneck, decode.

        ``dlatents`` is passed unchanged to every bottleneck block.
        NOTE: the ``skip*`` locals below are stored but never re-used — there
        are no skip connections in this decoder.
        """
        x = input  # e.g. 3*224*224 per the original author's comment
        skip1 = self.first_layer(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        if self.deep:
            skip4 = self.down3(skip3)
            x = self.down4(skip4)
        else:
            x = self.down3(skip3)
        # Each block receives the same latent slice.
        for i in range(len(self.BottleNeck)):
            x = self.BottleNeck[i](x, dlatents)
        if self.deep:
            x = self.up4(x)
        x = self.up3(x)
        x = self.up2(x)
        x = self.up1(x)
        x = self.last_layer(x)
        # Map Tanh output from [-1, 1] to [0, 1].
        x = (x + 1) / 2
        return x
class Discriminator(nn.Module):
    """Convolutional discriminator that exposes its intermediate activations.

    Four stride-2 conv stages followed by two stride-1 conv layers.
    ``forward`` returns the output of every stage (six feature maps, e.g.
    for feature-matching losses); the final entry is the patch-level
    real/fake map, optionally squashed by a sigmoid.
    """

    def __init__(self, input_nc, norm_layer=nn.BatchNorm2d, use_sigmoid=False):
        super(Discriminator, self).__init__()
        kernel, pad = 4, 1

        # Stride-2 stages: each halves the spatial size and doubles the
        # channel count. The first stage is not normalized.
        self.down1 = nn.Sequential(
            nn.Conv2d(input_nc, 64, kernel_size=kernel, stride=2, padding=pad),
            nn.LeakyReLU(0.2, True))
        self.down2 = nn.Sequential(
            nn.Conv2d(64, 128, kernel_size=kernel, stride=2, padding=pad),
            norm_layer(128),
            nn.LeakyReLU(0.2, True))
        self.down3 = nn.Sequential(
            nn.Conv2d(128, 256, kernel_size=kernel, stride=2, padding=pad),
            norm_layer(256),
            nn.LeakyReLU(0.2, True))
        self.down4 = nn.Sequential(
            nn.Conv2d(256, 512, kernel_size=kernel, stride=2, padding=pad),
            norm_layer(512),
            nn.LeakyReLU(0.2, True))

        # Stride-1 head: one more feature layer, then a 1-channel score map.
        self.conv1 = nn.Sequential(
            nn.Conv2d(512, 512, kernel_size=kernel, stride=1, padding=pad),
            norm_layer(512),
            nn.LeakyReLU(0.2, True))
        if use_sigmoid:
            self.conv2 = nn.Sequential(
                nn.Conv2d(512, 1, kernel_size=kernel, stride=1, padding=pad),
                nn.Sigmoid())
        else:
            self.conv2 = nn.Sequential(
                nn.Conv2d(512, 1, kernel_size=kernel, stride=1, padding=pad))

    def forward(self, input):
        """Run all six stages in order, collecting each stage's output."""
        features = []
        x = input
        for stage in (self.down1, self.down2, self.down3, self.down4,
                      self.conv1, self.conv2):
            x = stage(x)
            features.append(x)
        return features
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.