id stringlengths 1 265 | text stringlengths 6 5.19M | dataset_id stringclasses 7
values |
|---|---|---|
177970 |
# implementing RNN and LSTM
# %%
import pandas as pd
import numpy as np
import nltk
import sklearn
import matplotlib.pyplot as plt
import re
import tqdm
twitter_df = pd.read_csv('twitter_train.csv')
twitter_df = twitter_df.fillna('0')
twitter_df_test = pd.read_csv('twitter_test.csv')
twitter_df_test = twitter_df_test.fillna('0')
twitter_df = twitter_df.drop('location', axis = 1)
import json
"""with open('contractions.json', "w") as f:
json.dump(contractions, f)"""
with open('abbrevations.json') as f:
abbrevation = json.load(f)
# importing required libraries for RNN
import tensorflow as tf;
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
from nltk.corpus import stopwords
from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
# nltk.download('wordnet')
print("Tensorflow version", tf.__version__)
# preprocessing include lemmatizing, stemming, stopwords removal
# tokenizing, pad_sequences
# %%
lemmatizer = WordNetLemmatizer()
stop_words = stopwords.words('english')
stemmer = PorterStemmer()
from nltk import TweetTokenizer
wt = TweetTokenizer()
# Cleaning means to extract the useful information from the text data and removing those
# data that does not contribute to the LSTM and RNN learnings.
# The clean_text function lower cases the input text, removes the tags and mentions
# expands the contractions, can deal with emojis, non alphabets.
# It also removes the stop words and Lemmatizes the word to its root word.
def text_clean(text, abbrevations=abbrevation, stemmer=False):
    """Clean one raw tweet for the RNN/LSTM pipeline.

    Steps: lower-case, expand abbreviations, strip URLs and @mentions,
    drop punctuation/digits/newlines, remove stop words, lemmatize (or
    stem) each token, and finally strip emoji.

    Args:
        text (str): raw tweet text.
        abbrevations (dict): token -> expansion map (module-level JSON).
        stemmer: falsy to lemmatize (default); otherwise an object exposing
            ``.stem`` (e.g. ``PorterStemmer()``).  Passing ``True`` would
            crash since booleans have no ``stem`` method.

    Returns:
        str: the cleaned, space-joined text.
    """
    text = text.lower()
    # Expand abbreviations on whole tokens only; the previous
    # ``text.replace(word, ...)`` also rewrote matching substrings
    # inside unrelated words.
    text = ' '.join(abbrevations.get(word, word) for word in text.split())
    # removing URL, tags, non-alphabets, new-lines (raw strings so the
    # regex escapes are not mangled by the Python lexer)
    text = re.sub(r'(https?://\S+|www\.\S+)', '', text)  # http(s)/www links
    text = re.sub(r'@([a-zA-Z0-9:_]+)\s', '', text)      # mentions / tags
    text = re.sub(r'[^\w\s]', ' ', text)                 # keep only words and spaces
    text = re.sub(r'\d', ' ', text)                      # digits
    text = re.sub(r'\n', ' ', text)                      # newlines
    # Lemmatising / stemming and stop-word removal
    extended_stop_words_re = stop_words + ['&', 'rt', 'th', 'co', 're', 've',
                                           'kim', 'daca', 'p.m.', 'retweet', 'ir']
    if not stemmer:
        text = ' '.join(lemmatizer.lemmatize(word) for word in text.split()
                        if word not in extended_stop_words_re)
    else:
        text = ' '.join(stemmer.stem(word) for word in text.split()
                        if word not in extended_stop_words_re)
    emoji_pattern = re.compile("["
                               u"\U0001F600-\U0001F64F"  # emoticons
                               u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                               u"\U0001F680-\U0001F6FF"  # transport & map symbols
                               u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                               u"\U00002702-\U000027B0"
                               u"\U000024C2-\U0001F251"
                               "]+", flags=re.UNICODE)
    return emoji_pattern.sub('', text)
# further cleaning is needed.
# tekenization of the cleaned text.
cleaned1 = lambda text: text_clean(text)
cleaned2 = lambda text: re.sub('\s[a-z]{,3}\s', ' ', text)
cleaned3 = lambda row: wt.tokenize(row) # tokenizing the row
# escented characters
# expanding contractions with the words in the contractions dictionary
# stemming, lemmatizing
# negated words can be used to get the sentiment
# extra spaces shouls also be removed
# %%
# Cleaning the input text with help of utility functions
twitter_df['cleaned_text'] = twitter_df['text'].apply(cleaned1)
twitter_df['cleaned_text_'] = twitter_df['cleaned_text'].apply(cleaned2)
twitter_df['tokenized_text'] = twitter_df['cleaned_text_'].apply(cleaned3)
twitter_df_test['cleaned_text'] = twitter_df_test['text'].apply(cleaned1)
twitter_df_test['cleaned_text_'] = twitter_df_test['cleaned_text'].apply(cleaned2)
twitter_df_test['tokenized_text'] = twitter_df_test['cleaned_text_'].apply(cleaned3)
# twitter_df[['id','text','cleaned_text','target']].iloc[50:110]
length = []
length = twitter_df.cleaned_text.apply(lambda x: len(x.split()))
np.max(length)
train_text = twitter_df.cleaned_text
train_label = twitter_df.target
test_text = twitter_df_test.cleaned_text
oov_tok = '<oov>'
padding_type = 'post'
trun_type = 'post'
max_length = 25
# %%
tokenizer = Tokenizer(num_words = 10000, oov_token = oov_tok)
tokenizer.fit_on_texts(train_text)
word_index = tokenizer.word_index
train_sequences = tokenizer.texts_to_sequences(train_text)
print("train sequences sample", train_sequences[10])
test_sequences = tokenizer.texts_to_sequences(test_text)
print("test sequences sample", test_sequences[1])
train_padded = pad_sequences(train_sequences, maxlen=max_length, padding=padding_type, truncating=trun_type)
test_padded = pad_sequences(test_sequences, maxlen=max_length, padding=padding_type, truncating=trun_type)
vocab_size = len(word_index) + 1
#
label_tokenizer = Tokenizer()
# %%
from gensim.models.word2vec import Word2Vec
embedding_index = {}
# for text in twitter_df:
# embedding = model.infer_vector(text)
# %%
# utility function to return the string of a list of numbers.
# this will be the input to the tokenizer methods
def return_str(text):
    """Return the Series with every element converted to its string form.

    The Keras tokenizer expects text, so numeric labels are stringified
    before being handed to ``fit_on_texts`` / ``texts_to_sequences``.
    """
    return text.map(str)
label_tokenizer.fit_on_texts(return_str(twitter_df.target))
train_label_seq = np.array(label_tokenizer.texts_to_sequences(return_str(train_label)))
print(train_label_seq.shape)
train_label_seq = train_label.values.reshape(-1,1)
# trying to explore the original tweet and tweet after padding
reverse_word_index = dict([(index, word) for word, index in word_index.items()])
def decode_article(text):
    """Map a (padded) sequence of token ids back to words.

    Ids missing from the module-level ``reverse_word_index`` render
    as '?' so decoding never raises.
    """
    words = (reverse_word_index.get(token_id, '?') for token_id in text)
    return ' '.join(words)
print(decode_article(train_padded[10]))
print('-----------------')
print(train_text.iloc[10])
# %%
from gensim.models.word2vec import Word2Vec
vector_size = 100
wv = Word2Vec(sentences=twitter_df.tokenized_text,size = vector_size, iter = 50)
from collections import Counter
embedding_matrix = np.zeros((vocab_size, vector_size))
# word_index.pop("''")
for word, index in word_index.items():
try:
embedding_vector = wv[word]
if embedding_vector is not None:
embedding_matrix[index] = embedding_vector
except:
pass
# %%
embedding_dim = 100
# building the Model Architecture
from tensorflow.keras import layers # importing Dense, Bidirectional, LSTM, Dropout, Embedding
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.optimizers import Adam, SGD, Nadam
# Word Embeddings are the inputs to the neural networks.
def create_embeddings(vocab_size, embedding_dim,):
    # Embedding layer that maps integer token ids to dense vectors,
    # seeded with the module-level Word2Vec ``embedding_matrix``.
    return layers.Embedding(vocab_size, embedding_dim, embeddings_initializer='GlorotNormal', weights = [embedding_matrix])
# Utility function to get fully connected dense Layers
def create_dense(in_dim = embedding_dim, activ_in = 'relu'):
    # Fully connected layer; ``in_dim`` defaults to the module-level
    # embedding size, captured once at function-definition time.
    return layers.Dense(in_dim, activation = activ_in, use_bias = True)
# Create LSTM layers
def get_LSTM(embedding_dim, Bi_directional = True, dropout=0.2):
    """Build an LSTM layer, optionally wrapped in ``Bidirectional``.

    Args:
        embedding_dim (int): number of LSTM units.
        Bi_directional (bool): wrap the LSTM in ``layers.Bidirectional``.
        dropout (float): input dropout rate (recurrent dropout fixed at 0.3).

    Returns:
        a Keras layer instance.
    """
    if Bi_directional:
        return layers.Bidirectional(layers.LSTM(embedding_dim, recurrent_dropout=0.3, dropout=dropout))
    # Bug fix: this branch previously referenced a bare ``LSTM`` name that
    # was never imported (NameError at call time); qualify with ``layers``.
    return layers.LSTM(embedding_dim, recurrent_dropout=0.3, dropout=dropout)
# Dropout can be represented as one of the core layers.
# They handle overfitting of the neural networks by allowing all the nodes to learn the weights
# Lot of fine tuning can be done to the Dropout layer.
def drop_out(dropout_rate = 0.2):
    """Return a Dropout layer with the given drop probability."""
    return layers.Dropout(dropout_rate)
def get_seq_model(in_dim = embedding_dim,
                  out_class=2,
                  optimizers_ = 'sgd',
                  learning_rate = 0.000083,
                  activ_out = 'softmax'):
    """Build and compile the Bi-LSTM sequence classifier.

    Args:
        in_dim (int): accepted for interface compatibility (layer sizes
            actually come from the module-level ``embedding_dim``).
        out_class (int): number of output classes.
        optimizers_ (str): 'adam', 'sgd' or anything else for Nadam.
        learning_rate (float): optimizer learning rate.
        activ_out (str): activation of the final layer.

    Returns:
        a compiled ``tf.keras.Sequential`` model.
    """
    # frees up GPU memory every time the code is run fresh
    tf.keras.backend.clear_session()
    model = tf.keras.Sequential([
        # embedding layer initialised from the Word2Vec matrix
        # (input vocab size is the module-level ``vocab_size``)
        create_embeddings(vocab_size, embedding_dim),
        drop_out(0.3),
        # Bi-Directional LSTM
        get_LSTM(embedding_dim, dropout=0.3),
        # two fully connected blocks, each followed by dropout
        create_dense(embedding_dim),
        drop_out(0.3),
        create_dense(embedding_dim),
        drop_out(0.3),
        # final output layer; honour ``activ_out``, which the previous
        # version accepted but silently ignored (softmax was hard-coded)
        layers.Dense(out_class, activation=activ_out, use_bias=True)
    ])
    # ``lr=`` is deprecated (and removed in recent Keras); use the
    # ``learning_rate=`` keyword instead.
    if optimizers_.lower() == "adam":
        opt = Adam(learning_rate=learning_rate)
    elif optimizers_.lower() == "sgd":
        opt = SGD(learning_rate=learning_rate)
    else:
        opt = Nadam(learning_rate=learning_rate)
    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer=opt,
                  metrics=['accuracy'])
    return model
# %%
# creating the model
num_epochs=30
model = get_seq_model(learning_rate=0.01)
# printing model summary to console
print(model.summary())
# callbacks stop the traiing after predefined patience
early_stopping = EarlyStopping(monitor='val_accuracy', patience=2)
# training the model
result = model.fit(train_padded, train_label_seq,
epochs=num_epochs,
# callbacks = [early_stopping],
verbose=2, validation_split=0.2
)
def plot_graphs(history, string, save = False):
    """Plot a training metric and its validation counterpart over epochs.

    Bug fix: the body previously read the module-level ``result`` object,
    ignoring the ``history`` argument entirely.

    Args:
        history: the ``History`` object returned by ``model.fit``.
        string (str): metric name, e.g. 'accuracy' or 'loss'.
        save (bool): also save the figure as "Bi-LSTM <metric>".
    """
    plt.plot(history.history[string])
    plt.plot(history.history['val_' + string])
    plt.xlabel("Epochs")
    plt.ylabel(string)
    plt.legend([string, 'val_' + string])
    title = "Bi-LSTM " + string
    if save:
        plt.savefig(title)
    plt.show()
plot_graphs(result, "accuracy")
plot_graphs(result, "loss")
# %%
import csv
id_1 = twitter_df_test.id
def save_pred(model, id_ = id_1, name_ = "name_1.csv", vectors_ = test_padded):
    """Write the model's class predictions to a Kaggle-style submission CSV.

    Args:
        model: trained Keras model exposing ``predict_classes``.
        id_: iterable of test-set ids (defaults to the module-level ids).
        name_ (str): output CSV path, written in the current folder.
        vectors_: padded input sequences to predict on.
    """
    predict = model.predict_classes(vectors_)
    # one prediction per id -- replaces the hard-coded ``len == 3263``
    # assertion, which broke on any other test set
    assert len(predict) == len(id_)
    # the file is saved in the current folder
    with open(name_, 'w', newline='\n') as f:
        writer = csv.writer(f)
        writer.writerow(['id', 'target'])
        # ``row_id`` avoids shadowing the ``id_`` parameter inside the loop
        for row_id, target in zip(id_, predict):
            writer.writerow([row_id, target])
save_pred(model, id_=id_1,name_='rnn_bilstm_1.csv')
# %%
| StarcoderdataPython |
1742575 | <reponame>skimbrel/strowger<gh_stars>1-10
import re
from mock import Mock, patch
from nose.tools import assert_equal
from nose.tools import assert_is_instance
from unittest import TestCase
from flask import request
from twilio import twiml
from strowger import switch
TEST_TWILIO_REQUEST_DATA = {
'From': '+14155551234',
'To': '+14158675309',
'NumMedia': '0',
'MessageSid': 'MM{}'.format('a' * 34),
'AccountSid': 'AC{}'.format('a' * 34),
}
class SwitchTestCase(TestCase):
    """Unit tests for Switch construction, with Flask and the URL Map patched out."""

    def setUp(self):
        # Patch the Flask app and the Map class used inside strowger.switch
        # so no real WSGI app or routing table is created.
        self.flask_patcher = patch('strowger.switch.Flask')
        self.m_flask = self.flask_patcher.start()
        self.map_patcher = patch('strowger.switch.Map')
        self.m_map_class = self.map_patcher.start()

    def tearDown(self):
        # Stop patchers in reverse order of start.
        self.map_patcher.stop()
        self.flask_patcher.stop()

    def test_init(self):
        m_flask_app = Mock()
        self.m_flask.return_value = m_flask_app
        m_map = Mock()
        self.m_map_class.return_value = m_map
        s = switch.Switch(__name__)
        # The Switch must expose the Flask app and register the root route
        # for both GET and POST.
        assert_equal(s.app, m_flask_app)
        m_flask_app.route.assert_called_with('/', methods=['GET', 'POST'])
        assert_equal(s.mapping, m_map)
        m_handler = Mock()
        # A custom default handler must be forwarded to the Map constructor.
        s = switch.Switch(__name__, default_handler=m_handler)
        self.m_map_class.assert_called_with(default_handler=m_handler)
class SwitchWithFlaskAppTestCase(TestCase):
    """Integration tests driving a real (test-mode) Flask app through Switch.

    NOTE: the ``unicode(...)`` calls below mean this suite targets Python 2.
    """

    def setUp(self):
        self.s = switch.Switch(__name__)
        self.s.app.testing = True
        self.app = self.s.app.test_client()

    def test_basic_handler(self):
        # A handler connected for 'foobar' should receive the parsed Twilio
        # request plus a twiml.Response object to populate and return.
        @self.s.connect('foobar')
        def _verify_handler(request, response):
            assert_equal(request.from_number, '+14155551234')
            assert_equal(request.to_number, '+14158675309')
            assert_equal(request.message_body, 'foobarbaz')
            assert_equal(request.media_count, 0)
            assert_is_instance(response, twiml.Response)
            response.message(msg='quux')
            return response
        request_data = TEST_TWILIO_REQUEST_DATA.copy()
        request_data['Body'] = 'foobarbaz'
        response = self.app.post('/', data=request_data)
        assert_equal(response.status_code, 200)
        twiml_response = twiml.Response()
        twiml_response.message(msg='quux')
        assert_equal(response.get_data(), unicode(twiml_response))

    def test_match_with_groups(self):
        # Named regex groups in the route pattern are forwarded to the
        # handler as extra keyword arguments.
        @self.s.connect('(?P<first>\w+) (?P<second>\w+)')
        def _verify_handler(request, response, first, second):
            assert_equal(first, 'foo')
            assert_equal(second, 'bar')
            assert_equal(request.message_body, 'foo bar')
            return response
        request_data = TEST_TWILIO_REQUEST_DATA.copy()
        request_data['Body'] = 'foo bar'
        response = self.app.post('/', data=request_data)
        assert_equal(response.status_code, 200)
        twiml_response = twiml.Response()
        assert_equal(response.get_data(), unicode(twiml_response))

    def test_twilio_message_request(self):
        # TwilioMessageRequest should expose every field of an MMS request,
        # including per-item media metadata.
        request_data = TEST_TWILIO_REQUEST_DATA.copy()
        request_data.update(
            Body='foo bar',
            NumMedia=2,
            MediaContentType0='image/jpeg',
            MediaUrl0='http://example.com/image0',
            MediaContentType1='image/png',
            MediaUrl1='http://example.com/image1',
            FromCity='San Francisco',
            FromState='CA',
        )
        with self.s.app.test_request_context('/', query_string=request_data):
            twilio_request = switch.TwilioMessageRequest(request)
            assert_equal(twilio_request.message_body, 'foo bar')
            assert_equal(twilio_request.from_number, '+14155551234')
            assert_equal(twilio_request.to_number, '+14158675309')
            assert_equal(twilio_request.message_sid, 'MM{}'.format('a' * 34))
            assert_equal(twilio_request.account_sid, 'AC{}'.format('a' * 34))
            assert_equal(twilio_request.media_count, 2)
            media_items = twilio_request.media_items
            assert_equal(
                media_items[0],
                switch.MediaItem('image/jpeg', 'http://example.com/image0'),
            )
            assert_equal(
                media_items[1],
                switch.MediaItem('image/png', 'http://example.com/image1'),
            )
            assert_equal(twilio_request.flask_request, request)
            assert_equal(twilio_request.raw_values, request.values)
| StarcoderdataPython |
86096 | import pip_setup
pip_setup.install("moviepy")
pip_setup.install("pygame")
import Menu
import pygame
from moviepy.editor import *
import random
from Settings import *
from Sprites import *
from Menu import *
import time
import numpy as np
class GAME :
    def __init__(self):
        #GAME ~initialisation~
        # One-time setup: pygame, the window, the clock and the three
        # looping soundtracks (in-game / menu / game-over).
        self.file = "highscore.txt"  # persistent high-score storage
        pygame.init()
        self.screen = pygame.display.set_mode((WIDTH,HEIGHT))
        self.background = pygame.image.load("background.png").convert()
        pygame.display.set_caption(TITLE)
        self.clock = pygame.time.Clock()
        self.running = True
        self.game_music = pygame.mixer.Sound('bip_bip_bap_1.wav')
        self.menu_music = pygame.mixer.Sound('bip_bip_bop_V4.wav')
        self.gameover_music = pygame.mixer.Sound('bip_bip_bup.wav')
        self.menu_music.set_volume(0.5)
        self.game_music.set_volume(0.5)
        self.gameover_music.set_volume(0.5)
def new_game(self):
#GAME ~new_game~
self.wave = 1
self.shoot = False
self.lastHitTimer = 0
self.lastHitTimer_e = 0
self.lastHitennemyTimer = 0
self.lastShootTimer = 0
self.last_boss_attack = 0
self.anim_jar = 0
self.last_jar_attack = 0
self.anim_player_attack = 0
self.anim_ennemi_attack = 0
self.boss_attack = False
self.all_sprites = pygame.sprite.Group()
self.platforms = pygame.sprite.Group()
self.ennemis = pygame.sprite.Group()
self.weapons = pygame.sprite.Group()
self.skulls = pygame.sprite.Group()
self.jars = pygame.sprite.Group()
self.fires = pygame.sprite.Group()
self.player = PLAYER(self)
self.ennemy_list = []
self.all_sprites.add(self.player)
self.ennemy_speed = 2
self.ennemy_range = 400
self.ennemy_attack = 30
self.life_multiplyer = 0.4
self.x_jar = round(PLATFORMS_LIST[0][0]) + 30
for nbr in range(3):
self.jar = JAR(self.platforms,(self.x_jar+(50*nbr)))
self.jars.add(self.jar)
self.all_sprites.add(self.jar)
for nbr in range(3):
self.dynamic_difficulty()
ennemi = ENNEMI(random.randrange(self.ennemy_speed-1,self.ennemy_speed+1), self.ennemy_range, self.ennemy_attack, self.platforms, 0.5 , 1)
self.ennemis.add(ennemi)
self.all_sprites.add(ennemi)
self.ennemy_list.append(ennemi)
for plat in PLATFORMS_LIST:
p = PLATFORM(*plat)
self.all_sprites.add(p)
self.platforms.add(p)
self.weapon = WEAPON(1)
self.weapons.add(self.weapon)
self.all_sprites.add(self.weapon)
self.skull = WEAPON(2)
self.skulls.add(self.skull)
self.all_sprites.add(self.skull)
self.fire = WEAPON(3)
self.fires.add(self.fire)
self.all_sprites.add(self.fire)
self.player.right = False
self.attacked = False
self.run()
    def run(self):
        #GAME ~loop~
        # Main loop: tick at FPS, process input, advance state, redraw.
        # ``self.play`` is cleared by events() when the player dies.
        self.play = True
        while self.play:
            self.clock.tick(FPS)
            self.events()
            self.update()
            self.draw()
def update(self):
# GAME ~update~
self.all_sprites.update()
#jar animation
for self.jar in self.jars:
if self.anim_jar < 8:
self.jar.image = self.jar.sprites[int(self.anim_jar)]
self.anim_jar = self.anim_jar + 0.05
else :
self.anim_jar = 0
#management of the artificial intelligence of the ennemies
for ennemi in self.ennemis:
ennemi.AI(self.player.pos, self.platforms)
#gestion of the collision of the platform if falling
plat_hits = pygame.sprite.spritecollide(self.player , self.platforms , False)
if self.player.vel.y > 0:
if plat_hits :
#if the player is under the platform
self.player.pos.y = plat_hits[0].rect.top
self.player.vel.y = 0
#gestion of the collision of the player and ennimies
self.player_hits = pygame.sprite.spritecollide(self.player , self.ennemis , False)
self.player_toutch = pygame.sprite.spritecollide(self.player , self.jars , False)
self.player_toutch_fire = pygame.sprite.spritecollide(self.player , self.fires , False)
if (pygame.time.get_ticks() < self.lastHitTimer + 1000):
if self.player.right :
self.player.image = self.player.sprites_attaqued[1]
elif not self.player.right:
self.player.image = self.player.sprites_attaqued[0]
else:
if self.player_hits or self.player_toutch or self.player_toutch_fire :
if self.player.right :
self.player.image = self.player.sprites_attaqued[1]
elif not self.player.right:
self.player.image = self.player.sprites_attaqued[0]
self.lastHitTimer = pygame.time.get_ticks()
self.player.life -= 1
self.player.pos.x -= 3
for ennemi in self.ennemis:
ennemi_hit_together = pygame.sprite.collide_rect(self.player, ennemi)
if ennemi.right:
self.ennemi_looking_at = 1
else:
self.ennemi_looking_at = -1
if ennemi_hit_together and ennemi.type_en == 1:
if ennemi.attack_anim < 10 and self.ennemi_looking_at == 1:
ennemi.image = ennemi.sprites_attack_right[int(ennemi.attack_anim)]
ennemi.attack_anim = ennemi.attack_anim + 0.3
elif ennemi.attack_anim < 10 and self.ennemi_looking_at == -1:
ennemi.image = ennemi.sprites_attack_left[int(ennemi.attack_anim)]
ennemi.attack_anim = ennemi.attack_anim + 0.3
else :
ennemi.attack_anim = 0
#
#if ennemi_hit_together == 1 and (pygame.time.get_ticks() > self.lastHitennemyTimer + 100) and ennemi.isFalling == False:
#ennemi.move(random.randint(-10, 10))
#self.lastHitennemyTimer = pygame.time.get_ticks()
ennemi_hits = pygame.sprite.spritecollide(ennemi , self.weapons , False)
ennemi.life_multiplyer = self.life_multiplyer
if ennemi.type_en == 1 and not self.player_hits:
if (pygame.time.get_ticks() < self.lastHitTimer_e + 500) and ennemi_hits :
if ennemi.right :
ennemi.image = ennemi.sprites_walk[2]
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk[3]
else:
if ennemi.right :
ennemi.image = ennemi.sprites_walk[0]
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk[1]
if ennemi_hits and self.player.pos.x > ennemi.rect.x and self.shoot :
if ennemi.right :
ennemi.image = ennemi.sprites_walk[2]
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk[3]
self.lastHitTimer_e = pygame.time.get_ticks()
ennemi.life -= 1
if ennemi.isFalling == False:
ennemi.rect.x -= 10
elif ennemi_hits and self.player.pos.x < ennemi.rect.x and self.shoot:
if ennemi.right :
ennemi.image = ennemi.sprites_walk[2]
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk[3]
self.lastHitTimer_e = pygame.time.get_ticks()
ennemi.life -= 1
if ennemi.isFalling == False:
ennemi.rect.x += 10
for ennemi in self.ennemis:
if (pygame.time.get_ticks() > self.lastHitTimer_e + 500) :
if ennemi_hits and self.shoot and ennemi.type_en == 2:
if ennemi.right :
ennemi.life -= 1
ennemi.image = ennemi.sprites_walk_right[5]
self.lastHitTimer_e = pygame.time.get_ticks()
elif not ennemi.right:
ennemi.image = ennemi.sprites_walk_left[5]
ennemi.life -= 1
self.lastHitTimer_e = pygame.time.get_ticks()
if ennemi.life < 0 :
ennemi.kill()
self.player.score += 1
if len(self.ennemis) == 0:
self.wave += 1
self.new_wave()
if self.shoot and self.looking_at == -1 :
if self.anim_player_attack < 9 :
self.player.image = self.player.sprites_attack[int(self.anim_player_attack)]
self.anim_player_attack = self.anim_player_attack + 0.3
if self.shoot and self.looking_at == 1 :
if self.anim_player_attack < 9 :
self.player.image = self.player.sprites_attack_r[int(self.anim_player_attack)]
self.anim_player_attack = self.anim_player_attack + 0.3
#gestion of shoot or attack
if self.shoot :
if self.looking_at == -1 :
if self.x < self.x_max :
y = (-9.81 /( 2 * (self.v0 * self.v0) * (math.cos(self.alpha) * math.cos(self.alpha)) ) * (self.x*self.x)) + math.tan(self.alpha) * self.x + self.h
self.weapon.rect.midbottom = (self.coord_x , self.h + (self.h - y))
self.x = self.x + 10
self.coord_x = self.coord_x - 20
else:
self.shoot = False
self.x = ( (self.v0*self.v0) * ( math.sin (2*self.alpha)) ) / (2*9.81)
self.coord_x = 0
elif self.looking_at == 1 :
if self.x < self.x_max :
y = (-9.81 /( 2 * (self.v0 * self.v0) * (math.cos(self.alpha) * math.cos(self.alpha)) ) * (self.x*self.x)) + math.tan(self.alpha) * self.x + self.h
self.weapon.rect.midbottom = (self.coord_x , self.h + (self.h - y) )
self.x = self.x + 10
self.coord_x = self.coord_x + 20
else :
self.shoot = False
self.x = ( (self.v0*self.v0) * ( math.sin (2*self.alpha)) ) / (2*9.81)
self.coord_x = 0
else :
#player have weapon
self.weapon.rect.midbottom = (20000,20000)
for ennemi in self.ennemis:
if ennemi.type_en == 2 and pygame.time.get_ticks() > self.last_boss_attack + 5000:
self.last_boss_attack = pygame.time.get_ticks()
self.boss_h = float(ennemi.rect.y) + 30
self.boss_angle = 50.0 # en degres
self.boss_alpha = float(self.boss_angle * 3.14 / 180.0) # conversion en radian
self.boss_v0 = 40.0
self.boss_x_max = self.skull.shoot(self.boss_v0, self.boss_alpha, self.boss_h)
self.boss_coord_x = ennemi.rect.x + 65
self.boss_x = ((self.boss_v0 * 0) * (math.sin(2 * self.boss_alpha))) / (9.81)
self.boss_attack = True
if ennemi.right:
self.boss_looking_at = 1
else:
self.boss_looking_at = -1
if self.boss_attack:
if self.boss_looking_at == -1:
if self.boss_x < self.boss_x_max:
self.boss_y = (-9.81 / (2 * (self.boss_v0 * self.boss_v0) * (math.cos(self.boss_alpha) * math.cos(self.boss_alpha))) * (
self.boss_x * self.boss_x)) + math.tan(
self.boss_alpha) * self.boss_x + self.boss_h
self.skull.rect.midbottom = (self.boss_coord_x, self.boss_h + (self.boss_h - self.boss_y))
self.boss_x = self.boss_x + 10
self.boss_coord_x = self.boss_coord_x - 10
else:
self.boss_attack = False
self.anim_ennemi_attack = 0
self.boss_x = ((self.boss_v0 * self.boss_v0) * (math.sin(2 * self.boss_alpha))) / (2 * 9.81)
self.boss_coord_x = 0
elif self.boss_looking_at == 1:
if self.boss_x < self.boss_x_max:
self.boss_y = (-9.81 / (2 * (self.boss_v0 * self.boss_v0) * (
math.cos(self.boss_alpha) * math.cos(self.boss_alpha))) * (
self.boss_x * self.boss_x)) + math.tan(
self.boss_alpha) * self.boss_x + self.boss_h
self.skull.rect.midbottom = (self.boss_coord_x, self.boss_h + (self.boss_h - self.boss_y))
self.boss_x = self.boss_x + 10
self.boss_coord_x = self.boss_coord_x + 10
else:
self.boss_x = ((self.boss_v0 * self.boss_v0) * (math.sin(2 * self.boss_alpha))) / (2 * 9.81)
self.boss_coord_x = 0
self.boss_attack = False
self.anim_ennemi_attack = 0
else:
# boss have skull
self.skull.rect.midbottom = (20000, 20000)
for ennemi in self.ennemis:
if ennemi.right:
self.boss_looking_at = 1
else:
self.boss_looking_at = -1
if self.boss_attack and self.boss_looking_at == -1 and ennemi.type_en == 2:
if self.anim_ennemi_attack < 7:
ennemi.image = ennemi.sprites_attack_right[int(self.anim_ennemi_attack)]
self.anim_ennemi_attack = self.anim_ennemi_attack + 0.3
if self.boss_attack and self.boss_looking_at == 1 and ennemi.type_en == 2:
if self.anim_ennemi_attack < 7:
ennemi.image = ennemi.sprites_attack_left[int(self.anim_ennemi_attack)]
self.anim_ennemi_attack = self.anim_ennemi_attack + 0.3
ennemi_hit_together = pygame.sprite.collide_rect(self.player, ennemi)
if ennemi_hit_together and ennemi.type_en == 2:
if ennemi.attack_anim_boss < 10 and self.boss_looking_at == -1:
ennemi.image = ennemi.sprites_boss_attack_right[int(ennemi.attack_anim_boss)]
ennemi.attack_anim_boss = ennemi.attack_anim_boss + 0.3
elif ennemi.attack_anim_boss < 10 and self.boss_looking_at == 1:
ennemi.image = ennemi.sprites_boss_attack_left[int(ennemi.attack_anim_boss)]
ennemi.attack_anim_boss = ennemi.attack_anim_boss + 0.3
else:
ennemi.attack_anim_boss = 0
for jar in self.jars:
if pygame.time.get_ticks() > self.last_jar_attack + 3000 :
self.last_jar_attack = pygame.time.get_ticks()
self.jar_h = float(jar.rect.y) + 50
self.jar_angle = random.randint(7.0,11.0) # en degres
self.jar_alpha = float(self.jar_angle * 3.14 / 180.0) #conversion en radian
self.jar_v0 = random.randint(200,280)
self.jar_x_max = self.skull.shoot(self.jar_v0, self.jar_alpha , self.jar_h)
self.jar_coord_x = jar.rect.x + 65
self.jar_x = ( (self.jar_v0*0) * ( math.sin (2*self.jar_alpha)) ) / (9.81)
self.jar_attack = True
if self.jar_attack:
for self.jar in self.jars:
if self.jar_x < self.jar_x_max :
self.jar_y = (-9.81 /( 2 * (self.jar_v0 * self.jar_v0) * (math.cos(self.jar_alpha) * math.cos(self.jar_alpha)) ) * (self.jar_x*self.jar_x)) + math.tan(self.jar_alpha) * self.jar_x + self.jar_h
self.fire.rect.midbottom = (self.jar_coord_x , self.jar_h + (self.jar_h - self.jar_y) )
self.jar_x = self.jar_x + 10
self.jar_coord_x = self.jar_coord_x + 3
else :
self.jar_x = ( (self.jar_v0*self.jar_v0) * ( math.sin (2*self.jar_alpha)) ) / (2*9.81)
self.jar_coord_x = 0
self.jar_attack = False
else :
#jar have skull
self.fire.rect.midbottom = (20000,20000)
    def events(self):
        # GAME ~events~
        # Handles window close, player death, jumping and ranged attacks.
        for event in pygame.event.get():
            #check if windows closed or game loose
            if event.type == pygame.QUIT :
                pygame.quit()
            if self.player.life <= 0 :
                if self.play:
                    self.play = False
                    self.running = False
            #check if the player jump
            if event.type == pygame.KEYDOWN :
                if event.key == pygame.K_SPACE:
                    self.player.jump()
            #shoot
            if event.type == pygame.KEYDOWN :
                # 'E' launches a projectile, with a 300 ms cooldown
                if event.key == pygame.K_e and pygame.time.get_ticks() > self.lastShootTimer + 300:
                    self.shoot = True
                    self.anim_player_attack = 0
                    self.h = float(self.player.pos.y)
                    # aim direction follows the sprite's facing
                    if self.player.right:
                        self.looking_at = 1
                    else:
                        self.looking_at = -1
                    angle = 7.0 # in degrees
                    self.alpha = float(angle * 3.14 / 180.0) # conversion to radians
                    self.v0 = 150.0
                    # ballistic range of the projectile for this angle/speed
                    self.x_max = self.weapon.shoot(self.v0, self.alpha , self.h)
                    self.coord_x = self.player.pos.x
                    self.x = ( (self.v0*self.v0) * ( math.sin (2*self.alpha)) ) / (2*9.81)
                    self.lastShootTimer = pygame.time.get_ticks()
                if event.key == pygame.K_r:
                    self.attack = True
def dynamic_difficulty(self):
if pass_difficulty()==1:
self.ennemy_speed=1
self.ennemy_range=300
self.ennemy_attack=25
self.player.speed_multiplyer = 1
self.life_multiplyer = 0.2
if pass_difficulty()==2:
self.ennemy_speed=2
self.ennemy_range=400
self.ennemy_attack=30
self.life_multiplyer = 0.4
self.player.speed_multiplyer = 1.2
if pass_difficulty()==3:
self.ennemy_speed=3
self.ennemy_range=450
self.ennemy_attack=4
self.life_multiplyer = 0.6
self.player.speed_multiplyer = 1.4
if pass_difficulty()==4:
self.ennemy_speed=4
self.ennemy_range=600
self.ennemy_attack=50
self.life_multiplyer = 0.8
self.player.speed_multiplyer = 1.6
    def new_wave(self):
        # Spawn the next wave: enemy count scales with the wave number,
        # and a boss (type 2) appears every third wave.
        self.dynamic_difficulty()
        spawn_ennemi = 3
        nbr_ennemi = ((spawn_ennemi * self.wave)//2)
        for nbr in range(nbr_ennemi):
            ennemi = ENNEMI(random.randrange(self.ennemy_speed-1,self.ennemy_speed+1), self.ennemy_range, self.ennemy_attack, self.platforms,self.life_multiplyer, 1 )
            self.ennemis.add(ennemi)
            self.all_sprites.add(ennemi)
            self.ennemy_list.append(ennemi)
        if self.wave %3 == 0 :
            # Boss: slower but longer range and double attack.
            # NOTE(review): ``// 1.5`` yields a float speed -- presumably
            # ENNEMI accepts that; confirm.
            ennemi = ENNEMI(self.ennemy_speed//1.5, self.ennemy_range*3, self.ennemy_attack*2, self.platforms,self.life_multiplyer, 2 )
            self.ennemis.add(ennemi)
            self.all_sprites.add(ennemi)
            self.ennemy_list.append(ennemi)
    def draw(self):
        #GAME ~draw~
        self.screen.blit(self.background,(0,0))
        self.all_sprites.draw(self.screen)
        # HUD: current wave (centre) and remaining life (left third)
        self.draw_text(str(self.wave), 40, BLACK, WIDTH / 2, 15)
        self.draw_text(str(self.player.life), 40, BLACK, WIDTH / 3, 15)
        #after drawing flip the display
        pygame.display.flip()
def translate_difficulty(self):
def easy():
return "Easy"
def normal():
return "Normal"
def hard():
return "Hard"
def nightmare():
return "Nightmare"
case = {1: easy,
2: normal,
3: hard,
4: nightmare,
}
return case.get(pass_difficulty())()
    def add_data_to_highscore(self, f):
        # Read the existing high-score lines and append a new
        # "wave score difficulty name" record for the finished run.
        # Also caches the entered name on ``self.username`` for the
        # temp-file variant below.
        with open(f, 'r') as file: # we open the files in "read" mode
            data = file.readlines()
        # NOTE(review): console input() blocks the pygame window while the
        # player types -- presumably intentional; confirm.
        self.username = input("Input your name : ")
        data.append(str(self.wave) + str(" ") + str(self.player.score) + str(" ") + str(self.translate_difficulty()) + str(" ") + str(self.username) + "\n")
        return data
    def add_data_to_highscore_temp(self, f):
        # Build a single-record list for the "last game" temp file.
        # Relies on self.username set by add_data_to_highscore();
        # the ``f`` argument is accepted but unused.
        data = []
        data.append(str(self.wave) + str(" ") + str(self.player.score) + str(" ") + str(self.translate_difficulty()) + str(" ") + str(self.username) + "\n")
        return data
    def sort_highscore_data(self, f, data):
        # Trim ``data`` (lines formatted "wave score difficulty name\n")
        # to the best records and return them sorted by wave, highest
        # first.  ``f`` is accepted but unused.
        l = len(data) - 1
        x = 0
        i = 0
        max_values = []
        index = 0
        # First pass: collect the leading wave number of every line.
        while x < l: # here i had to use a while as when the program deletes a line in data, the for wouldn't be updated and then the if would give an out of range error
            while data[x][i] != " ":
                i = i + 1
            max_values.append(data[x][:i] + "\n")
            i = 0
            x = x + 1
        if len(max_values) > 10:
            # Over capacity: find and drop the smallest record.
            # NOTE(review): the ``<`` here compares strings lexicographically
            # (so "9" > "10"), and ``del data[index]`` runs twice, removing
            # two lines -- both look suspicious; confirm the intended file
            # format before changing.
            min1 = max_values[0]
            for i in range(len(max_values)):
                if max_values[i] < min1:
                    min1 = max_values[i]
                    index = i
            del data[index]
            del data[index]
        l = len(data)
        x = 0
        i = 0
        max_values = []
        index = 0
        # Second pass: re-collect wave numbers from the trimmed data.
        while x < l: # here i had to use a while as when the program deletes a line in data, the for wouldn't be updated and then the if would give an out of range error
            while data[x][i] != " ":
                i = i + 1
            max_values.append(data[x][:i])
            i = 0
            x = x + 1
        new_data = []
        alrsorted = 0
        len_values = len(max_values)
        # Selection sort: repeatedly move the line with the largest
        # (numeric) wave value into new_data.
        while alrsorted < len_values:
            max = 0
            for i in range(len(max_values)):
                if int(max_values[i]) > int(max):
                    max = max_values[i]
                    index = i
            new_data.append(data[index])
            del max_values[index]
            del data[index]
            alrsorted = alrsorted + 1
        return new_data
def add_highscore(self, f, highscore):
file = open(f, "w")
file.writelines(highscore)
    def show_start_screen(self):
        # GAME ~menu screen~
        # Play the menu music, run the menu, then apply the user's volume
        # choice to every track and cross-fade into the game music.
        self.menu_music.play(15,0,2500)
        menu(self)
        self.menu_music.set_volume(Menu.set_music())
        self.game_music.set_volume(Menu.set_music())
        self.gameover_music.set_volume(Menu.set_music())
        self.menu_music.fadeout(2500)
        self.game_music.play(15,0,2500)
    def show_game_over_screen(self):
        # GAME ~game over~
        # Persist this run into both high-score files, then swap to the
        # game-over music while the game-over menu is shown.
        self.add_highscore("highscore.txt",self.sort_highscore_data("highscore.txt", self.add_data_to_highscore("highscore.txt")))
        self.add_highscore(("highscore_temp.txt"),self.add_data_to_highscore_temp("highscore_temp.txt"))
        self.gameover_music.play(15,0,2500)
        self.game_music.fadeout(2500)
        game_over(self)
        self.gameover_music.fadeout(2500)
def draw_text(self, text, size, color, x, y):
font = pygame.font.SysFont('Arial', size)
text_surface = font.render(text, True, color)
text_rect = text_surface.get_rect()
text_rect.midtop = (x, y)
self.screen.blit(text_surface, text_rect)
movie = VideoFileClip("intro_v.2.3.mpg")
pygame.display.set_caption(TITLE)
screen = pygame.display.set_mode((WIDTH,HEIGHT))
smallfont = pygame.font.SysFont('Corbel', 35)
text_loading = smallfont.render('Now loading...', True, WHITE)
screen.blit(text_loading,(WIDTH/2,HEIGHT/2))
movie.preview()
def launch_game():
    # Create the game, show the menu once, then alternate game and
    # game-over screens until GAME.events clears ``running`` (player death
    # / quit).
    g = GAME()
    g.show_start_screen()
    while g.running:
        g.new_game()
        g.show_game_over_screen()
launch_game()
launch_game() | StarcoderdataPython |
3270093 | <reponame>l3l3l/vulnman
from vulnman.settings import BASE_DIR
# Application definition
INSTALLED_APPS = [
# keep before django.contrib.admin
'dal',
'dal_select2',
'queryset_sequence',
'dal_queryset_sequence',
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'widget_tweaks',
'django_tex',
'extra_views',
'rest_framework',
'dry_rest_permissions',
'crispy_forms',
"crispy_bootstrap5",
"guardian",
"taggit",
"split_settings",
# apps
'apps.api.apps.ApiConfig',
'apps.account.apps.AccountConfig',
'apps.external_tools.apps.ExternalToolsConfig',
'apps.reporting.apps.ReportingConfig',
'apps.projects.apps.ProjectsConfig',
'apps.dashboard.apps.DashboardConfig',
'apps.networking.apps.NetworkingConfig',
'apps.methodologies.apps.MethodologiesConfig',
'apps.social.apps.SocialConfig',
'apps.findings.apps.FindingsConfig',
'apps.agents.apps.AgentsConfig',
'apps.commands.apps.CommandsConfig',
'apps.tagging.apps.TaggingConfig'
]
| StarcoderdataPython |
3363660 | <filename>hive/__init__.py
# -*- coding: utf-8 -*-
from .hive import Hive  # re-export the package's main entry point
__version__ = '0.10'  # package version string
| StarcoderdataPython |
111671 | <gh_stars>1-10
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
from ibeis import constants as const
from ibeis.control.accessor_decors import (adder, getter_1to1, ider)
import utool as ut
from ibeis.control.controller_inject import make_ibs_register_decorator
# utool module-injection boilerplate (profiled print / reload helpers) — standard across ibeis modules
print, rrr, profile = ut.inject2(__name__)
# decorator that registers the functions below as methods of the IBEIS controller
CLASS_INJECT_KEY, register_ibs_method = make_ibs_register_decorator(__name__)
@register_ibs_method
@ider
def _get_all_known_lblannot_rowids(ibs, _lbltype):
    """Return all lblannot rowids whose label type matches ``_lbltype``.

    Returns:
        list_ (list): all nids of known animals
        (does not include unknown names)
    """
    # single SQL filter on the lbltype_rowid column of the LBLANNOT table
    all_known_lblannot_rowids = ibs.db.get_all_rowids_where(const.LBLANNOT_TABLE, 'lbltype_rowid=?', (ibs.lbltype_ids[_lbltype],))
    return all_known_lblannot_rowids
@register_ibs_method
@adder
def add_lbltype(ibs, text_list, default_list):
    """Add label types together with their default values.

    Should only be called at the beginning of the program.
    """
    rows = zip(text_list, default_list)
    columns = ('lbltype_text', 'lbltype_default',)
    superkey_lookup = ibs.get_lbltype_rowid_from_text
    return ibs.db.add_cleanly(const.LBLTYPE_TABLE, columns, rows, superkey_lookup)
#
# GETTERS::LBLTYPE
@register_ibs_method
@getter_1to1
def get_lbltype_rowid_from_text(ibs, text_list):
    """Look up lbltype rowids by their text labels.

    Returns:
        lbltype_rowid (list): lbltype_rowid where the lbltype_text is given
    """
    # FIXME: MAKE SQL-METHOD FOR NON-ROWID GETTERS
    # FIXME: Use unique SUPERKEYS instead of specifying id_colname
    return ibs.db.get(const.LBLTYPE_TABLE, ('lbltype_rowid',), text_list, id_colname='lbltype_text')
@register_ibs_method
@getter_1to1
def get_lbltype_default(ibs, lbltype_rowid_list):
    """Return the stored default value for each given lbltype rowid."""
    return ibs.db.get(const.LBLTYPE_TABLE, ('lbltype_default',), lbltype_rowid_list)
@register_ibs_method
@getter_1to1
def get_lbltype_text(ibs, lbltype_rowid_list):
    """Return the text label for each given lbltype rowid."""
    return ibs.db.get(const.LBLTYPE_TABLE, ('lbltype_text',), lbltype_rowid_list)
if __name__ == '__main__':
    # NOTE: the string literal below is a no-op used purely as usage documentation
    """
    CommandLine:
        python -m ibeis.control.manual_lbltype_funcs
        python -m ibeis.control.manual_lbltype_funcs --allexamples
        python -m ibeis.control.manual_lbltype_funcs --allexamples --noface --nosrc
    """
    import multiprocessing
    multiprocessing.freeze_support()  # for win32
    import utool as ut  # NOQA
    ut.doctest_funcs()  # run this module's doctests via utool
| StarcoderdataPython |
90330 | import sys
import time
import pygame
from gen_new_updated_file import gen_file, format_prog
# --- interpreter state shared (via `global`) with the handler classes below ---
registers = [0] * 128        # the VM's register file
main_reg = 0                 # value of the currently-selected register
where_in_regs = 0            # index of the currently-selected register
should_replace = False       # set by rpnx: next instruction's operand is replaced
replace_with = None          # the replacement operand list produced by rpnx
# SECURITY: eval() on an external file executes arbitrary code — acceptable only
# because data.py is a trusted, locally-maintained opcode table
with open('data.py', 'r') as f:
    commands_dict = eval(f.read())
pygame.init()
screen = pygame.display.set_mode((512, 512))
# the user program: one instruction per line, split into [opcode, operand, ...]
with open(sys.argv[1], 'r') as f:
    txt = f.read()
exe = list(map(str.split, txt.splitlines()))
class UserProgramFailure(Exception):
    """Raised when the simulated user program aborts with an error."""
class Pygame_handler:
    '''Screen-drawing opcodes of the VM. WARNING: only make one instance'''
    def __init__(self):
        # screen-coordinate registers set by sxlc/sylc; -1 means "not set yet"
        self.sxscval = -1
        self.syscval = -1
    def sxlc(self, value):
        # set the X screen coordinate from a binary-string operand
        self.sxscval = int(value, 2)
    def sylc(self, value):
        # set the Y screen coordinate from a binary-string operand
        self.syscval = int(value, 2)
    def scol(self, color):
        # decode a 6-bit binary colour operand as three 2-bit channels
        rgb = str(color)
        # NOTE(review): each 2-bit channel is 0..3, so *127 yields up to 381,
        # outside the 0..255 RGB range — confirm the intended scaling factor
        self.r = int(rgb[:2], 2) * 127
        self.g = int(rgb[2:4], 2) * 127
        self.b = int(rgb[4:], 2) * 127
    def draw(self):
        # NOTE(review): pygame.gfxdraw.pixel expects (surface, x, y, color) and
        # `pygame.gfxdraw` must be imported explicitly; this call passes the colour
        # tuple where x belongs and a rect-style list where the colour belongs — verify
        pygame.gfxdraw.pixel(screen, (self.r, self.g, self.b), [self.sxscval*2, self.syscval*2, 2, 2])
        time.sleep(1)
        pygame.display.flip()
class basic_handler(Pygame_handler):
    """Core opcodes of the VM. Operands arrive as binary strings; state lives in
    the module-level globals (registers, main_reg, where_in_regs, exe, ...)."""
    def jump(self, loc):
        # unconditional jump to the given (binary) line number
        global loop_num
        #print(f'jumping to line {loc}')
        loop_num = int(loc, 2) - 1 # minus one is because of the plus one at the bottom of the while loop
    def jpfz(self, loc):
        # jump if the main register is zero
        if main_reg == 0:
            self.jump(loc)
    def jfnz(self, loc):
        # jump if the main register is non-zero
        if main_reg != 0:
            self.jump(loc)
    def semr(self, input):
        # set main register to an immediate (binary) value
        global main_reg
        main_reg = int(input, 2)
        registers[where_in_regs] = main_reg
    def rpnx(self, reg):
        # replace the NEXT instruction's operand with the value of register `reg`
        global should_replace
        global replace_with
        should_replace = True
        shortened_value = bin(registers[int(reg, 2)])[2:]
        # NOTE(review): for negative values bin() gives '-0b...', so [2:] starts with
        # 'b' and '-' + shortened_value[1] keeps only ONE digit — was `[1:]` intended?
        replace_with = [shortened_value if not shortened_value.startswith('b') else '-' + shortened_value[1]]
        #print(replace_with)
    def log(self, input): # Doesn't count; only for testing.
        print('LOG:', input)
    def add(self, input):
        # add an immediate (binary) value to the main register
        global main_reg
        main_reg += int(input, 2)
        registers[where_in_regs] = main_reg
    def sub(self, input):
        # subtract an immediate (binary) value from the main register
        global main_reg
        main_reg -= int(input, 2)
        registers[where_in_regs] = main_reg
    def dump(self): # for testing
        print(self.dumpr())
    def dumpr(self): # DO NOT USE IN SIM
        # human-readable snapshot of the whole interpreter state
        return f'''On line {loop_num}. The program is {len(exe)} lines long.
The registers contain {registers},\nof which register {where_in_regs} is the main one.
it contains {main_reg}.'''
    def swir(self, reg):
        # switch the active register: save main_reg, then load the new register
        reg = int(reg, 2)
        global main_reg
        global where_in_regs
        registers[where_in_regs] = main_reg
        main_reg = registers[reg]
        where_in_regs = reg
    def kill(self): # Doesn't count; only for testing.
        time.sleep(10)
        sys.exit()
    def abs(self): # can use boolean and instead in other implamentations
        global main_reg
        main_reg = abs(main_reg)
        registers[where_in_regs] = main_reg
    def band(self, value):
        # bitwise AND of the main register with an immediate (binary) value
        global main_reg
        main_reg = main_reg & int(value, 2)
        registers[where_in_regs] = main_reg
    def rplz(self):
        # clamp the main register to zero if it is negative (ReLU-style)
        global main_reg
        main_reg = 0 if main_reg < 0 else main_reg
        registers[where_in_regs] = main_reg
    def rein(self, instruction):
        # self-modifying code: rewrite the opcode at line `main_reg`
        exe[main_reg][0] = commands_dict[instruction.zfill(4)]
    def cont(self):
        # no-op (continue)
        pass
# --- fetch/decode/execute loop: dispatch each line's opcode to a handler method ---
get_all = basic_handler()
loop_num = 0
while True:
    try:
        if exe[loop_num]:
            # take at most [opcode, operand]; extra tokens on the line are ignored
            program, *value = exe[loop_num][:2]
            if not program.startswith('#'):
                # rpnx from the previous instruction substitutes this operand
                if should_replace:
                    value = replace_with
                    should_replace = False
                if value and value[0].startswith('#'): # This shorts out if value is empty
                    value = []
                # dynamic dispatch: opcode name == handler method name
                getattr(get_all, program)(*value)
                print('here')  # NOTE(review): debug leftover — remove before release?
        loop_num += 1
        if loop_num == len(exe):
            break
    except TypeError as e:
        # NOTE(review): this isinstance-style check is always True inside
        # `except TypeError`; subclasses of TypeError would be silently dropped
        if type(e) == type(TypeError()):
            raise UserProgramFailure(f'ERROR: Too few arguments\n\ndebug status:\n{"-"*10}\n{get_all.dumpr()}\n{"-"*10}')
    except Exception:
        # NOTE(review): this masks the original traceback — consider `raise ... from exc`
        raise UserProgramFailure('unknown error')
get_all.dump()
input("prompt to end program...")
gen_file(sys.argv[1], format_prog(exe))
| StarcoderdataPython |
4809265 | <reponame>Dmunch04/SquareIt
import json
from Objects import Notification as Notif
from Modding import Mod as ModObject
class Loader:
    """Loads mod definition files (JSON) and applies their data to the game."""

    def __init__ (self):
        # names of currently loaded mods and the raw data harvested from them
        self.Mods = []
        self.Data = {}

    def Load (self, _Mod):
        """ Prepare the mod: parse its JSON file, register its data and return a Mod object """
        with open (_Mod, 'r') as ModFile:
            ModData = json.loads (ModFile.read ())

        if not ModData['Name'] in self.Mods:
            self.Mods.append (ModData['Name'])

        Name = ModData.get ('Name', 'Untitled')
        Version = ModData.get ('Version', '0.0.1')
        Author = ModData.get ('Author', 'Anonymous')
        Description = ModData.get ('Description', 'A Mod')
        Data = ModData.get ('Data', {})

        if Data:
            self.Data[Name] = Data

        Mod = ModObject (
            Name,
            Version,
            Author,
            Description,
            Data
        )

        return Mod

    def Unload (self, _Mod):
        """ Remove the mod and clear its data """
        if _Mod in self.Mods:
            self.Mods.remove (_Mod)
            # Clear the data only for mods that were actually loaded; previously a
            # spurious empty entry was created for any name passed in.
            self.Data[_Mod] = {}

    def SetData (self, _Object):
        """ Apply the loaded mods' data to the running game object """
        NotificationsToAdd = {}

        for Mod in self.Data:
            Data = self.Data[Mod]

            if Data:
                # window appearance overrides
                WindowSettings = Data.get ('Window', {})
                if WindowSettings:
                    Color = WindowSettings.get ('Color', _Object.Window.BackgroundColor)
                    _Object.Window.BackgroundColor = Color

                # gameplay content: extra enemies, bombs and notifications
                GameSettings = Data.get ('Game', {})
                if GameSettings:
                    Enemies = GameSettings.get ('Enemies', [])
                    if Enemies:
                        for Enemy in Enemies:
                            X = Enemy.get ('X', 0)
                            Y = Enemy.get ('Y', 0)
                            Width = Enemy.get ('Width', 50)
                            Height = Enemy.get ('Height', 50)

                            _Object.AddEnemy (
                                X,
                                Y,
                                Width,
                                Height
                            )

                    Bombs = GameSettings.get ('Bombs', [])
                    if Bombs:
                        for Bomb in Bombs:
                            X = Bomb.get ('X', 0)
                            Y = Bomb.get ('Y', 0)
                            Width = Bomb.get ('Width', 50)
                            Height = Bomb.get ('Height', 50)

                            _Object.AddBomb (
                                X,
                                Y,
                                Width,
                                Height
                            )

                    Notifications = GameSettings.get ('Notifications', [])
                    if Notifications:
                        # queued up and emitted after the "mod loaded" banner below
                        for NotificationItem in Notifications:
                            Text = NotificationItem.get ('Text', 'None')
                            Duration = NotificationItem.get ('Duration', 1)

                            NotificationsToAdd[str (len (NotificationsToAdd) + 1)] = Notif (Text, Duration)

                # player overrides (position, size, speed, colour)
                PlayerSettings = Data.get ('Player', {})
                if PlayerSettings:
                    X = PlayerSettings.get ('X', _Object.Player.X)
                    Y = PlayerSettings.get ('Y', _Object.Player.Y)
                    Width = PlayerSettings.get ('Width', _Object.Player.Width)
                    Height = PlayerSettings.get ('Height', _Object.Player.Height)
                    Speed = PlayerSettings.get ('Speed', _Object.Player.Speed)
                    Color = PlayerSettings.get ('Color', _Object.Player.Color)

                    _Object.Player.X = int (X)
                    _Object.Player.Y = int (Y)
                    _Object.Player.OriginalX = int (X)
                    _Object.Player.OriginalY = int (Y)
                    _Object.Player.Width = int (Width)
                    _Object.Player.Height = int (Height)
                    _Object.Player.Speed = int (Speed)
                    _Object.Player.Color = Color

                # replace known UI message strings
                MessagesSettings = Data.get ('Messages', {})
                if MessagesSettings:
                    for Message in MessagesSettings:
                        if Message in _Object.Messages:
                            _Object.Messages[Message] = MessagesSettings[Message]

                # a mod's data is applied exactly once, then the mod is unloaded
                self.Unload (Mod)

                _Object.AddNotification (
                    # Text
                    f'{Mod} has been loaded!',
                    # Duration (Frames)
                    500
                )

        for Notification in NotificationsToAdd:
            Notification = NotificationsToAdd[Notification]

            _Object.AddNotification (
                # Text
                str (Notification.Text),
                # Duration (Frames)
                int (Notification.Duration)
            )
| StarcoderdataPython |
1728262 | from odd_models.models import DataEntity, DataSet, DataEntityType
from oddrn_generator import SnowflakeGenerator
from . import (
MetadataNamedtuple,
_data_set_metadata_schema_url,
_data_set_metadata_excluded_keys,
ColumnMetadataNamedtuple,
)
from .columns import map_column
from .metadata import append_metadata_extension
from .types import TABLE_TYPES_SQL_TO_ODD
from .utils import transform_datetime
from .views import extract_transformer_data
def map_table(
    oddrn_generator: SnowflakeGenerator, tables: list[tuple], columns: list[tuple]
) -> list[DataEntity]:
    """Map raw Snowflake table/column rows to ODD DataEntity objects.

    NOTE(review): the single `column_index` cursor assumes `columns` is sorted in
    the same (schema, table) order as `tables` — confirm the upstream query.
    """
    data_entities: list[DataEntity] = []
    column_index: int = 0
    for table in tables:
        metadata: MetadataNamedtuple = MetadataNamedtuple(*table)
        data_entity_type = TABLE_TYPES_SQL_TO_ODD.get(
            metadata.table_type, DataEntityType.UNKNOWN
        )
        # views and tables live under different ODDRN path segments
        oddrn_path = "views" if data_entity_type == DataEntityType.VIEW else "tables"
        oddrn_generator.set_oddrn_paths(
            **{"schemas": metadata.table_schema, oddrn_path: metadata.table_name}
        )
        # DataEntity
        data_entity: DataEntity = DataEntity(
            oddrn=oddrn_generator.get_oddrn_by_path(oddrn_path),
            name=metadata.table_name,
            type=data_entity_type,
            owner=metadata.table_owner,
            description=metadata.comment,
            metadata=[],
            updated_at=transform_datetime(metadata.last_altered),
            created_at=transform_datetime(metadata.created),
        )
        data_entities.append(data_entity)
        append_metadata_extension(
            data_entity.metadata,
            _data_set_metadata_schema_url,
            metadata,
            _data_set_metadata_excluded_keys,
        )
        data_entity.dataset = DataSet(
            rows_number=int(metadata.row_count)
            if metadata.row_count is not None
            else None,
            field_list=[],
        )
        # DataTransformer: views additionally expose their defining SQL lineage
        if data_entity.type == DataEntityType.VIEW:
            data_entity.data_transformer = extract_transformer_data(
                metadata.view_definition, oddrn_generator
            )
        # DatasetField: consume consecutive column rows belonging to this table
        while column_index < len(columns):
            column: tuple = columns[column_index]
            column_metadata: ColumnMetadataNamedtuple = ColumnMetadataNamedtuple(
                *column
            )
            if (
                column_metadata.table_schema == metadata.table_schema
                and column_metadata.table_name == metadata.table_name
            ):
                data_entity.dataset.field_list.append(
                    map_column(
                        column_metadata, oddrn_generator, data_entity.owner, oddrn_path
                    )
                )
                column_index += 1
            else:
                # first column of the next table — stop and let the outer loop advance
                break
    return data_entities
| StarcoderdataPython |
3349298 | # BSD Licence
# Copyright (c) 2009, Science & Technology Facilities Council (STFC)
# All rights reserved.
#
# See the LICENSE file in the source distribution of this software for
# the full license text.
# Copyright (C) 2007 STFC & NERC (Science and Technology Facilities Council).
# This software may be distributed under the terms of the
# Q Public License, version 1.0 or later.
# http://ndg.nerc.ac.uk/public_docs/QPublic_license.txt
"""
Classes modelling the OWS Data Identification package v1.1.0.
:author: <NAME>
"""
from cows.model.iso19115_subset import Keywords
class Description(object):
    """
    :ivar titles:
    :type titles: iterable of str or LanguageString
    :ivar abstracts:
    :type abstracts: iterable of str or LanguageString
    :ivar keywords:
    :type keywords: iterable or Keywords
    """
    def __init__(self, titles=None, abstracts=None, keywords=None):
        # The previous defaults ([], [], Keywords()) were mutable objects shared
        # between every instance; build fresh per-instance defaults instead.
        self.titles = [] if titles is None else titles
        self.abstracts = [] if abstracts is None else abstracts
        self.keywords = Keywords() if keywords is None else keywords
class BasicIdentification(Description):
    """
    :ivar identifier:
    :type identifier: None or Code
    :ivar metadata:
    :type metadata: iterable of Metadata
    """
    def __init__(self, identifier=None, metadata=None, **kwargs):
        super(BasicIdentification, self).__init__(**kwargs)
        self.identifier = identifier
        # avoid the shared mutable default list (was `metadata=[]`)
        self.metadata = [] if metadata is None else metadata
class Identification(BasicIdentification):
    """
    :ivar outputFormats:
    :type outputFormats: iterable of str
    :ivar availableCRSs: URIs of available coordinate reference systems
    :type availableCRSs: iterable of str
    :ivar boundingBoxes:
    :type boundingBoxes: iterable of BoundingBox
    """
    def __init__(self, outputFormats=None,
                 availableCRSs=None, boundingBoxes=None, **kwargs):
        super(Identification, self).__init__(**kwargs)
        # fresh lists per instance; the previous `[]` defaults were shared
        self.outputFormats = [] if outputFormats is None else outputFormats
        self.availableCRSs = [] if availableCRSs is None else availableCRSs
        self.boundingBoxes = [] if boundingBoxes is None else boundingBoxes
| StarcoderdataPython |
1601618 | import torch
from .torchpoints import ball_query_partial_dense
import numpy as np
import numba
from typing import List
@numba.jit(nopython=True)
def _grow_proximity_core(neighbours, min_cluster_size):
    """Flood-fill connected components over a fixed-width neighbour matrix.

    ``neighbours`` is a (num_points, nsample) integer array; a value of -1
    terminates a row's neighbour list early (padding). Written in a
    numba-nopython-friendly style — keep plain lists and explicit loops.
    """
    num_points = int(neighbours.shape[0])
    visited = np.zeros((num_points,), dtype=numba.types.bool_)
    clusters = []
    for i in range(num_points):
        if visited[i]:
            continue
        cluster = []
        queue = []
        visited[i] = True
        queue.append(i)
        cluster.append(i)
        while len(queue):
            # pops from the END, so traversal is depth-first despite the name
            k = queue.pop()
            k_neighbours = neighbours[k]
            for nei in k_neighbours:
                if nei.item() == -1:
                    # -1 pads rows with fewer than nsample neighbours
                    break
                if not visited[nei]:
                    visited[nei] = True
                    queue.append(nei.item())
                    cluster.append(nei.item())
        # only keep components large enough to count as a cluster
        if len(cluster) >= min_cluster_size:
            clusters.append(cluster)
    return clusters
def grow_proximity(pos, batch, nsample=16, radius=0.02, min_cluster_size=32):
    """Cluster points based on spatial proximity only.

    The neighbour search is done on the tensor's device while the cluster
    assignment is done on cpu (numba).
    """
    assert pos.shape[0] == batch.shape[0]
    # radius search, then move the (N, nsample) neighbour-index matrix to host memory
    neighbours = ball_query_partial_dense(radius, nsample, pos, pos, batch, batch)[0].cpu().numpy()
    return _grow_proximity_core(neighbours, min_cluster_size)
def region_grow(
    pos, labels, batch, ignore_labels=[], nsample=16, radius=0.02, min_cluster_size=32
) -> List[torch.Tensor]:
    """Region growing clustering algorithm proposed in
    PointGroup: Dual-Set Point Grouping for 3D Instance Segmentation
    https://arxiv.org/pdf/2004.01658.pdf
    for instance segmentation

    Parameters
    ----------
    pos: torch.Tensor [N, 3]
        Location of the points
    labels: torch.Tensor [N,]
        labels of each point
    ignore_labels:
        Labels that should be ignored, no region growing will be performed on those
    nsample:
        maximum number of neighbours to consider
    radius:
        radius for the neighbour search
    min_cluster_size:
        Number of points above which a cluster is considered valid
    """
    assert labels.dim() == 1
    assert pos.dim() == 2
    assert pos.shape[0] == labels.shape[0]
    unique_labels = torch.unique(labels)
    clusters = []
    # index vector used to map per-label local indices back to global ones
    ind = torch.arange(0, pos.shape[0])
    for l in unique_labels:
        if l in ignore_labels:
            continue
        # Build clusters for a given label (ignore other points)
        label_mask = labels == l
        local_ind = ind[label_mask]
        # Remap batch to a continuous sequence
        label_batch = batch[label_mask]
        unique_in_batch = torch.unique(label_batch)
        remaped_batch = torch.empty_like(label_batch)
        for new, old in enumerate(unique_in_batch):
            mask = label_batch == old
            remaped_batch[mask] = new
        # Cluster
        label_clusters = grow_proximity(
            pos[label_mask, :],
            remaped_batch,
            nsample=nsample,
            radius=radius,
            min_cluster_size=min_cluster_size,
        )
        # Remap indices to original coordinates
        if len(label_clusters):
            for cluster in label_clusters:
                # cluster is a python list of local indices; rebind it as a tensor
                cluster = torch.tensor(cluster).to(pos.device)
                clusters.append(local_ind[cluster])
    return clusters
| StarcoderdataPython |
3362128 | <reponame>VisionSystemsTech/projects-time
# -*- coding: utf-8 -*-
import yaml
from copy import deepcopy
from pathlib import Path
from easydict import EasyDict
class SingletonMeta(type):
    """Metaclass that caches exactly one instance per class (singleton pattern)."""

    _instances = {}

    def __call__(cls, *args, **kwargs):
        try:
            return cls._instances[cls]
        except KeyError:
            # first construction: build, cache, and hand out the instance
            obj = super().__call__(*args, **kwargs)
            cls._instances[cls] = obj
            return obj
class SingleConfig(metaclass=SingletonMeta):
    """Singleton configuration loaded from a default YAML file, optionally
    overridden by a user YAML file of the same structure."""

    def __init__(self, user_source='config.yaml', default_source='config-default.yaml'):
        # names of top-level config sections currently set as attributes
        self._keys = list()
        self._user_source = user_source
        self._default_source = default_source
        self.reset()

    def __getattr__(self, name):
        # NOTE(review): __getattr__ only runs when normal lookup already failed,
        # so the __dict__ branch is effectively dead and this always raises
        if name in self.__dict__:
            return self.__dict__[name]
        else:
            raise AttributeError(name)

    def __getitem__(self, name):
        # dict-style access delegates to attribute access
        return getattr(self, name)

    def get_part(self, subconfig):
        """Return a copy of one config section merged with the 'general' section."""
        partial_config = {} if self[subconfig] is None else deepcopy(self[subconfig])
        partial_config.update(self['general'])
        return partial_config

    def reset(self):
        """Drop all loaded sections and re-read the config files from disk."""
        for key in self._keys:
            delattr(self, key)
        self._keys = list()
        d = EasyDict(self._load_config())
        for k, v in d.items():
            setattr(self, k, v)
            self._keys.append(k)

    def _load_config(self):
        # default config first, then overlay the user config if present
        config = self._read_config(self._default_source)
        user_config = dict()
        if Path(self._user_source).exists():
            user_config = self._read_config(self._user_source)
        config_issues = list()
        is_valid = self._update_config(config, user_config, str_to_print=self._default_source, to_print=config_issues)
        if config_issues:
            print(f'Problem(s) with configs:\n{"".join(config_issues)}\n'
                  f'Check and correct your {self._make_bold(self._default_source)} '
                  f'and {self._make_bold(self._user_source)}!')
            if not is_valid:
                exit(0)
        # normalize directories to absolute paths before resolving sub-paths
        working_dir = Path(config['general']['working_dir']).absolute()
        resources_dir = Path(config['general']['resources_dir']).absolute()
        config['general']['working_dir'] = str(working_dir)
        self._set_absolute_paths(config, working_dir, resources_dir)
        return config

    @staticmethod
    def _read_config(source):
        if isinstance(source, str):
            with open(source, 'r') as stream:
                config = yaml.safe_load(stream)
                if config is None:
                    print(f'{source} is empty. Fill it, please.')
                    exit()
        else:
            raise TypeError('Unexpected source to load config')
        return config

    @staticmethod
    def _update_config(default_cfg, user_cfg, str_to_print, to_print):
        # recursively merge user values into the defaults, recording any
        # structural mismatches (wrong types / unknown keys) in `to_print`
        result = True
        if type(default_cfg) != type(user_cfg):
            to_print.append(f'{str_to_print} have different types: {type(default_cfg)} and {type(user_cfg)}\n')
            result = False
        elif isinstance(user_cfg, dict):
            for key, value in user_cfg.items():
                if key not in default_cfg:
                    to_print.append(f'No key in {str_to_print} '
                                    f'{SingleConfig._get_delimiter_key()} '
                                    f'{SingleConfig._make_bold(key)}\n')
                    result = False
                elif isinstance(value, dict):
                    new_str = f'{str_to_print} {SingleConfig._get_delimiter_key()} {key}'
                    cfg1, cfg2 = default_cfg[key], value
                    result = SingleConfig._update_config(cfg1, cfg2, new_str, to_print) and result
                else:
                    default_cfg[key] = value
        return result

    @staticmethod
    def _set_absolute_paths(d, working_dir, resources_dir):
        # rewrite path-like values in place: keys containing 'path' are rooted at
        # working_dir, 'location'/'resources' entries at resources_dir
        for key, value in d.items():
            if isinstance(d[key], dict):
                if key.strip() == 'resources':
                    for sub_key, sub_value in value.items():
                        value[sub_key] = str(resources_dir.joinpath(sub_value))
                else:
                    SingleConfig._set_absolute_paths(d[key], working_dir, resources_dir)
            else:
                if value is not None:
                    if 'path' in key:
                        d[key] = working_dir.joinpath(value)
                    elif 'location' in key:
                        d[key] = resources_dir.joinpath(value)

    @staticmethod
    def _make_bold(s):
        # ANSI escape codes for emphasised terminal output
        bold = '\033[1m'
        end_bold = '\033[0m'
        return bold + s + end_bold

    @staticmethod
    def _get_delimiter_key():
        return '->'
| StarcoderdataPython |
3368123 | <reponame>mahmuuud/pip-accel
# Extension of pip-accel that deals with dependencies on system packages.
#
# Author: <NAME> <<EMAIL>>
# Last Change: October 31, 2015
# URL: https://github.com/paylogic/pip-accel
"""
System package dependency handling.
The :mod:`pip_accel.deps` module is an extension of pip-accel that deals
with dependencies on system packages. Currently only Debian Linux and
derivative Linux distributions are supported by this extension but it should be
fairly easy to add support for other platforms.
The interface between pip-accel and :class:`SystemPackageManager` focuses on
:func:`~SystemPackageManager.install_dependencies()` (the other methods are
used internally).
"""
# Standard library modules.
import logging
import os
import shlex
import subprocess
# Modules included in our package.
from pip_accel.compat import WINDOWS, configparser
from pip_accel.exceptions import DependencyInstallationFailed, DependencyInstallationRefused, SystemDependencyError
from pip_accel.utils import is_root
# External dependencies.
from humanfriendly import Timer, concatenate, format, pluralize
from humanfriendly.prompts import prompt_for_confirmation
# Initialize a logger for this module.
logger = logging.getLogger(__name__)
class SystemPackageManager(object):
    """Interface to the system's package manager."""

    def __init__(self, config):
        """
        Initialize the system package dependency manager.

        :param config: The pip-accel configuration (a :class:`.Config`
                       object).
        """
        # Defaults for unsupported systems: `true` is a no-op shell command,
        # so list/install calls succeed without doing anything.
        self.list_command = 'true'
        self.install_command = 'true'
        self.dependencies = {}
        # Keep a reference to the pip-accel configuration.
        self.config = config
        # Initialize the platform specific package manager interface by probing
        # the bundled *.ini files until one reports itself as supported.
        directory = os.path.dirname(os.path.abspath(__file__))
        for filename in sorted(os.listdir(directory)):
            pathname = os.path.join(directory, filename)
            if filename.endswith('.ini') and os.path.isfile(pathname):
                logger.debug("Loading configuration from %s ..", pathname)
                parser = configparser.RawConfigParser()
                parser.read(pathname)
                # Check if the package manager is supported.
                supported_command = parser.get('commands', 'supported')
                logger.debug("Checking if configuration is supported: %s", supported_command)
                with open(os.devnull, 'wb') as null_device:
                    # exit status 0 of the probe command means "this platform"
                    if subprocess.call(supported_command, shell=True,
                                       stdout=null_device,
                                       stderr=subprocess.STDOUT) == 0:
                        logger.debug("System package manager configuration is supported!")
                        # Get the commands to list and install system packages.
                        self.list_command = parser.get('commands', 'list')
                        self.install_command = parser.get('commands', 'install')
                        # Get the known dependencies (Python package name -> system packages).
                        self.dependencies = dict((n.lower(), v.split()) for n, v
                                                 in parser.items('dependencies'))
                        logger.debug("Loaded dependencies of %s: %s",
                                     pluralize(len(self.dependencies), "Python package"),
                                     concatenate(sorted(self.dependencies)))
                    else:
                        logger.debug("Command failed, assuming configuration doesn't apply ..")

    def install_dependencies(self, requirement):
        """
        Install missing dependencies for the given requirement.

        :param requirement: A :class:`.Requirement` object.
        :returns: :data:`True` when missing system packages were installed,
                  :data:`False` otherwise.
        :raises: :exc:`.DependencyInstallationRefused` when automatic
                 installation is disabled or refused by the operator.
        :raises: :exc:`.DependencyInstallationFailed` when the installation
                 of missing system packages fails.

        If `pip-accel` fails to build a binary distribution, it will call this
        method as a last chance to install missing dependencies. If this
        function does not raise an exception, `pip-accel` will retry the build
        once.
        """
        install_timer = Timer()
        missing_dependencies = self.find_missing_dependencies(requirement)
        if missing_dependencies:
            # Compose the command line for the install command.
            install_command = shlex.split(self.install_command) + missing_dependencies
            # Prepend `sudo' to the command line?
            if not WINDOWS and not is_root():
                # FIXME Ideally this should properly detect the presence of `sudo'.
                # Or maybe this should just be embedded in the *.ini files?
                install_command.insert(0, 'sudo')
            # Always suggest the installation command to the operator.
            logger.info("You seem to be missing %s: %s",
                        pluralize(len(missing_dependencies), "dependency", "dependencies"),
                        concatenate(missing_dependencies))
            logger.info("You can install %s with this command: %s",
                        "it" if len(missing_dependencies) == 1 else "them", " ".join(install_command))
            if self.config.auto_install is False:
                # Refuse automatic installation and don't prompt the operator when the configuration says no.
                self.installation_refused(requirement, missing_dependencies, "automatic installation is disabled")
            # Get the operator's permission to install the missing package(s).
            if self.config.auto_install:
                logger.info("Got permission to install %s (via auto_install option).",
                            pluralize(len(missing_dependencies), "dependency", "dependencies"))
            elif self.confirm_installation(requirement, missing_dependencies, install_command):
                logger.info("Got permission to install %s (via interactive prompt).",
                            pluralize(len(missing_dependencies), "dependency", "dependencies"))
            else:
                logger.error("Refused installation of missing %s!",
                             "dependency" if len(missing_dependencies) == 1 else "dependencies")
                self.installation_refused(requirement, missing_dependencies, "manual installation was refused")
            if subprocess.call(install_command) == 0:
                logger.info("Successfully installed %s in %s.",
                            pluralize(len(missing_dependencies), "dependency", "dependencies"),
                            install_timer)
                return True
            else:
                logger.error("Failed to install %s.",
                             pluralize(len(missing_dependencies), "dependency", "dependencies"))
                msg = "Failed to install %s required by Python package %s! (%s)"
                raise DependencyInstallationFailed(msg % (pluralize(len(missing_dependencies),
                                                                    "system package", "system packages"),
                                                          requirement.name,
                                                          concatenate(missing_dependencies)))
        return False

    def find_missing_dependencies(self, requirement):
        """
        Find missing dependencies of a Python package.

        :param requirement: A :class:`.Requirement` object.
        :returns: A list of strings with system package names.
        """
        known_dependencies = self.find_known_dependencies(requirement)
        if known_dependencies:
            installed_packages = self.find_installed_packages()
            logger.debug("Checking for missing dependencies of %s ..", requirement.name)
            # set difference: known-but-not-installed system packages
            missing_dependencies = sorted(set(known_dependencies).difference(installed_packages))
            if missing_dependencies:
                logger.debug("Found %s: %s",
                             pluralize(len(missing_dependencies), "missing dependency", "missing dependencies"),
                             concatenate(missing_dependencies))
            else:
                logger.info("All known dependencies are already installed.")
            return missing_dependencies

    def find_known_dependencies(self, requirement):
        """
        Find the known dependencies of a Python package.

        :param requirement: A :class:`.Requirement` object.
        :returns: A list of strings with system package names.
        """
        logger.info("Checking for known dependencies of %s ..", requirement.name)
        # dependency table is keyed on lowercased Python package names
        known_dependencies = sorted(self.dependencies.get(requirement.name.lower(), []))
        if known_dependencies:
            logger.info("Found %s: %s", pluralize(len(known_dependencies), "known dependency", "known dependencies"),
                        concatenate(known_dependencies))
        else:
            logger.info("No known dependencies... Maybe you have a suggestion?")
        return known_dependencies

    def find_installed_packages(self):
        """
        Find the installed system packages.

        :returns: A list of strings with system package names.
        :raises: :exc:`.SystemDependencyError` when the command to list the
                 installed system packages fails.
        """
        list_command = subprocess.Popen(self.list_command, shell=True, stdout=subprocess.PIPE)
        stdout, stderr = list_command.communicate()
        if list_command.returncode != 0:
            raise SystemDependencyError("The command to list the installed system packages failed! ({command})",
                                        command=self.list_command)
        # the list command is expected to emit whitespace-separated package names
        installed_packages = sorted(stdout.decode().split())
        logger.debug("Found %i installed system package(s): %s", len(installed_packages), installed_packages)
        return installed_packages

    def installation_refused(self, requirement, missing_dependencies, reason):
        """
        Raise :exc:`.DependencyInstallationRefused` with a user friendly message.

        :param requirement: A :class:`.Requirement` object.
        :param missing_dependencies: A list of strings with missing dependencies.
        :param reason: The reason why installation was refused (a string).
        """
        msg = "Missing %s (%s) required by Python package %s (%s) but %s!"
        raise DependencyInstallationRefused(
            msg % (pluralize(len(missing_dependencies), "system package", "system packages"),
                   concatenate(missing_dependencies),
                   requirement.name,
                   requirement.version,
                   reason)
        )

    def confirm_installation(self, requirement, missing_dependencies, install_command):
        """
        Ask the operator's permission to install missing system packages.

        :param requirement: A :class:`.Requirement` object.
        :param missing_dependencies: A list of strings with missing dependencies.
        :param install_command: A list of strings with the command line needed
                                to install the missing dependencies.
        :raises: :exc:`.DependencyInstallationRefused` when the operator refuses.
        """
        try:
            return prompt_for_confirmation(format(
                "Do you want me to install %s %s?",
                "this" if len(missing_dependencies) == 1 else "these",
                "dependency" if len(missing_dependencies) == 1 else "dependencies",
            ), default=True)
        except KeyboardInterrupt:
            # Control-C is a negative response but doesn't
            # otherwise interrupt the program flow.
            return False
| StarcoderdataPython |
1751851 | # -*- coding: utf-8 -*-
def __len__(self):
    """Return the number of keys so the object behaves like a dict."""
    # NOTE(review): this def takes `self` and reads self._statements but appears
    # at module level — it looks like a method detached from its class; confirm scope
    return self._statements.__len__()
| StarcoderdataPython |
3372024 | <filename>backend/shared/shared/azure_adfs/urls.py
from django.urls import include, path, re_path
from shared.azure_adfs.views import HelsinkiOAuth2CallbackView
urlpatterns = [
    # Helsinki-specific ADFS callback; listed before the include so URL
    # resolution picks this view instead of django_auth_adfs' stock callback
    re_path(r"^callback$", HelsinkiOAuth2CallbackView.as_view(), name="callback"),
    # everything else is delegated to django_auth_adfs' URL configuration
    path("", include("django_auth_adfs.urls")),
]
| StarcoderdataPython |
4823522 | """
Дан список. Определите, является ли он монотонно возрастающим(то есть верно ли, что каждый элемент этого списка
больше предыдущего).Выведите YES, если массив монотонно возрастает и NO в противном случае.Решение оформите в виде
функции IsAscending(A).В данной функции должен быть один цикл while, не содержащий вложенных условий и циклов —
используйте схему линейного поиска.
"""
def IsAscending(A):
    """Linear scan: print YES if the list is strictly increasing, otherwise NO.

    Uses a single while loop with no nested conditions (linear search scheme,
    as required by the exercise statement).
    """
    index = 1
    length = len(A)
    while index < length and A[index - 1] < A[index]:
        index += 1
    # the scan only reaches the end if every adjacent pair was increasing
    if index == length:
        print('YES')
    else:
        print('NO')
# read whitespace-separated integers from stdin and run the check
intList = list(map(int, input().split()))
IsAscending(intList)
| StarcoderdataPython |
194129 | <filename>tests/test_4_delete_test_db.py
import os
# path of the throw-away sqlite database produced by the earlier test scripts
db_path = 'tests/processed/test_db.sqlite'


def test_delete_test_db():
    """Delete the test database created by the preceding tests.

    This should always be the last test run: it removes the db so future runs
    do not append onto a stale file. The existence check makes the cleanup
    idempotent instead of raising FileNotFoundError when the file is already gone.
    """
    if os.path.exists(db_path):
        os.remove(db_path)
1765089 | <reponame>acse-zh820/ci_acse1
import pytest
from simple_functions import my_sum, factorial, average
class TestSimpleFunctions(object):
    '''Tests verifying the simple_functions helpers behave correctly.'''

    @pytest.mark.parametrize('iterable, expected', [
        ([8, 7, 5], 20),
        ((10, -2, 5, -10, 1), 4)
    ])
    def test_my_add(self, iterable, expected):
        '''my_sum should add up every element of the iterable.'''
        assert my_sum(iterable) == expected

    @pytest.mark.parametrize('number, expected', [
        (5, 120),
        (3, 6),
        (1, 1)
    ])
    def test_factorial(self, number, expected):
        '''factorial(n) should equal n!.'''
        assert factorial(number) == expected

    @pytest.mark.parametrize('n, expected', [
        ([1, 2, 3], 2),
        ([4, 6, 8], 6)
    ])
    def test_average(self, n, expected):
        '''average should return the arithmetic mean of the values.'''
        assert average(n) == expected
| StarcoderdataPython |
4802326 | from setuptools import setup, find_packages
# read the long description and license text that setup() references below
with open('README.md') as f:
    readme = f.read()
with open('LICENSE') as f:
    license_ = f.read()
setup(
    name='econ_watcher_reader',
    version='0.0.1',
    description='Web scraper to get economy watcher data from Cabinet Office of Japan.',
    long_description=readme,
    long_description_content_type="text/markdown",
    author='<NAME>',
    author_email='<EMAIL>',
    # runtime dependencies: scraping (requests/bs4) and Excel parsing (xlrd)
    install_requires=['numpy', 'pandas', 'xlrd', 'requests', 'bs4'],
    url='https://github.com/si4141/scraper_for_economy_watcher',
    license=license_,
    packages=find_packages(exclude=('tests', 'docs')),
    classifiers=[
        'Programming Language :: Python :: 3.7',
        'License :: OSI Approved :: MIT License',
        'Development Status :: 4 - Beta',
    ]
) | StarcoderdataPython |
4823292 | <gh_stars>10-100
import os
import time
import torch as T
import numpy as np
import json
import queue
import importlib
import multiprocessing as mp
from collections import namedtuple
from .utils.plot import smoothed_plot
from .utils.replay_buffer import ReplayBuffer, PrioritisedReplayBuffer
from .utils.normalizer import Normalizer
# T.multiprocessing.set_start_method('spawn')
t = namedtuple("transition", ('state', 'action', 'next_state', 'reward', 'done'))
def mkdir(paths):
    """Create every directory in *paths*; directories that already exist are kept."""
    for directory in paths:
        os.makedirs(directory, exist_ok=True)
class Agent(object):
    """Base class for RL agents.

    Owns the torch device, the project directory layout (checkpoints and
    statistics), the observation normalizer and a dictionary of networks,
    and provides soft-update, save/load and plotting utilities that are
    shared by the Worker/Learner subclasses below.
    """
    def __init__(self, algo_params, image_obs=False, action_type='continuous', path=None, seed=-1):
        """Set up device, seeding, directories and common hyper-parameters.

        Args:
            algo_params (dict): algorithm hyper-parameters; keys read here
                include state/action dimensions, normalizer settings,
                'discount_factor' and 'tau'.
            image_obs (bool): True when observations are images (no flat
                state normalizer is created in that case).
            action_type (str): 'continuous' enables action scaling params.
            path (str): project directory for saved files (required).
            seed (int): torch RNG seed.
        """
        # torch device
        self.device = T.device("cuda" if T.cuda.is_available() else "cpu")
        if 'cuda_device_id' in algo_params.keys():
            self.device = T.device("cuda:%i" % algo_params['cuda_device_id'])
        # path & seeding
        T.manual_seed(seed)
        T.cuda.manual_seed_all(seed)  # this has no effect if cuda is not available
        assert path is not None, 'please specify a project path to save files'
        self.path = path
        # path to save neural network check point
        self.ckpt_path = os.path.join(path, 'ckpts')
        # path to save statistics
        self.data_path = os.path.join(path, 'data')
        # create directories if not exist
        mkdir([self.path, self.ckpt_path, self.data_path])
        # non-goal-conditioned args
        self.image_obs = image_obs
        self.action_type = action_type
        if self.image_obs:
            self.state_dim = 0
            self.state_shape = algo_params['state_shape']
        else:
            self.state_dim = algo_params['state_dim']
        self.action_dim = algo_params['action_dim']
        if self.action_type == 'continuous':
            self.action_max = algo_params['action_max']
            self.action_scaling = algo_params['action_scaling']
        # common args
        if not self.image_obs:
            # todo: observation in distributed training should be synced as well
            self.observation_normalization = algo_params['observation_normalization']
            # if not using obs normalization, the normalizer is just a scale multiplier, returns inputs*scale
            self.normalizer = Normalizer(self.state_dim,
                                         algo_params['init_input_means'], algo_params['init_input_vars'],
                                         activated=self.observation_normalization)
        self.gamma = algo_params['discount_factor']
        self.tau = algo_params['tau']
        # network dict is filled in each specific agent
        self.network_dict = {}
        self.network_keys_to_save = None
        # algorithm-specific statistics are defined in each agent sub-class
        self.statistic_dict = {
            # use lowercase characters
            'actor_loss': [],
            'critic_loss': [],
        }
    def _soft_update(self, source, target, tau=None, from_params=False):
        """Polyak-average parameters into *target*.

        When from_params is False, *source* is a module whose parameters
        are mixed in; when True, *source* is an iterable of raw numpy
        arrays (as uploaded through the network queue) converted to
        tensors on this agent's device. tau=1.0 performs a hard copy.
        """
        if tau is None:
            tau = self.tau
        if not from_params:
            for target_param, param in zip(target.parameters(), source.parameters()):
                target_param.data.copy_(
                    target_param.data * (1.0 - tau) + param.data * tau
                )
        else:
            for target_param, param in zip(target.parameters(), source):
                target_param.data.copy_(
                    target_param.data * (1.0 - tau) + T.tensor(param).float().to(self.device) * tau
                )
    def _save_network(self, keys=None, ep=None):
        """Save state dicts of the networks in *keys* (default:
        network_keys_to_save), optionally suffixed with an episode number."""
        if ep is None:
            ep = ''
        else:
            ep = '_ep' + str(ep)
        if keys is None:
            keys = self.network_keys_to_save
        assert keys is not None
        for key in keys:
            T.save(self.network_dict[key].state_dict(), self.ckpt_path + '/ckpt_' + key + ep + '.pt')
    def _load_network(self, keys=None, ep=None):
        """Restore normalizer statistics and network state dicts saved by
        _save_network/_save_statistics."""
        if not self.image_obs:
            self.normalizer.history_mean = np.load(os.path.join(self.data_path, 'input_means.npy'))
            self.normalizer.history_var = np.load(os.path.join(self.data_path, 'input_vars.npy'))
        if ep is None:
            ep = ''
        else:
            ep = '_ep' + str(ep)
        if keys is None:
            keys = self.network_keys_to_save
        assert keys is not None
        for key in keys:
            self.network_dict[key].load_state_dict(T.load(self.ckpt_path + '/ckpt_' + key + ep + '.pt'))
    def _save_statistics(self, keys=None):
        """Persist normalizer statistics (npy) and the tracked statistic
        lists (json). NOTE: converts each statistic list to a plain Python
        list in place before dumping."""
        if not self.image_obs:
            np.save(os.path.join(self.data_path, 'input_means'), self.normalizer.history_mean)
            np.save(os.path.join(self.data_path, 'input_vars'), self.normalizer.history_var)
        if keys is None:
            keys = self.statistic_dict.keys()
        for key in keys:
            if len(self.statistic_dict[key]) == 0:
                continue
            # convert everything to a list before save via json
            if T.is_tensor(self.statistic_dict[key][0]):
                self.statistic_dict[key] = T.as_tensor(self.statistic_dict[key], device=self.device).cpu().numpy().tolist()
            else:
                self.statistic_dict[key] = np.array(self.statistic_dict[key]).tolist()
            json.dump(self.statistic_dict[key], open(os.path.join(self.data_path, key+'.json'), 'w'))
    def _plot_statistics(self, keys=None, x_labels=None, y_labels=None, window=5, save_to_file=True):
        """Plot smoothed curves for each statistic.

        Axis labels are inferred from the statistic's key name ('loss',
        'return', 'success', 'cycle', 'epoch', ...) unless explicitly
        given in x_labels/y_labels.
        """
        if save_to_file:
            self._save_statistics(keys=keys)
        if y_labels is None:
            y_labels = {}
        for key in list(self.statistic_dict.keys()):
            if key not in y_labels.keys():
                if 'loss' in key:
                    label = 'Loss'
                elif 'return' in key:
                    label = 'Return'
                elif 'success' in key:
                    label = 'Success'
                else:
                    label = key
                y_labels.update({key: label})
        if x_labels is None:
            x_labels = {}
        for key in list(self.statistic_dict.keys()):
            if key not in x_labels.keys():
                if ('loss' in key) or ('alpha' in key) or ('entropy' in key) or ('step' in key):
                    label = 'Optimization step'
                elif 'cycle' in key:
                    label = 'Cycle'
                elif 'epoch' in key:
                    label = 'Epoch'
                else:
                    label = 'Episode'
                x_labels.update({key: label})
        if keys is None:
            for key in list(self.statistic_dict.keys()):
                smoothed_plot(os.path.join(self.path, key + '.png'), self.statistic_dict[key],
                              x_label=x_labels[key], y_label=y_labels[key], window=window)
        else:
            for key in keys:
                smoothed_plot(os.path.join(self.path, key + '.png'), self.statistic_dict[key],
                              x_label=x_labels[key], y_label=y_labels[key], window=window)
class Worker(Agent):
    """Distributed actor: interacts with the environment and ships
    transitions to the learner through multiprocessing queues, while
    periodically downloading updated actor parameters.

    Subclasses implement the environment loop (run/_interact/_select_action).
    """
    def __init__(self, algo_params, queues, path=None, seed=0, i=0):
        # queues: shared dict of multiprocessing queues created by
        # CentralProcessor (replay_queue, network_queue, counters...)
        self.queues = queues
        self.worker_id = i
        self.worker_update_gap = algo_params['worker_update_gap']  # in episodes
        self.env_step_count = 0
        super(Worker, self).__init__(algo_params, path=path, seed=seed)
    def run(self, render=False, test=False, load_network_ep=None, sleep=0):
        """Main interaction loop; must be implemented by subclasses."""
        raise NotImplementedError
    def _interact(self, render=False, test=False, sleep=0):
        """Run one episode of environment interaction; subclass hook."""
        raise NotImplementedError
    def _select_action(self, obs, test=False):
        """Pick an action for the given observation; subclass hook."""
        raise NotImplementedError
    def _remember(self, batch):
        """Push a batch of transitions to the learner; silently drops the
        batch when the replay queue is full (best-effort delivery)."""
        try:
            self.queues['replay_queue'].put_nowait(batch)
        except queue.Full:
            pass
    def _download_actor_networks(self, keys, tau=1.0):
        """Fetch the latest parameters from the network queue and copy them
        into this worker's networks (hard copy by default, tau=1.0).

        Returns False when no new parameters are available, True otherwise.
        """
        try:
            source = self.queues['network_queue'].get_nowait()
        except queue.Empty:
            return False
        print("Worker No. %i downloading network" % self.worker_id)
        for key in keys:
            self._soft_update(source[key], self.network_dict[key], tau=tau, from_params=True)
        return True
class Learner(Agent):
    """Distributed learner: consumes batches produced by the central
    buffer process, optimizes the networks and uploads fresh parameters
    for the workers. Subclasses implement run/_learn.
    """
    def __init__(self, algo_params, queues, path=None, seed=0):
        # queues: shared dict of multiprocessing queues created by
        # CentralProcessor (batch_queue, network_queue, priority_queue...)
        self.queues = queues
        self.num_workers = algo_params['num_workers']
        self.learner_steps = algo_params['learner_steps']
        self.learner_upload_gap = algo_params['learner_upload_gap']  # in optimization steps
        self.actor_learning_rate = algo_params['actor_learning_rate']
        self.critic_learning_rate = algo_params['critic_learning_rate']
        self.discard_time_limit = algo_params['discard_time_limit']
        self.batch_size = algo_params['batch_size']
        self.prioritised = algo_params['prioritised']
        self.optimizer_steps = algo_params['optimization_steps']
        self.optim_step_count = 0
        super(Learner, self).__init__(algo_params, path=path, seed=seed)
    def run(self):
        """Main optimization loop; must be implemented by subclasses."""
        raise NotImplementedError
    def _learn(self, steps=None):
        """Perform optimization steps on sampled batches; subclass hook."""
        raise NotImplementedError
    def _upload_learner_networks(self, keys):
        """Serialize the parameters of the networks in *keys* to numpy
        arrays and publish them on the network queue, replacing any stale
        set that is still waiting; drops the upload if the queue is full."""
        print("Learner uploading network")
        params = dict.fromkeys(keys)
        for key in keys:
            params[key] = [p.data.cpu().detach().numpy() for p in self.network_dict[key].parameters()]
        # delete an old net and upload a new one
        try:
            data = self.queues['network_queue'].get_nowait()
            del data
        except queue.Empty:
            pass
        try:
            self.queues['network_queue'].put(params)
        except queue.Full:
            pass
class CentralProcessor(object):
    """Coordinator for distributed training.

    Spawns one learner process, several worker processes and a buffer
    process, wiring them together with multiprocessing queues: workers
    push transitions onto 'replay_queue', the buffer process stores them
    and feeds sampled batches into 'batch_queue', and the learner
    publishes fresh parameters on 'network_queue'.
    """
    def __init__(self, algo_params, env_name, env_source, learner, worker, transition_tuple=None, path=None,
                 worker_seeds=None, seed=0):
        """Store configuration, create queues and the replay buffer.

        Args:
            algo_params (dict): shared hyper-parameters for all processes.
            env_name (str): environment id passed to env_source.make().
            env_source (str): one of 'gym', 'pybullet_envs',
                'pybullet_multigoal_gym'.
            learner, worker: Learner/Worker subclasses to instantiate in
                the child processes.
            transition_tuple: optional namedtuple describing a transition;
                defaults to the module-level tuple `t`.
            path (str): project directory (required).
            worker_seeds (list[int]): per-worker seeds; randomly drawn
                when not given.
            seed (int): seed for this process's RNG and the replay buffer.
        """
        self.algo_params = algo_params.copy()
        self.env_name = env_name
        assert env_source in ['gym', 'pybullet_envs', 'pybullet_multigoal_gym'], \
            "unsupported env source: {}, " \
            "only 3 env sources are supported: {}, " \
            "for new env sources please modify the original code".format(env_source,
                                                                         ['gym', 'pybullet_envs',
                                                                          'pybullet_multigoal_gym'])
        self.env_source = importlib.import_module(env_source)
        self.learner = learner
        self.worker = worker
        self.batch_size = algo_params['batch_size']
        self.num_workers = algo_params['num_workers']
        self.learner_steps = algo_params['learner_steps']
        if worker_seeds is None:
            # NOTE: uses numpy's global RNG, so worker seeds are only
            # reproducible if np.random.seed() was called beforehand
            worker_seeds = np.random.randint(10, 1000, size=self.num_workers).tolist()
        else:
            assert len(worker_seeds) == self.num_workers, 'should assign seeds to every worker'
        self.worker_seeds = worker_seeds
        assert path is not None, 'please specify a project path to save files'
        self.path = path
        # create a random number generator and seed it
        # (bug fix: previously hard-coded seed=0, ignoring the `seed` argument;
        # the default seed=0 keeps the old behaviour for callers that never pass one)
        self.rng = np.random.default_rng(seed=seed)
        # multiprocessing queues
        self.queues = {
            'replay_queue': mp.Queue(maxsize=algo_params['replay_queue_size']),
            'batch_queue': mp.Queue(maxsize=algo_params['batch_queue_size']),
            'network_queue': T.multiprocessing.Queue(maxsize=self.num_workers),
            'learner_step_count': mp.Value('i', 0),
            'global_episode_count': mp.Value('i', 0),
        }
        # setup replay buffer
        # prioritised replay
        self.prioritised = algo_params['prioritised']
        self.store_with_given_priority = algo_params['store_with_given_priority']
        # non-goal-conditioned replay buffer
        tr = transition_tuple
        if transition_tuple is None:
            tr = t
        if not self.prioritised:
            self.buffer = ReplayBuffer(algo_params['memory_capacity'], tr, seed=seed)
        else:
            self.queues.update({
                'priority_queue': mp.Queue(maxsize=algo_params['priority_queue_size'])
            })
            self.buffer = PrioritisedReplayBuffer(algo_params['memory_capacity'], tr, rng=self.rng)
    def run(self):
        """Spawn and join the buffer, learner and worker processes."""
        def worker_process(i, seed):
            # one environment + worker per process
            env = self.env_source.make(self.env_name)
            path = os.path.join(self.path, "worker_%i" % i)
            worker = self.worker(self.algo_params, env, self.queues, path=path, seed=seed, i=i)
            worker.run()
            self.empty_queue('replay_queue')
        def learner_process():
            env = self.env_source.make(self.env_name)
            path = os.path.join(self.path, "learner")
            # NOTE(review): learner seed is fixed to 0 here — presumably
            # intentional for reproducible optimization; confirm
            learner = self.learner(self.algo_params, env, self.queues, path=path, seed=0)
            learner.run()
            if self.prioritised:
                self.empty_queue('priority_queue')
            self.empty_queue('network_queue')
        def update_buffer():
            # drain worker transitions into the buffer and keep the batch
            # queue fed until the learner has finished all its steps
            while self.queues['learner_step_count'].value < self.learner_steps:
                num_transitions_in_queue = self.queues['replay_queue'].qsize()
                for n in range(num_transitions_in_queue):
                    data = self.queues['replay_queue'].get()
                    if self.prioritised:
                        if self.store_with_given_priority:
                            # here workers send dicts {'priority': ..., 'transition': ...}
                            self.buffer.store_experience_with_given_priority(data['priority'], *data['transition'])
                        else:
                            self.buffer.store_experience(*data)
                    else:
                        self.buffer.store_experience(*data)
                if self.batch_size > len(self.buffer):
                    continue
                if self.prioritised:
                    # apply any priority updates published by the learner
                    try:
                        inds, priorities = self.queues['priority_queue'].get_nowait()
                        self.buffer.update_priority(inds, priorities)
                    except queue.Empty:
                        pass
                    try:
                        batch, weights, inds = self.buffer.sample(batch_size=self.batch_size)
                        state, action, next_state, reward, done = batch
                        self.queues['batch_queue'].put_nowait((state, action, next_state, reward, done, weights, inds))
                    except queue.Full:
                        continue
                else:
                    try:
                        batch = self.buffer.sample(batch_size=self.batch_size)
                        state, action, next_state, reward, done = batch
                        self.queues['batch_queue'].put_nowait((state, action, next_state, reward, done))
                    except queue.Full:
                        time.sleep(0.1)
                        continue
            self.empty_queue('batch_queue')
        processes = []
        p = T.multiprocessing.Process(target=update_buffer)
        processes.append(p)
        p = T.multiprocessing.Process(target=learner_process)
        processes.append(p)
        for i in range(self.num_workers):
            p = T.multiprocessing.Process(target=worker_process,
                                          args=(i, self.worker_seeds[i]))
            processes.append(p)
        for p in processes:
            p.start()
        for p in processes:
            p.join()
    def empty_queue(self, queue_name):
        """Drain and close a queue so child processes can exit cleanly."""
        while True:
            try:
                data = self.queues[queue_name].get_nowait()
                del data
            except queue.Empty:
                break
        self.queues[queue_name].close()
| StarcoderdataPython |
1785093 | <reponame>binhmuc/gluon-ts<filename>test/model/naive_predictors/test_r_code_compliance_of_naive_2.py<gh_stars>1-10
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# A copy of the License is located at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# or in the "license" file accompanying this file. This file is distributed
# on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either
# express or implied. See the License for the specific language governing
# permissions and limitations under the License.
# Third-party imports
import numpy as np
import pandas as pd
import os
from pathlib import Path
# First-party imports
from gluonts.model.naive_2 import naive_2
# DISCLAIMER:
# this script is only to test whether the R implementation
# of naive_2 produces the same outputs as our python implementation
R_INPUT_FILE = "r_naive_2_inputs.csv"
R_OUTPUT_FILE = "r_naive_2_outputs.csv"
def load_naive_2_data():
    """Load the R-generated naive-2 input/output fixtures as numpy arrays.

    The fixture CSVs live next to the currently running test module, which
    is located through the PYTEST_CURRENT_TEST environment variable.
    """
    fixtures_dir = Path(os.getenv("PYTEST_CURRENT_TEST")).parents[0]
    inputs = pd.read_csv(fixtures_dir / R_INPUT_FILE).values
    outputs = pd.read_csv(fixtures_dir / R_OUTPUT_FILE).values
    return inputs, outputs
# To generate the above dataset use the following script:
# https://github.com/Mcompetitions/M4-methods/blob/master/Benchmarks%20and%20Evaluation.R
# and then paste the following code in as the first line:
"""
set.seed(1234567)
"""
# and the below code at the very end:
"""
for (i in 1:length(data_train)){
insample <- data_train[[i]]
d <- insample[1:14]
v <-paste(d, collapse=", ")
cat(c("[",v,"],"))
}
print('\n')
for (i in 1:length(data_train)){
insample <- data_train[[i]]
forecasts <- Benchmarks(input=insample, fh=fh)
f <- forecasts[[1]][1:6]
v <-paste(f, collapse=", ")
cat(c("[",v,"],"))
}
"""
# R script variables:
FH = 6 # The forecasting horizon examined
FRQ = 1 # The frequency of the data
def test_naive_2(prediction_length=FH, season_length=FRQ):
    """Check the python naive_2 reproduces the forecasts of the R benchmark."""
    r_naive_2_inputs, r_naive_2_outputs = load_naive_2_data()
    predictions = np.array([
        naive_2(
            series,
            prediction_length=prediction_length,
            season_length=season_length,
        )
        for series in r_naive_2_inputs
    ])
    assert np.allclose(r_naive_2_outputs, predictions)
| StarcoderdataPython |
1772715 | import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
# Load benchmark results; columns used below: algorithm, sigma, loss.
mean_sigma = pd.read_csv("mean_sigma.csv")
# Global plot styling (serif fonts, talk-sized labels).
sns.set(style='ticks', context='talk')
plt.rcParams['font.family'] = 'serif'
plt.rcParams['font.weight'] = 'normal'
plt.rcParams['legend.title_fontsize'] = 24
# One log-x regression curve per attitude-estimation algorithm.
groups = mean_sigma.groupby("algorithm")
plt.figure(figsize=(10, 8))
sns.regplot(logx=True, scatter=False, data=groups.get_group("nn"), x="sigma", y="loss", label="neural network")
sns.regplot(logx=True, scatter=False, data=groups.get_group("svd"), x="sigma", y="loss", label="svd")
sns.regplot(logx=True, scatter=False, data=groups.get_group("q_method"), x="sigma", y="loss", label="q-method")
sns.regplot(logx=True, scatter=False, data=groups.get_group("quest"), x="sigma", y="loss", label="quest")
# keep the last axes handle so the shared axes can be configured below
chart = sns.regplot(logx=True, scatter=False, data=groups.get_group("esoq2"), x="sigma", y="loss", label="esoq2")
# log-log axes with a light grid for readability
chart.set_yscale("log")
chart.set_xscale("log")
chart.grid(True, which="both", ls="--", c='gray', alpha=0.3)
chart.set_ylabel("Wahba's error (log)", fontsize=30)
chart.set_xlabel("Mean measurement noise (" + r'$\sf{\bar{\sigma}}$' + ")", fontsize=30)
chart.tick_params(axis='both', which='major', labelsize=24)
plt.tight_layout()
plt.legend(prop={"size": 20})
plt.show()
| StarcoderdataPython |
1689215 | <reponame>CiscoSystems/networking-cisco
# Copyright 2015 Cisco Systems, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron import context
from neutron.extensions import l3
from networking_cisco.plugins.cisco.common import cisco_constants
from networking_cisco.plugins.cisco.db.l3 import ha_db
from networking_cisco.plugins.cisco.extensions import ha
from networking_cisco.plugins.cisco.extensions import routerhostingdevice
from networking_cisco.plugins.cisco.extensions import routerrole
from networking_cisco.plugins.cisco.extensions import routertype
from networking_cisco.plugins.cisco.extensions import routertypeawarescheduler
from networking_cisco.tests.unit.cisco.l3 import (
test_ha_l3_router_appliance_plugin as cisco_ha_test)
from networking_cisco.tests.unit.cisco.l3 import (
test_l3_routertype_aware_schedulers as cisco_test_case)
_uuid = uuidutils.generate_uuid
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
AGENT_TYPE_L3_CFG = cisco_constants.AGENT_TYPE_L3_CFG
ROUTER_ROLE_GLOBAL = cisco_constants.ROUTER_ROLE_GLOBAL
ROUTER_ROLE_LOGICAL_GLOBAL = cisco_constants.ROUTER_ROLE_LOGICAL_GLOBAL
ROUTER_ROLE_HA_REDUNDANCY = cisco_constants.ROUTER_ROLE_HA_REDUNDANCY
LOGICAL_ROUTER_ROLE_NAME = cisco_constants.LOGICAL_ROUTER_ROLE_NAME
ROUTER_ROLE_ATTR = routerrole.ROUTER_ROLE_ATTR
HOSTING_DEVICE_ATTR = routerhostingdevice.HOSTING_DEVICE_ATTR
AUTO_SCHEDULE_ATTR = routertypeawarescheduler.AUTO_SCHEDULE_ATTR
class Asr1kRouterTypeDriverTestCase(
        cisco_test_case.L3RoutertypeAwareHostingDeviceSchedulerTestCaseBase):
    """Tests that the ASR1k router type driver creates/keeps/removes the
    per-hosting-device "global" router (and its logical global companion)
    as tenant routers gain and lose external gateways.
    """
    # Nexus router type for ASR1k driver tests, why?
    # - Yes(!), it does not matter and there is only one hosting device for
    # that router type in the test setup which makes scheduling deterministic
    router_type = 'Nexus_ToR_Neutron_router'
    def _verify_created_routers(self, router_ids, hd_id):
        """Assert exactly the given tenant routers exist, plus one global
        router on hosting device *hd_id* and one logical global router,
        and that notifications were sent in the expected order."""
        # tenant routers
        q_p = '%s=None' % ROUTER_ROLE_ATTR
        r_ids = {r['id'] for r in self._list(
            'routers', query_params=q_p)['routers']}
        self.assertEqual(len(r_ids), len(router_ids))
        for r_id in r_ids:
            self.assertIn(r_id, router_ids)
        # global router on hosting device
        q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
        g_rtrs = self._list('routers', query_params=q_p)['routers']
        self.assertEqual(len(g_rtrs), 1)
        g_rtr = g_rtrs[0]
        self.assertEqual(g_rtr['name'].endswith(
            hd_id[-cisco_constants.ROLE_ID_LEN:]), True)
        # logical global router for global routers HA
        q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
        g_l_rtrs = self._list('routers', query_params=q_p)['routers']
        self.assertEqual(len(g_l_rtrs), 1)
        g_l_rtr = g_l_rtrs[0]
        self.assertEqual(g_l_rtr['name'], LOGICAL_ROUTER_ROLE_NAME)
        self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
        # ensure first routers_updated notification was for global router
        notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
        notify_call = notifier.method_calls[0]
        self.assertEqual(notify_call[0], 'routers_updated')
        updated_routers = notify_call[1][1]
        self.assertEqual(len(updated_routers), 1)
        self.assertEqual(updated_routers[0]['id'], g_rtr['id'])
        # ensure *no* update notifications where sent for logical global router
        for call in notifier.method_calls:
            self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
                             [ROUTER_ROLE_LOGICAL_GLOBAL])
    def _test_gw_router_create_adds_global_router(self, set_context=False):
        """Creating gateway routers should add exactly one global router."""
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            ext_gw = {'network_id': s['subnet']['network_id']}
            with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
                             set_context=set_context) as router1:
                r1 = router1['router']
                self.plugin._process_backlogged_routers()
                r1_after = self._show('routers', r1['id'])['router']
                hd_id = r1_after[HOSTING_DEVICE_ATTR]
                # should have one global router now
                self._verify_created_routers({r1['id']}, hd_id)
                with self.router(name='router2', tenant_id=tenant_id,
                                 external_gateway_info=ext_gw,
                                 set_context=set_context) as router2:
                    r2 = router2['router']
                    self.plugin._process_backlogged_routers()
                    # should still have only one global router
                    self._verify_created_routers({r1['id'], r2['id']}, hd_id)
    def test_gw_router_create_adds_global_router(self):
        self._test_gw_router_create_adds_global_router()
    def test_gw_router_create_adds_global_router_non_admin(self):
        self._test_gw_router_create_adds_global_router(True)
    def _test_router_create_adds_no_global_router(self, set_context=False):
        """A router without a gateway must not trigger any global router."""
        with self.router(set_context=set_context) as router:
            r = router['router']
            self.plugin._process_backlogged_routers()
            # tenant routers
            q_p = '%s=None' % ROUTER_ROLE_ATTR
            t_rtrs = self._list('routers', query_params=q_p)['routers']
            self.assertEqual(len(t_rtrs), 1)
            t_rtr = t_rtrs[0]
            self.assertEqual(t_rtr['id'], r['id'])
            # global router
            q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
            g_rtrs = self._list('routers', query_params=q_p)['routers']
            self.assertEqual(len(g_rtrs), 0)
            # logical global router for global routers HA
            q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
            g_l_rtrs = self._list('routers', query_params=q_p)['routers']
            self.assertEqual(len(g_l_rtrs), 0)
            notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
            # ensure *no* update notifications where sent for global
            # router (as there should be none) or logical global router
            for call in notifier.method_calls:
                if call[0] != 'router_deleted':
                    self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
                                     [ROUTER_ROLE_GLOBAL,
                                      ROUTER_ROLE_LOGICAL_GLOBAL])
    def test_router_create_adds_no_global_router(self):
        self._test_router_create_adds_no_global_router()
    def test_router_create_adds_no_global_router_non_admin(self):
        self._test_router_create_adds_no_global_router(True)
    def _verify_updated_routers(self, router_ids, hd_id=None, call_index=1):
        """Assert the expected tenant routers exist and that a global
        router (plus logical global) exists iff *hd_id* is given; also
        check the routers_updated notification at *call_index*."""
        # tenant routers
        q_p = '%s=None' % ROUTER_ROLE_ATTR
        r_ids = {r['id'] for r in self._list(
            'routers', query_params=q_p)['routers']}
        self.assertEqual(len(r_ids), len(router_ids))
        for r_id in r_ids:
            self.assertIn(r_id, router_ids)
        # global routers
        q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
        g_rtrs = self._list('routers', query_params=q_p)['routers']
        # logical global router for global routers HA
        q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
        g_l_rtrs = self._list('routers', query_params=q_p)['routers']
        notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
        if hd_id:
            self.assertEqual(len(g_rtrs), 1)
            g_rtr = g_rtrs[0]
            self.assertEqual(
                g_rtr['name'].endswith(hd_id[-cisco_constants.ROLE_ID_LEN:]),
                True)
            self.assertEqual(len(g_l_rtrs), 1)
            g_l_rtr = g_l_rtrs[0]
            self.assertEqual(g_l_rtr['name'], LOGICAL_ROUTER_ROLE_NAME)
            self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
            # routers_updated notification call_index is for global router
            notify_call = notifier.method_calls[call_index]
            self.assertEqual(notify_call[0], 'routers_updated')
            updated_routers = notify_call[1][1]
            self.assertEqual(len(updated_routers), 1)
            self.assertEqual(updated_routers[0]['id'], g_rtr['id'])
        else:
            self.assertEqual(len(g_rtrs), 0)
            self.assertEqual(len(g_l_rtrs), 0)
        # ensure *no* update notifications where sent for logical global router
        for call in notifier.method_calls:
            if call[0] != 'router_deleted':
                self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
                                 [ROUTER_ROLE_LOGICAL_GLOBAL])
    def _test_router_update_set_gw_adds_global_router(self, set_context=False):
        """Setting a gateway on existing routers should create one global
        router the first time, and reuse it afterwards."""
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            with self.router(tenant_id=tenant_id,
                             set_context=set_context) as router1,\
                    self.router(name='router2', tenant_id=tenant_id,
                                set_context=set_context) as router2:
                r1 = router1['router']
                r2 = router2['router']
                # backlog processing will trigger one routers_updated
                # notification containing r1 and r2
                self.plugin._process_backlogged_routers()
                # should have no global router yet
                r_ids = {r1['id'], r2['id']}
                self._verify_updated_routers(r_ids)
                ext_gw = {'network_id': s['subnet']['network_id']}
                r_spec = {'router': {l3.EXTERNAL_GW_INFO: ext_gw}}
                r1_after = self._update('routers', r1['id'], r_spec)['router']
                hd_id = r1_after[HOSTING_DEVICE_ATTR]
                # should now have one global router
                self._verify_updated_routers(r_ids, hd_id)
                self._update('routers', r2['id'], r_spec)
                # should still have only one global router
                self._verify_updated_routers(r_ids, hd_id)
    def test_router_update_set_gw_adds_global_router(self):
        self._test_router_update_set_gw_adds_global_router()
    def test_router_update_set_gw_adds_global_router_non_admin(self):
        self._test_router_update_set_gw_adds_global_router(True)
    def _test_router_update_unset_gw_keeps_global_router(self,
                                                         set_context=False):
        """Removing gateways should keep the global router until the last
        gateway-bearing tenant router has lost its gateway."""
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            ext_gw = {'network_id': s['subnet']['network_id']}
            with self.router(tenant_id=tenant_id,
                             external_gateway_info=ext_gw,
                             set_context=set_context) as router1,\
                    self.router(name='router2', tenant_id=tenant_id,
                                external_gateway_info=ext_gw,
                                set_context=set_context) as router2:
                r1 = router1['router']
                r2 = router2['router']
                # backlog processing will trigger one routers_updated
                # notification containing r1 and r2
                self.plugin._process_backlogged_routers()
                r1_after = self._show('routers', r1['id'])['router']
                hd_id = r1_after[HOSTING_DEVICE_ATTR]
                r_ids = {r1['id'], r2['id']}
                # should have one global router now
                self._verify_updated_routers(r_ids, hd_id, 0)
                r_spec = {'router': {l3.EXTERNAL_GW_INFO: None}}
                self._update('routers', r1['id'], r_spec)
                # should still have one global router
                self._verify_updated_routers(r_ids, hd_id, 0)
                self._update('routers', r2['id'], r_spec)
                # should have no global router now
                self._verify_updated_routers(r_ids)
    def test_router_update_unset_gw_keeps_global_router(self):
        self._test_router_update_unset_gw_keeps_global_router()
    def test_router_update_unset_gw_keeps_global_router_non_admin(self):
        self._test_router_update_unset_gw_keeps_global_router(True)
    def _verify_deleted_routers(self, hd_id=None, id_global_router=None):
        """Assert the global router still exists (returning its id) when
        *hd_id* is given, or that it is gone and a router_deleted
        notification was sent for *id_global_router* otherwise."""
        # global routers
        q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
        g_rtrs = self._list('routers', query_params=q_p)['routers']
        if hd_id:
            self.assertEqual(len(g_rtrs), 1)
            g_rtr = g_rtrs[0]
            self.assertEqual(g_rtr['name'].endswith(
                hd_id[-cisco_constants.ROLE_ID_LEN:]), True)
            return g_rtrs[0]['id']
        else:
            self.assertEqual(len(g_rtrs), 0)
            notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
            # ensure last router_deleted notification was for global router
            notify_call = notifier.method_calls[-1]
            self.assertEqual(notify_call[0], 'router_deleted')
            deleted_router = notify_call[1][1]
            self.assertEqual(deleted_router['id'], id_global_router)
    def _test_gw_router_delete_removes_global_router(self, set_context=False):
        """Deleting gateway routers should delete the global router only
        after the last one is gone."""
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            ext_gw = {'network_id': s['subnet']['network_id']}
            with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
                             set_context=set_context) as router1,\
                    self.router(name='router2', tenant_id=tenant_id,
                                external_gateway_info=ext_gw,
                                set_context=set_context) as router2:
                r1 = router1['router']
                r2 = router2['router']
                self.plugin._process_backlogged_routers()
                r1_after = self._show('routers', r1['id'])['router']
                hd_id = r1_after[HOSTING_DEVICE_ATTR]
                self._delete('routers', r1['id'])
                # should still have the global router
                id_global_router = self._verify_deleted_routers(hd_id)
                self._delete('routers', r2['id'])
                # should be no global router now
                self._verify_deleted_routers(id_global_router=id_global_router)
    def test_gw_router_delete_removes_global_router(self):
        self._test_gw_router_delete_removes_global_router()
    def test_gw_router_delete_removes_global_router_non_admin(self):
        self._test_gw_router_delete_removes_global_router(True)
    def _test_router_delete_removes_no_global_router(self, set_context=False):
        """Deleting a gateway-less router must not remove the global router
        that another gateway router still needs."""
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            ext_gw = {'network_id': s['subnet']['network_id']}
            with self.router(tenant_id=tenant_id,
                             set_context=set_context) as router1,\
                    self.router(name='router2', tenant_id=tenant_id,
                                external_gateway_info=ext_gw,
                                set_context=set_context) as router2:
                r1 = router1['router']
                r2 = router2['router']
                self.plugin._process_backlogged_routers()
                r1_after = self._show('routers', r1['id'])['router']
                hd_id = r1_after[HOSTING_DEVICE_ATTR]
                self._delete('routers', r1['id'])
                # should still have the global router
                id_global_router = self._verify_deleted_routers(hd_id)
                self._delete('routers', r2['id'])
                # should be no global router now
                self._verify_deleted_routers(id_global_router=id_global_router)
    def test_router_delete_removes_no_global_router(self):
        self._test_router_delete_removes_no_global_router()
    def test_router_delete_removes_no_global_router_non_admin(self):
        self._test_router_delete_removes_no_global_router(True)
class Asr1kHARouterTypeDriverTestCase(
Asr1kRouterTypeDriverTestCase,
cisco_ha_test.HAL3RouterTestsMixin):
# For the HA tests we need more than one hosting device
router_type = 'ASR1k_Neutron_router'
_is_ha_tests = True
def setUp(self, core_plugin=None, l3_plugin=None, dm_plugin=None,
ext_mgr=None):
if l3_plugin is None:
l3_plugin = cisco_test_case.HA_L3_PLUGIN_KLASS
if ext_mgr is None:
ext_mgr = (cisco_test_case.
TestHASchedulingL3RouterApplianceExtensionManager())
cfg.CONF.set_override('default_ha_redundancy_level', 1, group='ha')
super(Asr1kHARouterTypeDriverTestCase, self).setUp(
l3_plugin=l3_plugin, ext_mgr=ext_mgr)
def _verify_ha_created_routers(self, router_ids, num_redundancy=1,
has_gw=None):
if has_gw is None:
has_gw = [True for r_id in router_ids]
temp = {}
for i in range(len(router_ids)):
temp[router_ids[i]] = has_gw[i]
has_gw = temp
# tenant HA user_visible routers
q_p = '%s=None' % ROUTER_ROLE_ATTR
uv_routers = self._list('routers', query_params=q_p)['routers']
uv_r_ids = {r['id'] for r in uv_routers}
self.assertEqual(len(uv_r_ids), len(router_ids))
for uv_r_id in uv_r_ids:
self.assertIn(uv_r_id, router_ids)
# tenant HA redundancy routers
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_HA_REDUNDANCY)
rr_id_to_rr = {
r['id']: r for r in self._list('routers',
query_params=q_p)['routers']}
all_rr_ids = rr_id_to_rr.keys()
num_rr_ids = 0
hd_ids = set()
for uv_r in uv_routers:
uv_r_hd_id = uv_r[HOSTING_DEVICE_ATTR]
if has_gw[uv_r['id']] is True:
self.assertIsNotNone(uv_r[EXTERNAL_GW_INFO])
hd_ids.add(uv_r_hd_id)
else:
self.assertIsNone(uv_r[EXTERNAL_GW_INFO])
rr_ids = [rr_info['id']
for rr_info in uv_r[ha.DETAILS][ha.REDUNDANCY_ROUTERS]]
num = len(rr_ids)
num_rr_ids += num
self.assertEqual(num, num_redundancy)
for rr_id in rr_ids:
self.assertIn(rr_id, all_rr_ids)
rr = rr_id_to_rr[rr_id]
rr_hd_id = rr[HOSTING_DEVICE_ATTR]
# redundancy router must not be hosted on same device as its
# user visible router since that defeats HA
self.assertFalse(uv_r_hd_id == rr_hd_id)
if has_gw[uv_r['id']] is True:
self.assertIsNotNone(rr[EXTERNAL_GW_INFO])
hd_ids.add(rr_hd_id)
else:
self.assertIsNone(rr[EXTERNAL_GW_INFO])
self.assertEqual(num_rr_ids, len(all_rr_ids))
# we should have a global router on all hosting devices that hosts
# a router (user visible or redundancy router) with gateway set
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
g_rtrs = self._list('routers', query_params=q_p)['routers']
self.assertEqual(len(g_rtrs), len(hd_ids))
g_rtr_ids = set()
for g_rtr in g_rtrs:
self.assertIn(g_rtr[HOSTING_DEVICE_ATTR], hd_ids)
g_rtr_ids.add(g_rtr['id'])
# logical global router for global routers HA
q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
g_l_rtrs = self._list('routers', query_params=q_p)['routers']
if g_l_rtrs:
self.assertEqual(len(g_l_rtrs), 1)
g_l_rtr = g_l_rtrs[0]
self.assertEqual(g_l_rtr['name'], LOGICAL_ROUTER_ROLE_NAME)
self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
else:
self.assertEqual(len(g_l_rtrs), 0)
notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
if g_l_rtrs:
# ensure first routers_updated notifications were
# for global routers
for i in range(len(hd_ids)):
notify_call = notifier.method_calls[i]
self.assertEqual(notify_call[0], 'routers_updated')
updated_routers = notify_call[1][1]
self.assertEqual(len(updated_routers), 1)
self.assertIn(updated_routers[0]['id'], g_rtr_ids)
g_rtr_ids.remove(updated_routers[0]['id'])
else:
# ensure *no* update notifications where sent for global routers
for call in notifier.method_calls:
self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
[ROUTER_ROLE_GLOBAL])
# ensure *no* update notifications where sent for logical global router
for call in notifier.method_calls:
self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
[ROUTER_ROLE_LOGICAL_GLOBAL])
def _test_gw_router_create_adds_global_router(self, set_context=False):
        """Creating a router with a gateway must also create global routers."""
        tenant = _uuid()
        with self.network(tenant_id=tenant) as ext_net:
            subnet_res = self._create_subnet(self.fmt,
                                             ext_net['network']['id'],
                                             cidr='10.0.1.0/24',
                                             tenant_id=tenant)
            subnet = self.deserialize(self.fmt, subnet_res)
            net_id = subnet['subnet']['network_id']
            self._set_net_external(net_id)
            gw_info = {'network_id': net_id}
            with self.router(tenant_id=tenant, external_gateway_info=gw_info,
                             set_context=set_context) as router_ctx:
                router = router_ctx['router']
                self.plugin._process_backlogged_routers()
                # Expect one user-visible router, its single redundancy
                # router, and two global routers (one for each hosting
                # device used by those routers).
                self._verify_ha_created_routers([router['id']])
def _test_router_create_adds_no_global_router(self, set_context=False):
        """A router without a gateway must not trigger any global router."""
        with self.router(set_context=set_context) as router_ctx:
            router = router_ctx['router']
            self.plugin._process_backlogged_routers()
            self._verify_ha_created_routers([router['id']], 1, has_gw=[False])
def _verify_ha_updated_router(self, router_id, hd_ids=None, call_index=1,
                                  num_redundancy=1, has_gw=True):
        """Verify HA state after a router update (gateway set/unset/deleted).

        :param router_id: user-visible router to verify, or None to only
            check the global/logical-global router invariants.
        :param hd_ids: set of hosting device ids already known to host
            gateway routers; extended in place and returned.
        :param call_index: index into the notifier's recorded method calls
            where the global router 'routers_updated' notification is
            expected.
        :param num_redundancy: expected number of redundancy routers.
        :param has_gw: whether the router (and its redundancy routers) is
            expected to have an external gateway.
        :returns: the (possibly extended) set of hosting device ids.
        """
        # ids of hosting devices hosting routers with gateway set
        hd_ids = hd_ids or set()
        if router_id:
            # tenant router
            uv_r = self._show('routers', router_id)['router']
            uv_r_hd_id = uv_r[HOSTING_DEVICE_ATTR]
            if has_gw is True:
                self.assertIsNotNone(uv_r[EXTERNAL_GW_INFO])
                hd_ids.add(uv_r_hd_id)
            else:
                self.assertIsNone(uv_r[EXTERNAL_GW_INFO])
            rr_ids = [rr_info['id']
                      for rr_info in uv_r[ha.DETAILS][ha.REDUNDANCY_ROUTERS]]
            # tenant HA redundancy routers
            q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_HA_REDUNDANCY)
            rr_id_to_rr = {
                r['id']: r for r in self._list('routers',
                                               query_params=q_p)['routers']}
            all_rr_ids = rr_id_to_rr.keys()
            self.assertEqual(len(rr_ids), num_redundancy)
            for rr_id in rr_ids:
                self.assertIn(rr_id, all_rr_ids)
                rr = rr_id_to_rr[rr_id]
                rr_hd_id = rr[HOSTING_DEVICE_ATTR]
                # redundancy router must not be hosted on same device as its
                # user visible router since that defeats HA
                self.assertFalse(uv_r_hd_id == rr_hd_id)
                if has_gw is True:
                    self.assertIsNotNone(rr[EXTERNAL_GW_INFO])
                    hd_ids.add(rr_hd_id)
                else:
                    self.assertIsNone(rr[EXTERNAL_GW_INFO])
        # we should have a global router on all hosting devices that hosts
        # a router (user visible or redundancy router) with gateway set
        num_devices_hosting_gateway_routers = len(hd_ids)
        q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
        g_rtrs = self._list('routers', query_params=q_p)['routers']
        self.assertEqual(len(g_rtrs), num_devices_hosting_gateway_routers)
        g_rtr_ids = set()
        for g_rtr in g_rtrs:
            self.assertIn(g_rtr[HOSTING_DEVICE_ATTR], hd_ids)
            g_rtr_ids.add(g_rtr['id'])
        # logical global router for global routers HA: exists exactly when
        # at least one global router exists, and must never be scheduled
        q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
        g_l_rtrs = self._list('routers', query_params=q_p)['routers']
        if num_devices_hosting_gateway_routers > 0:
            self.assertEqual(len(g_l_rtrs), 1)
            g_l_rtr = g_l_rtrs[0]
            self.assertEqual(g_l_rtr['name'], LOGICAL_ROUTER_ROLE_NAME)
            self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
        else:
            self.assertEqual(len(g_l_rtrs), 0)
        # global routers
        notifier = self.plugin.agent_notifiers[AGENT_TYPE_L3_CFG]
        # routers_updated notification at call_index is for a global router
        notify_call = notifier.method_calls[call_index]
        self.assertEqual(notify_call[0], 'routers_updated')
        updated_routers = notify_call[1][1]
        self.assertEqual(len(updated_routers), 1)
        self.assertEqual(updated_routers[0][ROUTER_ROLE_ATTR],
                         ROUTER_ROLE_GLOBAL)
        # ensure *no* update notifications were sent for logical global router
        for call in notifier.method_calls:
            if call[0] != 'router_deleted':
                self.assertNotIn(call[1][1][0][ROUTER_ROLE_ATTR],
                                 [ROUTER_ROLE_LOGICAL_GLOBAL])
        return hd_ids
def _test_router_update_set_gw_adds_global_router(self, set_context=False):
        """Setting a gateway on existing routers must add global routers."""
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            with self.router(tenant_id=tenant_id,
                             set_context=set_context) as router1,\
                    self.router(name='router2', tenant_id=tenant_id,
                                set_context=set_context) as router2:
                r1 = router1['router']
                r2 = router2['router']
                # backlog processing to schedule the routers
                self.plugin._process_backlogged_routers()
                # should have no global router yet (no gateways set)
                r_ids = [r1['id'], r2['id']]
                self._verify_ha_created_routers(r_ids, 1, has_gw=[False,
                                                                  False])
                ext_gw = {'network_id': s['subnet']['network_id']}
                r_spec = {'router': {l3.EXTERNAL_GW_INFO: ext_gw}}
                self._update('routers', r1['id'], r_spec)
                # should now have two global routers, one for hosting device
                # of user visible router r1 and one for the hosting device r1's
                # redundancy router
                hd_ids = self._verify_ha_updated_router(r1['id'])
                self._update('routers', r2['id'], r_spec)
                # r2's devices may overlap r1's, so pass the accumulated set
                self._verify_ha_updated_router(r2['id'], hd_ids)
def _test_router_update_unset_gw_keeps_global_router(self,
                                                         set_context=False):
        """Unsetting one router's gateway must keep global routers alive
        as long as another gateway router remains on the hosting devices.
        """
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            ext_gw = {'network_id': s['subnet']['network_id']}
            with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
                             set_context=set_context) as router1,\
                    self.router(name='router2', tenant_id=tenant_id,
                                external_gateway_info=ext_gw,
                                set_context=set_context) as router2:
                r1 = router1['router']
                r2 = router2['router']
                # make sure we have only two eligible hosting devices
                # in this test
                qp = "template_id=00000000-0000-0000-0000-000000000005"
                hds = self._list('hosting_devices', query_params=qp)
                self._delete('hosting_devices',
                             hds['hosting_devices'][1]['id'])
                # backlog processing to schedule the routers
                self.plugin._process_backlogged_routers()
                self._verify_ha_created_routers([r1['id'], r2['id']])
                r_spec = {'router': {l3.EXTERNAL_GW_INFO: None}}
                self._update('routers', r1['id'], r_spec)
                # should still have two global routers, we verify using r2
                self._verify_ha_updated_router(r2['id'])
                self._update('routers', r2['id'], r_spec)
                # should have no global routers now, we verify using r2
                # NOTE(review): the original comment said "verify using r1"
                # but the code passes r2's id — confirm which was intended.
                self._verify_ha_updated_router(r2['id'], has_gw=False)
def _test_gw_router_delete_removes_global_router(self, set_context=False):
        """Deleting the last gateway router must remove the global routers."""
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            ext_gw = {'network_id': s['subnet']['network_id']}
            with self.router(tenant_id=tenant_id, external_gateway_info=ext_gw,
                             set_context=set_context) as router1,\
                    self.router(name='router2', tenant_id=tenant_id,
                                external_gateway_info=ext_gw,
                                set_context=set_context) as router2:
                r1 = router1['router']
                r2 = router2['router']
                # make sure we have only two eligible hosting devices
                # in this test
                qp = "template_id=00000000-0000-0000-0000-000000000005"
                hds = self._list('hosting_devices', query_params=qp)
                self._delete('hosting_devices',
                             hds['hosting_devices'][1]['id'])
                # backlog processing to schedule the routers
                self.plugin._process_backlogged_routers()
                self._verify_ha_created_routers([r1['id'], r2['id']])
                self._delete('routers', r1['id'])
                # should still have two global routers, we verify using r2
                self._verify_ha_updated_router(r2['id'])
                self._delete('routers', r2['id'])
                # should have no global routers now
                self._verify_ha_updated_router(None)
def _test_router_delete_removes_no_global_router(self, set_context=False):
        """Deleting a gateway-less router must leave global routers intact."""
        tenant_id = _uuid()
        with self.network(tenant_id=tenant_id) as n_external:
            res = self._create_subnet(self.fmt, n_external['network']['id'],
                                      cidr='10.0.1.0/24', tenant_id=tenant_id)
            s = self.deserialize(self.fmt, res)
            self._set_net_external(s['subnet']['network_id'])
            ext_gw = {'network_id': s['subnet']['network_id']}
            # r1 has no gateway, r2 has one
            with self.router(tenant_id=tenant_id,
                             set_context=set_context) as router1,\
                    self.router(name='router2', tenant_id=tenant_id,
                                external_gateway_info=ext_gw,
                                set_context=set_context) as router2:
                r1 = router1['router']
                r2 = router2['router']
                # make sure we have only two eligible hosting devices
                # in this test
                qp = "template_id=00000000-0000-0000-0000-000000000005"
                hds = self._list('hosting_devices', query_params=qp)
                self._delete('hosting_devices',
                             hds['hosting_devices'][1]['id'])
                self.plugin._process_backlogged_routers()
                self._verify_ha_created_routers([r1['id'], r2['id']],
                                                has_gw=[False, True])
                self._delete('routers', r1['id'])
                # should still have two global routers, we verify using r2
                self._verify_ha_updated_router(r2['id'])
                self._delete('routers', r2['id'])
                # should have no global routers now
                self._verify_ha_updated_router(None)
class L3CfgAgentAsr1kRouterTypeDriverTestCase(
    cisco_test_case.L3RoutertypeAwareHostingDeviceSchedulerTestCaseBase,
        cisco_ha_test.HAL3RouterTestsMixin):
    """Tests of the sync data the L3 cfg agent receives for global routers."""

    # marks this test case as exercising the HA code paths
    _is_ha_tests = True

    def setUp(self, core_plugin=None, l3_plugin=None, dm_plugin=None,
              ext_mgr=None):
        """Set up with the HA L3 plugin and redirect get_sync_data to the
        extended variant so tests exercise the HA-aware sync payload."""
        if l3_plugin is None:
            l3_plugin = cisco_test_case.HA_L3_PLUGIN_KLASS
        if ext_mgr is None:
            ext_mgr = (cisco_test_case.
                       TestHASchedulingL3RouterApplianceExtensionManager())
        cfg.CONF.set_override('default_ha_redundancy_level', 1, group='ha')
        super(L3CfgAgentAsr1kRouterTypeDriverTestCase, self).setUp(
            l3_plugin=l3_plugin, ext_mgr=ext_mgr)
        # remember the original so tearDown can restore it
        self.orig_get_sync_data = self.plugin.get_sync_data
        self.plugin.get_sync_data = self.plugin.get_sync_data_ext

    def tearDown(self):
        # undo the get_sync_data redirection done in setUp
        self.plugin.get_sync_data = self.orig_get_sync_data
        super(L3CfgAgentAsr1kRouterTypeDriverTestCase, self).tearDown()

    def _verify_sync_data(self, context, ids_colocated_routers, g_l_rtr,
                          g_l_rtr_rr_ids, ha_settings):
        """Verify the extended sync data for a pair of co-located routers.

        The pair is a tenant (or redundancy) router and the global router on
        the same hosting device; g_l_rtr is the logical global router whose
        gateway port provides the VIP address.
        """
        routers = self.plugin.get_sync_data_ext(context,
                                                ids_colocated_routers)
        self.assertEqual(len(routers), 2)
        global_router = [r for r in routers if
                         r[ROUTER_ROLE_ATTR] == ROUTER_ROLE_GLOBAL][0]
        # verify that global router has HA information from logical
        # global router, in particular VIP address for the gw port
        # comes from the gw port of the logical global router
        ha_info = global_router['gw_port']['ha_info']
        ha_port_id = ha_info['ha_port']['id']
        vip_address = g_l_rtr[l3.EXTERNAL_GW_INFO][
            'external_fixed_ips'][0]['ip_address']
        self.assertEqual(
            ha_info['ha_port']['fixed_ips'][0]['ip_address'],
            vip_address)
        # the HA (VIP) port must be distinct from the global router's own
        # gateway port
        self.assertEqual(global_router['gw_port_id'] == ha_port_id,
                         False)
        self._verify_ha_settings(global_router, ha_settings)
        rr_info_list = global_router[ha.DETAILS][ha.REDUNDANCY_ROUTERS]
        self.assertEqual(len(rr_info_list), len(g_l_rtr_rr_ids))
        for rr_info in rr_info_list:
            self.assertIn(rr_info['id'], g_l_rtr_rr_ids)

    def test_l3_cfg_agent_query_global_router_info(self):
        """End-to-end check of global/logical-global router sync payloads."""
        with self.subnet(cidr='10.0.1.0/24') as s_ext:
            self._set_net_external(s_ext['subnet']['network_id'])
            ext_gw = {'network_id': s_ext['subnet']['network_id']}
            with self.router(external_gateway_info=ext_gw) as router:
                r = router['router']
                self.plugin._process_backlogged_routers()
                r_after = self._show('routers', r['id'])['router']
                hd_id = r_after[HOSTING_DEVICE_ATTR]
                id_r_ha_backup = r_after[ha.DETAILS][
                    ha.REDUNDANCY_ROUTERS][0]['id']
                r_ha_backup_after = self._show('routers',
                                               id_r_ha_backup)['router']
                ha_backup_hd_id = r_ha_backup_after[HOSTING_DEVICE_ATTR]
                # logical global router for global routers HA
                q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_LOGICAL_GLOBAL)
                g_l_rtrs = self._list('routers', query_params=q_p)['routers']
                # should be only one logical global router
                self.assertEqual(len(g_l_rtrs), 1)
                g_l_rtr = g_l_rtrs[0]
                g_l_rtr_rr_ids = {r_info['id'] for r_info in g_l_rtr[
                    ha.DETAILS][ha.REDUNDANCY_ROUTERS]}
                self.assertEqual(g_l_rtr[ha.ENABLED], True)
                self.assertEqual(g_l_rtr[routertype.TYPE_ATTR],
                                 r[routertype.TYPE_ATTR])
                # no auto-scheduling to ensure logical global router is never
                # instantiated (unless an admin does some bad thing...)
                self.assertEqual(g_l_rtr[AUTO_SCHEDULE_ATTR], False)
                # global router on hosting devices, keyed by hosting device id
                q_p = '%s=%s' % (ROUTER_ROLE_ATTR, ROUTER_ROLE_GLOBAL)
                g_rtrs = {g_r[HOSTING_DEVICE_ATTR]: g_r for g_r in self._list(
                    'routers', query_params=q_p)['routers']}
                self.assertEqual(len(g_rtrs), 2)
                for g_r in g_rtrs.values():
                    self.assertEqual(g_r[routertype.TYPE_ATTR],
                                     r[routertype.TYPE_ATTR])
                    # global routers should have HA disabled in db
                    self.assertEqual(g_r[ha.ENABLED], False)
                    # global routers should never be auto-scheduled as that
                    # can result in them being moved to another hosting device
                    self.assertEqual(g_r[AUTO_SCHEDULE_ATTR], False)
                    # global router should be redundancy router of the logical
                    # global router for this router type
                    self.assertIn(g_r['id'], g_l_rtr_rr_ids)
                e_context = context.get_admin_context()
                # global routers should here have HA setup information from
                # the logical global router
                ha_settings = self._get_ha_defaults(
                    ha_type=cfg.CONF.ha.default_ha_mechanism,
                    redundancy_level=2, priority=ha_db.DEFAULT_MASTER_PRIORITY)
                # verify global router co-located with the user visible router
                ids_colocated_routers = [r['id'], g_rtrs[hd_id]['id']]
                self._verify_sync_data(e_context, ids_colocated_routers,
                                       g_l_rtr, g_l_rtr_rr_ids, ha_settings)
                # verify global router co-located with the ha backup
                # router of the user visible router
                ids_colocated_routers = [r_ha_backup_after['id'],
                                         g_rtrs[ha_backup_hd_id]['id']]
                self._verify_sync_data(e_context, ids_colocated_routers,
                                       g_l_rtr, g_l_rtr_rr_ids, ha_settings)
| StarcoderdataPython |
3339183 | <filename>core/migrations/0019_auto_20181106_2345.py
# Generated by Django 2.1.3 on 2018-11-06 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add ``nfc_dev_id`` and ``password`` fields to the Staff model.

    NOTE(review): ``password`` is stored as a plain CharField here; confirm
    the application hashes it before save, since a raw-text password column
    is a security risk.
    """

    dependencies = [
        ('core', '0018_auto_20181106_2343'),
    ]

    operations = [
        migrations.AddField(
            model_name='staff',
            name='nfc_dev_id',
            # default mirrors an unset 4-byte NFC id rendered as hex pairs
            field=models.CharField(default='00 00 00 00', max_length=254),
        ),
        migrations.AddField(
            model_name='staff',
            name='password',
            field=models.CharField(max_length=254, null=True),
        ),
    ]
| StarcoderdataPython |
1778050 | <filename>exercicio 031.py<gh_stars>1-10
# Ask the user for a trip distance in km and quote the ticket price:
# R$0.50 per km for trips up to 200 km, R$0.45 per km for longer trips.
dist = float(input('Qual a distancia em km a percorrer? '))
if dist <= 200:
    print('O valor da passagem será R$ {}'.format(dist * 0.50))
else:
    print('O valor da passagem será R${}'.format(dist * 0.45))
| StarcoderdataPython |
905 | from typing import Dict, Optional, List, Any
import torch
import torch.nn.functional as F
from allennlp.data import Vocabulary
from allennlp.models.model import Model
from allennlp.modules import FeedForward, TextFieldEmbedder, Seq2SeqEncoder
from allennlp.nn import InitializerApplicator, RegularizerApplicator
from allennlp.nn import util
from allennlp.training.metrics import CategoricalAccuracy, F1Measure
from overrides import overrides
@Model.register("text_classifier")
class TextClassifier(Model):
    """
    Implements a basic text classifier:
    1) Embed tokens using `text_field_embedder`
    2) Seq2SeqEncoder, e.g. BiLSTM
    3) Append the first and last encoder states
    4) Final feedforward layer
    Optimized with CrossEntropyLoss.  Evaluated with CategoricalAccuracy & F1.
    """
    def __init__(self, vocab: Vocabulary,
                 text_field_embedder: TextFieldEmbedder,
                 text_encoder: Seq2SeqEncoder,
                 classifier_feedforward: FeedForward,
                 verbose_metrics: bool,
                 initializer: InitializerApplicator = InitializerApplicator(),
                 regularizer: Optional[RegularizerApplicator] = None,
                 ) -> None:
        super(TextClassifier, self).__init__(vocab, regularizer)

        self.text_field_embedder = text_field_embedder
        # number of output classes comes from the "labels" vocab namespace
        self.num_classes = self.vocab.get_vocab_size("labels")
        self.text_encoder = text_encoder
        self.classifier_feedforward = classifier_feedforward
        # projects the feedforward output to per-class logits
        self.prediction_layer = torch.nn.Linear(self.classifier_feedforward.get_output_dim() , self.num_classes)

        self.label_accuracy = CategoricalAccuracy()
        # one F1 metric per label, keyed by the label's token string
        self.label_f1_metrics = {}
        # if True, get_metrics reports per-label P/R/F1 as well
        self.verbose_metrics = verbose_metrics
        for i in range(self.num_classes):
            self.label_f1_metrics[vocab.get_token_from_index(index=i, namespace="labels")] = F1Measure(positive_label=i)
        self.loss = torch.nn.CrossEntropyLoss()

        # pooling: concatenation of the final forward and backward encoder
        # states (assumes a bidirectional encoder)
        self.pool = lambda text, mask: util.get_final_encoder_states(text, mask, bidirectional=True)

        initializer(self)

    @overrides
    def forward(self,
                text: Dict[str, torch.LongTensor],
                label: torch.IntTensor = None,
                metadata: List[Dict[str, Any]] = None) -> Dict[str, torch.Tensor]:
        """
        Parameters
        ----------
        text : Dict[str, torch.LongTensor]
            From a ``TextField``
        label : torch.IntTensor, optional (default = None)
            From a ``LabelField``
        metadata : ``List[Dict[str, Any]]``, optional, (default = None)
            Metadata containing the original tokenization of the premise and
            hypothesis with 'premise_tokens' and 'hypothesis_tokens' keys respectively.
        Returns
        -------
        An output dictionary consisting of:
        label_logits : torch.FloatTensor
            A tensor of shape ``(batch_size, num_labels)`` representing unnormalised log probabilities of the label.
        label_probs : torch.FloatTensor
            A tensor of shape ``(batch_size, num_labels)`` representing probabilities of the label.
        loss : torch.FloatTensor, optional
            A scalar loss to be optimised.
        """
        # embed -> encode -> pool final states -> feedforward -> logits
        embedded_text = self.text_field_embedder(text)
        mask = util.get_text_field_mask(text)
        encoded_text = self.text_encoder(embedded_text, mask)
        pooled = self.pool(encoded_text, mask)
        ff_hidden = self.classifier_feedforward(pooled)
        logits = self.prediction_layer(ff_hidden)
        class_probs = F.softmax(logits, dim=1)

        output_dict = {"logits": logits}
        if label is not None:
            loss = self.loss(logits, label)
            output_dict["loss"] = loss

            # compute F1 per label
            for i in range(self.num_classes):
                metric = self.label_f1_metrics[self.vocab.get_token_from_index(index=i, namespace="labels")]
                metric(class_probs, label)
            self.label_accuracy(logits, label)

        return output_dict

    @overrides
    def decode(self, output_dict: Dict[str, torch.Tensor]) -> Dict[str, torch.Tensor]:
        # attach normalised class probabilities for human-readable output
        class_probabilities = F.softmax(output_dict['logits'], dim=-1)
        output_dict['class_probs'] = class_probabilities
        return output_dict

    def get_metrics(self, reset: bool = False) -> Dict[str, float]:
        """Return accuracy and macro-averaged F1 (plus per-label P/R/F1
        when ``verbose_metrics`` is set)."""
        metric_dict = {}

        sum_f1 = 0.0
        for name, metric in self.label_f1_metrics.items():
            metric_val = metric.get_metric(reset)
            if self.verbose_metrics:
                metric_dict[name + '_P'] = metric_val[0]
                metric_dict[name + '_R'] = metric_val[1]
                metric_dict[name + '_F1'] = metric_val[2]
            sum_f1 += metric_val[2]

        names = list(self.label_f1_metrics.keys())
        total_len = len(names)
        average_f1 = sum_f1 / total_len
        metric_dict['average_F1'] = average_f1
        metric_dict['accuracy'] = self.label_accuracy.get_metric(reset)
        return metric_dict
| StarcoderdataPython |
80644 | import json
import os
import time
import urllib.parse
import urllib.request
import pandas as pd
import pathlib
# Root directory of the project (the parent of this script's directory)
root_dir = pathlib.Path(__file__).parent.parent
# Load the list of prefectures
c = os.path.join(root_dir, 'codes/preflist.json')
with open(c) as j:
    prefList = json.load(j)
# Load the list of municipalities (cities/wards/towns/villages)
c = os.path.join(root_dir, 'codes/citylist.json')
with open(c) as j:
    cityList = json.load(j)
# Read the e-Stat application id (ESTAT_APPID) from the environment / .env
from dotenv import load_dotenv
load_dotenv()
ESTAT_APPID = os.getenv('ESTAT_APPID')
#estatAPIのパラメータセット(都道府県)
def setEstatParams(params,type):
#appIdの設定
p = {'appId':ESTAT_APPID}
# params['appId']=ESTAT_APPID
#cdAreaの設定
if type == 'prefecture' or type == 'prefectureRank':
prefCodes = [d.get('prefCode') for d in prefList['result']]
prefCodesStr = [f'{n:02}'+'000' for n in prefCodes]
# print(prefCodesStr)
p['cdArea'] = ','.join(prefCodesStr)
if type == 'city' or type == 'cityRank':
cityCodes = [d.get('cityCode') for d in cityList['result']]
p['cdArea'] = ','.join(cityCodes)
# print(cityCodes)
#statsDataIdの設定
p['statsDataId'] = params['statsDataId']
if('cdCat01' in params):
p['cdCat01'] = ','.join([d for d in params['cdCat01']])
return p
# Fetch and decode a response from the e-Stat API.
def getEstatAPIResponse(params):
    """GET getStatsData from the e-Stat API and return the parsed JSON.

    *params* is a dict of query parameters (as built by setEstatParams);
    it is URL-encoded onto the endpoint. Network errors propagate to the
    caller as urllib exceptions.
    """
    url = 'http://api.e-stat.go.jp/rest/2.1/app/json/getStatsData?'
    url += urllib.parse.urlencode(params)
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read().decode())
def saveJson(data, downloadPath, **kwargs):
    """Serialize *data* as JSON to *downloadPath*.

    Extra keyword arguments (e.g. ``indent``, ``ensure_ascii``) are
    forwarded to :func:`json.dump`. Logs the target path before writing.
    """
    print('...Saving ' + downloadPath)
    with open(downloadPath, 'w') as out_file:
        json.dump(data, out_file, **kwargs)
| StarcoderdataPython |
123845 | import requests
import os
import json
import pandas as pd
# NOTE(review): hardcoded absolute Windows path — this script only runs on
# the author's machine; consider deriving it from __file__ instead.
os.chdir("D:\\SynologyDrive\Programming\AlpacaTradingBot")
# Alpaca paper-trading REST endpoint
endpoint = "https://paper-api.alpaca.markets"
# API credentials loaded as a JSON dict of HTTP headers from key.txt
headers = json.loads(open("key.txt", 'r').read())
# https://alpaca.markets/docs/api-documentation/api-v2/orders/
def market_order(symbol, quantity, side="buy", tif="day"):
    """Submit a market order via the Alpaca v2 orders endpoint.

    side: 'buy' or 'sell'.
    tif (time in force): 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'.
    Returns the decoded JSON response from the API.
    """
    payload = {
        "symbol": symbol,
        "qty": quantity,
        "side": side,
        "type": "market",
        "time_in_force": tif,
    }
    response = requests.post(endpoint + "/v2/orders", headers=headers,
                             json=payload)
    return response.json()
def limit_order(symbol, quantity, limit_price, side="buy", tif="day"):
    """Submit a limit order: fills only at *limit_price* or better.

    Returns the decoded JSON response from the Alpaca API.
    """
    order_url = endpoint + "/v2/orders"
    params = {"symbol" : symbol,
              "qty" : quantity, # Number of shares to trade
              "side" : side, # 'buy' or 'sell'
              "type" : "limit", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
              "limit_price" : limit_price, # price to go in
              "time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
              }
    r = requests.post(order_url, headers=headers, json=params)
    return r.json()
def stop_order(symbol, quantity, stop_price, side="buy", tif="day"):
    """Submit a stop order: becomes a market order once *stop_price* trades.

    Returns the decoded JSON response from the Alpaca API.
    """
    order_url = endpoint + "/v2/orders"
    params = {"symbol" : symbol,
              "qty" : quantity, # Number of shares to trade
              "side" : side, # 'buy' or 'sell'
              "type" : "stop", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
              "stop_price" : stop_price,
              "time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
              }
    r = requests.post(order_url, headers=headers, json=params)
    return r.json()
def stop_limit_order(symbol, quantity, stop_price, limit_price, side="buy", tif="day"):
    """Submit a stop-limit order: a limit order is placed at *limit_price*
    once *stop_price* trades.

    Returns the decoded JSON response from the Alpaca API.
    """
    order_url = endpoint + "/v2/orders"
    params = {"symbol" : symbol,
              "qty" : quantity, # Number of shares to trade
              "side" : side, # 'buy' or 'sell'
              "type" : "stop_limit", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
              "stop_price" : stop_price,
              "limit_price" : limit_price,
              "time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
              }
    r = requests.post(order_url, headers=headers, json=params)
    return r.json()
def trail_stop_order(symbol, quantity, trail_price, side="buy", tif="day"):
    """Submit a trailing-stop order that follows the price by *trail_price*
    dollars.

    Returns the decoded JSON response from the Alpaca API.
    """
    order_url = endpoint + "/v2/orders"
    params = {"symbol" : symbol,
              "qty" : quantity, # Number of shares to trade
              "side" : side, # 'buy' or 'sell'
              "type" : "trailing_stop", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
              "trail_price" : trail_price,
              "time_in_force" : tif # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
              }
    r = requests.post(order_url, headers=headers, json=params)
    return r.json()
def bracket_order(symbol, quantity, tp_limit_price, sl_stop_price, sl_limit_price, side="buy", tif="day"):
    """Submit a bracket order: a market entry plus an attached take-profit
    limit order and a stop-loss (stop + limit) exit pair.

    Returns the decoded JSON response from the Alpaca API.
    """
    order_url = endpoint + "/v2/orders"
    params = {"symbol" : symbol,
              "qty" : quantity, # Number of shares to trade
              "side" : side, # 'buy' or 'sell'
              "type" : "market", # 'market', 'limit', 'stop', 'stop_limit', 'trailing_stop'
              "time_in_force" : tif, # 'day', 'gtc', 'opg', 'cls', 'ioc' or 'fok'
              "order_class" : "bracket",
              "take_profit" : {"limit_price": tp_limit_price},
              "stop_loss" : {"stop_price" : sl_stop_price,
                             "limit_price": sl_limit_price
                             }
              }
    r = requests.post(order_url, headers=headers, json=params)
    return r.json()
def order_list(status="open", limit=50):
    """
    Retrieves a list of orders for the account, filtered by the supplied query parameters.

    Returns the orders as a pandas DataFrame.
    NOTE(review): the *limit* parameter is accepted but never sent to the
    API — confirm whether it should be added to *params*.
    """
    order_list_url = endpoint + "/v2/orders"
    params = {"status": status}
    r = requests.get(order_list_url, headers=headers, params=params)
    data = r.json()
    return pd.DataFrame(data)
def order_cancel(order_id=""):
    """Cancel one order (by id) or, when *order_id* is empty, all open orders.

    Returns the decoded JSON response from the Alpaca API.
    """
    if order_id:
        # Cancel the specific order.
        # Bug fix: the original tested ``len(order_id) > 1``, which would
        # silently cancel *all* orders for a one-character id; truthiness
        # matches the documented "empty means cancel-all" contract.
        order_cancel_url = endpoint + "/v2/orders/{}".format(order_id)
    else:
        # Cancel all open orders
        order_cancel_url = endpoint + "/v2/orders"
    r = requests.delete(order_cancel_url, headers=headers)
    return r.json()
# order_df = order_list()
# order_cancel(order_df[order_df["symbol"]=="CSCO"]["id"].to_list()[0])
def order_replace(order_id, params):
    """Replace (PATCH) an existing order's attributes, e.g. qty or trail.

    *params* is the JSON body of updated fields. Returns the decoded JSON
    response from the Alpaca API.
    """
    order_cancel_url = endpoint + "/v2/orders/{}".format(order_id)
    r = requests.patch(order_cancel_url, headers=headers, json=params)
    return r.json()
# order_replace(order_df[order_df["symbol"]=="CSCO"]["id"].to_list()[0],
# {"qty": 10, "trail": 3}) | StarcoderdataPython |
3218919 | # Generated by Django 3.2.7 on 2021-10-08 02:06
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Replace the BuyTransaction/SellTransaction models with a single
    Transaction model carrying a ``transaction_type`` discriminator."""

    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('cryptkeeper', '0002_auto_20211008_0156'),
    ]

    operations = [
        migrations.CreateModel(
            name='Transaction',
            fields=[
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                # 'Buy' or 'Sell' — replaces the two separate models
                ('transaction_type', models.CharField(choices=[('Buy', 'Buy'), ('Sell', 'Sell')], max_length=4)),
                ('asset_symbol', models.CharField(max_length=50)),
                ('usd_price_of_asset', models.DecimalField(decimal_places=8, max_digits=19)),
                ('datetime_of_transaction', models.DateTimeField()),
                ('quantity_of_transaction', models.DecimalField(decimal_places=8, max_digits=19)),
                ('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
            ],
        ),
        # drop the FK first so SellTransaction can be deleted cleanly
        migrations.RemoveField(
            model_name='selltransaction',
            name='user',
        ),
        migrations.DeleteModel(
            name='BuyTransaction',
        ),
        migrations.DeleteModel(
            name='SellTransaction',
        ),
    ]
| StarcoderdataPython |
32581 | <gh_stars>0
import numpy as np
from collections import deque
QLEN = 8
class Line(object):
    """Tracks one lane line across video frames (lane-detection pipeline).

    Keeps the last QLEN polynomial fits and fitted x-values in deques and
    exposes smoothed (averaged) versions for rendering.
    from #2.Tips and Tricks for the Project
    """
    def __init__(self, yp=None, xp=None):
        # meters-per-pixel conversion factors in y and x
        self.ym_per_pix = yp
        self.xm_per_pix = xp
        # self.frame_shape = fs
        # was the line detected in the last iteration?
        self.detected = False
        # --- self.left_fitx & self.right_fitx
        # x values of the last n fits of the line
        # self.recent_xfitted = []
        self.recent_xfitted = deque(maxlen=QLEN)
        #average x values of the fitted line over the last n iterations
        self.bestx = None
        # --- using polyfit self.lfit & self.rfit
        #polynomial coefficients averaged over the last n iterations
        self.best_fit = None
        self.best_fitQ = deque(maxlen=QLEN)
        #difference in fit coefficients between last and new fits
        self.diffs = np.array([0,0,0], dtype='float')
        self.diffs_prev = np.array([0,0,0], dtype='float')
        # --- values for a given frame, may not be used ----
        # --- polyfit self.lfit & self.rfit
        #polynomial coefficients for the most recent fit
        self.current_fit = [np.array([False])]
        #radius of curvature of the line in some units
        self.radius_of_curvature = None # m
        #distance in meters of line from edge
        self.line_base_pos = None
        # --- leftx ?
        #x values for detected line pixels
        self.allx = None
        # --- lefty ?
        #y values for detected line pixels
        self.ally = None
        # x values of the most recent polynomial evaluated per row
        self.line_fitx = None

    # ---
    def add_all_pixels(self, ax, ay, warp_shape):
        """Store the detected pixel coordinates and fit a polynomial.

        Marks the line as not detected when either coordinate array is
        empty; otherwise delegates to _fit_line_polynomial.
        """
        self.allx = ax
        self.ally = ay
        # print("add_all_pixels-shape:{}".format(warp_shape))
        if ((len(self.ally) == 0) or (len(self.allx) == 0)):
            self.detected = False
        else:
            self._fit_line_polynomial(warp_shape)

    def use_starting_values(self):
        """ Starting values are used for the first frame and to realign the values
        when a detected position goes off track
        """
        self.detected = True
        self.recent_xfitted.append(self.line_fitx)
        self.bestx = self.line_fitx
        self.best_fit = self.current_fit
        self.best_fitQ.append(self.current_fit)
        # print("use_starting_values-best_fit:{}".format(self.best_fit))

    def use_staged_values(self):
        """ Staged values are typically used for most frames. It takes the 'temporary'
        values calculated by _fit_line_polynomial() and updates the line deque's and the
        averaged values.
        """
        # self.detected = True
        # self.detected = True
        self.recent_xfitted.append(self.line_fitx)
        # NOTE: the deques are created with maxlen=QLEN, so the explicit
        # popleft() below can never trigger (append already evicts).
        if (len(self.recent_xfitted)> QLEN):
            self.recent_xfitted.popleft()
        # self.bestx = self.line_fitx
        self.bestx = np.mean(self.recent_xfitted, axis=0)
        self.best_fitQ.append(self.current_fit)
        if (len(self.best_fitQ)> QLEN):
            self.best_fitQ.popleft()
        self.best_fit = np.mean(self.best_fitQ, axis=0)
        # print("\n{:.2f}:current_fit:{}".format(self.line_base_pos, self.current_fit))
        # print("{:.2f}:best_fit:{}".format(self.line_base_pos, self.best_fit))
        a = self.best_fitQ[0]
        b = self.best_fitQ[-1]
        # NOTE(review): this writes ``self.diff`` while __init__ declares
        # ``self.diffs`` — confirm which attribute consumers read.
        self.diff = np.polysub(a, b)
        self.diffs_prev = np.polysub(self.best_fitQ[-2], self.current_fit)
        # print("{:.2f}:diff:{}".format(self.line_base_pos, self.diff))
        # print("len:{}".format(len(self.best_fitQ)))

    def discard_staged_values(self):
        # reject the current frame's fit; averaged values stay untouched
        self.detected = False

    def _fit_line_polynomial(self, frame_shape):
        """ from lesson 9.4
        Combined the polyfit(), lines, curves and other calculations into this
        single method as all the necessary data was right here.

        Fits x = a*y^2 + b*y + c through (allx, ally), evaluates it for
        every image row, and derives the curvature radius and base position
        in meters via the per-pixel conversion factors.
        """
        # --- coefficients of line
        line_fit = np.polyfit(self.ally, self.allx, 2)
        _ploty = np.linspace(0, frame_shape[0]-1, frame_shape[0])
        try:
            # x points of line
            line_fitx = line_fit[0]*_ploty**2 + line_fit[1]*_ploty + line_fit[2]
            x_intercept = line_fit[0]*frame_shape[0]**2 + line_fit[1]*frame_shape[0] + line_fit[2]
        except TypeError:
            # fall back to a dummy curve when the fit produced no usable
            # coefficients
            line_fitx = 1*_ploty**2 + 1*_ploty
            x_intercept = 0
        # --- curvature recalculate to convert from pixels to meters--
        y_eval = np.max(_ploty)*self.ym_per_pix # convert from p to m
        line_fit_m = np.polyfit((self.ally*self.ym_per_pix), (self.allx*self.xm_per_pix), 2) # convert from p to m
        radius_curve = (np.sqrt (np.power((1+((2*line_fit_m[0]*y_eval)+(line_fit_m[1] ))**2), 3))) / abs(2*((line_fit_m[0])))
        self.line_base_pos = x_intercept * self.xm_per_pix # np.max(line_fitx) # abs(((frame_shape[1]/2)-(np.max(line_fitx)))) * self.xm_per_pix
        self.current_fit = line_fit
        self.radius_of_curvature = radius_curve # (radius_curve * self.ym_per_pix)
        self.line_fitx = line_fitx
| StarcoderdataPython |
3336061 | import pytest
from distributed.protocol import serialize, deserialize
import pandas as pd
import numpy as np
@pytest.mark.parametrize("collection", [tuple, dict, list])
@pytest.mark.parametrize(
    "y,y_serializer",
    [
        (np.arange(50), "dask"),
        (pd.DataFrame({"C": ["a", "b", None], "D": [2.5, 3.5, 4.5]}), "pickle"),
        (None, "pickle"),
    ],
)
def test_serialize_collection(collection, y, y_serializer):
    """Round-trip a collection through serialize/deserialize and check
    that each element used the expected serializer."""
    x = np.arange(100)
    # dicts are built with string keys; sequences just pair (x, y)
    if issubclass(collection, dict):
        header, frames = serialize({"x": x, "y": y}, serializers=("dask", "pickle"))
    else:
        header, frames = serialize(collection((x, y)), serializers=("dask", "pickle"))
    t = deserialize(header, frames, deserializers=("dask", "pickle", "error"))
    assert isinstance(t, collection)
    assert header["is-collection"] is True
    sub_headers = header["sub-headers"]

    # sub-header ordering is positional, so it is only checked for
    # sequence collections (dict iteration order is not asserted here)
    if collection is not dict:
        assert sub_headers[0]["serializer"] == "dask"
        assert sub_headers[1]["serializer"] == y_serializer

    # compare y via str() so DataFrame/None/array all compare uniformly
    if collection is dict:
        assert (t["x"] == x).all()
        assert str(t["y"]) == str(y)
    else:
        assert (t[0] == x).all()
        assert str(t[1]) == str(y)
def test_large_collections_serialize_simply(self):
    """A large plain collection must serialize into a single frame
    rather than one frame per element."""
    header, frames = serialize(tuple(range(1000)))
    assert len(frames) == 1
def test_nested_types():
    """A numpy array nested inside lists must still use the dask
    serializer and share its buffer (zero-copy) with the frames."""
    x = np.ones(5)
    header, frames = serialize([[[x]]])
    assert "dask" in str(header)
    assert len(frames) == 1
    # the frame holds the array's own memory, not a copy
    assert x.data in frames
| StarcoderdataPython |
3381642 |
from django.apps import AppConfig
# Default AppConfig used when Django loads this package
default_app_config = 'leonardo_hijack.Config'

# Load ordering of this Leonardo module relative to others
LEONARDO_ORDERING = -500

# Django apps this Leonardo module pulls in
LEONARDO_APPS = [
    'leonardo_hijack',
    "hijack",
    "hijack_admin",
    "compat"
]

# LEONARDO_MIDDLEWARES = [
#    'django.contrib.auth.middleware.AuthenticationMiddleware',
#    'hijack.middleware.HijackRemoteUserMiddleware',
#    'django.contrib.auth.middleware.RemoteUserMiddleware',
# ]

# Extra stylesheets injected by this module
LEONARDO_CSS_FILES = [
    'hijack/leonardo-hijack-styles.css'
]


class Config(AppConfig):
    """AppConfig for the leonardo-hijack integration module."""
    name = 'leonardo_hijack'
    verbose_name = "leonardo-hijack"
| StarcoderdataPython |
3265870 | <gh_stars>0
"""Classify the typegroup of a single text-line image.

Usage: python3 <script> input-textline.jpg
Prints a dict mapping typegroup name to softmax probability.
"""
import os
import sys
from math import exp

from PIL import Image

sys.path.append("../ocrd_typegroups_classifier")
from ocrd_typegroups_classifier.typegroups_classifier import TypegroupsClassifier

if len(sys.argv) != 2:
    print('Syntax: python3 %s input-textline.jpg' % sys.argv[0])
    # Bug fix: exit on bad usage instead of falling through.
    sys.exit(1)

# Bug fix: read the image path from the command line (as the usage message
# advertises) rather than from stdin via input().
img = Image.open(sys.argv[1])
tgc = TypegroupsClassifier.load(os.path.join('ocrd_typegroups_classifier',
                                             'models', 'classifier.tgc'))
result = tgc.classify(img, 75, 64, False)

# Normalize the raw scores into a probability distribution (softmax).
esum = sum(exp(result[key]) for key in result)
for key in result:
    result[key] = exp(result[key]) / esum
print(result)
| StarcoderdataPython |
26046 | #!/usr/bin/env python
# encoding: utf-8
# The MIT License
#
# Copyright (c) 2011 Wyss Institute at Harvard University
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# http://www.opensource.org/licenses/mit-license.php
from urllib2 import Request, urlopen, URLError, HTTPError
import sys
from os.path import basename, dirname, splitext, exists
import os
import shutil
import tarfile
def fetchFile(filename, baseurl, filemode='b', filetype='gz', filepath=None):
"""
filepath - is optional file path location to store the fetched file
"""
# create the url and the request
url = baseurl + '/' + filename
request = Request(url)
# Open the url
try:
f_url = urlopen(request)
print "downloading " + url
# Open our local file for writing
f_dest = open(filename, "w" + filemode)
# Write to our local file
f_dest.write(f_url.read())
f_dest.close()
# handle errors
except HTTPError, e:
print "HTTP Error:", e.code , url
except URLError, e:
print "URL Error:", e.reason , url
filename_out = filename
# unzip if possible
if filetype == 'gz':
# get the extracted folder name
filename_out = splitext(filename)[0]
temp = splitext(filename_out)
if temp[1] == '.tar':
filename_out = temp[0]
# open the archive
try:
f_zip= tarfile.open(filename, mode='r')
except tarfile.ReadError, e:
print "unable to read archive", e.code
print "extracting " + filename_out
try:
if filepath:
# remove existing folder
if os.path.exists(filepath + '/' + filename_out):
print "file exists"
shutil.rmtree(filepath + '/' + filename_out)
else:
print "file does not exist", filename_out
f_zip.extractall(path=filepath)
else:
# remove existing folder
if os.path.exists(filename_out):
print "file exists"
shutil.rmtree(filename_out)
else:
print "file does not exist", filename_out
f_zip.extractall()
except tarfile.ExtractError, e:
print "unable to extract archive", e.code
f_zip.close()
# remove the archive
print "removing the downloaded archive", filename
os.remove(filename)
print "done"
return filename_out
# end def
if __name__ == '__main__':
    # CLI usage: <script> <url> — the url is split into its file name and
    # base directory before being handed to fetchFile.
    argv = sys.argv
    url = argv[1]
    filename = basename(url)
    base_url = dirname(url)
    fetchFile(filename, base_url)
| StarcoderdataPython |
4800433 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010 OpenStack LLC.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova import exception
def limited(items, req):
"""Return a slice of items according to requested offset and limit.
items - a sliceable
req - wobob.Request possibly containing offset and limit GET variables.
offset is where to start in the list, and limit is the maximum number
of items to return.
If limit is not specified, 0, or > 1000, defaults to 1000.
"""
offset = int(req.GET.get('offset', 0))
limit = int(req.GET.get('limit', 0))
if not limit:
limit = 1000
limit = min(1000, limit)
range_end = offset + limit
return items[offset:range_end]
def get_image_id_from_image_hash(image_service, context, image_hash):
"""Given an Image ID Hash, return an objectstore Image ID.
image_service - reference to objectstore compatible image service.
context - security context for image service requests.
image_hash - hash of the image ID.
"""
# FIX(sandy): This is terribly inefficient. It pulls all images
# from objectstore in order to find the match. ObjectStore
# should have a numeric counterpart to the string ID.
try:
items = image_service.detail(context)
except NotImplementedError:
items = image_service.index(context)
for image in items:
image_id = image['id']
if abs(hash(image_id)) == int(image_hash):
return image_id
raise exception.NotFound(image_hash)
| StarcoderdataPython |
91389 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This software is licensed as described in the file LICENSE, which
# you should have received as part of this distribution.
import unittest2 as unittest
import mock
import os
class CompressTest(unittest.TestCase):
def test_compress_js(self):
from couchapp.config import Config
config = Config()
config.conf['compress'] = {'js': {'foo':['shows/example-show.js']}}
with mock.patch('couchapp.hooks.compress.default.compress', return_value='foo') as mock_compress:
from couchapp.hooks.compress import Compress
compress = Compress(os.path.join(os.path.dirname(__file__), 'testapp'))
compress.conf = config
with mock.patch('couchapp.util.write'):
compress.run()
self.assertTrue(mock_compress.called, 'Default compressor has been called')
def test_our_jsmin_loading(self):
orig_import = __import__
def import_mock(name, *args):
if name == 'jsmin':
raise ImportError()
return orig_import(name, *args)
with mock.patch('__builtin__.__import__', side_effect=import_mock):
with mock.patch('couchapp.hooks.compress.jsmin.jsmin', return_value='foo'):
from couchapp.hooks.compress import default
result = default.compress('bar')
self.assertEqual(result, 'foo', 'Our module is called when it is not installed in the system')
def test_system_jsmin_loading(self):
orig_import = __import__
def import_mock(name, *args):
if name == 'couchapp.hooks.compress.jsmin':
raise ImportError()
return orig_import(name, *args)
with mock.patch('__builtin__.__import__', side_effect=import_mock):
with mock.patch('jsmin.jsmin', return_value='foo'):
from couchapp.hooks.compress import default
result = default.compress('bar')
self.assertEqual(result, 'foo', 'The system module is called when it is installed')
if __name__ == "__main__":
# import sys;sys.argv = ['', 'Test.testName']
unittest.main()
| StarcoderdataPython |
1720358 | from base import Base
from sqlalchemy import Column, Integer, String, Boolean
from sqlalchemy.orm import relationship
class Run(Base):
__tablename__ = 'run'
id = Column(Integer, primary_key=True)
# One-to-Many relationship to Samples contained in Run
samples = relationship("Sample", cascade="delete")
# sequencer type: e.g. 454, MiSeq, HiSeq
type = Column(String)
# collection of index tags used
# TODO load midsets into DB?
mid_set = Column(String)
# Historically, we've referred to all our 454 sequencing as PlateX-Y, where:
# X is our sequencing plate (numbered from 1 as we've submitted plates)
# Y is the region on the 454 sequencing plate; a plate contains a minimum of 2 regions
# TODO - needs to be more generic, perhaps "Run Alias"
plate = Column(String)
sequencing_notes = Column(String)
def __repr__(self):
return '<Run: %r>' % (self.plate)
| StarcoderdataPython |
1685926 | """
Diagnostic Maps.
Diagnostic to produce images of a map with coastlines from a cube.
These plost show latitude vs longitude and the cube value is used as the colour
scale.
Note that this diagnostic assumes that the preprocessors do the bulk of the
hard work, and that the cube received by this diagnostic (via the settings.yml
and metadata.yml files) has no time component, a small number of depth layers,
and a latitude and longitude coordinates.
An approproate preprocessor for a 3D+time field would be:
preprocessors:
prep_map:
extract_levels:
levels: [100., ]
scheme: linear_extrap
time_average:
This tool is part of the ocean diagnostic tools package in the ESMValTool.
Author: <NAME> (PML)
<EMAIL>
"""
import logging
import os
import sys
from itertools import product
import cartopy
import iris
import iris.quickplot as qplt
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import diagnostic_tools as diagtools
from esmvaltool.diag_scripts.shared import run_diagnostic
# This part sends debug statements to stdout
logger = logging.getLogger(os.path.basename(__file__))
logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))
def create_ice_cmap(threshold):
"""Create colour map with ocean blue below 15% and white above 15%."""
threshold = threshold / 100.
ice_cmap_dict = {'red': ((0., 0.0313, 0.0313),
(threshold, 0.0313, 1.),
(1., 1., 1.)),
'green': ((0., 0.237, 0.237),
(threshold, 0.237, 1.),
(1., 1., 1.)),
'blue': ((0., 0.456, 0.456),
(threshold, 0.456, 1.),
(1., 1., 1.))}
return matplotlib.colors.LinearSegmentedColormap('ice_cmap', ice_cmap_dict)
def calculate_area_time_series(cube, plot_type, threshold):
"""
Calculate the area of unmasked cube cells.
Requires a cube with two spacial dimensions. (no depth coordinate).
Parameters
----------
cube: iris.cube.Cube
Original data
Returns
-------
iris.cube.Cube
collapsed cube, in units of m^2
"""
data = []
times = diagtools.cube_time_to_float(cube)
for time_itr, time in enumerate(times):
icedata = cube[time_itr].data
area = iris.analysis.cartography.area_weights(cube[time_itr])
if plot_type.lower() == 'ice extent':
# Ice extend is the area with more than 15% ice cover.
icedata = np.ma.masked_where(icedata < threshold, icedata)
total_area = np.ma.masked_where(icedata.mask, area.data).sum()
if plot_type.lower() == 'ice area':
# Ice area is cover * cell area
total_area = np.sum(icedata * area)
logger.debug('Calculating time series area: %s, %s, %s,',
time_itr, time, total_area)
data.append(total_area)
######
# Create a small dummy output array
data = np.array(data)
return times, data
def make_ts_plots(
cfg,
metadata,
filename,
):
"""
Make a ice extent time series plot for an individual model.
The cfg is the opened global config,
metadata is the metadata dictionairy
filename is the preprocessing model file.
"""
# Load cube and set up units
cube = iris.load_cube(filename)
cube = diagtools.bgc_units(cube, metadata['short_name'])
cube = agregate_by_season(cube)
# Is this data is a multi-model dataset?
multi_model = metadata['dataset'].find('MultiModel') > -1
# Make a dict of cubes for each layer.
cubes = diagtools.make_cube_layer_dict(cube)
# Load image format extention
image_extention = diagtools.get_image_format(cfg)
# # Load threshold, pole, season.
threshold = float(cfg['threshold'])
pole = get_pole(cube)
season = get_season(cube)
# Making plots for each layer
for plot_type in ['Ice Extent', 'Ice Area']:
for layer_index, (layer, cube_layer) in enumerate(cubes.items()):
layer = str(layer)
times, data = calculate_area_time_series(cube_layer,
plot_type,
threshold)
plt.plot(times, data)
# Add title to plot
title = ' '.join([metadata['dataset'], pole, 'hemisphere',
season, plot_type])
if layer:
title = ' '.join(
[title, '(', layer,
str(cube_layer.coords('depth')[0].units), ')'])
plt.title(title)
# y axis label:
plt.ylabel(' '.join([plot_type, 'm^2']))
# Determine image filename:
suffix = '_'.join(['ts', metadata['preprocessor'], season, pole,
plot_type, str(layer_index)])\
+ image_extention
suffix = suffix.replace(' ', '')
if multi_model:
path = diagtools.folder(
cfg['plot_dir']) + os.path.basename(filename)
path = path.replace('.nc', suffix)
else:
path = diagtools.get_image_path(
cfg,
metadata,
suffix=suffix,
)
# Saving files:
if cfg['write_plots']:
logger.info('Saving plots to %s', path)
plt.savefig(path)
plt.close()
def make_polar_map(
cube,
pole='North',
cmap='Blues_r',
):
"""
Make a polar map plot.
The cube is the opened cube (two dimensional),
pole is the polar region (North/South)
cmap is the colourmap,
"""
fig = plt.figure()
fig.set_size_inches(7, 7)
# ####
# Set limits, based on https://nedbatchelder.com/blog/200806/pylint.html
if pole not in ['North', 'South']:
logger.fatal('make_polar_map: hemisphere not provided.')
if pole == 'North': # North Hemisphere
ax1 = plt.subplot(111, projection=cartopy.crs.NorthPolarStereo())
ax1.set_extent([-180, 180, 50, 90], cartopy.crs.PlateCarree())
if pole == 'South': # South Hemisphere
ax1 = plt.subplot(111, projection=cartopy.crs.SouthPolarStereo())
ax1.set_extent([-180, 180, -90, -50], cartopy.crs.PlateCarree())
linrange = np.linspace(0., 100., 21.)
qplt.contourf(cube,
linrange,
cmap=cmap,
linewidth=0,
rasterized=True)
plt.tight_layout()
ax1.add_feature(cartopy.feature.LAND,
zorder=10,
facecolor=[0.8, 0.8, 0.8], )
ax1.gridlines(linewidth=0.5,
color='black',
zorder=20,
alpha=0.5,
linestyle='--')
try:
plt.gca().coastlines()
except AttributeError:
logger.warning('make_polar_map: Not able to add coastlines')
return fig, ax1
def get_pole(cube):
"""Return a hemisphere name as a string (Either North or South)."""
margin = 5.
if np.max(cube.coord('latitude').points) < 0. + margin:
return 'South'
if np.min(cube.coord('latitude').points) > 0. - margin:
return 'North'
logger.fatal('get_pole: Not able to determine hemisphere.')
return False
def get_time_string(cube):
"""Return a climatological season string in the format: "year season"."""
season = cube.coord('clim_season').points
year = cube.coord('year').points
return str(int(year[0])) + ' ' + season[0].upper()
def get_year(cube):
"""Return the cube year as a string."""
year = cube.coord('year').points
return str(int(year))
def get_season(cube):
"""Return a climatological season time string."""
season = cube.coord('clim_season').points
return season[0].upper()
def make_map_plots(
cfg,
metadata,
filename,
):
"""
Make a simple map plot for an individual model.
The cfg is the opened global config,
metadata is the metadata dictionairy
filename is the preprocessing model file.
"""
# Load cube and set up units
cube = iris.load_cube(filename)
cube = diagtools.bgc_units(cube, metadata['short_name'])
cube = agregate_by_season(cube)
# Is this data is a multi-model dataset?
multi_model = metadata['dataset'].find('MultiModel') > -1
# Make a dict of cubes for each layer.
cubes = diagtools.make_cube_layer_dict(cube)
# Load image format extention and threshold.
image_extention = diagtools.get_image_format(cfg)
threshold = float(cfg['threshold'])
# Making plots for each layer
plot_types = ['Fractional cover', 'Ice Extent']
plot_times = [0, -1]
for plot_type, plot_time in product(plot_types, plot_times):
for layer_index, (layer, cube_layer) in enumerate(cubes.items()):
layer = str(layer)
if plot_type == 'Fractional cover':
cmap = 'Blues_r'
if plot_type == 'Ice Extent':
cmap = create_ice_cmap(threshold)
cube = cube_layer[plot_time]
# use cube to determine which hemisphere, season and year.
pole = get_pole(cube)
time_str = get_time_string(cube)
# Make the polar map.
fig, ax1 = make_polar_map(cube,
pole=pole,
cmap=cmap)
# Add title to plot
title = ' '.join([metadata['dataset'], plot_type, time_str])
if layer:
title = ' '.join([title, '(', layer,
str(cube_layer.coords('depth')[0].units),
')'])
plt.title(title)
# Determine image filename:
suffix = '_'.join(['ortho_map', plot_type, time_str,
str(layer_index)])
suffix = suffix.replace(' ', '') + image_extention
if multi_model:
path = diagtools.folder(cfg['plot_dir'])
path = path + os.path.basename(filename)
path = path.replace('.nc', suffix)
else:
path = diagtools.get_image_path(
cfg,
metadata,
suffix=suffix,
)
# Saving files:
if cfg['write_plots']:
logger.info('Saving plots to %s', path)
plt.savefig(path)
plt.close()
def agregate_by_season(cube):
"""
Aggregate the cube into seasonal means.
Note that it is not currently possible to do this in the preprocessor,
as the seasonal mean changes the cube units.
"""
if not cube.coords('clim_season'):
iris.coord_categorisation.add_season(cube,
'time',
name='clim_season')
if not cube.coords('season_year'):
iris.coord_categorisation.add_season_year(cube,
'time',
name='season_year')
return cube.aggregated_by(['clim_season', 'season_year'],
iris.analysis.MEAN)
def make_map_extent_plots(
cfg,
metadata,
filename,
):
"""
Make an extent map plot showing several times for an individual model.
The cfg is the opened global config,
metadata is the metadata dictionairy
filename is the preprocessing model file.
"""
# Load cube and set up units
cube = iris.load_cube(filename)
cube = diagtools.bgc_units(cube, metadata['short_name'])
cube = agregate_by_season(cube)
# Is this data is a multi-model dataset?
multi_model = metadata['dataset'].find('MultiModel') > -1
# Make a dict of cubes for each layer.
cubes = diagtools.make_cube_layer_dict(cube)
# Load image format extention
image_extention = diagtools.get_image_format(cfg)
# Load threshold, pole and season
threshold = float(cfg['threshold'])
pole = get_pole(cube)
season = get_season(cube)
# Start making figure
for layer_index, (layer, cube_layer) in enumerate(cubes.items()):
fig = plt.figure()
fig.set_size_inches(7, 7)
if pole == 'North': # North Hemisphere
projection = cartopy.crs.NorthPolarStereo()
ax1 = plt.subplot(111, projection=projection)
ax1.set_extent([-180, 180, 50, 90], cartopy.crs.PlateCarree())
if pole == 'South': # South Hemisphere
projection = cartopy.crs.SouthPolarStereo()
ax1 = plt.subplot(111, projection=projection)
ax1.set_extent([-180, 180, -90, -50], cartopy.crs.PlateCarree())
ax1.add_feature(cartopy.feature.LAND,
zorder=10,
facecolor=[0.8, 0.8, 0.8])
ax1.gridlines(linewidth=0.5,
color='black',
zorder=20,
alpha=0.5,
linestyle='--')
try:
plt.gca().coastlines()
except AttributeError:
logger.warning('make_polar_map: Not able to add coastlines')
times = np.array(cube.coord('time').points.astype(float))
plot_desc = {}
for time_itr, time in enumerate(times):
cube = cube_layer[time_itr]
line_width = 1
color = plt.cm.jet(float(time_itr) / float(len(times)))
label = get_year(cube)
plot_desc[time] = {'label': label,
'c': [color, ],
'lw': [line_width, ],
'ls': ['-', ]}
layer = str(layer)
qplt.contour(cube,
[threshold, ],
colors=plot_desc[time]['c'],
linewidths=plot_desc[time]['lw'],
linestyles=plot_desc[time]['ls'],
rasterized=True)
# Add legend
legend_size = len(plot_desc.keys()) + 1
ncols = int(legend_size / 25) + 1
ax1.set_position([ax1.get_position().x0,
ax1.get_position().y0,
ax1.get_position().width * (1. - 0.1 * ncols),
ax1.get_position().height])
fig.set_size_inches(7 + ncols * 1.2, 7)
# Construct dummy plots.
for i in sorted(plot_desc.keys()):
plt.plot([], [],
c=plot_desc[i]['c'][0],
lw=plot_desc[i]['lw'][0],
ls=plot_desc[i]['ls'][0],
label=plot_desc[i]['label'],)
legd = ax1.legend(loc='center left',
ncol=ncols,
prop={'size': 10},
bbox_to_anchor=(1., 0.5))
legd.draw_frame(False)
legd.get_frame().set_alpha(0.)
# Add title to plot
title = ' '.join([metadata['dataset'], ])
if layer:
title = ' '.join([title, '(', layer,
str(cube_layer.coords('depth')[0].units), ')'])
plt.title(title)
# Determine image filename:
suffix = '_'.join(['ortho_map', pole, season, str(layer_index)])
suffix = suffix.replace(' ', '') + image_extention
if multi_model:
path = diagtools.folder(cfg['plot_dir'])
path = path + os.path.basename(filename)
path = path.replace('.nc', suffix)
else:
path = diagtools.get_image_path(
cfg,
metadata,
suffix=suffix,
)
# Saving files:
if cfg['write_plots']:
logger.info('Saving plots to %s', path)
plt.savefig(path)
plt.close()
def main(cfg):
"""
Load the config file, and send it to the plot maker.
The cfg is the opened global config.
"""
for index, metadata_filename in enumerate(cfg['input_files']):
logger.info(
'metadata filename:\t%s',
metadata_filename,
)
metadatas = diagtools.get_input_files(cfg, index=index)
for filename in sorted(metadatas.keys()):
logger.info('-----------------')
logger.info(
'model filenames:\t%s',
filename,
)
######
# extent maps plots of individual models
make_map_extent_plots(cfg, metadatas[filename], filename)
######
# maps plots of individual models
make_map_plots(cfg, metadatas[filename], filename)
######
# time series plots o
make_ts_plots(cfg, metadatas[filename], filename)
logger.info('Success')
if __name__ == '__main__':
    # ESMValTool entry point: run_diagnostic supplies the opened config.
    with run_diagnostic() as config:
        main(config)
| StarcoderdataPython |
1734744 | <gh_stars>10-100
from __future__ import absolute_import
import sys
try:
__PKG_SETUP__
except NameError:
__PKG_SETUP__ = False
if __PKG_SETUP__:
sys.stderr.write('Running from source directory.\n')
else:
from . import alg
from . import bench
from . import distutils
from . import plotter
__all__ = []
__all__.extend(['alg'])
__all__.extend(['bench'])
__all__.extend(['distutils'])
__all__.extend(['plotter'])
| StarcoderdataPython |
193354 | #!/usr/bin/env python3
import time
from matrix_client.client import MatrixClient
from matrix_client.api import MatrixRequestError
from requests.exceptions import ConnectionError, Timeout
import argparse
import random
from configparser import ConfigParser
import re
import traceback
import urllib.parse
import logging
import os
import sys
import signal
import queue
import codecs
from database import MarkovDatabaseBrain
COMMANDS = [
'!rate'
]
def sigterm_handler(_signo, _stack_frame):
"""Raises SystemExit(0), causing everything to cleanly shut down."""
sys.exit(0)
class ConfigParser(ConfigParser):
# allow case-sensitive option names
# needed for saving per-room response rates
optionxform = str
class Backend(object):
"""Interface for chat backends."""
def __init__(self, brain_file):
pass
def train_file(self, filename):
"""Trains the chat backend on the given file."""
with codecs.open(filename, encoding='utf8') as train_file:
for line in train_file:
self.learn(line)
def learn(self, line):
"""Updates the chat backend based on the given line of input."""
pass
def save(self):
"""Saves the backend to disk, if needed."""
pass
def reply(self, message):
"""Generates a reply to the given message."""
return "(dummy response)"
class MarkovBackend(Backend):
"""Chat backend using markov chains."""
def __init__(self, brain_file):
self.brain = MarkovDatabaseBrain(brain_file)
def sanitize(self, word):
"""Removes any awkward whitespace characters from the given word.
Removes '\n', '\r', and '\\u2028' (unicode newline character)."""
return word.replace('\n', '').replace('\r', '').replace('\u2028', '')
def train_file(self, filename):
with codecs.open(filename, encoding='utf8') as train_file:
for line in train_file:
self.learn(line)
self.save()
def learn(self, line):
line = line.strip()
words = line.split(' ')
words = [self.sanitize(word) for word in words]
for i in range(len(words) - 2):
prefix = words[i], words[i + 1]
follow = words[i + 2]
self.brain.add(prefix, follow)
def save(self):
self.brain.save()
def get_random_next_link(self, word1, word2):
"""Gives a word that could come after the two provided.
Words that follow the two given words are weighted by how frequently
they appear after them.
"""
possibilities = self.brain.get_followers((word1, word2))
if not possibilities:
return None
total = 0
for p in possibilities:
total += possibilities[p]
num = random.randint(1, total)
total = 0
for p in possibilities:
total += possibilities[p]
if total >= num:
break
return p
def reply(self, message):
if self.brain.is_empty():
return ''
seed = None
# try to seed reply from the message
possible_seed_words = message.split()
while seed is None and possible_seed_words:
message_word = random.choice(possible_seed_words)
seeds = list(self.brain.get_pairs_containing_word_ignoring_case(
message_word))
if seeds:
seed = random.choice(seeds)
else:
possible_seed_words.remove(message_word)
# we couldn't seed the reply from the input
# fall back to random seed
if seed is None:
seed = self.brain.get_three_random_words()
words = list(seed)
while self.brain.contains_pair((words[-2], words[-1])) and \
len(words) < 100:
word = self.get_random_next_link(words[-2], words[-1])
words.append(word)
return ' '.join(words)
class Config(object):
def __init__(self, cfgparser):
self.backend = cfgparser.get('General', 'backend')
self.display_name = cfgparser.get('General', 'display name')
self.learning = cfgparser.getboolean('General', 'learning')
self.username = cfgparser.get('Login', 'username')
self.password = cfgparser.get('Login', 'password')
self.server = cfgparser.get('Login', 'server')
self.default_response_rate = cfgparser.getfloat(
'General', 'default response rate')
self.response_rates = {}
for room_id, rate in cfgparser.items('Response Rates'):
room_id = room_id.replace('-colon-', ':')
self.response_rates[room_id] = float(rate)
def get_response_rate(self, room_id):
"""Returns our response rate for the room with the given room id."""
if room_id in self.response_rates:
return self.response_rates[room_id]
else:
return self.default_response_rate
def write(self):
"""Writes this config back to the file, with any changes reflected."""
cfgparser = ConfigParser()
cfgparser.add_section('General')
cfgparser.set('General', 'default response rate',
str(self.default_response_rate))
cfgparser.set('General', 'backend', self.backend)
cfgparser.set('General', 'display name', self.display_name)
cfgparser.set('General', 'learning', str(self.learning))
cfgparser.add_section('Login')
cfgparser.set('Login', 'username', self.username)
cfgparser.set('Login', 'password', self.password)
cfgparser.set('Login', 'server', self.server)
cfgparser.add_section('Response Rates')
for room_id, rate in list(self.response_rates.items()):
# censor colons because they are a configparser special
# character
room_id = room_id.replace(':', '-colon-')
cfgparser.set('Response Rates', room_id, str(rate))
with open('config.cfg', 'wt') as configfile:
cfgparser.write(configfile)
def get_default_configparser():
"""Returns a ConfigParser object for the default config file."""
config = ConfigParser(allow_no_value=True)
config.add_section('General')
config.set('General', 'default response rate', "0.10")
config.set('General', 'backend', 'markov')
config.set('General', 'display name', 'Markov')
config.set('General', 'learning', 'on')
config.add_section('Login')
config.set('Login', 'username', 'username')
config.set('Login', 'password', 'password')
config.set('Login', 'server', 'http://matrix.org')
config.add_section('Response Rates')
return config
class Bot(object):
"""Handles everything that the bot does."""
def __init__(self, config, chat_backend):
self.config = config
self.client = None
self.chat_backend = chat_backend
self.event_queue = queue.Queue()
self.invite_queue = queue.Queue()
def login(self):
"""Logs onto the server."""
client = MatrixClient(self.config.server)
client.login_with_password_no_sync(
self.config.username, self.config.password)
self.client = client
def get_room(self, event):
"""Returns the room the given event took place in."""
return self.client.rooms[event['room_id']]
def handle_command(self, event, command, args):
"""Handles the given command, possibly sending a reply to it."""
command = command.lower()
if command == '!rate':
if args:
num = re.match(r'[0-9]*(\.[0-9]+)?(%|)', args[0]).group()
if not num:
self.reply(event, "Error: Could not parse number.")
return
if num[-1] == '%':
rate = float(num[:-1]) / 100
else:
rate = float(num)
self.config.response_rates[event['room_id']] = rate
self.reply(event, "Response rate set to %f." % rate)
else:
rate = self.config.get_response_rate(event['room_id'])
self.reply(
event, "Response rate set to %f in this room." % rate)
def reply(self, event, message):
"""Replies to the given event with the provided message."""
room = self.get_room(event)
logging.info("Reply: %s" % message)
room.send_notice(message)
def is_name_in_message(self, message):
"""Returns whether the message contains the bot's name.
Considers both display name and username.
"""
regex = "({}|{})".format(
self.config.display_name, self.config.username)
return re.search(regex, message, flags=re.IGNORECASE)
def handle_invite(self, room_id, invite_state):
# join rooms if invited
try:
self.client.join_room(room_id)
logging.info('Joined room: %s' % room_id)
except MatrixRequestError as e:
if e.code == 404:
# room was deleted after invite or something; ignore it
logging.info('invited to nonexistent room {}'.format(room_id))
elif e.code in range(500, 600):
# synapse v0.99.1 500s if it cannot locate a room sometimes
# (when there are federation issues)
logging.warning('got 500 trying to join room we were invited to')
else:
raise(e)
def handle_event(self, event):
"""Handles the given event.
Joins a room if invited, learns from messages, and possibly responds to
messages.
"""
if event['type'] == 'm.room.message':
# only care about text messages by other people
if event['sender'] != self.client.user_id and \
event['content']['msgtype'] == 'm.text':
message = str(event['content']['body'])
# lowercase message so we can search it
# case-insensitively
logging.info("Handling message: %s" % message)
command_found = False
for command in COMMANDS:
match = re.search(command, message, flags=re.IGNORECASE)
if match and (match.start() == 0 or
self.is_name_in_message(message)):
command_found = True
args = message[match.start():].split(' ')
self.handle_command(event, args[0], args[1:])
break
if not command_found:
room = self.get_room(event)
response_rate = self.config.get_response_rate(room.room_id)
if self.is_name_in_message(message) or \
random.random() < response_rate:
# remove name from message and respond to it
message_no_name = re.sub(
' *' + re.escape(self.get_display_name()) + ' *',
' ', message, flags=re.IGNORECASE)
response = self.chat_backend.reply(message_no_name)
self.reply(event, response)
if self.config.learning:
self.chat_backend.learn(message)
self.send_read_receipt(event)
def set_display_name(self, display_name):
"""Sets the bot's display name on the server."""
self.client.api.set_display_name(self.client.user_id, display_name)
def get_display_name(self):
"""Gets the bot's display name from the server."""
return self.client.api.get_display_name(self.client.user_id)
def run(self):
"""Indefinitely listens for messages and handles all that come."""
current_display_name = self.get_display_name()
if current_display_name != self.config.display_name:
self.set_display_name(self.config.display_name)
last_save = time.time()
# listen for invites, including initial sync invites
self.client.add_invite_listener(
lambda room_id, state: self.invite_queue.put((room_id, state)))
# get rid of initial event sync
logging.info("initial event stream")
self.client.listen_for_events()
# listen to events and add them all to the event queue
# for handling in this thread
self.client.add_listener(self.event_queue.put)
def exception_handler(e):
if isinstance(e, Timeout):
logging.warning("listener thread timed out.")
logging.error("exception in listener thread:")
traceback.print_exc()
# start listen thread
logging.info("starting listener thread")
self.client.start_listener_thread(exception_handler=exception_handler)
try:
while True:
time.sleep(1)
# handle any queued events
while not self.event_queue.empty():
event = self.event_queue.get_nowait()
self.handle_event(event)
while not self.invite_queue.empty():
room_id, invite_state = self.invite_queue.get_nowait()
self.handle_invite(room_id, invite_state)
# save every 10 minutes or so
if time.time() - last_save > 60 * 10:
self.chat_backend.save()
last_save = time.time()
finally:
logging.info("stopping listener thread")
self.client.stop_listener_thread()
def send_read_receipt(self, event):
"""Sends a read receipt for the given event."""
if "room_id" in event and "event_id" in event:
room_id = urllib.parse.quote(event['room_id'])
event_id = urllib.parse.quote(event['event_id'])
self.client.api._send("POST", "/rooms/" + room_id +
"/receipt/m.read/" + event_id,
api_path="/_matrix/client/r0")
def train(backend, train_file):
    """Trains the given chat backend on the given train_file & saves it.

    Args:
        backend: A chat backend object exposing ``train_file`` and ``save``.
        train_file: Path to a text file to train the backend on.
    """
    print("Training...")
    backend.train_file(train_file)
    print("Training complete!")
    backend.save()
def main():
    """CLI entry point: parse args, load/generate config, then train or run.

    With ``--train`` the chosen backend is trained on the given file and
    saved; otherwise the bot logs in and runs forever, reconnecting after
    Matrix/connection errors and saving the brain and config on each pass.
    """
    argparser = argparse.ArgumentParser(
        description="A chatbot for Matrix (matrix.org)")
    argparser.add_argument("--debug",
                           help="Print out way more things.",
                           action="store_true")
    argparser.add_argument("--train", metavar="train.txt", type=str,
                           help="Train the bot with a file of text.")
    argparser.add_argument("--config", metavar="config.cfg", type=str,
                           help="Bot's config file (must be read-writable)")
    argparser.add_argument("--brain", metavar="brain.db", type=str,
                           help="Bot's brain file (must be read-writable)")
    args = vars(argparser.parse_args())
    debug = args['debug']
    # suppress logs of libraries
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
    log_level = logging.DEBUG if debug else logging.INFO
    logging.basicConfig(level=log_level,
                        format='%(asctime)s %(name)s '
                        '%(levelname)s %(message)s')
    train_path = args['train']
    # Command-line paths win; otherwise fall back to env vars, then defaults.
    config_path = args['config'] if args['config'] \
        else os.getenv('MATRIX_CHATBOT_CONFIG', 'config.cfg')
    brain_path = args['brain'] if args['brain'] \
        else os.getenv('MATRIX_CHATBOT_BRAIN', 'brain.db')
    cfgparser = ConfigParser()
    success = cfgparser.read(config_path)
    if not success:
        # No config yet: write a template and ask the user to fill it in.
        cfgparser = get_default_configparser()
        with open(config_path, 'wt') as configfile:
            cfgparser.write(configfile)
        print("A config has been generated. "
              "Please set your bot's username, password, and homeserver "
              "in " + config_path + " then run this again.")
        return
    config = Config(cfgparser)
    backends = {'markov': MarkovBackend}
    backend = backends[config.backend](brain_path)
    logging.info("loading brain")
    if train_path:
        train(backend, train_path)
    else:
        signal.signal(signal.SIGTERM, sigterm_handler)
        # Reconnect loop: any Matrix/connection failure waits a minute and
        # retries; brain and config are persisted after every attempt.
        while True:
            try:
                bot = Bot(config, backend)
                bot.login()
                bot.run()
            except (MatrixRequestError, ConnectionError):
                traceback.print_exc()
                logging.warning("disconnected. Waiting a minute to see if"
                                " the problem resolves itself...")
                time.sleep(60)
            finally:
                backend.save()
                logging.info('Saving config...')
                config.write()


if __name__ == '__main__':
    main()
| StarcoderdataPython |
1677554 | <filename>InvenTree/barcodes/plugins/inventree_barcode.py<gh_stars>1-10
"""
The InvenTreeBarcodePlugin validates barcodes generated by InvenTree itself.
It can be used as a template for developing third-party barcode plugins.
The data format is very simple, and maps directly to database objects,
via the "id" parameter.
Parsing an InvenTree barcode simply involves validating that the
references model objects actually exist in the database.
"""
# -*- coding: utf-8 -*-
import json
from barcodes.barcode import BarcodePlugin
from stock.models import StockItem, StockLocation
from part.models import Part
from rest_framework.exceptions import ValidationError
class InvenTreeBarcodePlugin(BarcodePlugin):
    """Barcode plugin that validates barcodes generated by InvenTree itself.

    The barcode data is a JSON dictionary mapping model keys directly to
    database ids, e.g. ``{"stockitem": 17}`` or ``{"stockitem": {"id": 17}}``.
    """

    PLUGIN_NAME = "InvenTreeBarcode"

    def validate(self):
        """
        An "InvenTree" barcode must be a jsonnable-dict with the following tags:

        {
            'tool': 'InvenTree',
            'version': <anything>
        }
        """
        # The data must either be dict or be able to dictified
        if type(self.data) is dict:
            pass
        elif type(self.data) is str:
            try:
                self.data = json.loads(self.data)
                if type(self.data) is not dict:
                    return False
            except json.JSONDecodeError:
                return False
        else:
            return False  # pragma: no cover

        # If any of the following keys are in the JSON data,
        # let's go ahead and assume that the code is a valid InvenTree one...
        for key in ['tool', 'version', 'InvenTree', 'stockitem', 'stocklocation', 'part']:
            if key in self.data.keys():
                return True

        # NOTE(review): any JSON dict validates (the loop above is redundant
        # given this unconditional True) — preserved as-is; confirm intended.
        return True

    def _getModelInstance(self, key, model, not_found_msg):
        """Resolve `key` (case-insensitive) in the barcode data to a `model`
        instance.

        The value may be a plain integer pk or a dict carrying an ``id``
        entry. Returns None if the key is absent; raises ValidationError if
        the key is present but cannot be resolved.
        """
        for k in self.data.keys():
            if k.lower() == key:
                pk = None

                # Initially try casting to an integer
                try:
                    pk = int(self.data[k])
                except (TypeError, ValueError):  # pragma: no cover
                    pk = None

                if pk is None:  # pragma: no cover
                    # Fall back to an {'id': ...} style payload
                    try:
                        pk = self.data[k]['id']
                    except (AttributeError, KeyError):
                        raise ValidationError({k: "id parameter not supplied"})

                try:
                    return model.objects.get(pk=pk)
                except (ValueError, model.DoesNotExist):  # pragma: no cover
                    # BUG FIX: previously raised ValidationError({k, msg}),
                    # which builds a *set* — the detail must be a dict.
                    raise ValidationError({k: not_found_msg})

        return None

    def getStockItem(self):
        """Return the referenced StockItem, or None if none is referenced."""
        return self._getModelInstance('stockitem', StockItem, "Stock item does not exist")

    def getStockLocation(self):
        """Return the referenced StockLocation, or None if none is referenced."""
        return self._getModelInstance('stocklocation', StockLocation, "Stock location does not exist")

    def getPart(self):
        """Return the referenced Part, or None if none is referenced."""
        return self._getModelInstance('part', Part, 'Part does not exist')
| StarcoderdataPython |
1764569 | import glob
import re
import os
import datetime
import argparse as ap
import matplotlib
# Enables saving plots over ssh
try:
os.environ['DISPLAY']
except KeyError:
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from operator import itemgetter, attrgetter, methodcaller
import numpy as np
def _first_match(pattern):
    """Return the first filesystem match for *pattern*.

    Mirrors the original inline behavior: if nothing matches, print the
    failing glob and abort; on success, print and return the resolved path.
    """
    matches = glob.glob(pattern)
    if not matches:
        print(pattern)
        quit()
    print(matches[0])
    return matches[0]


def main():
    """Plot wall-clock time vs. basis-term count for each vector-field
    configuration of every (array period, nanowire radius) system found
    under the sorted sweep directory, saving one PNG per system.

    (Dead commented-out variants of this analysis were removed; see VCS
    history if they are needed again.)
    """
    parser = ap.ArgumentParser(description="""Plots the minimum required basis terms to achieve a
    desired accuracy against an independent parameter""")
    parser.add_argument('path', type=str, help="Base directory path for the sorted sweep")
    parser.add_argument('-s', '--show', action='store_true', default=False,
                        help="""Show plots
                        interactively""")
    args = parser.parse_args()

    path = args.path
    if not os.path.isdir(path):
        print("Specified path does not exist")
        quit()

    fields = ['default', 'jones', 'normal', 'no_vec']
    # Map of array period -> nanowire radii simulated at that period.
    systems = {'array_period_0.25': ['nw_radius_0.075', 'nw_radius_0.06'],
               'array_period_0.3': ['nw_radius_0.075']}
    for period, radii in systems.items():
        for radius in radii:
            fig, axes = plt.subplots(1, 1)
            # One time-scaling curve per vector-field configuration.
            for field in fields:
                tpath = _first_match(os.path.join(
                    path, field, period + "*", radius,
                    'frequency_3.2962*/time_scaling.dat'))
                numbasis, tdata = np.loadtxt(tpath, unpack=True, skiprows=1, delimiter=',')
                axes.plot(numbasis, tdata, '-o', label=field)
            axes.legend(loc='best')
            axes.set_xlabel('Number of Basis Terms')
            axes.set_ylabel('Wall Clock Time (seconds)')
            # Loaded for parity with the original script (its plotting is
            # disabled); `field` here is the last entry of `fields`, exactly
            # as in the original loop-variable carry-over.
            mpath = _first_match(os.path.join(
                path, field, period + "*", radius,
                'minimum_basis_terms_global_t0.1.dat'))
            freq, mdata = np.loadtxt(mpath, unpack=True, skiprows=1, delimiter=',')
            plt.savefig(os.path.join(path, '%s_%s_vec_field_convergence_comparison.png' % (period, radius)),
                        transparent=True)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
83164 | <reponame>haikezegwaard/msg_parser
# coding=utf-8
# autogenerated using ms_props_generator.py
# Maps MAPI property-type hex codes to their PtypXxx names (see the autogen
# note above; values follow Microsoft's MS-OXCDATA property type naming).
# Codes with the 0x1000 bit set are the "multiple" (multi-valued) variants.
DATA_TYPE_MAP = {
    "0x0000": "PtypUnspecified",
    "0x0001": "PtypNull",
    "0x0002": "PtypInteger16",
    "0x0003": "PtypInteger32",
    "0x0004": "PtypFloating32",
    "0x0005": "PtypFloating64",
    "0x0006": "PtypCurrency",
    "0x0007": "PtypFloatingTime",
    "0x000A": "PtypErrorCode",
    "0x000B": "PtypBoolean",
    "0x000D": "PtypObject",
    "0x0014": "PtypInteger64",
    "0x001E": "PtypString8",
    "0x001F": "PtypString",
    "0x0040": "PtypTime",
    "0x0048": "PtypGuid",
    "0x00FB": "PtypServerId",
    "0x00FD": "PtypRestriction",
    "0x00FE": "PtypRuleAction",
    "0x0102": "PtypBinary",
    "0x1002": "PtypMultipleInteger16",
    "0x1003": "PtypMultipleInteger32",
    "0x1004": "PtypMultipleFloating32",
    "0x1005": "PtypMultipleFloating64",
    "0x1006": "PtypMultipleCurrency",
    "0x1007": "PtypMultipleFloatingTime",
    "0x1014": "PtypMultipleInteger64",
    "0x101F": "PtypMultipleString",
    "0x101E": "PtypMultipleString8",
    "0x1040": "PtypMultipleTime",
    "0x1048": "PtypMultipleGuid",
    "0x1102": "PtypMultipleBinary"
}
| StarcoderdataPython |
1711192 | # pyright: reportUndefinedVariable=false
from sly import Lexer
from ..exceptions import SyntaxError
class ExpressionLexer(Lexer):
    """sly lexer for the filter-expression language.

    Tokens cover comparison/boolean operators, DynamoDB-style helper
    functions (attribute_exists, begins_with, ...), image roots ($OLD/$NEW)
    and literals. Raises the package SyntaxError on bad input.
    """

    # Set of token names.
    tokens = {
        VALUE,
        INT,
        FLOAT,
        BETWEEN,
        AND,
        OR,
        NOT,
        IN,
        EQ,
        NE,
        GT,
        GTE,
        LT,
        LTE,
        ATTRIBUTE_EXISTS,
        ATTRIBUTE_NOT_EXISTS,
        ATTRIBUTE_TYPE,
        BEGINS_WITH,
        CONTAINS,
        SIZE,
        FROM_JSON,
        KEYS,
        OLD_IMAGE,
        NEW_IMAGE,
        NAME,
        CHANGED,
        MATCH,
        IS_TYPE,
        FALSE,
        TRUE,
    }

    # Set of literal characters
    literals = {"(", ")", "[", "]", ",", "."}

    # String containing ignored characters
    ignore = " \t"

    # Regular expression rules for tokens
    OLD_IMAGE = r"\$OLD"
    NEW_IMAGE = r"\$NEW"
    AND = r"\&{1}"
    OR = r"\|{1}"
    NOT = "NOT"
    IN = "IN"
    BETWEEN = "BETWEEN"
    CHANGED = "has_changed"
    IS_TYPE = "is_type"
    ATTRIBUTE_EXISTS = "attribute_exists"
    ATTRIBUTE_NOT_EXISTS = "attribute_not_exists"
    ATTRIBUTE_TYPE = "attribute_type"
    BEGINS_WITH = "begins_with"
    CONTAINS = "contains"
    FROM_JSON = "from_json"
    SIZE = "size"
    TRUE = "True"
    # BUG FIX: was `FALSE = ("False",)` — a 1-tuple. sly token patterns must
    # be strings (or functions); a tuple breaks lexer construction.
    FALSE = "False"
    # NAME has to come AFTER any keywords above. NAME is used as a path within
    # OLD_IMAGE/NEW_IMAGE and also Dynamodb types such as S, L, SS, NS, BOOL...
    NAME = r"[a-zA-Z_][a-zA-Z0-9\-_]*"
    NE = "!="
    GTE = ">="
    LTE = "<="
    EQ = "=="
    GT = ">"
    LT = "<"
    # Raw strings: `"\d+"` relies on a deprecated invalid escape sequence.
    INT = r"\d+"
    MATCH = "=~"
    FLOAT = r"\d+\.\d+"
    VALUE = r""""([^"\\]*(\\.[^"\\]*)*)"|\'([^\'\\]*(\\.[^\'\\]*)*)\'"""

    # Line number tracking
    @_(r"\n+")
    def ignore_newline(self, t):
        self.lineno += t.value.count("\n")

    def error(self, t):
        """Raise the package SyntaxError for any unlexable character."""
        if t.value[0] == "$":
            # A '$' that is not $OLD/$NEW is an invalid base path.
            raise SyntaxError(
                f"Invalid base path {t.value.split(' ')[0].split('.')[0]}"
            )
        else:
            raise SyntaxError(
                f"Bad character '{t.value[0]}' at line {self.lineno} character {self.index}"
            )
1652440 | # Copyright 2015, 2016 OpenMarket Ltd
# Copyright 2017 New Vector Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import re
from typing import Any, Dict, List, Mapping, Optional, Pattern, Set, Tuple, Union
from matrix_common.regex import glob_to_regex, to_word_pattern
from synapse.events import EventBase
from synapse.types import UserID
from synapse.util.caches.lrucache import LruCache
logger = logging.getLogger(__name__)
# Matches an escaped "[...]" or "[!...]" character class within an
# already-re.escape()d glob.
GLOB_REGEX = re.compile(r"\\\[(\\\!|)(.*)\\\]")
# Does the string contain any glob metacharacters at all?
IS_GLOB = re.compile(r"[\?\*\[\]]")
# Parses an (in)equality spec such as ">=10": group 1 = operator, group 2 = number.
INEQUALITY_EXPR = re.compile("^([=<>]*)([0-9]*)$")


def _room_member_count(
    ev: EventBase, condition: Dict[str, Any], room_member_count: int
) -> bool:
    """True if the room's member count satisfies the condition's "is" spec."""
    return _test_ineq_condition(condition, room_member_count)
def _sender_notification_permission(
ev: EventBase,
condition: Dict[str, Any],
sender_power_level: int,
power_levels: Dict[str, Union[int, Dict[str, int]]],
) -> bool:
notif_level_key = condition.get("key")
if notif_level_key is None:
return False
notif_levels = power_levels.get("notifications", {})
assert isinstance(notif_levels, dict)
room_notif_level = notif_levels.get(notif_level_key, 50)
return sender_power_level >= room_notif_level
def _test_ineq_condition(condition: Dict[str, Any], number: int) -> bool:
    """Evaluate the condition's "is" spec (e.g. ">=2", "3") against *number*.

    Unparseable or absent specs evaluate to False; a bare number means
    equality.
    """
    if "is" not in condition:
        return False
    parsed = INEQUALITY_EXPR.match(condition["is"])
    if not parsed:
        return False
    op = parsed.group(1)
    operand = parsed.group(2)
    if not operand.isdigit():
        return False
    target = int(operand)
    # Table of every recognized operator; anything else is False.
    outcomes = {
        "": number == target,
        "==": number == target,
        "<": number < target,
        ">": number > target,
        ">=": number >= target,
        "<=": number <= target,
    }
    return outcomes.get(op, False)
def tweaks_for_actions(actions: List[Union[str, Dict]]) -> Dict[str, Any]:
    """
    Converts a list of actions into a `tweaks` dict (which can then be passed to
    the push gateway).

    Only `set_tweak` actions contribute; anything that is not a dict carrying
    a "set_tweak" key is ignored. A missing `value` is treated as True, which
    agrees with the only spec-defined treatment of absent `value`s (namely,
    for `highlight` tweaks).

    Args:
        actions: list of actions
            e.g. [
                {"set_tweak": "a", "value": "AAA"},
                {"set_tweak": "b", "value": "BBB"},
                {"set_tweak": "highlight"},
                "notify"
            ]

    Returns:
        dictionary of tweaks for those actions
            e.g. {"a": "AAA", "b": "BBB", "highlight": True}
    """
    return {
        action["set_tweak"]: action.get("value", True)
        for action in actions
        if isinstance(action, dict) and "set_tweak" in action
    }
class PushRuleEvaluatorForEvent:
    """Evaluates push-rule conditions against a single event.

    Built once per event; caches both the flattened event values and
    per-condition results (keyed by the condition's "_cache_key").
    """

    def __init__(
        self,
        event: EventBase,
        room_member_count: int,
        sender_power_level: int,
        power_levels: Dict[str, Union[int, Dict[str, int]]],
        relations: Dict[str, Set[Tuple[str, str]]],
        relations_match_enabled: bool,
    ):
        """Capture the event and the room context needed by the conditions.

        Args:
            event: The event being evaluated.
            room_member_count: Number of members in the room.
            sender_power_level: Power level of the event's sender.
            power_levels: The room's power-levels content.
            relations: rel_type -> set of (sender, event_type) relations.
            relations_match_enabled: Gate for the experimental MSC3772
                relation_match condition.
        """
        self._event = event
        self._room_member_count = room_member_count
        self._sender_power_level = sender_power_level
        self._power_levels = power_levels
        self._relations = relations
        self._relations_match_enabled = relations_match_enabled

        # Maps strings of e.g. 'content.body' -> event["content"]["body"]
        self._value_cache = _flatten_dict(event)

        # Maps cache keys to final values.
        self._condition_cache: Dict[str, bool] = {}

    def check_conditions(
        self, conditions: List[dict], uid: str, display_name: Optional[str]
    ) -> bool:
        """
        Returns true if a user's conditions/user ID/display name match the event.

        Args:
            conditions: The user's conditions to match.
            uid: The user's MXID.
            display_name: The display name.

        Returns:
            True if all conditions match the event, False otherwise.
        """
        for cond in conditions:
            # Reuse a previously computed result when the condition carries a
            # cache key (shared across users evaluating the same event).
            _cache_key = cond.get("_cache_key", None)
            if _cache_key:
                res = self._condition_cache.get(_cache_key, None)
                if res is False:
                    return False
                elif res is True:
                    continue
            res = self.matches(cond, uid, display_name)
            if _cache_key:
                self._condition_cache[_cache_key] = bool(res)
            if not res:
                return False
        return True

    def matches(
        self, condition: Dict[str, Any], user_id: str, display_name: Optional[str]
    ) -> bool:
        """
        Returns true if a user's condition/user ID/display name match the event.

        Args:
            condition: The user's condition to match.
            user_id: The user's MXID.
            display_name: The display name, or None if there is not one.

        Returns:
            True if the condition matches the event, False otherwise.
        """
        if condition["kind"] == "event_match":
            return self._event_match(condition, user_id)
        elif condition["kind"] == "contains_display_name":
            return self._contains_display_name(display_name)
        elif condition["kind"] == "room_member_count":
            return _room_member_count(self._event, condition, self._room_member_count)
        elif condition["kind"] == "sender_notification_permission":
            return _sender_notification_permission(
                self._event, condition, self._sender_power_level, self._power_levels
            )
        elif (
            condition["kind"] == "org.matrix.msc3772.relation_match"
            and self._relations_match_enabled
        ):
            return self._relation_match(condition, user_id)
        else:
            # XXX This looks incorrect -- we have reached an unknown condition
            # kind and are unconditionally returning that it matches. Note
            # that it seems possible to provide a condition to the /pushrules
            # endpoint with an unknown kind, see _rule_tuple_from_request_object.
            return True

    def _event_match(self, condition: dict, user_id: str) -> bool:
        """
        Check an "event_match" push rule condition.

        Args:
            condition: The "event_match" push rule condition to match.
            user_id: The user's MXID.

        Returns:
            True if the condition matches the event, False otherwise.
        """
        pattern = condition.get("pattern", None)

        if not pattern:
            # No literal pattern: derive one from the pattern_type.
            pattern_type = condition.get("pattern_type", None)
            if pattern_type == "user_id":
                pattern = user_id
            elif pattern_type == "user_localpart":
                pattern = UserID.from_string(user_id).localpart

        if not pattern:
            logger.warning("event_match condition with no pattern")
            return False

        # XXX: optimisation: cache our pattern regexps
        if condition["key"] == "content.body":
            body = self._event.content.get("body", None)
            if not body or not isinstance(body, str):
                return False

            # Body matches are word-bounded; all other keys match the whole value.
            return _glob_matches(pattern, body, word_boundary=True)
        else:
            haystack = self._value_cache.get(condition["key"], None)
            if haystack is None:
                return False

            return _glob_matches(pattern, haystack)

    def _contains_display_name(self, display_name: Optional[str]) -> bool:
        """
        Check an "event_match" push rule condition.

        Args:
            display_name: The display name, or None if there is not one.

        Returns:
            True if the display name is found in the event body, False otherwise.
        """
        if not display_name:
            return False

        body = self._event.content.get("body", None)
        if not body or not isinstance(body, str):
            return False

        # Similar to _glob_matches, but do not treat display_name as a glob.
        r = regex_cache.get((display_name, False, True), None)
        if not r:
            r1 = re.escape(display_name)
            r1 = to_word_pattern(r1)
            r = re.compile(r1, flags=re.IGNORECASE)
            regex_cache[(display_name, False, True)] = r

        return bool(r.search(body))

    def _relation_match(self, condition: dict, user_id: str) -> bool:
        """
        Check an "relation_match" push rule condition.

        Args:
            condition: The "event_match" push rule condition to match.
            user_id: The user's MXID.

        Returns:
            True if the condition matches the event, False otherwise.
        """
        rel_type = condition.get("rel_type")
        if not rel_type:
            logger.warning("relation_match condition missing rel_type")
            return False

        sender_pattern = condition.get("sender")
        if sender_pattern is None:
            sender_type = condition.get("sender_type")
            if sender_type == "user_id":
                sender_pattern = user_id
        type_pattern = condition.get("type")

        # If any other relations matches, return True.
        for sender, event_type in self._relations.get(rel_type, ()):
            if sender_pattern and not _glob_matches(sender_pattern, sender):
                continue
            if type_pattern and not _glob_matches(type_pattern, event_type):
                continue
            # All values must have matched.
            return True

        # No relations matched.
        return False
# Caches (string, is_glob, word_boundary) -> regex for push. See _glob_matches
# (and _contains_display_name, which stores non-glob entries with is_glob=False).
regex_cache: LruCache[Tuple[str, bool, bool], Pattern] = LruCache(
    50000, "regex_push_cache"
)
def _glob_matches(glob: str, value: str, word_boundary: bool = False) -> bool:
    """Tests if value matches glob.

    Args:
        glob
        value: String to test against glob.
        word_boundary: Whether to match against word boundaries or entire
            string. Defaults to False.
    """
    try:
        cache_key = (glob, True, word_boundary)
        pattern = regex_cache.get(cache_key, None)
        if pattern is None:
            pattern = glob_to_regex(glob, word_boundary=word_boundary)
            regex_cache[cache_key] = pattern

        return bool(pattern.search(value))
    except re.error:
        logger.warning("Failed to parse glob to regex: %r", glob)
        return False
def _flatten_dict(
d: Union[EventBase, Mapping[str, Any]],
prefix: Optional[List[str]] = None,
result: Optional[Dict[str, str]] = None,
) -> Dict[str, str]:
if prefix is None:
prefix = []
if result is None:
result = {}
for key, value in d.items():
if isinstance(value, str):
result[".".join(prefix + [key])] = value.lower()
elif isinstance(value, Mapping):
_flatten_dict(value, prefix=(prefix + [key]), result=result)
return result
| StarcoderdataPython |
1707192 | import sys
import os
UTILS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'utils')
sys.path.insert(1, UTILS_DIR)
from training import train, test
if __name__ == '__main__':
    """
    This assumes a pretrained actor at the actor-path.
    """
    BREAK_EARLY = False
    BATCH_SIZE = 500
    # Keyword arguments shared by every train()/test() call below, hoisted to
    # keep the four call sites in sync (they previously repeated these inline).
    common_kwargs = dict(
        model_class='wmf_vae',
        max_kl=0.05,
        ac_reg_loss_scaler=0.0,
        actor_reg_loss_scaler=0.0001,
        evaluation_metric="NDCG",
        batch_size=BATCH_SIZE,
        break_early=BREAK_EARLY,
        verbose=False,
        positive_weights=5.0,
        version_tag="FULL_RUN_ON_OTHER_DATASETS",
    )
    for data_subdir in ['ml-20m', 'netflix-prize', 'msd']:
        actor_path = "WMFVAE_ACTOR_TRAIN_{}".format(data_subdir)
        # Stage 1: pretrain the actor alone and save it for the critic stage.
        train(
            data_subdir=data_subdir,
            n_epochs_pred_only=100,
            n_epochs_ac_only=0,
            n_epochs_pred_and_ac=0,
            logging_frequency=50,
            path_to_save_actor=actor_path,
            log_critic_training_error=False,
            **common_kwargs,
        )
        print("Now, hopefully on to testing...")
        test(
            data_subdir=data_subdir,
            n_epochs_pred_only=100,
            n_epochs_ac_only=0,
            n_epochs_pred_and_ac=0,
            **common_kwargs,
        )
        print("On to round 2! Now we'll do the critic.")
        # Stage 2: restore the pretrained actor, train critic-only for 50
        # epochs, then 50 joint actor+critic epochs; test again afterwards.
        train(
            data_subdir=data_subdir,
            n_epochs_pred_only=0,
            n_epochs_ac_only=50,
            n_epochs_pred_and_ac=50,
            logging_frequency=50,
            restore_trained_actor_path=actor_path,
            **common_kwargs,
        )
        print("Now, hopefully on to testing...")
        test(
            data_subdir=data_subdir,
            n_epochs_pred_only=0,
            n_epochs_ac_only=50,
            n_epochs_pred_and_ac=50,
            restore_trained_actor_path=actor_path,
            **common_kwargs,
        )
    print("Bye bye")
    exit()
# train(
# # model_class="wmf",
# # model_class='multi_dae',
# model_class="wmf_vae",
# # model_class='warp_encoder',
# n_epochs_pred_only=200,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.05,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# # actor_reg_loss_scaler=0.01,
# positive_weights=5.0,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="FULL_WMFVAE_RUN_JUST_ACTOR",
# path_to_save_actor="200_EPOCHS_WMFVAE_AT_0.05_KL_JUST_ACTOR",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# train(
# # model_class="wmf",
# # model_class='multi_dae',
# model_class="wmf_vae",
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.05,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# # actor_reg_loss_scaler=0.01,
# positive_weights=5.0,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="FULL_WMFVAE_RUN_WITH_CRITIC",
# # path_to_save_actor="200_EPOCHS_WMFVAE_AT_0.05_KL",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# restore_trained_actor_path="200_EPOCHS_WMFVAE_AT_0.05_KL_JUST_ACTOR",
# )
# print("Now that we've done the thing we really care about, let's have some fun with hyperparameters")
# for max_kl in [0.4, 0.2, 0.1, 0.01]:
# train(
# # model_class="wmf",
# # model_class='multi_dae',
# model_class="wmf_vae",
# # model_class='warp_encoder',
# n_epochs_pred_only=100,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=max_kl,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# actor_reg_loss_scaler=0.01,
# positive_weights=5.0,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TESTING_HYPERPARAMETERS",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("On to the next one...")
# exit()
# print("Now, hopefully on to testing...")
# test(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=10,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# # max_kl=0.2,
# # ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.01,
# positive_weights=5.0,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("On to round 2! Now we'll do the critic.")
# train(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Now, hopefully on to testing...")
# test(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Bye bye")
# exit()
| StarcoderdataPython |
1714463 | #!/usr/bin/python
'''
Given a running machine with ssh, apply a configuration
'''
import logging
import os
import sys
from bellatrix.lib import bellatrix_util
from bellatrix.lib import util
import bellatrix.bewitch_ami
class Provision(bellatrix.bewitch_ami.Bewitch):
    """Applies a configuration module to a running, SSH-reachable machine.

    Reuses Bewitch's command-execution/reporting machinery without the
    AMI-burning step.
    """

    def __init__(self, key, sec, app_name, pk, reports):
        #we cheat on the pk so Bewitch doesn't complain
        bellatrix.bewitch_ami.Bewitch.__init__(self, key, sec, app_name, pk, reports)

    def _processConfig(self, config, commands, user, key_name, hostname):
        """execute a configuration, internal method of run

        Runs `commands` over SSH as `user`@`hostname` using `key_name`,
        saves the execution report, and returns the list of errors.
        """
        errors = []
        r, e = self.executeCommands(user, hostname, key_name, commands, config)
        self.saveReport(r, config)
        errors += e
        if len(e) > 0:
            logging.warning("There were errors while executing the commands. Not burning the instance...")
        return errors

    def getVal(self, cfg, module_name, key, local_value):
        """Resolve a setting: prefer `local_value`, else read it off the
        imported config module `cfg` (either `cfg.<key>` or
        `cfg.<module_name>.<key>`).

        NOTE(review): resolution uses eval() on a constructed attribute path.
        The input comes from a local config module, but this still executes
        arbitrary attribute access — consider getattr() instead.
        """
        logging.debug("getting value from module:%s key:%s local value:%s module data:%s" \
                      % (module_name, key, local_value, dir(cfg)))
        if local_value != None:
            return local_value
        else:
            key = "cfg." + module_name + "." + key if not hasattr(cfg,key) else "cfg." + key
            try:
                return eval(key)
            except:
                # NOTE(review): bare except; logs then re-raises, so errors
                # are not swallowed, but a narrower AttributeError would do.
                logging.error("Error getting value from module:%s key:%s local value:%s module data:%s" \
                              % (module_name, key, local_value, dir(cfg)))
                raise
    def provision(self, configuration, user, hostname, pk):
        """execute a configuration

        Imports the configuration module, resolves commands/user/key,
        waits for SSH to come up, then runs the commands and returns
        0 on success, 1 if any command errored.
        """
        configs = os.path.splitext(configuration)[0]
        cfg = configs
        # Make the current directory importable so the config module loads.
        sys.path = [util.getCurDir()] + sys.path
        logging.info("processing: " + cfg + " in: " + os.getcwd())
        module_name = os.path.basename(cfg)
        c = util.importModule(cfg)
        commands = self.getVal(c, module_name, self.CMDS, None)
        user = self.getVal(c, module_name, self.USER, user)
        key_name = self.getVal(c, module_name, self.KEY_NAME, pk)
        util.waitForSSHReady(user, key_name, hostname)
        errors = self._processConfig(str(configuration), commands, user, key_name, hostname)
        self.printErrors(errors)
        return 0 if len(errors)==0 else 1
def run(configuration, user, hostname, pk):
    """Entry point: provision *hostname* with *configuration*; return an exit code."""
    provisioner = Provision('', '', bellatrix.APP, __file__, bellatrix_util.getReportsDir())
    return provisioner.provision(configuration, user, hostname, pk)
if __name__ == '__main__':
    # CLI entry: forward positional args (configuration, user, hostname, pk) to run().
    sys.exit(run(*sys.argv[1:]))
| StarcoderdataPython |
3201839 | from ._NumExpr import NumExpr
from ._Basic import AlwaysTrue, AlwaysFalse, And, Not, Or
| StarcoderdataPython |
def change(img2_head_mask, img2, cv2, convexhull2, result):
    """Blend *result* onto *img2* at the centre of *convexhull2* via seamless cloning."""
    x, y, w, h = cv2.boundingRect(convexhull2)
    centre = (int((2 * x + w) / 2), int((2 * y + h) / 2))
    # cv2.MIXED_CLONE is an alternative blending mode here.
    return cv2.seamlessClone(result, img2, img2_head_mask, centre, cv2.NORMAL_CLONE)
167709 | <reponame>csc-training/geocomputing<filename>pouta/arcpy/test_data/my_arcpy_script.py
import arcpy
from arcpy.sa import *
import os
# Allow re-running: overwrite any outputs left from previous runs.
arcpy.env.overwriteOutput = True
directory = "./output/"
if not os.path.exists(directory):
    os.makedirs(directory)
# Compute a flow-direction raster from the DEM and save it to the output folder.
outFlowDirection = FlowDirection("./dem.tif", "NORMAL")
outFlowDirection.save(directory+"flowdir.tif")
| StarcoderdataPython |
1744661 | <reponame>CEHENKLE/opensearch-py<filename>test_opensearchpy/test_server/conftest.py
# SPDX-License-Identifier: Apache-2.0
#
# The OpenSearch Contributors require contributions made to
# this file be licensed under the Apache-2.0 license or a
# compatible open source license.
#
# Modifications Copyright OpenSearch Contributors. See
# GitHub history for details.
#
# Licensed to Elasticsearch B.V. under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch B.V. licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import time
import pytest
import opensearchpy
from opensearchpy.helpers.test import CA_CERTS, OPENSEARCH_URL
from ..utils import wipe_cluster
# Information about the OpenSearch instance running, if any.
# Used for gating behavior on server version/build — presumably populated at
# runtime, since the values are left empty here (TODO confirm).
OPENSEARCH_VERSION = ""
OPENSEARCH_BUILD_HASH = ""
OPENSEARCH_REST_API_TESTS = []
@pytest.fixture(scope="session")
def sync_client_factory():
    """Session-scoped OpenSearch client.

    Builds a client from env configuration, waits for the cluster to become
    reachable (skipping the session otherwise), wipes the cluster once, and
    guarantees the client is closed at session end.
    """
    client = None
    try:
        # Configure the client with certificates and optionally
        # an HTTP conn class depending on 'PYTHON_CONNECTION_CLASS' envvar
        kw = {
            "timeout": 3,
            "ca_certs": CA_CERTS,
            "headers": {"Authorization": "Basic ZWxhc3RpYzpjaGFuZ2VtZQ=="},
        }
        if "PYTHON_CONNECTION_CLASS" in os.environ:
            from opensearchpy import connection
            kw["connection_class"] = getattr(
                connection, os.environ["PYTHON_CONNECTION_CLASS"]
            )
        # We do this little dance with the URL to force
        # Requests to respect 'headers: None' within rest API spec tests.
        client = opensearchpy.OpenSearch(
            OPENSEARCH_URL.replace("elastic:changeme@", ""), **kw
        )
        # Wait for the cluster to report a status of 'yellow' (up to ~10s).
        for _ in range(100):
            try:
                client.cluster.health(wait_for_status="yellow")
                break
            # NOTE(review): this catches the *builtin* ConnectionError;
            # opensearchpy defines its own ConnectionError type — confirm
            # the intended exception is the one being caught here.
            except ConnectionError:
                time.sleep(0.1)
        else:
            pytest.skip("OpenSearch wasn't running at %r" % (OPENSEARCH_URL,))
        wipe_cluster(client)
        yield client
    finally:
        if client:
            client.close()
@pytest.fixture(scope="function")
def sync_client(sync_client_factory):
    """Per-test client: reuses the session client, wiping the cluster after
    each test so every test starts from a clean state."""
    try:
        yield sync_client_factory
    finally:
        wipe_cluster(sync_client_factory)
| StarcoderdataPython |
def to_hex(val):
    '''Return val as str of hex values concatenated by colons.'''
    if type(val) is int:
        return hex(val)
    try:
        # Python 2 only: long integers are also formatted with hex().
        if type(val) is long:
            return hex(val)
    except NameError:
        # Python 3: 'long' does not exist; fall through to the string path.
        pass
    try:
        # Python 2 str / Python 3 str: hex of each character's code point.
        return ":".join("%02x" % ord(ch) for ch in val)
    except TypeError:
        # Python 3 bytes: iterating yields ints directly.
        return ":".join("%02x" % byte for byte in val)
# http://stackoverflow.com/a/16891418
def string_without_prefix(prefix, string):
    '''Return string without prefix. If string does not start with prefix,
    return string.
    '''
    return string[len(prefix):] if string.startswith(prefix) else string
def string_with_prefix(prefix, string):
    '''Return string with prefix prepended. If string already starts with
    prefix, return string.
    '''
    p, s = str(prefix), str(string)
    return s if s.startswith(p) else p + s
| StarcoderdataPython |
3292196 | import sys
from typing import NoReturn, Optional
import click
def default(message: str, exit_with_code: Optional[int] = None) -> None:
    """Print *message*; when an exit code is supplied, terminate the process.

    Uses ``is not None`` so that an explicit exit code of 0 also triggers
    the exit, consistent with :func:`error` below (the previous truthiness
    check silently ignored 0).
    """
    click.echo(message)
    if exit_with_code is not None:
        sys.exit(exit_with_code)
def success(message: str) -> None:
    """Print *message* in green to signal success."""
    click.secho(message, fg="green")
def error(message: str, exit_with_code: Optional[int] = None) -> None:
    """Print *message* in bold red; optionally exit with the given code."""
    click.secho(message, fg="red", bold=True)
    if exit_with_code is None:
        return
    sys.exit(exit_with_code)
def fatal(message: str) -> NoReturn:  # type: ignore
    # Bold red message then exit(1); NoReturn because error() always exits here.
    error(message, exit_with_code=1)
| StarcoderdataPython |
1674872 | <gh_stars>1-10
from __future__ import print_function
from math import pi
from bokeh.client import push_session
from bokeh.document import Document
from bokeh.models.glyphs import Line, HBar
from bokeh.models import (Plot, ColumnDataSource, DataRange1d, FactorRange,
LinearAxis, CategoricalAxis, Grid, Legend)
from bokeh.sampledata.population import load_population
from bokeh.models.widgets import Select
from bokeh.models.layouts import WidgetBox, Column
document = Document()
# Open a client session so the plots update live against the Bokeh server.
session = push_session(document)
df = load_population()
revision = 2012
# Mutable app state, updated by the Select widget callbacks below.
year, location = 2010, "World"
years = [str(x) for x in sorted(df.Year.unique())]
locations = sorted(df.Location.unique())
groups = [str(x) for x in df.AgeGrp.unique()]
groups.remove('80+') # remove oddball group
# Data sources for the male/female halves of the age pyramid.
source_pyramid_m = ColumnDataSource(data=dict(value=[], group=[]))
source_pyramid_f = ColumnDataSource(data=dict(value=[], group=[]))
def pyramid():
    """Build the horizontal-bar age-pyramid plot (male bars left, female right)."""
    xdr = DataRange1d()
    ydr = FactorRange(factors=groups)
    plot = Plot(x_range=xdr, y_range=ydr, plot_width=600, plot_height=500, toolbar_location=None)
    xaxis = LinearAxis()
    plot.add_layout(xaxis, 'below')
    plot.add_layout(CategoricalAxis(), 'left')
    plot.add_layout(Grid(dimension=0, ticker=xaxis.ticker))
    # Male values are stored negative, so their bars extend to the left of 0.
    m = HBar(left="value", right=0, y="group", height=1, fill_color="#3B8686")
    mglyph = plot.add_glyph(source_pyramid_m, m)
    f = HBar(left=0, right="value", y="group", height=1, fill_color="#CFF09E")
    fglyph = plot.add_glyph(source_pyramid_f, f)
    plot.add_layout(Legend(items=[("Male" , [mglyph]), ("Female" , [fglyph])]))
    return plot
# Time-series sources: observed ("known") and projected ("predicted") population.
source_known = ColumnDataSource(data=dict(x=[], y=[]))
source_predicted = ColumnDataSource(data=dict(x=[], y=[]))
def population():
    """Build the population time-series plot (solid = known, dashed = predicted)."""
    xdr = FactorRange(factors=years)
    ydr = DataRange1d()
    plot = Plot(x_range=xdr, y_range=ydr, plot_width=600, plot_height=150, toolbar_location=None)
    # Slant the year labels so they do not overlap.
    plot.add_layout(CategoricalAxis(major_label_orientation=pi / 4), 'below')
    known = Line(x="x", y="y", line_color="violet", line_width=2)
    known_glyph = plot.add_glyph(source_known, known)
    predicted = Line(x="x", y="y", line_color="violet", line_width=2, line_dash="dashed")
    predicted_glyph = plot.add_glyph(source_predicted, predicted)
    legend = Legend(location="bottom_right",
                    items=[("known", [known_glyph]), ("predicted", [predicted_glyph])])
    plot.add_layout(legend)
    return plot
def update_pyramid():
    """Recompute the male/female pyramid sources for the current *location*/*year*."""
    pyramid = df[(df.Location == location) & (df.Year == year)]
    male = pyramid[pyramid.Sex == "Male"]
    female = pyramid[pyramid.Sex == "Female"]
    # NOTE(review): this normalises by the sum over the *entire* dataframe,
    # not just the selected location/year slice — confirm that is intended.
    total = df.Value.sum()
    male_percent = -male.Value / total  # negative so male bars extend left of 0
    female_percent = female.Value / total
    source_pyramid_m.data = dict(
        group=[str(x) for x in male.AgeGrp.unique()],
        value=male_percent,
    )
    source_pyramid_f.data = dict(
        group=[str(x) for x in female.AgeGrp.unique()],
        value=female_percent,
    )
def update_population():
    """Refresh the known/predicted population series for the current *location*.

    Values up to the revision decade count as known; from there on, predicted.
    """
    population = df[df.Location == location].groupby(df.Year).Value.sum()
    aligned_revision = revision // 10 * 10  # round the revision down to its decade
    known = population[population.index <= aligned_revision]
    predicted = population[population.index >= aligned_revision]
    source_known.data = dict(x=known.index.map(str), y=known.values)
    source_predicted.data = dict(x=predicted.index.map(str), y=predicted.values)
def update_data():
    # Refresh both linked plots after a widget change.
    update_population()
    update_pyramid()
def on_year_change(attr, old, new):
    """Select-widget callback: switch the displayed year and redraw."""
    global year
    year = int(new)
    update_data()
def on_location_change(attr, old, new):
    """Select-widget callback: switch the displayed location and redraw."""
    global location
    location = new
    update_data()
def create_layout():
    """Assemble widgets and plots into the document layout and wire callbacks."""
    year_select = Select(title="Year:", value="2010", options=years)
    location_select = Select(title="Location:", value="World", options=locations)
    year_select.on_change('value', on_year_change)
    location_select.on_change('value', on_location_change)
    controls = WidgetBox(children=[year_select, location_select], height=150, width=600)
    layout = Column(children=[controls, pyramid(), population()])
    return layout
layout = create_layout()
update_data()
# Attach to the document and show it via the server session.
document.add_root(layout)
session.show(layout)

if __name__ == "__main__":
    document.validate()
    print("\npress ctrl-C to exit")
    # Block so the session keeps serving callbacks until interrupted.
    session.loop_until_closed()
| StarcoderdataPython |
3309226 | """
Installs the vanilla distribution of kubeflow and validates FSx for Lustre integration by:
- Installing the FSx CSI Driver from upstream
- Creating the required IAM Policy, Role and Service Account
- Creating the FSx for Lustre Volume
- Creating a StorageClass, PersistentVolume and PersistentVolumeClaim using Static Provisioning
"""
import pytest
import subprocess
from e2e.utils.constants import DEFAULT_USER_NAMESPACE
from e2e.utils.config import metadata
from e2e.conftest import region
from e2e.fixtures.cluster import cluster
from e2e.fixtures.clients import account_id
from e2e.fixtures.kustomize import kustomize, configure_manifests
from e2e.fixtures.storage_fsx_dependencies import (
install_fsx_csi_driver,
create_fsx_driver_sa,
create_fsx_volume,
static_provisioning,
)
GENERIC_KUSTOMIZE_MANIFEST_PATH = "../../../../example"

@pytest.fixture(scope="class")
def kustomize_path():
    # Path (relative to the test module) of the vanilla Kubeflow kustomize manifests.
    return GENERIC_KUSTOMIZE_MANIFEST_PATH
class TestFSx:
    """Validates the FSx-for-Lustre integration of a vanilla Kubeflow install."""

    @pytest.fixture(scope="class")
    def setup(self, metadata, static_provisioning):
        # Depending on static_provisioning ensures the FSx volume/PV/PVC
        # exist before any test in the class runs.
        metadata_file = metadata.to_file()
        print(metadata.params)  # These needed to be logged
        print("Created metadata file for TestSanity", metadata_file)

    def test_pvc_with_volume(
        self,
        metadata,
        account_id,
        setup,
        create_fsx_volume,
        static_provisioning,
    ):
        # The FSx CSI driver is registered with the cluster.
        driver_list = subprocess.check_output("kubectl get csidriver".split()).decode()
        assert "fsx.csi.aws.com" in driver_list
        # The CSI controller pods are running.
        pod_list = subprocess.check_output("kubectl get pods -A".split()).decode()
        assert "fsx-csi-controller" in pod_list
        # The service account is annotated with an IAM role from this account.
        sa_account = subprocess.check_output(
            "kubectl describe -n kube-system serviceaccount fsx-csi-controller-sa".split()
        ).decode()
        assert f"arn:aws:iam::{account_id}:role" in sa_account
        # The FSx file system was created.
        fs_id = create_fsx_volume["file_system_id"]
        assert "fs-" in fs_id
        # The statically provisioned PVC exists in the cluster.
        claim_name = static_provisioning["claim_name"]
        claim_list = subprocess.check_output("kubectl get pvc -A".split()).decode()
        assert claim_name in claim_list
| StarcoderdataPython |
72183 | <reponame>nthparty/rbcl
"""Allow users to use classes directly."""
from rbcl.rbcl import\
crypto_scalarmult_ristretto255,\
crypto_scalarmult_ristretto255_BYTES,\
crypto_scalarmult_ristretto255_SCALARBYTES,\
crypto_scalarmult_ristretto255_base,\
crypto_core_ristretto255_BYTES,\
crypto_core_ristretto255_HASHBYTES,\
crypto_core_ristretto255_NONREDUCEDSCALARBYTES,\
crypto_core_ristretto255_SCALARBYTES,\
crypto_core_ristretto255_add,\
crypto_core_ristretto255_from_hash,\
crypto_core_ristretto255_is_valid_point,\
crypto_core_ristretto255_random,\
crypto_core_ristretto255_scalar_add,\
crypto_core_ristretto255_scalar_complement,\
crypto_core_ristretto255_scalar_invert,\
crypto_core_ristretto255_scalar_mul,\
crypto_core_ristretto255_scalar_negate,\
crypto_core_ristretto255_scalar_random,\
crypto_core_ristretto255_scalar_reduce,\
crypto_core_ristretto255_scalar_sub,\
crypto_core_ristretto255_sub,\
randombytes,\
randombytes_buf_deterministic
| StarcoderdataPython |
182693 | """This package contains infrastructure for solving differential equations within ``simframe``. The ``Integrator`` class
is the basic class that advances the simulation from snapshot to snapshot by executing one integration ``Instruction`` at
a time. Instructions contain a list of integration ``Scheme``. The ``schemes`` package contains pre-defined integration
schemes that are ready to use in ``simframe``."""
from simframe.integration.instruction import Instruction
from simframe.integration.integrator import Integrator
from simframe.integration.scheme import Scheme
import simframe.integration.schemes as schemes
# Public API of the integration package.
__all__ = ["Instruction",
           "Integrator",
           "Scheme",
           "schemes"]
| StarcoderdataPython |
import sys
# NOTE(review): sys.setdefaultencoding exists only on Python 2 (and there only
# when re-exposed, e.g. via reload(sys), since site.py removes it). On
# Python 3 this line raises AttributeError — confirm the target interpreter.
sys.setdefaultencoding('utf8')
1766777 | """Various kernels"""
import abc
import numpy as np
from scipy import signal
from fsic import util
class Kernel(metaclass=abc.ABCMeta):
    """Abstract base class for kernels."""

    @abc.abstractmethod
    def eval(self, X1, X2):
        """Evaluate the kernel on data X1 and X2, returning a Gram matrix."""
class KHoPoly(Kernel):
    """Homogeneous polynomial kernel of the form (x.dot(y))**d"""

    def __init__(self, degree):
        # The exponent must be a strictly positive number.
        if degree <= 0:
            raise ValueError("degree must be positive; found {}".format(degree))
        self.degree = degree

    def eval(self, X1, X2):
        """Return the n1 x n2 Gram matrix (X1 @ X2.T) ** degree."""
        gram = X1.dot(X2.T)
        return gram ** self.degree

    def __repr__(self):
        return "KHoPoly(degree={})".format(self.degree)
class KLinear(Kernel):
    """Linear kernel k(x, y) = <x, y>."""

    def eval(self, X1, X2):
        # Gram matrix of pairwise inner products: X1 @ X2.T.
        return X1.dot(X2.T)

    def __repr__(self):
        return "KLinear()"
class KGauss(Kernel):
    """Gaussian (RBF) kernel.

    Bandwidth convention: the squared distance is divided by ``sigma2``
    directly (no factor of 2 in the exponent).
    """

    def __init__(self, sigma2):
        # sigma2 is the (squared) bandwidth; must be strictly positive.
        if sigma2 <= 0:
            raise ValueError("sigma2 must be positive; found {}".format(sigma2))
        self.sigma2 = sigma2

    def eval(self, X1, X2):
        """
        Evaluate the Gaussian kernel on the two 2d numpy arrays.

        Parameters
        ----------
        X1 : n1 x d numpy array
        X2 : n2 x d numpy array

        Return
        ------
        K : a n1 x n2 Gram matrix.
        """
        d1 = X1.shape[1]
        d2 = X2.shape[1]
        if d1 != d2:
            raise ValueError(
                "The X1 dimensions (_, {}) do not match the X2 dimensions (_, {})".format(
                    d1, d2
                )
            )
        # util.dist_matrix2 presumably returns pairwise *squared* distances
        # — TODO confirm against fsic.util.
        D2 = util.dist_matrix2(X1, X2)
        np.divide(D2, -self.sigma2, out=D2)  # in-place to avoid a temporary
        return np.exp(D2)

    def __repr__(self):
        return "KGauss(sigma2={})".format(self.sigma2)
class KTriangle(Kernel):
    """
    A triangular kernel defined on 1D. k(x, y) = B_1((x-y)/width) where B_1 is the
    B-spline function of order 1 (i.e., triangular function).
    """

    def __init__(self, width):
        # width is the half-support of the triangle; must be strictly positive.
        if width <= 0:
            raise ValueError("width must be positive; found {}".format(width))
        self.width = width

    def eval(self, X1, X2):
        """
        Evaluate the triangular kernel on the two 2d numpy arrays.

        Parameters
        ----------
        X1 : n1 x 1 numpy array
        X2 : n2 x 1 numpy array

        Return
        ------
        K : a n1 x n2 Gram matrix.
        """
        d1 = X1.shape[1]
        if d1 != 1:
            raise ValueError("The X1 dimension (_, {}) must be 1".format(d1))
        d2 = X2.shape[1]
        if d2 != 1:
            raise ValueError("The X2 dimension (_, {}) must be 1".format(d2))
        diff = X1 - X2.T
        np.divide(diff, self.width, out=diff)
        # Degree-1 B-spline (hat function): B_1(x) = max(1 - |x|, 0).
        # Computed directly because scipy.signal.bspline was deprecated in
        # SciPy 1.11 and removed in SciPy 1.12.
        return np.maximum(1.0 - np.abs(diff), 0.0)

    def __repr__(self):
        return "KTriangle(width={})".format(self.width)
| StarcoderdataPython |
1671104 | # Generated by Django 2.2.19 on 2021-05-27 20:46
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add a ``slug`` field (default ``'a'``) to the ``regulator`` model."""

    dependencies = [
        ('tax_calendar', '0001_initial'),
    ]

    operations = [
        migrations.AddField(
            model_name='regulator',
            name='slug',
            field=models.SlugField(default='a'),
        ),
    ]
| StarcoderdataPython |
4827002 | <gh_stars>10-100
"""
Random generation of approval profiles
"""
import random
from math import fabs, sqrt
from abcvoting.preferences import Profile
def random_profile(num_voters, num_cand, prob_distribution, committeesize=None, **kwargs):
    """Dispatch to the sampler named by *prob_distribution* ("IC" or "Mallows<d>")."""
    if prob_distribution == "IC":
        return random_IC_profile(num_cand, num_voters, **kwargs)
    if prob_distribution.startswith("Mallows"):
        # The dispersion parameter is encoded in the name, e.g. "Mallows0.5".
        dispersion = float(prob_distribution[len("Mallows"):])
        return random_mallows_profile(num_cand, num_voters, dispersion=dispersion, **kwargs)
    raise ValueError(f"Probability model {prob_distribution} unknown.")
def random_urn_profile(num_cand, num_voters, setsize, replace):
    """Generate Polya Urn profile with fixed size approval sets.

    Each voter either samples a fresh uniform approval set (weight 1 in the
    urn) or copies a previously drawn set, whose weight grows by *replace*
    each time it is drawn fresh.
    """
    currsize = 1.0
    approval_sets = []
    replacedsets = {}
    for _ in range(num_voters):
        r = random.random() * currsize
        if r < 1.0:
            # base case: sample uniformly at random
            randset = random.sample(range(num_cand), setsize)
            approval_sets.append(randset)
            # Count multiplicities per distinct candidate set.
            key = tuple(set(randset))
            if key in replacedsets:
                replacedsets[key] += 1
            else:
                replacedsets[key] = 1
            currsize += replace
        else:
            # sample from one of the replaced ballots, weighted by count
            # (randint's upper bound is inclusive)
            r = random.randint(0, sum(replacedsets.values()))
            for approval_set in replacedsets:
                count = replacedsets[approval_set]
                if r <= count:
                    approval_sets.append(list(approval_set))
                    break
                else:
                    r -= count
    profile = Profile(num_cand)
    profile.add_voters(approval_sets)
    return profile
def random_urn_party_list_profile(num_cand, num_voters, num_parties, replace, uniform=False):
    """Generate Polya Urn profile from a number of parties.
    If uniform each party gets the same amount of candidates.

    Voters approve a whole party list; repeatedly drawn parties gain weight
    *replace* in the urn, producing correlated (party-list) ballots.
    """
    currsize = 1.0
    approval_sets = []
    replacedsets = {}
    parties = list(range(num_parties))
    party_cands = __distribute_candidates_to_parties(num_cand, parties, uniform=uniform)
    for _ in range(num_voters):
        r = random.random() * currsize
        if r < 1.0:
            # base case: sample a party uniformly at random
            party = random.choice(parties)
            randpartyset = list(party_cands[party])
            approval_sets.append(randpartyset)
            if party in replacedsets:
                replacedsets[party] += 1
            else:
                replacedsets[party] = 1
            currsize += replace
        else:
            # sample from one of the already-drawn parties, weighted by count
            r = random.randint(0, sum(replacedsets.values()))
            for party in replacedsets:
                count = replacedsets[party]
                if r <= count:
                    approval_sets.append(list(party_cands[party]))
                    break
                else:
                    r -= count
    profile = Profile(num_cand)
    profile.add_voters(approval_sets)
    return profile
def random_IC_profile(num_cand, num_voters, setsize):
    """Impartial-culture profile: each voter approves an independent,
    uniformly random subset of exactly *setsize* candidates."""
    approval_sets = [random.sample(range(num_cand), setsize) for _ in range(num_voters)]
    profile = Profile(num_cand)
    profile.add_voters(approval_sets)
    return profile
def random_IC_party_list_profile(num_cand, num_voters, num_parties, uniform=False):
    """Party-list impartial culture: each voter approves the full candidate
    list of a uniformly random party. With uniform=True every party has the
    same number of candidates, otherwise at least one each."""
    parties = list(range(num_parties))
    party_cands = __distribute_candidates_to_parties(num_cand, parties, uniform=uniform)
    approval_sets = [party_cands[random.choice(parties)] for _ in range(num_voters)]
    profile = Profile(num_cand)
    profile.add_voters(approval_sets)
    return profile
def random_2d_points_profile(
    num_cand, num_voters, candpointmode, voterpointmode, sigma, approval_threshold
):
    """Generates profiles from randomly generated 2d points according
    to some distributions with the given sigma.

    Voters approve every candidate within approval_threshold times their
    distance to the closest candidate.
    """
    voters = list(range(num_voters))
    cands = list(range(num_cand))
    voter_points = __generate_2d_points(voters, voterpointmode, sigma)
    cand_points = __generate_2d_points(cands, candpointmode, sigma)
    approval_sets = __get_profile_from_points(
        voters, cands, voter_points, cand_points, approval_threshold
    )
    profile = Profile(num_cand)
    profile.add_voters(approval_sets)
    return profile
def random_2d_points_party_list_profile(
    num_cand, num_voters, num_parties, partypointmode, voterpointmode, sigma, uniform=False
):
    """Generates profiles from randomly generated 2d points according
    to some distributions with the given sigma.
    This selects parties for each voter, the parties are either
    uniform (equal size) or randomly generated (at least 1) candidate
    lists."""
    parties = list(range(num_parties))
    party_cands = __distribute_candidates_to_parties(num_cand, parties, uniform=uniform)
    voters = list(range(num_voters))
    voter_points = __generate_2d_points(voters, voterpointmode, sigma)
    party_points = __generate_2d_points(parties, partypointmode, sigma)
    # Threshold 1.0: each voter is matched only to the single closest party.
    party_sets = __get_profile_from_points(voters, parties, voter_points, party_points, 1.0)
    approval_sets = []
    for p in party_sets:
        approval_sets.append(party_cands[p[0]])
    profile = Profile(num_cand)
    profile.add_voters(approval_sets)
    return profile
def random_mallows_profile(num_cand, num_voters, setsize, dispersion):
    """Generates a Mallows Profile after the definition for
    repeated insertion mode (RIM) in
    https://icml.cc/2011/papers/135_icmlpaper.pdf"""
    if not (0 < dispersion <= 1):
        raise Exception("Invalid dispersion, needs to be in (0, 1].")
    reference_ranking = list(range(num_cand))
    random.shuffle(reference_ranking)
    insert_dist = __compute_mallows_insert_distributions(num_cand, dispersion)
    approval_sets = []
    for _ in range(num_voters):
        vote = []
        # Repeated insertion: place each reference candidate at a position
        # drawn from its insertion distribution.
        for i, distribution in enumerate(insert_dist):
            pos = __select_pos(distribution)
            vote.insert(pos, reference_ranking[i])
        approval_sets.append(vote[:setsize])  # approve the top setsize ranks
    profile = Profile(num_cand)
    profile.add_voters(approval_sets)
    return profile
def __compute_mallows_insert_distributions(num_cand, dispersion):
    """Computes the insertion probability vectors for
    the dispersion and a given number of candidates"""
    distributions = []
    denominator = 0
    for i in range(num_cand):
        # Running sum: denominator = dispersion^0 + ... + dispersion^i.
        denominator += pow(dispersion, i)
        distributions.append([pow(dispersion, i - j) / denominator for j in range(i + 1)])
    return distributions
def __select_pos(distribution):
    """Returns a randomly selected index drawn according to the given
    discrete probability distribution (inverse-CDF sampling)."""
    # Sanity check (to 10 decimal places) that the probabilities sum to 1.
    if round(sum(distribution), 10) != 1.0:
        raise Exception("Invalid Distribution", distribution, "sum:", sum(distribution))
    r = round(random.random(), 10)  # or random.uniform(0, 1)
    pos = -1
    s = 0
    for prob in distribution:
        pos += 1
        s += prob
        if s >= r:
            return pos
    return pos  # in case of rounding errors
def __distribute_candidates_to_parties(num_cand, parties, uniform):
    """Distributes the candidates to the parties.
    Either uniformly distributed or randomly distributed with
    at least one candidate per party.

    Returns a dict mapping each party to its list of candidate indices.
    """
    if num_cand < len(parties):
        # NOTE(review): message renders as "...betweenthe parties." — missing space.
        raise ValueError("Not enough candidates to split them between" + "the parties.")
    if uniform:
        if num_cand % len(parties) != 0:
            raise ValueError(
                "To uniformly distribute candidates "
                + "between parties the number of candidates"
                + " needs to be divisible by the number of"
                + " parties."
            )
        party_cands = {}
        party_size = int(num_cand / len(parties))
        cands = set(range(num_cand))
        for i, party in enumerate(parties):
            # note: there is no guaranty about reproducibility because cands is a set
            appr = random.sample(tuple(cands), party_size)
            party_cands[party] = appr
            cands = cands - set(appr)
        return party_cands
    else:  # not uniform
        # Reserve one fixed candidate per party, then scatter the rest randomly.
        num_parties = len(parties)
        party_cands = {}
        num_random_cands = num_cand - num_parties
        for i, party in enumerate(parties):
            party_cands[party] = [num_random_cands + i]
        for cand in range(num_random_cands):
            party = random.choice(parties)
            party_cands[party].append(cand)
        return party_cands
def __generate_2d_points(agents, mode, sigma):
    """Generates a dict mapping each agent to a 2d coordinate, drawn from the
    distribution named by *mode* with spread *sigma*."""
    points = {}
    # normal distribution, 1/3 of agents centered on (-0.5,-0.5),
    # 2/3 of agents on (0.5,0.5)
    if mode == "twogroups":
        for i in range(int(len(agents) // 3)):
            points[agents[i]] = (random.gauss(-0.5, sigma), random.gauss(-0.5, sigma))
        for i in range(int(len(agents) // 3), len(agents)):
            points[agents[i]] = (random.gauss(0.5, sigma), random.gauss(0.5, sigma))
    # normal distribution centered at the origin
    elif mode == "normal":
        for i in range(len(agents)):
            points[agents[i]] = (random.gauss(0.0, sigma), random.gauss(0.0, sigma))
    elif mode == "uniform_square":
        for a in agents:
            points[a] = (random.uniform(-1, 1), random.uniform(-1, 1))
    else:
        raise ValueError("mode", mode, "not known")
    return points
def __euclidean(p1, p2):
    """Euclidean distance between the 2d points *p1* and *p2*."""
    # fabs is unnecessary before squaring: (-x)**2 == x**2 exactly for floats.
    return sqrt((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2)
def __get_profile_from_points(voters, cands, voter_points, cand_points, approval_threshold):
    """Generates a list of approval sets from 2d points according to
    approval_threshold."""
    profile = {}
    for v in voters:
        distances = {cand: __euclidean(voter_points[v], cand_points[cand]) for cand in cands}
        mindist = min(distances.values())
        # Approve every candidate within threshold * (distance to the closest one).
        profile[v] = [cand for cand in cands if distances[cand] <= mindist * approval_threshold]
    return list(profile.values())
| StarcoderdataPython |
1755359 | <reponame>iashraful/survey-api
"""slug-added
Revision ID: e4367957251b
Revises: <PASSWORD>
Create Date: 2021-09-18 07:59:48.356988
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
# NOTE(review): the values look credential-scrubbed ('<KEY>', '<PASSWORD>') —
# restore the real Alembic revision hashes before running this migration.
revision = '<KEY>'
down_revision = '<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    """Add a nullable, unique ``slug`` column to surveys and survey_responses."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.add_column('survey_responses', sa.Column('slug', sa.String(), nullable=True))
    op.create_unique_constraint(None, 'survey_responses', ['slug'])
    op.add_column('surveys', sa.Column('slug', sa.String(), nullable=True))
    op.create_unique_constraint(None, 'surveys', ['slug'])
    # ### end Alembic commands ###
def downgrade():
    """Drop the slug columns and their unique constraints again."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): dropping a constraint by name ``None`` typically fails at
    # runtime — supply the generated constraint names before using this.
    op.drop_constraint(None, 'surveys', type_='unique')
    op.drop_column('surveys', 'slug')
    op.drop_constraint(None, 'survey_responses', type_='unique')
    op.drop_column('survey_responses', 'slug')
    # ### end Alembic commands ###
| StarcoderdataPython |
95773 | <reponame>megado123/azure-cosmosdb-python-function-app
import json
import os
import uuid
import docdb
import platform

print("Python == ", platform.python_version())

# SECURITY(review): the endpoint and key are hard-coded into the source
# (the key below appears scrubbed); move them to deployment settings/secrets
# instead of committing them.
os.environ["DOCDB_HOST"] = 'https://cs489cosmossql.documents.azure.com:443/'
os.environ["DOCDB_KEY"] = '<KEY>

# request body (POST requests) is stored in a file
# and the file path is exposed as an environment variable
with open(os.environ["REQ"]) as req:
    details = json.loads(req.read())

# note that there's no init for functions, so this will be
# executed everytime the function is triggered :(
# see https://github.com/Azure/azure-webjobs-sdk-script/issues/586
repository = docdb.Repository(os.environ["DOCDB_HOST"], os.environ["DOCDB_KEY"])
print(details)
print(uuid.uuid4())

# using email as partition key, so ensure that it exists,
# even if it's made up :)
#if not details.has_key("email"):
#    details["email"] = <EMAIL>" % uuid.uuid4()

print ("Storing the contact details in Azure Document DB")
doc = repository.insert(details)
print ("Repository returned %s" % doc)

#new stuff
# Build the function response: {"key": "<GID>-<Sample>-<Material>"} written
# to the output file named by the 'resp' environment variable.
responsedata = {}
responsedata['key'] = details['GID'] + "-" + details['Sample'] + "-" + details['Material']
json_data = json.dumps(responsedata)
#response = json_data
#response.write(json_data)
#response.close()
response = open(os.environ['resp'], 'w')
response.write(json_data)
response.close()
| StarcoderdataPython |
178331 | <gh_stars>0
from enum import Enum
class Side(Enum):
    """Brain hemisphere side: LEFT or RIGHT, with lowercase string values."""

    LEFT = 'left'
    RIGHT = 'right'
| StarcoderdataPython |
3287527 | # ************************************************************
# Copyright (c) 2020, <NAME> - All Rights Reserved.
# You may use, distribute and modify this code under the
# terms of the BSD-3 license. You should have received a copy
# of the BSD-3 license with this file.
# If not, visit: https://opensource.org/licenses/BSD-3-Clause
# ************************************************************
from math import atan2
from networkx import Graph
## Convert an edge to a dart.
# Vertex order matters: reverse the pair (or apply alpha) to obtain the
# opposite dart of the same edge.
def edge2dart(edge: tuple) -> str:
    return ",".join(str(endpoint) for endpoint in edge)
## Get the edge corresponding to a given dart.
def dart2edge(dart: str) -> tuple:
    return tuple(int(part) for part in dart.split(","))
## Get node numbers associated with a given boundary cycle.
# No specific order is guaranteed.
def cycle2nodes(cycle: tuple) -> list:
    # Each dart "u,v" contributes its source node u.
    return [int(dart.split(",")[0]) for dart in cycle]
## Find the boundary cycle whose node set equals the given node list.
# WARNING: This function is unsafe to use since the cycle associated with a
# set of nodes may be non-unique; extra checks should be in place. The first
# cycle found with a matching node set is returned.
def nodes2cycle(node_list: list, boundary_cycles: list) -> tuple:
    target = set(node_list)
    for cycle in boundary_cycles:
        if {int(dart.split(",")[0]) for dart in cycle} == target:
            return cycle
## This class implements a combinatorial map.
# A 2-dimensional combinatorial map (or 2-map) is a triplet M = (D, σ, α) such that:
#
#       D is a finite set of darts;
#       σ is a permutation on D;
#       α is an involution on D with no fixed point.
#
# Intuitively, a 2-map corresponds to a planar graph where each edge is subdivided into two
# darts (sometimes also called half-edges). The permutation σ gives, for each dart, the next
# dart by turning around the vertex in the positive orientation; the other permutation α gives,
# for each dart, the other dart of the same edge.
#
# α allows one to retrieve edges, and σ allows one to retrieve vertices. We define φ = σ o α
# which gives, for each dart, the next dart of the same face.
class CMap:
    ## Initialize combinatorial map with Graph and Points list.
    # You can optionally initialize with the rotation_data, meaning
    # that for each node, you provide the list of edges from that node
    # to each connected neighbor in counter-clockwise order. Leave
    # points empty if the feature is desired. Otherwise the rotation_data
    # will be computed from the point data.
    def __init__(self, graph: Graph, points: list = (), rotation_data: list = ()) -> None:
        self._sorted_darts = dict()
        self._boundary_cycles = []
        if rotation_data:
            sorted_edges = rotation_data
        else:
            sorted_edges = get_rotational_data(graph, points)
        # For each node store its incident darts in rotation order; the pairs
        # are reversed so each dart points *into* the node.
        for node in graph.nodes():
            self._sorted_darts[node] = [edge2dart((e2, e1)) for (e1, e2) in sorted_edges[node]]
        # Every edge contributes two darts, one per direction.
        self.darts = [edge2dart((e1, e2)) for e1, e2 in graph.edges]
        self.darts.extend([edge2dart((e2, e1)) for e1, e2 in graph.edges])
        self.set_boundary_cycles()

    ## Get next outgoing dart.
    # For a given outgoing dart, return the next outgoing dart in counter-clockwise
    # order.
    def sigma(self, dart: str) -> str:
        neighbor, node = dart2edge(dart)
        index = self._sorted_darts[node].index(dart)
        n_neigh = len(self._sorted_darts[node])
        # Get next dart, wrap-around if out of range
        return self._sorted_darts[node][(index + 1) % n_neigh]

    ## Get other half edge.
    # for each dart, return the other dart associated with the same edge.
    @staticmethod
    def alpha(dart: str) -> str:
        return edge2dart(tuple(reversed(dart2edge(dart))))

    ## Get next outgoing dart.
    # For a given incoming dart, return the next outgoing dart in counter-clockwise
    # direction.
    def phi(self, dart: str) -> str:
        return self.sigma(self.alpha(dart))

    ## compute boundary cycles.
    # iterate on phi until all darts have been accounted for.
    # This will produce a list of boundary cycles. These cycles
    # are stored internally and should only be accessed through
    # the public member functions.
    def set_boundary_cycles(self) -> None:
        self._boundary_cycles = []
        all_darts = self.darts.copy()
        # Each orbit of phi is one boundary cycle; darts are removed as visited.
        while all_darts:
            cycle = [all_darts.pop()]
            next_dart = self.phi(cycle[0])
            while next_dart != cycle[0]:
                all_darts.remove(next_dart)
                cycle.append(next_dart)
                next_dart = self.phi(next_dart)
            self._boundary_cycles.append(cycle)

    ## Get boundary cycle nodes.
    # The function returns the node numbers of each boundary cycle. The nodes
    # are left in cyclic order, used mostly for plotting.
    def boundary_cycle_nodes_ordered(self) -> list:
        return [tuple([dart2edge(dart)[0] for dart in cycle]) for cycle in self._boundary_cycles]

    ## Get Boundary Cycles.
    # This will access the boundary cycles, and return them with each boundary cycle's darts
    # in a sorted order for a unique representation.
    def get_boundary_cycles(self) -> list:
        return [tuple(sorted(cycle)) for cycle in self._boundary_cycles]
## Get Rotational Data from points.
# This function is used to compute the rotational data from point data if not explicitly given.
def get_rotational_data(graph, points) -> list:
    """Compute rotational data (cyclically ordered incident edges) from points.

    For every node, its incident edges are ordered by the angle that each
    neighbor makes with the positive x-axis, measured from the node itself.

    :param graph: graph whose nodes are indexed 0..order-1
    :param points: sequence of (x, y) coordinates, indexed by node number
    :return: list where entry ``i`` holds node ``i``'s incident edges
        ``(i, neighbor)`` sorted by increasing angle (counter-clockwise)
    """
    def theta(pt, center):
        # Angle of *pt* relative to *center*, in (-pi, pi].
        return atan2(pt[1] - center[1], pt[0] - center[0])

    sorted_edges = [[] for _ in range(graph.order())]
    for node in graph.nodes():
        center = points[node]
        # Pair each neighbor with its coordinates so both survive the sort.
        neighbor_zip = [(n, points[n]) for n in graph.neighbors(node)]
        # Ascending angle == counter-clockwise order.  (The original kept an
        # unused `clockwise` flag; sorting is always ascending here.)
        sorted_zip = sorted(neighbor_zip, key=lambda pair: theta(pair[1], center))
        sorted_edges[node] = [(node, n) for (n, _) in sorted_zip]
    return sorted_edges
| StarcoderdataPython |
1618374 | class Solution:
def minDistance(self, word1: str, word2: str) -> int:
m = len(word1)
n = len(word2)
mat = [[0] * (n + 1) for _ in range(m + 1)]
for i in range(m + 1):
mat[i][0] = i
for j in range(n + 1):
mat[0][j] = j
print(mat)
for i in range(1,m+1):
for j in range(1,n+1):
if word1[i-1]==word2[j-1]:
mat[i][j]=mat[i-1][j-1]
else:
x = min(mat[i][j-1],mat[i-1][j],mat[i-1][j-1])
mat[i][j]=x+1
return mat[-1][-1]
| StarcoderdataPython |
42750 | <filename>awsrun/student/tasks.py
import os
import time
from abc import ABC, abstractmethod, ABCMeta
from common.commands import Compress, Upload, SendMsg, Download, Decompress
from common.configuration import AWSConfig
from common.protocol import IOTask, AWSMsg, AWSIDRegistration
from common.resources import Folder, File, OSPath
from multipledispatch import dispatch
class Issuer(ABC):
    """Abstract interface for anything that can submit an AWSMsg task."""

    @abstractmethod
    def issue(self, task: AWSMsg):
        """Submit *task* for execution; concrete subclasses decide how."""
        ...
class AWSIssuer(Issuer):
    """Issuer that runs student tasks remotely on AWS.

    The IOTask pipeline is: compress local dependencies and upload them
    (_operands), send the task message (_operator), then download, unpack
    and report the results (_output).
    """
    def __init__(self, awsconfig: AWSConfig):
        # AWS endpoint/bucket/queue configuration used by every command below.
        self._awsconfig = awsconfig
    @staticmethod
    def dependencies(task: IOTask):
        """Collect the task's file dependencies as paths relative to the cwd.

        Two sources: shell arguments that exist on disk, and the task's
        explicitly declared deps.
        """
        deps = []
        cwd = Folder.cwd()
        # Any shell argument that names an existing path is treated as an
        # implicit input file of the task.
        deps.extend(map(lambda f: cwd.relative(f),
                        map(lambda p: OSPath.new(p), filter(lambda arg: os.path.exists(arg), task.command.shell))))
        deps.extend(map(lambda f: cwd.relative(f), task.command.deps))
        return deps
    def _operands(self, task: IOTask):
        """Compress the task's inputs and upload the archive to the bucket."""
        resources = Compress(task.workspace.input, *AWSIssuer.dependencies(task)).execute()
        uploaded = Upload(self._awsconfig.serverpath, self._awsconfig.bucketpath, resources).execute()
        # Echo status back to user.
        print("Resources {0} is transfered\n".format(uploaded.path))
        # NOTE(review): presumably gives the upload time to settle before the
        # task message is sent — confirm whether this delay is still needed.
        time.sleep(1)
    def _operator(self, task: IOTask):
        """Send the task message to the task queue; returns the command result."""
        return SendMsg(self._awsconfig.serverpath, self._awsconfig.taskpath, task).execute()
    def _clean_files(self, task: IOTask):
        # Drop the local input/output archives once results are unpacked.
        os.remove(task.workspace.local_input)
        os.remove(task.workspace.local_output)
    def _output(self, task: IOTask):
        """Download the result archive, unpack it, and print stdout/stderr."""
        retrieved = Download(self._awsconfig.serverpath, self._awsconfig.bucketpath, task.workspace.output,
                             task.command.timeout).execute()
        cwd = Folder(os.path.normpath(os.getcwd()))
        # files to extract
        stdout_report = File('stdout')
        stderr_report = File('stderr')
        target = Decompress(cwd, retrieved, stdout_report, stderr_report).execute()
        # report
        target.relative(stdout_report).content(header=" STDOUT ")
        target.relative(stderr_report).content(header=" STDERR ")
        #
        # Performance runs additionally unpack the raw input archive into the
        # task's local working directory.
        if task.perf_file:
            Decompress(task.lwd.relative(task.workspace.root).create(), task.workspace.local_input).execute()
        self._clean_files(task)
    @dispatch(IOTask)
    def issue(self, task):
        """Run a full IOTask round trip: upload, dispatch, collect output."""
        self._operands(task)
        self._operator(task)
        self._output(task)
    @dispatch(AWSIDRegistration)
    def issue(self, reg):
        """Register a student/agent ID with the registration queue."""
        SendMsg(self._awsconfig.serverpath, self._awsconfig.regpath, reg).execute()
| StarcoderdataPython |
3326452 | <gh_stars>10-100
import falcon
from .hello import HelloName
from .root import Root
def get_routes(app: falcon.App):
    """Attach every HTTP route of this service to *app*."""
    route_table = {
        "/": Root(),
        "/hello/{name}": HelloName(),
    }
    for uri, resource in route_table.items():
        app.add_route(uri, resource)
| StarcoderdataPython |
152409 | <filename>conftest.py
import pytest
import requests
import shutil
from pathlib import Path
from typing import Generator
from readimc import MCDFile, TXTFile
# Release asset containing the raw IMC test dataset downloaded by the
# session fixture below.
_imc_test_data_asset_url = (
    "https://github.com/BodenmillerGroup/TestData"
    "/releases/download/v1.0.7/210308_ImcTestData_raw.tar.gz"
)
# Paths inside the extracted archive (relative to the temp directory).
_imc_test_data_raw_dir = "datasets/210308_ImcTestData/raw"
_imc_test_data_mcd_file = "20210305_NE_mockData1/20210305_NE_mockData1.mcd"
# Single-acquisition text export matching the MCD file above.
_imc_test_data_txt_file = (
    "20210305_NE_mockData1/20210305_NE_mockData1_ROI_001_1.txt"
)
def _download_and_extract_asset(tmp_dir_path: Path, asset_url: str):
    """Download *asset_url* into *tmp_dir_path* and unpack it there.

    Raises requests.HTTPError when the download fails instead of silently
    leaving the directory empty (the original skipped non-200 responses,
    which made the dependent fixtures fail later with confusing errors).
    """
    asset_file_path = tmp_dir_path / "asset.tar.gz"
    response = requests.get(asset_url, stream=True)
    response.raise_for_status()
    with asset_file_path.open(mode="wb") as f:
        # Stream to disk rather than buffering the whole archive in memory.
        shutil.copyfileobj(response.raw, f)
    shutil.unpack_archive(asset_file_path, tmp_dir_path)
@pytest.fixture(scope="session")
def imc_test_data_raw_path(tmp_path_factory) -> Generator[Path, None, None]:
    """Yield the directory of extracted raw IMC test data (session scope)."""
    scratch = tmp_path_factory.mktemp("raw")
    _download_and_extract_asset(scratch, _imc_test_data_asset_url)
    yield scratch / Path(_imc_test_data_raw_dir)
    # Teardown: remove the downloaded archive and extracted files.
    shutil.rmtree(scratch)
@pytest.fixture
def imc_test_data_mcd_file(
    imc_test_data_raw_path: Path,
) -> Generator[MCDFile, None, None]:
    """Yield an opened MCDFile backed by the downloaded test dataset."""
    mcd_path = imc_test_data_raw_path / Path(_imc_test_data_mcd_file)
    with MCDFile(mcd_path) as handle:
        yield handle
@pytest.fixture
def imc_test_data_txt_file(
    imc_test_data_raw_path: Path,
) -> Generator[TXTFile, None, None]:
    """Yield an opened TXTFile backed by the downloaded test dataset."""
    txt_path = imc_test_data_raw_path / Path(_imc_test_data_txt_file)
    with TXTFile(txt_path) as handle:
        yield handle
| StarcoderdataPython |
166824 | import os
import sys
from plistlib import Plist
from . import __version__ as bdist_mpkg_version
from . import tools
from .py3k import unicode, u, any_str_type
def _major_minor(v):
rval = [0, 0]
try:
for i, rev in enumerate(v.version):
rval[i] = int(rev)
except (TypeError, ValueError, IndexError):
pass
return rval
def common_info(name, version):
    """Return the plist keys that can appear in any package type.

    :param name: package name
    :param version: anything tools.Version accepts
    """
    name = unicode(name)
    version = tools.Version(version)
    major, minor = _major_minor(version)
    python_info = dict(
        PythonLongVersion=unicode(sys.version),
        # Use sys.version_info, not sys.version[:3]: the string slice is
        # wrong for two-digit minors (Python 3.10+ would yield "3.1").
        PythonShortVersion=unicode('%d.%d' % sys.version_info[:2]),
        PythonExecutable=unicode(sys.executable),
        bdist_mpkg=dict(
            version=unicode(bdist_mpkg_version),
        ),
    )
    return dict(
        CFBundleGetInfoString=u('%s %s' % (name, version)),
        CFBundleIdentifier=u('org.pythonmac.%s' % (name,)),
        CFBundleName=name,
        CFBundleShortVersionString=unicode(version),
        IFMajorVersion=major,
        IFMinorRevision=minor,
        IFPkgFormatVersion=0.10000000149011612,
        IFRequirementDicts=[path_requirement(u('/'))],
        PythonInfoDict=python_info,
    )
def pkg_info(name, version):
    """Return plist contents for a single (non-meta) package."""
    info = common_info(name, version)
    # Keys that may only appear in single packages.
    single_pkg_keys = dict(
        IFPkgFlagAllowBackRev=False,
        IFPkgFlagAuthorizationAction=u('AdminAuthorization'),
        #IFPkgFlagDefaultLocation=u'/Library/Python/2.3',
        IFPkgFlagFollowLinks=True,
        IFPkgFlagInstallFat=False,
        IFPkgFlagIsRequired=False,
        IFPkgFlagOverwritePermissions=False,
        IFPkgFlagRelocatable=False,
        IFPkgFlagRestartAction=u('NoRestart'),
        IFPkgFlagRootVolumeOnly=True,
        # NOTE(review): "Langauges" spelling preserved from the original key
        # name — verify against Installer docs before correcting it.
        IFPkgFlagUpdateInstalledLangauges=False,
    )
    info.update(single_pkg_keys)
    return info
def path_requirement(SpecArgument, Level=u('requires'), **kw):
    """Build an Installer requirement dict asserting that *SpecArgument*
    exists and is a directory.

    Extra keyword arguments (e.g. LabelKey, TitleKey) are merged into the
    resulting dict; passing a key that duplicates one of the fixed keys
    raises TypeError.
    """
    return dict(
        Level=Level,
        SpecType=u('file'),
        SpecArgument=tools.unicode_path(SpecArgument),
        SpecProperty=u('NSFileType'),
        TestOperator=u('eq'),
        TestObject=u('NSFileTypeDirectory'),
        **kw
    )
# Map well-known installation prefixes to a human-friendly distribution
# name; used by python_requirement() to label which Python an mpkg targets.
FRIENDLY_PREFIX = {
    os.path.expanduser(u('~/Library/Frameworks')) : u('User'),
    u('/System/Library/Frameworks') : u('Apple'),
    u('/Library/Frameworks') : u('python.org'),
    u('/opt/local') : u('DarwinPorts'),
    u('/usr/local') : u('Unix'),
    u('/sw') : u('Fink'),
}
def python_requirement(pkgname, prefix=None, version=None, **kw):
    """Build an Installer requirement asserting that a suitable Python exists.

    :param pkgname: package name, used in the human-readable messages
    :param prefix: sys.prefix of the targeted Python (default: current)
    :param version: "major.minor" string (default: current interpreter)
    """
    if prefix is None:
        prefix = sys.prefix
    if version is None:
        # Use sys.version_info rather than sys.version[:3]: the string slice
        # is wrong for two-digit minors (Python 3.10+ would yield "3.1").
        version = '%d.%d' % sys.version_info[:2]
    prefix = os.path.realpath(prefix)
    # For a framework build, sys.prefix sits two levels inside X.framework;
    # the friendly-name lookup wants the framework's parent directory.
    fmwkprefix = os.path.dirname(os.path.dirname(prefix))
    is_framework = fmwkprefix.endswith('.framework')
    if is_framework:
        dprefix = os.path.dirname(fmwkprefix)
    else:
        dprefix = prefix
    dprefix = tools.unicode_path(dprefix)
    name = u('%s Python %s' % (FRIENDLY_PREFIX.get(dprefix, dprefix), version))
    kw.setdefault('LabelKey', name)
    title = u('%s requires %s to install.' % (pkgname, name,))
    kw.setdefault('TitleKey', title)
    kw.setdefault('MessageKey', title)
    return path_requirement(prefix, **kw)
def mpkg_info(name, version, packages=()):
    """Return plist contents for a metapackage bundling *packages*.

    Each entry of *packages* may be a mapping (used as-is), a plain string
    (the package location), or a (location, selection) pair.

    The ``packages`` default is now an immutable tuple; the original used a
    mutable ``[]`` default, which is shared across calls.
    """
    d = common_info(name, version)
    # Keys that can appear only in metapackages.
    npackages = []
    for p in packages:
        items = getattr(p, 'items', None)
        if items is not None:
            # Already a mapping: copy its entries verbatim.
            p = dict(items())
        else:
            if isinstance(p, any_str_type):
                p = [p]
            # Pair up location (and optional selection) with the plist keys.
            p = dict(zip(
                (u('IFPkgFlagPackageLocation'), u('IFPkgFlagPackageSelection')),
                p
            ))
        npackages.append(p)
    d.update(dict(
        IFPkgFlagComponentDirectory=u('./Contents/Packages'),
        IFPkgFlagPackageList=npackages,
    ))
    return d
def checkpath_plugin(path):
    """Return an Installer CheckPath plugin description for *path*.

    NOTE(review): this helper appears to be unused in this codebase.  The
    original referenced an undefined name ``encoding`` and would have
    raised NameError for any non-unicode path; byte paths are now decoded
    with the filesystem encoding instead.
    """
    if not isinstance(path, unicode):
        path = unicode(path, sys.getfilesystemencoding())
    return dict(
        searchPlugin=u('CheckPath'),
        path=path,
    )
def common_description(name, version):
    """Return the description plist keys shared by all package types."""
    title = unicode(name)
    release = tools.Version(version)
    return dict(
        IFPkgDescriptionTitle=title,
        IFPkgDescriptionVersion=unicode(release),
    )
def write(dct, path):
    """Serialize the mapping *dct* to the property-list file at *path*.

    NOTE(review): plistlib.Plist is a legacy API (deprecated since 2.4 and
    removed in Python 3.9); kept because the whole module targets the old
    plistlib interface.
    """
    p = Plist()
    p.update(dct)
    p.write(path)
| StarcoderdataPython |
157249 | # Copyright (C) 2015 by <NAME> <<EMAIL>>
#
# muniments is freely distributable under the terms of an MIT-style license.
# See COPYING or http://www.opensource.org/licenses/mit-license.php.
from test_muniments.test_fixtures import register_fixture
from test_muniments.test_integration.test_external.test_couchdb import CouchDbReadFixture
from test_muniments.test_integration.test_external.test_couchdb import CouchDbWriteFixture
from test_muniments.test_unit.test_scheduler.test_models import test_schedule
# For every schedule model module (named "model_*") exported by
# test_schedule, register one CouchDB read fixture and one write fixture
# built from that module's DATA payload, injecting them into this module's
# namespace via globals().
for module in [ getattr(test_schedule, module_name) for module_name in dir(test_schedule) if module_name.startswith('model_') ]:
    register_fixture(globals(), ( CouchDbReadFixture, ), module.DATA)
    register_fixture(globals(), ( CouchDbWriteFixture, ), module.DATA)
| StarcoderdataPython |
4839375 | <gh_stars>1-10
# -*- coding:UTF-8 -*-
# Author:<NAME>
# Date: Fri, 26 Feb 2021, 00:55
# Project Euler # 060 Prime pair sets
#==================================================================Solution
import os, sys
# Make the project's shared "Utility Class" helpers importable no matter
# which directory the script is launched from.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
sys.path.append(BASE_DIR)
sys.path.append(BASE_DIR.partition('P060')[0] + 'Utility Class')
from prime_utility import *
# Sieve every prime below `limit` once up front; find_ans() below only
# draws candidates from this precomputed, ascending list.
limit = 10000
prime_class = Prime(limit)
prime_class._traverse()
prime_list = prime_class._get_prime_list()
def is_prime(p):
    """Primality test by trial division up to sqrt(p).

    Fixes the original's edge cases: every value below 2 is non-prime.
    Previously ``is_prime(0)`` wrongly returned True (empty trial range)
    and negative inputs raised, because ``p ** 0.5`` is complex for p < 0.
    """
    if p < 2:
        return False
    if p == 2:
        return True
    if p % 2 == 0:
        return False
    # Only odd divisors need checking once evens are ruled out.
    for i in range(3, int(p ** 0.5) + 1, 2):
        if p % i == 0:
            return False
    return True
def is_prime_pair(p1, p2):
    """True when both digit concatenations p1||p2 and p2||p1 are prime."""
    left = int(str(p1) + str(p2))
    right = int(str(p2) + str(p1))
    return is_prime(left) and is_prime(right)
def is_prime_pair_set(prime_set):
    """True when every unordered pair from *prime_set* is a prime pair."""
    for i, first in enumerate(prime_set):
        for second in prime_set[i + 1:]:
            if not is_prime_pair(first, second):
                return False
    return True
def find_ans():
    """Exhaustive search for the first set of five primes from the global,
    ascending `prime_list` in which every pair concatenates to primes.

    Each nesting level only extends a prefix that is already a valid
    prime-pair set, so deeper loops are pruned early.  Returns the sum of
    the first qualifying set found (smallest by index order).
    """
    for i1 in range(len(prime_list) - 4):
        for i2 in range(i1 + 1, len(prime_list) - 3):
            if is_prime_pair_set([prime_list[i1], prime_list[i2]]) == True:
                for i3 in range(i2 + 1, len(prime_list) - 2):
                    if is_prime_pair_set([prime_list[i1], prime_list[i2], prime_list[i3]]) == True:
                        for i4 in range(i3 + 1, len(prime_list) - 1):
                            if is_prime_pair_set([prime_list[i1], prime_list[i2], prime_list[i3], prime_list[i4]]) == True:
                                for i5 in range(i4 + 1, len(prime_list)):
                                    if is_prime_pair_set([prime_list[i1], prime_list[i2], prime_list[i3], prime_list[i4], prime_list[i5]]) == True:
                                        return sum([prime_list[i1], prime_list[i2], prime_list[i3], prime_list[i4], prime_list[i5]])
print(find_ans()) | StarcoderdataPython |
1640605 | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Union, Sequence
from .result_with_mhc_class import ResultWithMhcClass
from .species import Species
from .gene import Gene
class Class2Locus(ResultWithMhcClass):
    """An MHC class II locus of a species, grouping the alpha- and
    beta-chain genes that belong to it."""
    def __init__(
            self,
            species : Species,
            name : str,
            mhc_class : str = "II",
            genes : Sequence[Gene] = (),
            raw_string : Union[str, None] = None):
        """
        :param species: species this locus belongs to
        :param name: locus name
        :param mhc_class: MHC class; class II loci use "II"
        :param genes: genes contained in this locus (default is an
            immutable empty tuple; the original mutable ``[]`` default was
            shared across calls)
        :param raw_string: original string this object was parsed from
        """
        ResultWithMhcClass.__init__(
            self,
            species=species,
            mhc_class=mhc_class,
            raw_string=raw_string)
        self.name = name
        self.genes = genes
    @property
    def locus_name(self):
        # Alias kept for API symmetry with other Result* classes.
        return self.name
    @classmethod
    def eq_field_names(cls):
        """Fields that participate in equality comparison."""
        return ("species", "name")
    @classmethod
    def str_field_names(cls):
        """Fields that participate in the string representation."""
        return ("species", "name")
    def to_string(self, include_species=True, use_old_species_prefix=False):
        """Render as "<species>-<locus>", delegating species formatting."""
        species_string = self.species.to_string(
            include_species=include_species,
            use_old_species_prefix=use_old_species_prefix)
        return species_string + "-" + self.name
    def compact_string(self, include_species=False, use_old_species_prefix=False):
        """Same as to_string but omits the species prefix by default."""
        return self.to_string(
            include_species=include_species,
            use_old_species_prefix=use_old_species_prefix)
    @property
    def gene_names(self):
        return [g.name for g in self.genes]
    @classmethod
    def endswith_ignore_digits(cls, s1, s2):
        """True if *s1*, after stripping trailing digits, ends with *s2*.

        Guards against empty/all-digit strings, which made the original
        raise IndexError on ``s1[-1]``.
        """
        while s1 and s1[-1].isdigit():
            s1 = s1[:-1]
        return s1.endswith(s2)
    @property
    def alpha_chain_genes(self):
        """Genes whose name (ignoring trailing digits) ends in "a"."""
        results = []
        for g in self.genes:
            name = g.name
            if self.endswith_ignore_digits(name.lower(), "a"):
                results.append(g)
        return results
    @property
    def beta_chain_genes(self):
        """Genes whose name (ignoring trailing digits) ends in "b"."""
        results = []
        for g in self.genes:
            name = g.name
            if self.endswith_ignore_digits(name.lower(), "b"):
                results.append(g)
        return results
    @property
    def alpha_chain_gene_names(self):
        return [g.name for g in self.alpha_chain_genes]
    @property
    def beta_chain_gene_names(self):
        return [g.name for g in self.beta_chain_genes]
    @classmethod
    def get(cls, species_prefix, locus_name):
        """Look up a Class2Locus by species (object or prefix) and name.

        Returns None when the species or locus cannot be resolved; genes
        that fail to resolve are silently skipped.
        """
        if type(species_prefix) is Species:
            species = species_prefix
        else:
            species = Species.get(species_prefix)
        if species is None:
            return None
        raw_string = locus_name
        locus_name = species.find_matching_class2_locus_name(locus_name)
        if locus_name is None:
            return None
        gene_names = species.class2_locus_to_gene_names[locus_name]
        genes = []
        for gene_name in gene_names:
            gene = Gene.get(species, gene_name)
            if gene is None:
                continue
            genes.append(gene)
        return Class2Locus(
            species=species,
            name=locus_name,
            genes=genes,
            raw_string=raw_string)
| StarcoderdataPython |
194385 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: broker/dev/service_class.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
DESCRIPTOR = _descriptor.FileDescriptor(
name='broker/dev/service_class.proto',
package='istio.broker.dev',
syntax='proto3',
serialized_pb=_b('\n\x1e\x62roker/dev/service_class.proto\x12\x10istio.broker.dev\"o\n\x0cServiceClass\x12\x30\n\ndeployment\x18\x01 \x01(\x0b\x32\x1c.istio.broker.dev.Deployment\x12-\n\x05\x65ntry\x18\x02 \x01(\x0b\x32\x1e.istio.broker.dev.CatalogEntry\"\x1e\n\nDeployment\x12\x10\n\x08instance\x18\x01 \x01(\t\"=\n\x0c\x43\x61talogEntry\x12\x0c\n\x04name\x18\x01 \x01(\t\x12\n\n\x02id\x18\x02 \x01(\t\x12\x13\n\x0b\x64\x65scription\x18\x03 \x01(\tB\x19Z\x17istio.io/api/broker/devb\x06proto3')
)
_SERVICECLASS = _descriptor.Descriptor(
name='ServiceClass',
full_name='istio.broker.dev.ServiceClass',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='deployment', full_name='istio.broker.dev.ServiceClass.deployment', index=0,
number=1, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='entry', full_name='istio.broker.dev.ServiceClass.entry', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=52,
serialized_end=163,
)
_DEPLOYMENT = _descriptor.Descriptor(
name='Deployment',
full_name='istio.broker.dev.Deployment',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='instance', full_name='istio.broker.dev.Deployment.instance', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=165,
serialized_end=195,
)
_CATALOGENTRY = _descriptor.Descriptor(
name='CatalogEntry',
full_name='istio.broker.dev.CatalogEntry',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='name', full_name='istio.broker.dev.CatalogEntry.name', index=0,
number=1, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='id', full_name='istio.broker.dev.CatalogEntry.id', index=1,
number=2, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
_descriptor.FieldDescriptor(
name='description', full_name='istio.broker.dev.CatalogEntry.description', index=2,
number=3, type=9, cpp_type=9, label=1,
has_default_value=False, default_value=_b("").decode('utf-8'),
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None, file=DESCRIPTOR),
],
extensions=[
],
nested_types=[],
enum_types=[
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=197,
serialized_end=258,
)
_SERVICECLASS.fields_by_name['deployment'].message_type = _DEPLOYMENT
_SERVICECLASS.fields_by_name['entry'].message_type = _CATALOGENTRY
DESCRIPTOR.message_types_by_name['ServiceClass'] = _SERVICECLASS
DESCRIPTOR.message_types_by_name['Deployment'] = _DEPLOYMENT
DESCRIPTOR.message_types_by_name['CatalogEntry'] = _CATALOGENTRY
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
ServiceClass = _reflection.GeneratedProtocolMessageType('ServiceClass', (_message.Message,), dict(
DESCRIPTOR = _SERVICECLASS,
__module__ = 'broker.dev.service_class_pb2'
# @@protoc_insertion_point(class_scope:istio.broker.dev.ServiceClass)
))
_sym_db.RegisterMessage(ServiceClass)
Deployment = _reflection.GeneratedProtocolMessageType('Deployment', (_message.Message,), dict(
DESCRIPTOR = _DEPLOYMENT,
__module__ = 'broker.dev.service_class_pb2'
# @@protoc_insertion_point(class_scope:istio.broker.dev.Deployment)
))
_sym_db.RegisterMessage(Deployment)
CatalogEntry = _reflection.GeneratedProtocolMessageType('CatalogEntry', (_message.Message,), dict(
DESCRIPTOR = _CATALOGENTRY,
__module__ = 'broker.dev.service_class_pb2'
# @@protoc_insertion_point(class_scope:istio.broker.dev.CatalogEntry)
))
_sym_db.RegisterMessage(CatalogEntry)
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('Z\027istio.io/api/broker/dev'))
# @@protoc_insertion_point(module_scope)
| StarcoderdataPython |
73375 | <gh_stars>0
import pytest
from graphapi.tests.utils import populate_db
from openstates.data.models import Organization, Person, Membership
from utils.common import pretty_url
@pytest.mark.django_db
def setup():
    """Seed the test DB: Alaska fixtures plus two house committees —
    Robots (one member) and Wizards (five members)."""
    populate_db()
    lower_chamber = Organization.objects.get(
        classification="lower", jurisdiction__name="Alaska"
    )
    robots = Organization.objects.create(
        name="Robots",
        classification="committee",
        parent=lower_chamber,
        jurisdiction=lower_chamber.jurisdiction,
    )
    wizards = Organization.objects.create(
        name="Wizards",
        classification="committee",
        parent=lower_chamber,
        jurisdiction=lower_chamber.jurisdiction,
    )
    # Exactly one person sits on Robots.
    robot_member = Person.objects.get(name="<NAME>")
    Membership.objects.create(person=robot_member, organization=robots)
    # The first five people are all on Wizards.
    for member in Person.objects.all()[:5]:
        Membership.objects.create(person=member, organization=wizards)
@pytest.mark.skip("committees view is disabled for now")
@pytest.mark.django_db
def test_committees_view(client, django_assert_num_queries):
    """Listing page renders both committees with correct member counts."""
    with django_assert_num_queries(3):
        resp = client.get("/ak/committees/")
    assert resp.status_code == 200
    assert resp.context["state"] == "ak"
    assert resp.context["state_nav"] == "committees"
    assert len(resp.context["chambers"]) == 2
    assert len(resp.context["committees"]) == 2
    # The two committees may come back in either order.
    first, second = resp.context["committees"]
    robots, wizards = (first, second) if first["name"] == "Robots" else (second, first)
    assert robots["member_count"] == 1
    assert wizards["member_count"] == 5
@pytest.mark.django_db
def test_committee_detail(client, django_assert_num_queries):
    """Detail page renders the Wizards committee and its five members."""
    committee = Organization.objects.get(name="Wizards")
    with django_assert_num_queries(9):
        resp = client.get(pretty_url(committee))
    assert resp.status_code == 200
    assert resp.context["state"] == "ak"
    assert resp.context["state_nav"] == "committees"
    assert resp.context["committee"].name == "Wizards"
    assert len(resp.context["memberships"]) == 5
@pytest.mark.django_db
def test_canonicalize_committee(client):
    """A detail URL with a wrong slug permanently redirects to the
    canonical pretty URL."""
    committee = Organization.objects.get(name="Wizards")
    mangled = pretty_url(committee).replace("wizards", "xyz")
    assert "xyz" in mangled
    resp = client.get(mangled)
    assert resp.status_code == 301
    assert resp.url == pretty_url(committee)
| StarcoderdataPython |
176996 | # flake8: noqa
from .about import __version__
from .candidate import Candidate
from .ice import Connection
| StarcoderdataPython |
123240 | <reponame>frenzylabs/ancilla
'''
node.py
ancilla
Created by <NAME> (<EMAIL>) on 01/08/20
Copyright 2019 FrenzyLabs, LLC.
'''
from .base import Event, State
class NodeEvent(Event):
    # Event-source key identifying node-level events.
    key = "node"
    # Mapping of event names to their payload values / classes.
    # NOTE(review): `services = "created"` looks like it may have been meant
    # as `created = "created"` (cf. updated/deleted) — confirm against Event
    # consumers before changing.
    events = dict(
        services = "created",
        updated = "updated",
        deleted = "deleted",
        state = State
    )
| StarcoderdataPython |
1730965 | <reponame>avmi/plato-research-dialogue-system<filename>plato/agent/component/dialogue_policy/reinforcement_learning/reward_function.py
"""
Copyright (c) 2019-2020 Uber Technologies, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
__author__ = "<NAME>"
from abc import ABC, abstractmethod
from copy import deepcopy
"""
Reward is the parent abstract class for reward functions, primarily used for
reinforcement learning.
"""
class Reward(ABC):
    """Abstract parent for reward functions, primarily used for
    reinforcement learning."""

    @abstractmethod
    def initialize(self, **kwargs):
        """Set internal parameters.

        :param kwargs: implementation-specific configuration
        :return: nothing
        """
        ...

    @abstractmethod
    def calculate(self, state, action):
        """Compute the reward for taking *action* from *state*.

        :param state: the current state
        :param action: the action taken from the current state
        :return: the calculated reward
        """
        ...
class SlotFillingReward(Reward):
    """Reward for slot-filling dialogues.

    Per-turn penalty while the dialogue runs; at a terminal state the
    dialogue is judged successful only if an offer was made, the offered
    item satisfies the goal constraints, and all requests were addressed.
    calculate() returns (reward, dialogue_success, task_success); task
    success follows the Liu & Lane (ASRU 2017) definition and is only
    computed for the system role (None otherwise).
    """
    def __init__(self):
        """
        Set default values for turn penalty, success, and failure.
        """
        self.goal = None
        self.turn_penalty = -0.05
        self.failure_penalty = -1
        self.success_reward = 20
    def initialize(self, **kwargs):
        """
        Initialize parameters for turn penalty, success, and failure.
        :param kwargs: turn penalty, failure penalty, and success reward
        :return: Nothing
        """
        if 'turn_penalty' in kwargs:
            self.turn_penalty = kwargs['turn_penalty']
        if 'failure_penalty' in kwargs:
            self.failure_penalty = kwargs['failure_penalty']
        if 'success_reward' in kwargs:
            self.success_reward = kwargs['success_reward']
    def calculate(self, state, actions, goal=None, force_terminal=False,
                  agent_role='system'):
        """
        Calculate the reward to be assigned for taking action from state.
        :param state: the current state
        :param actions: the action taken from the current state
        :param goal: the agent's goal, used to assess success
        :param force_terminal: force state to be terminal
        :param agent_role: the role of the agent ('system' or 'user')
        :return: (reward, dialogue_success, task_success) tuple
        """
        reward = self.turn_penalty
        if goal is None:
            print('Warning: SlotFillingReward() called without a goal.')
            return 0, False, False
        else:
            dialogue_success = False
            if state.is_terminal() or force_terminal:
                # Check that an offer has actually been made
                if state.system_made_offer:
                    dialogue_success = True
                    # Check that the item offered meets the user's constraints
                    for constr in goal.constraints:
                        if goal.ground_truth:
                            # Multi-agent case
                            if goal.ground_truth[constr] != \
                                    goal.constraints[constr].value and \
                                    goal.constraints[constr].value != \
                                    'dontcare':
                                reward += self.failure_penalty
                                dialogue_success = False
                                break
                        elif state.item_in_focus:
                            # Single-agent case
                            if state.item_in_focus[constr] != \
                                    goal.constraints[constr].value and \
                                    goal.constraints[constr].value != \
                                    'dontcare':
                                reward += self.failure_penalty
                                dialogue_success = False
                                break
                    # Check that all requests have been addressed
                    if dialogue_success:
                        not_met = 0
                        if agent_role == 'system':
                            # Check that the system has responded to all
                            # requests (actually) made by the user
                            for req in goal.actual_requests:
                                if not goal.actual_requests[req].value:
                                    not_met += 1
                        elif agent_role == 'user':
                            # Check that the user has provided all the
                            # requests in the goal
                            for req in goal.requests:
                                if not goal.requests[req].value:
                                    not_met += 1
                        if not_met > 0:
                            reward += self.failure_penalty
                            dialogue_success = False
                        else:
                            # Full success replaces (not adds to) the reward.
                            reward = self.success_reward
                else:
                    # Terminal without an offer: automatic failure.
                    reward += self.failure_penalty
                    dialogue_success = False
        # Liu & Lane ASRU 2017 Definition of task success
        task_success = None
        if agent_role == 'system':
            task_success = True
            # We don't care for slots that are not in the goal constraints
            for slot in goal.constraints:
                # If the system proactively informs about a slot the user has
                # not yet put a constraint upon,
                # the user's DState is updated accordingly and the user would
                # not need to put that constraint.
                if goal.ground_truth:
                    if goal.ground_truth[slot] != \
                            goal.constraints[slot].value and \
                            goal.constraints[slot].value != 'dontcare':
                        task_success = False
                        break
                # Fall back to the noisier signal, that is the slots filled.
                elif state.slots_filled[slot] != \
                        goal.constraints[slot].value and \
                        goal.constraints[slot].value != 'dontcare':
                    task_success = False
                    break
            for req in goal.requests:
                if not goal.requests[req].value:
                    task_success = False
                    break
        return reward, dialogue_success, task_success
class SlotFillingGoalAdvancementReward(Reward):
    """Reward based on goal advancement between consecutive turns.

    Compares the current state/goal against the previous ones (kept from
    the last calculate() call): +1 when more slots or requests got filled,
    -1 otherwise.  Success is assessed only at terminal states, and task
    success only for the system role (None otherwise).
    """
    def __init__(self):
        """
        Initialize the internal structures.
        """
        self.prev_state = None
        self.prev_goal = None
        self.failure_penalty = -1
        self.success_reward = 1
    def initialize(self, **kwargs):
        """
        Initialize the failure penalty and success reward
        :param kwargs: dictionary containing failure penalty and success reward
        :return: Nothing
        """
        if 'failure_penalty' in kwargs:
            self.failure_penalty = kwargs['failure_penalty']
        if 'success_reward' in kwargs:
            self.success_reward = kwargs['success_reward']
        # Deep copies so later mutation of the live state/goal does not
        # corrupt the previous-turn snapshots.
        if 'state' in kwargs:
            self.prev_state = deepcopy(kwargs['state'])
        else:
            self.prev_state = None
        if 'goal' in kwargs:
            self.prev_goal = deepcopy(kwargs['goal'])
        else:
            self.prev_goal = None
    def calculate(self, state, actions, goal=None, force_terminal=False,
                  agent_role='system'):
        """
        Calculate the reward based on whether the action taken advanced the
        goal or not. For example, if the action resulted in filling one more
        slot the conversation advanced towards the goal.
        :param state: the current state
        :param actions: the action taken from the current state
        :param goal: the agent's goal, used to assess success
        :param force_terminal: force state to be terminal
        :param agent_role: the role of the agent ('system' or 'user')
        :return: (reward, success, task_success) tuple
        """
        if goal is None:
            print('Warning: SlotFillingGoalAdvancementReward() called without '
                  'a goal.')
            return -1, False, False
        elif self.prev_state is None or self.prev_goal is None:
            # First turn: nothing to compare against, count as advancement.
            reward = 1
        # Check if the goal has been advanced
        else:
            # If the new state has more slots filled than the old one
            if sum([1 if self.prev_state.slots_filled[s] else 0 for s in
                    self.prev_state.slots_filled]) < \
                    sum([1 if state.slots_filled[s] else 0 for s in
                         state.slots_filled]):
                reward = 1
            # Or if the new state has more requests filled than the old one
            elif sum([1 if self.prev_goal.actual_requests[r] else 0 for r in
                      self.prev_goal.actual_requests]) < \
                    sum([1 if goal.actual_requests[r] else 0 for r in
                         goal.actual_requests]):
                reward = 1
            # Or if the system made a request for an unfilled slot?
            else:
                reward = -1
        success = False
        if state.is_terminal() or force_terminal:
            # Check that an offer has actually been made
            if state.system_made_offer:
                success = True
                # Check that the item offered meets the user's constraints
                for constr in goal.constraints:
                    if goal.ground_truth:
                        # Multi-agent case
                        if goal.ground_truth[constr] != \
                                goal.constraints[constr].value:
                            success = False
                            break
                    elif state.item_in_focus:
                        # Single-agent case
                        if state.item_in_focus[constr] != \
                                goal.constraints[constr].value:
                            success = False
                            break
        # Snapshot this turn for the next advancement comparison.
        self.prev_state = deepcopy(state)
        self.prev_goal = deepcopy(goal)
        task_success = None
        if agent_role == 'system':
            task_success = True
            # We don't care for slots that are not in the goal constraints
            for slot in goal.constraints:
                # If the system proactively informs about a slot the user has
                # not yet put a constraint upon,
                # the user's DState is updated accordingly and the user would
                # not need to put that constraint.
                if goal.ground_truth:
                    if goal.ground_truth[slot] != \
                            goal.constraints[slot].value and \
                            goal.constraints[slot].value != 'dontcare':
                        task_success = False
                # Fall back to the noisier signal, that is the slots filled.
                elif state.slots_filled[slot] != \
                        goal.constraints[slot].value and \
                        goal.constraints[slot].value != 'dontcare':
                    task_success = False
            for req in goal.requests:
                if not goal.requests[req].value:
                    task_success = False
        return reward, success, task_success
| StarcoderdataPython |
60688 | #! /usr/bin/env python3
# -*- coding: utf-8 -*-
import os, json
def main():
    """Dump the CLEVR question vocabulary (minus the first five special
    tokens) to clevr-scene-nouns.txt, one token per line."""
    with open('vocab.json', 'r') as vocab_file:
        vocab = json.load(vocab_file)
    tokens = list(vocab['question_token_to_idx'])
    nouns = tokens[5:]
    print(nouns)
    with open("clevr-scene-nouns.txt", "w") as output:
        for tok in nouns:
            output.write(str(tok) + '\n')
if __name__ == '__main__':
main() | StarcoderdataPython |
3242460 |
# coding: utf-8
# In[1]:
# Descriptive statistics over a fixed sample of 100 observations
# (converted from a Jupyter notebook; cell markers and no-op "echo"
# expressions such as bare `len(s)` were removed).

# Raw sample, n = 100.  NOTE: several formulas below (median index 49/50,
# the denominator 100) are hard-coded for exactly 100 elements.
s = [15,17,13,20,19,17,25,10,18,40,21,35,11,25,30,29,35,34,25,18,27,45,36,22,28,38,46,47,24,11,14,16,27,61,52,33,35,48,16,27,26,48,51,50,60,62,54,58,25,27,35,42,47,45,55,38,70,61,47,44,34,36,35,61,65,70,53,54,54,56,47,11,21,22,24,53,52,53,70,62,61,21,18,11,21,18,27,24,25,26,22,12,11,22,23,33,35,41,18,11]
# Sort in place so order statistics (median, range) can be read off by index.
s.sort()
print(s)
from collections import Counter
from fractions import Fraction
import operator
from matplotlib import pyplot as plt
import math
# Frequency table: value -> absolute frequency.
d = Counter(s)
# Print: absolute frequency, relative frequency (exact fraction), value.
for i in d.keys():
    print('{0}, {2}, {1}'.format(d[i], i, Fraction(d[i], 100)))
# Mode, median and range (Ukrainian labels preserved).
print('Мода {0}, Медіана {1}, Розмах {2}'.format(max(d, key=d.get), (s[49]+s[50])/2, s[99]-s[0]))
# Frequency polygon.
plt.figure(figsize=(20, 10))
plt.grid()
plt.plot(d.keys(), d.values())
plt.xticks(s)
# Bar chart of the same frequency table.
plt.figure(figsize=(20, 10))
plt.bar(d.keys(), d.values())
plt.xticks(s)
# BUG FIX: plt.show(s) passed the data list positionally (interpreted as the
# `block` argument by older matplotlib, a TypeError on newer releases).
plt.show()
# Interval (grouped) frequency distribution over [10,20), [20,30), ... [70,80):
# each entry is ((lower, upper), absolute frequency, relative frequency).
interv = []
suma = 0
t = 10
for i in range(20,90,10):
    for j in d.keys():
        if j < i and j >= t:
            suma += d[j]
    interv.append(((t,i),suma, Fraction(suma,100)))
    suma = 0
    t = i
# Sample mean from the raw frequency table.
ser = 0
for i in d.keys():
    ser += i*d[i]
print('Середнє вибіркове {0}'.format(ser/100))
# Sample mean from the grouped distribution (interval midpoints).
ser = 0
for i in interv:
    ser+=(i[0][1]+i[0][0])/2 * i[1]
ser_int = ser/100
print('Середнє вибіркове {0}'.format(ser_int))
# Variance via Var = E[X^2] - (E[X])^2 over interval midpoints,
# then the standard deviation.
dispersion = 0
for i in interv:
    dispersion += ((i[0][1]+i[0][0])/2)**2 * i[1]
dispersion /= 100
dispersion -= ser_int**2
ser_quad = math.sqrt(dispersion)
print('Дисперсія {0}, Середньоквадратичне відхилення {1}'.format(dispersion, ser_quad))
# Coefficient of variation.
print('Коефіцієнт варіації {0}'.format(ser_quad/ser_int))
# Third and fourth central empirical moments of the grouped distribution.
emp3 = 0
emp4 = 0
for i in interv:
    emp3 += ((i[0][1]+i[0][0])/2 - ser_int)**3 * i[1]
    emp4 += ((i[0][1]+i[0][0])/2 - ser_int)**4 * i[1]
emp3 /= 100
emp4 /= 100
print('Центральні емпіричні моменти 3-го {0} і 4-го порядків {1}'.format(emp3, emp4))
# Skewness and excess kurtosis.
print('Асиметрія {0} і Ексцес {1}'.format(emp3/(ser_quad**3), emp4/(ser_quad**4)-3))
| StarcoderdataPython |
3202568 | <reponame>theomarzaki/TrafficOrchestrator
# This script provides a way for the TO to predict the neighrest cars to the merging car to
# provide a state space for the lane merge enviroment. Uses an LSTM Model to acheive this:
# Sequential learning -> change to minibatch learning
# multi variable classifir -> sequence to sequence regressor
# @parameters input: Road state Tensor
# @parameters output: LSTM Model to predict next car states (preceeding,merging,following) jit trace file
# Created by: <NAME>(KCL)
# -- BENCHMARK --
#
# CONFIG :
# sequence_length = 1
# input_size = 13
# hidden_size = 128
# num_layers = 2
# num_classes = 13
# batch_size = 32
# learning_rate = 1e-5
#
# + [CPU-MODE] i7 6700K Stock + 16 Go DDR4 2166 Non-ECC RAM
# |=> 0.12 Epoch/s
#
# + [CPU-MODE] i5 3570K Stock + 16 Go DDR3 1600 Non-ECC RAM
# |=> 0.08 Epoch/s
#
# + [GPU-MODE] GTX 970 Stock + i5 3570K Stock + 16 Go DDR3 1333 Non-ECC RAM
# |=> 0.58 Epoch/s | 52% + 1050MB uses with CPU bottleneck, so I think I can push a lot further. Maybe 1 Epoch/s
#
import time
import torch
import torch.utils
import torch.utils.data
import torch.nn as nn
import matplotlib.pyplot as plt
from csv_data import Data
# ---------------------------------------------------------------------------
# Hyper-parameters and runtime configuration.
# ---------------------------------------------------------------------------
# Device configuration
loader_batch_size = 128  # DataLoader batch size (scaled x8 when a GPU is found)
view_rate = 128          # print a progress line every `view_rate` steps
dev_type = 'cpu'         # compute device; switched to 'cuda' below if available
num_epochs = 5000
sequence_length = 1      # each sample is a single time step
input_size = 13          # feature dimension per time step
hidden_size = 128        # LSTM hidden state size
num_layers = 2           # stacked LSTM layers
num_classes = 13         # output dimension (sequence-to-sequence regression)
batch_size = 3           # NOTE(review): not used by the DataLoaders below
                         # (they use loader_batch_size) -- confirm intent
learning_rate = 1e-7     # NOTE(review): header benchmark says 1e-5 -- confirm
if torch.cuda.is_available():
    dev_type = 'cuda'
    view_rate /= 4        # report more often; becomes a float
    loader_batch_size *= 8  # larger batches fit on the GPU
    print("NVIDIA GPU detected and use !")
else: print("/!\ No NVIDIA GPU detected, we stick to the CPU !")
device = torch.device(dev_type)
# Load the csv-backed dataset and wrap the train/test splits as TensorDatasets.
# NOTE: the names `train` and `test` are rebound by `def train`/`def test`
# below; the datasets stay alive through the loaders.
data_wrapper = Data()
data = data_wrapper.get_data()
featuresTrain,targetsTrain = data_wrapper.get_training_lstm_data()
featuresTest, targetsTest = data_wrapper.get_testing_lstm_data()
train = torch.utils.data.TensorDataset(featuresTrain,targetsTrain)
test = torch.utils.data.TensorDataset(featuresTest,targetsTest)
# Data loader
train_loader = torch.utils.data.DataLoader(dataset=train,batch_size=loader_batch_size,shuffle=False,
    num_workers = 3, pin_memory=True)
test_loader = torch.utils.data.DataLoader(dataset=test,batch_size=loader_batch_size,shuffle=False,
    num_workers = 3, pin_memory=True)
# Recurrent neural network (many-to-one)
class RNN(nn.Module):
    """Many-to-one LSTM regressor.

    Feeds a (batch, seq, input_size) tensor through a stacked LSTM and maps
    the hidden state of the last time step to `num_classes` outputs through a
    linear layer.  Hyper-parameters (input_size, hidden_size, num_layers,
    num_classes, batch_size) are read from module-level configuration.
    """
    def __init__(self,train_loader,test_loader):
        super(RNN, self).__init__()
        # Loaders are stored on the model so train()/test() can iterate them.
        self.train_loader = train_loader
        self.test_loader = test_loader
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True)
        self.fc = nn.Linear(hidden_size, num_classes)
    def init_hidden(self):
        """Return zero-initialised (h0, c0) states for a fresh sequence.

        BUG FIX: this method was defined twice; the second definition shadowed
        the first and referenced self.num_layers / self.batch_size /
        self.hidden_size, which are never set on the instance, so calling it
        raised AttributeError.  A single definition using the module-level
        hyper-parameters is kept.  NOTE(review): `batch_size` (3) differs from
        `loader_batch_size` used by the DataLoaders -- confirm before use.
        """
        return (torch.zeros(num_layers, batch_size, hidden_size),
                torch.zeros(num_layers, batch_size, hidden_size))
    def forward(self, x):
        """Run the LSTM over `x` of shape (batch, seq, input_size)."""
        # Zero initial hidden and cell states, created on the configured device.
        h0 = torch.zeros(num_layers, x.size(0), hidden_size).to(device)
        c0 = torch.zeros(num_layers, x.size(0), hidden_size).to(device)
        # Forward propagate LSTM
        out, _ = self.lstm(x, (h0, c0)) # out: tensor of shape (batch_size, seq_length, hidden_size)
        # Decode the hidden state of the last time step only.
        out = self.fc(out[:, -1, :])
        return out
def train(model):
    """Train `model` with MSE loss and Adam.

    Iterates `num_epochs` times over model.train_loader, printing progress and
    a rough epochs/second figure every `view_rate` steps.

    Returns:
        list[float]: the per-step loss history (for plotting).
    """
    print("Go !")
    criterion = nn.MSELoss()
    current_time = time.time()
    optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)
    hist = []  # per-step loss values
    total_step = len(model.train_loader)
    for epoch in range(num_epochs):
        # Redundant with optimizer.zero_grad() below, but kept to preserve
        # the original training behaviour exactly.
        model.zero_grad()
        # model.hidden = model.init_hidden()
        # (BUG FIX: a dead `loss = None` assignment was removed here.)
        for i, (locations, labels) in enumerate(model.train_loader):
            # Reshape the batch to (batch, seq_len, features) and move to device.
            locations = locations.reshape(-1, sequence_length, input_size).to(device)
            labels = labels.to(device)
            # Forward pass
            outputs = model(locations)
            loss = criterion(outputs, labels)
            hist.append(loss.item())
            # Backward and optimize
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i+1) % view_rate == 0:
                print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'.format(epoch+1, num_epochs, i+1, total_step, loss.item()))
                bufftime = time.time()
                delta_time = bufftime - current_time
                print("+ "+str(delta_time)+" Secs")
                print("Perf: "+str(1/delta_time)+" Epoch/s\n")
                current_time = bufftime
    return hist
def test(model):
    """Evaluate `model` on the global test_loader and print an accuracy figure."""
    with torch.no_grad():
        correct = 0
        total = 0
        for locations, labels in test_loader:
            locations = locations.reshape(-1, sequence_length, input_size).to(device)
            labels = labels.to(device)
            outputs = model(locations)
            _, predicted = torch.max(outputs.data, 1)
            # NOTE(review): `total` counts batches while `correct` below can
            # grow per sample, so the printed percentage is not a true
            # accuracy -- confirm the intended metric.
            total += 1
            for idx, actual in enumerate(locations):
                # NOTE(review): this compares the *input* rows against the
                # model outputs element-wise and only counts exact equality --
                # verify this is the intended success criterion (the commented
                # line below suggests label comparison was once intended).
                if ((actual == outputs.data).all()):
                    correct += 1
            # correct += (predicted.long() == labels.long()).sum().item()
        print('Test Accuracy of the model of the model: {} %'.format(100 * correct / total))
def main():
    """Build, train, evaluate and export the LSTM model.

    The trained model is traced with TorchScript (for loading from C++) and
    saved to lstm_model.pt; the loss history is then plotted.
    """
    model = RNN(train_loader,test_loader).to(device,non_blocking=True)
    hist = train(model)
    test(model)
    # Trace with a dummy (1, 1, input_size) input tensor on the same device.
    traced_script_module = torch.jit.trace(model, torch.rand(1,1,input_size).to(device))
    traced_script_module.save("lstm_model.pt")
    plt.plot(hist)
    plt.show()
if __name__ == '__main__':
    main()
| StarcoderdataPython |
4822101 | """
@ Filename: Cluster.py
@ Author: <NAME>
@ Create Date: 2019-05-15
@ Update Date: 2019-05-20
@ Description: Implement Cluster
"""
import sys
import numpy as np
import preProcess
import pickle
import random
import matplotlib.pyplot as plt
class KMeans:
    """K-means clustering supporting three initialisation strategies.

    `cluster_type` selects the strategy:
      - "KMeans":   centers drawn uniformly at random inside the data bounds
      - "biKMeans": bisecting k-means (repeatedly split the cluster whose
                    split yields the lowest total SSE)
      - "KMeans++": D^2-weighted seeding via roulette-wheel selection

    Bug fixes relative to the original implementation:
      - Euclidean distance computed |sum(x1 - x2)| per row instead of the
        true norm sqrt(sum((x1 - x2)^2)).
      - Manhattan distance returned one signed scalar instead of per-row
        sums of absolute differences.
      - kmeans() ignored its `k` argument and always used self.k, so the
        2-way splits requested by biKmeans() produced self.k clusters.
      - rouletteWheelSelection() could never return index 0 and could return
        None on floating-point round-off.
      - save()/load() opened files in text mode; pickle requires binary.
    """

    def __init__(self, norm_type="Normalization", k=4, distance_type="Euclidean", cluster_type="KMeans++"):
        self.norm_type = norm_type          # "Normalization" or "Standardization"
        self.k = k                          # number of clusters
        self.distance_type = distance_type  # "Euclidean", "Cosine" or "Manhattan"
        self.cluster_type = cluster_type    # initialisation strategy
        self.centers = None                 # (k, n_features) cluster centers
        self.distances = None               # (n_samples, 2): [cluster index, distance]

    def calculateDistance(self, x1, x2):
        """Return the distance between each row of matrix `x1` and vector `x2`.

        Euclidean/Manhattan return a length-len(x1) array; an unknown
        distance_type prints an error and exits (original behaviour kept).
        """
        if self.distance_type == "Euclidean":
            # BUG FIX: was sqrt((sum of row differences)^2) == |row sums|.
            d = np.sqrt(np.sum(np.power(x1 - x2, 2), axis=1))
        elif self.distance_type == "Cosine":
            # NOTE(review): this is a similarity, not a distance, and
            # np.linalg.norm(x1) is the norm of the whole matrix rather than
            # per-row norms -- kept as-is; confirm intended semantics.
            d = np.dot(x1, x2)/(np.linalg.norm(x1)*np.linalg.norm(x2))
        elif self.distance_type == "Manhattan":
            # BUG FIX: was np.sum(x1 - x2), a single signed scalar.
            d = np.sum(np.abs(x1 - x2), axis=1)
        else:
            print("Error Type!")
            sys.exit()
        return d

    def createCenter(self, train_data, k=None):
        """Draw `k` (default self.k) centers uniformly inside the per-feature data bounds."""
        if k is None:
            k = self.k
        feature_dim = np.shape(train_data)[1]
        centers = np.zeros([k, feature_dim])
        for i in range(feature_dim):
            min_value = np.min(train_data[:, i])
            max_value = np.max(train_data[:, i])
            centers[:, i] = min_value + (max_value - min_value) * np.random.rand(k)
        return centers

    def adjustCluster(self, centers, distances, train_data, k):
        """Run Lloyd iterations until assignments stop changing.

        `distances` is updated in place to (assigned cluster, distance).
        NOTE(review): the convergence test np.sum(old - new) != 0 can miss
        label changes that cancel out -- confirm before reuse.
        """
        sample_num = len(train_data)
        flag = True  # True while any assignment changed in the last pass
        while flag:
            flag = False
            d = np.zeros([sample_num, len(centers)])
            for i in range(len(centers)):
                # distance between every sample and the i-th center
                d[:, i] = self.calculateDistance(train_data, centers[i])
            # assign each sample to its nearest center
            old_label = distances[:, 0].copy()
            distances[:, 0] = np.argmin(d, axis=1)
            distances[:, 1] = np.min(d, axis=1)
            if np.sum(old_label - distances[:, 0]) != 0:
                flag = True
                # move every center to the mean of its members
                for j in range(k):
                    current_cluster = train_data[distances[:, 0] == j]
                    if len(current_cluster) != 0:
                        centers[j, :] = np.mean(current_cluster, axis=0)
        return centers, distances

    def kmeans(self, train_data, k):
        """Plain k-means with random initial centers.

        BUG FIX: previously ignored `k` and always clustered into self.k
        groups, which broke the two-way splits used by biKmeans().

        Returns (centers, distances) where distances is (n, 2).
        """
        sample_num = len(train_data)
        distances = np.zeros([sample_num, 2])  # (cluster index, distance)
        centers = self.createCenter(train_data, k)
        centers, distances = self.adjustCluster(centers, distances, train_data, k)
        return centers, distances

    def biKmeans(self, train_data):
        """Bisecting k-means: start from one cluster and repeatedly apply the
        2-way split that minimises total SSE until self.k clusters exist.

        NOTE(review): the SSE terms square the *sum* of stored distances
        (np.sum(...) ** 2) and the stored distances mix squared and plain
        values -- kept as-is; confirm the intended objective.
        """
        sample_num = len(train_data)
        distances = np.zeros([sample_num, 2])  # (cluster index, distance)
        initial_center = np.mean(train_data, axis=0)  # single starting cluster
        centers = [initial_center]
        distances[:, 1] = np.power(self.calculateDistance(train_data, initial_center), 2)
        while len(centers) < self.k:
            min_SSE = np.inf
            best_index = None      # index of the cluster chosen for splitting
            best_centers = None    # the two centers produced by the best split
            best_distances = None  # assignments/distances of the best split
            # try splitting every current cluster and keep the best result
            for j in range(len(centers)):
                centerj_data = train_data[distances[:, 0] == j]
                split_centers, split_distances = self.kmeans(centerj_data, 2)
                split_SSE = np.sum(split_distances[:, 1]) ** 2
                other_distances = distances[distances[:, 0] != j]
                other_SSE = np.sum(other_distances[:, 1]) ** 2
                if (split_SSE + other_SSE) < min_SSE:
                    best_index = j
                    best_centers = split_centers
                    best_distances = split_distances
                    min_SSE = split_SSE + other_SSE
            # relabel: sub-cluster 1 becomes a brand-new cluster, sub-cluster 0
            # keeps the split cluster's index
            best_distances[best_distances[:, 0] == 1, 0] = len(centers)
            best_distances[best_distances[:, 0] == 0, 0] = best_index
            centers[best_index] = best_centers[0, :]
            centers.append(best_centers[1, :])
            distances[distances[:, 0] == best_index, :] = best_distances
        centers = np.array(centers)
        return centers, distances

    def kmeansplusplus(self,train_data):
        """k-means++ (D^2-weighted) seeding followed by Lloyd iterations."""
        sample_num = len(train_data)
        distances = np.zeros([sample_num, 2])  # (cluster index, distance)
        # first center: a uniformly random sample
        initial_center = train_data[np.random.randint(0, sample_num-1)]
        centers = [initial_center]
        while len(centers) < self.k:
            d = np.zeros([sample_num, len(centers)])
            for i in range(len(centers)):
                d[:, i] = self.calculateDistance(train_data, centers[i])
            # nearest existing center for every sample
            distances[:, 0] = np.argmin(d, axis=1)
            distances[:, 1] = np.min(d, axis=1)
            # choose the next center with probability proportional to D^2
            prob = np.power(distances[:, 1], 2)/np.sum(np.power(distances[:, 1], 2))
            index = self.rouletteWheelSelection(prob, sample_num)
            new_center = train_data[index, :]
            centers.append(new_center)
        centers = np.array(centers)
        centers, distances = self.adjustCluster(centers, distances, train_data, self.k)
        return centers, distances

    def rouletteWheelSelection(self, prob, sample_num):
        """Sample an index with probability proportional to `prob`.

        BUG FIX: the original started testing at index 1, so index 0 could
        never be selected, and it could fall off the end (returning None)
        when cumulative round-off left acc_prob[-1] slightly below p.
        """
        acc_prob = np.zeros(sample_num)
        acc_prob[0] = prob[0]
        p = random.uniform(0, 1)
        if acc_prob[0] > p:
            return 0
        for i in range(1, len(prob)):
            acc_prob[i] = acc_prob[i-1] + prob[i]
            if acc_prob[i] > p:
                return i
        return len(prob) - 1  # guard against floating-point round-off

    def cluster(self, train_data, display="True"):
        """Normalise the data, run the configured algorithm, return assignments.

        NOTE(review): `display` defaults to the *string* "True"; any non-empty
        string (including "False") triggers plotting.
        """
        if self.norm_type == "Standardization":
            train_data = preProcess.Standardization(train_data)
        else:
            train_data = preProcess.Normalization(train_data)
        if self.cluster_type == "KMeans":
            self.centers, self.distances = self.kmeans(train_data, self.k)
        elif self.cluster_type == "biKMeans":
            self.centers, self.distances = self.biKmeans(train_data)
        elif self.cluster_type == "KMeans++":
            self.centers, self.distances = self.kmeansplusplus(train_data)
        else:
            print("Wrong cluster type!")
            sys.exit()
        if display:
            self.plotResult(train_data)
        return self.distances[:, 0]

    def plotResult(self, train_data):
        """Scatter-plot the first two features coloured by assignment, plus centers.

        NOTE(review): the center colour list is hard-coded for k == 4.
        """
        plt.scatter(train_data[:, 0], train_data[:, 1], c=self.distances[:, 0])
        plt.scatter(self.centers[:, 0], self.centers[:, 1], c=['b', 'b', 'b', 'b'], marker="+")
        if self.cluster_type == "KMeans":
            plt.title('KMeans')
        elif self.cluster_type == "biKMeans":
            plt.title('biKMeans')
        elif self.cluster_type == "KMeans++":
            plt.title('KMeans++')
        plt.show()

    def save(self, filename):
        """Pickle the fitted centers and assignments to `filename`."""
        # BUG FIX: pickle requires binary mode; text mode raised TypeError.
        with open(filename, 'wb') as f:
            model = {'centers': self.centers, 'distances': self.distances}
            pickle.dump(model, f)

    def load(self, filename):
        """Restore a model pickled by save(); returns self."""
        # BUG FIX: binary mode to match save().
        with open(filename, 'rb') as f:
            model = pickle.load(f)
        self.centers = model['centers']
        self.distances = model['distances']
        return self
class DBSCAN:
    """Density-based clustering (DBSCAN).

    A sample with at least `m` neighbours within radius `eps` is a core
    point; clusters grow by expanding density-reachable core points.
    Label 0 marks noise; clusters are numbered from 1.

    Bug fixes relative to the original implementation:
      - Euclidean distance computed |sum(x1 - x2)| instead of
        sqrt(sum((x1 - x2)^2)).
      - Manhattan distance returned a signed sum instead of the sum of
        absolute differences.
      - save()/load() opened files in text mode; pickle requires binary.
      - a dead `temp_neighbor` accumulator was removed from cluster().
    """

    def __init__(self, norm_type="Normalization", distance_type="Euclidean", eps=0.5, m=5):
        self.norm_type = norm_type          # "Normalization" or "Standardization"
        self.distance_type = distance_type  # "Euclidean", "Cosine" or "Manhattan"
        self.eps = eps                      # neighbourhood radius
        self.m = m                          # min samples for a core point
        self.label = None                   # per-sample labels after cluster()
        self.neighbor = None

    def calculateDistance(self, x1, x2):
        """Return the scalar distance between two sample vectors."""
        if self.distance_type == "Euclidean":
            # BUG FIX: was sqrt((sum(x1 - x2))^2) == |sum of differences|.
            d = np.sqrt(np.sum(np.power(x1 - x2, 2)))
        elif self.distance_type == "Cosine":
            # NOTE(review): cosine *similarity* -- larger means closer, which
            # inverts the `<= eps` test in neighborQuery; confirm before use.
            d = np.dot(x1, x2)/(np.linalg.norm(x1)*np.linalg.norm(x2))
        elif self.distance_type == "Manhattan":
            # BUG FIX: was np.sum(x1 - x2), a signed sum.
            d = np.sum(np.abs(x1 - x2))
        else:
            print("Error Type!")
            sys.exit()
        return d

    def neighborQuery(self, train_data, center):
        """Return (indices, samples) of all points within eps of `center`
        (the center itself included when it is a row of train_data)."""
        index = []
        neighbor = []
        sample_num = len(train_data)
        for i in range(sample_num):
            if self.calculateDistance(center, train_data[i, :]) <= self.eps:
                neighbor.append(train_data[i, :])
                index.append(i)
        return index, neighbor

    def cluster(self, train_data, display="True"):
        """Normalise the data, run DBSCAN and return per-sample labels.

        NOTE(review): `display` defaults to the *string* "True"; any
        non-empty string (including "False") triggers plotting.
        """
        if self.norm_type == "Standardization":
            train_data = preProcess.Standardization(train_data)
        else:
            train_data = preProcess.Normalization(train_data)
        sample_num = len(train_data)
        label = -np.ones([sample_num])        # -1 unvisited, 0 noise, >=1 cluster id
        center_flag = np.zeros([sample_num])  # 1 where the sample is a core point
        center_index = []
        C = 0  # current cluster id
        for i in range(sample_num):
            if label[i] != -1:  # already clustered or marked noise
                continue
            index1, neighbor1 = self.neighborQuery(train_data, train_data[i, :])
            if len(neighbor1) < self.m:
                label[i] = 0  # too sparse: noise (may become a border point later)
                continue
            # i is a core point: open a new cluster and expand it
            C = C + 1
            label[i] = C
            center_flag[i] = 1
            center_index = index1
            j = 0
            while j < len(center_index):
                loc = center_index[j]
                # NOTE(review): this tests center_flag[j] (a position in the
                # worklist) rather than center_flag[loc] (the actual sample);
                # it looks like an indexing bug but is kept as-is to avoid a
                # silent behaviour change -- confirm and fix deliberately.
                if center_flag[j] != 1:
                    if label[loc] == 0:
                        label[loc] = C  # border point previously marked noise
                    if label[loc] != -1:
                        j = j + 1
                        continue
                    label[loc] = C
                    index2, neighbor2 = self.neighborQuery(train_data, train_data[loc, :])
                    if len(neighbor2) >= self.m:
                        # merge the directly-reachable core neighbourhoods
                        center_index = list(set(index1).union(set(index2)))
                        center_flag[loc] = 1
                j = j + 1
        self.label = label
        if display:
            self.plotResult(train_data)
        return label

    def plotResult(self, train_data):
        """Scatter-plot the first two features coloured by cluster label."""
        plt.scatter(train_data[:, 0], train_data[:, 1], c=self.label)
        plt.title('DBSCAN')
        plt.show()

    def save(self, filename):
        """Pickle the fitted labels to `filename`."""
        # BUG FIX: pickle requires binary mode; text mode raised TypeError.
        with open(filename, 'wb') as f:
            model = {'label': self.label, 'neighbor': self.neighbor}
            pickle.dump(model, f)

    def load(self, filename):
        """Restore a model pickled by save(); returns self."""
        # BUG FIX: binary mode to match save().
        with open(filename, 'rb') as f:
            model = pickle.load(f)
        self.label = model['label']
        self.neighbor = model['neighbor']
        return self
| StarcoderdataPython |
3559 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_orthoexon
----------------------------------
Tests for `orthoexon` module.
"""
import os
import pytest
@pytest.fixture
def exon_id_with_quotes():
    """An Ensembl exon ID wrapped in single quotes, as found in raw GTF attributes."""
    return "'ENSE00001229068.1'"
@pytest.fixture
def exon_id():
    """The same Ensembl exon ID without quotes (version suffix included)."""
    return "ENSE00001229068.1"
def test_separate_with_quotes(exon_id_with_quotes):
    """separate() strips surrounding quotes and the trailing .N version suffix."""
    from orthoexon.util import separate

    assert separate(exon_id_with_quotes) == "ENSE00001229068"
def test_separate(exon_id):
    """separate() drops the trailing .N version suffix from an unquoted ID."""
    from orthoexon.util import separate

    assert separate(exon_id) == "ENSE00001229068"
@pytest.fixture
def location():
    """A genomic location string in chrom:start-end:strand:frame form."""
    return "chr20:10256140-10256211:+:0"
def test_splitstart(location):
    """splitstart() extracts the start coordinate (as a string) from a location."""
    from orthoexon.util import splitstart

    assert splitstart(location) == '10256140'
def test_splitend(location):
    """splitend() extracts the end coordinate (as a string) from a location."""
    from orthoexon.util import splitend

    assert splitend(location) == '10256211'
@pytest.fixture
def human_gtf_filename(table_folder):
    """Path to the human GTF annotation file (relies on a `table_folder` fixture defined elsewhere)."""
    return os.path.join(table_folder, 'humanrbfox2andfmr1andsnap25.gtf')
@pytest.fixture
def human_gtf_database(table_folder):
    """Path to the gffutils .db file for the human GTF.

    NOTE(review): this returns a path *string*, but test_translate below calls
    gffutils FeatureDB methods on it -- confirm whether it should return an
    opened database instead.
    """
    return os.path.join(table_folder, 'humanrbfox2andfmr1andsnap25.gtf.db')
@pytest.fixture
def human_fasta(table_folder):
    """Path to the genome FASTA.

    NOTE(review): the fixture is named `human_fasta` but the file is
    GRCm38 (mouse) -- confirm which genome is intended.
    """
    return os.path.join(table_folder, 'GRCm38.p3.genome.fa')
def test_translate(exon_id, human_fasta, human_gtf_database):
    """Translate a known exon's CDS and compare against the expected peptide.

    NOTE(review): several issues to confirm before this can pass:
      - `human_gtf_database` is a path string, yet `.features_of_type` /
        `.children` are gffutils.FeatureDB methods, so this fails as written;
      - `exon_id == exon` compares an ID string against a gffutils feature;
      - `test` is only bound if the inner loop matches, otherwise the final
        assert raises NameError.
    """
    from orthoexon.util import translate
    from orthoexon.util import separate
    for index, species1gene in enumerate(human_gtf_database.features_of_type('gene')):
        species1gffutilsgeneid = str(species1gene['gene_id'])
        species1geneid = separate(species1gffutilsgeneid)
        for exon in human_gtf_database.children(species1geneid,
                                                featuretype='CDS',
                                                order_by='start'):
            if exon_id == exon:
                test = translate(exon, human_fasta)
                break
        break
    true = 'MAEDADMRNELEEMQRRADQLADE'
    assert test == true
# def test_getsequence(exon, human_gtf_database):
# from orthoexon.util import getsequence
#
# test = getsequence(exon, human_gtf_database)
# true = 'ATGGCCGAAGACGCAGACATGCGCAATGAGCTGGAGGAGATGCAGCGAAGGGCTGACCAGTT' \
# 'GGCTGATGAG'
#
# assert test == true
# def test_make_sequence_array(finalsequencedf):
# from orthoexon.util import make_sequence_array
#
# test = make_sequence_array(finalsequencedf)
# true = ......
#
# assert test == true | StarcoderdataPython |
1672519 | # -*- coding: utf-8 -*-
import re
from collections import defaultdict
from .utils import u, get, dump
# click is an optional dependency (installed via the [cli] extra); print a
# helpful hint before re-raising when it is missing.
try:
    import click
except ImportError:
    print('Install this package with the [cli] extras to enable the CLI.')
    raise
@click.group()
def cli():
    """Root command group for the homoglyph data command-line interface."""
    pass
@cli.command()
def update():
    """
    Update the homoglyph data files from https://www.unicode.org

    Regenerates both categories.json and confusables.json.
    """
    generate_categories()
    generate_confusables()
def generate_categories():
    """Generates the categories JSON data file from the unicode specification.

    Downloads Scripts.txt from unicode.org, parses each code-point range with
    its ISO 15924 script alias and general category, and writes a compact
    interned representation to categories.json.  Returns None (the data is
    written to disk as a side effect).
    """
    # inspired by https://gist.github.com/anonymous/2204527
    code_points_ranges = []  # (start, end, alias index, category index)
    iso_15924_aliases = []   # interned script aliases (upper-cased)
    categories = []          # interned general categories
    # Capture "START..END ; Alias # Category" -- the ..END part is optional
    # for single code points.
    match = re.compile(r'([0-9A-F]+)(?:\.\.([0-9A-F]+))?\W+(\w+)\s*#\s*(\w+)',
                       re.UNICODE)
    url = 'http://www.unicode.org/Public/UNIDATA/Scripts.txt'
    file = get(url)
    for line in file:
        p = re.findall(match, line)
        if p:
            code_point_range_from, code_point_range_to, alias, category = p[0]
            alias = u(alias.upper())
            category = u(category)
            # Intern alias/category strings: store each once, refer by index.
            if alias not in iso_15924_aliases:
                iso_15924_aliases.append(alias)
            if category not in categories:
                categories.append(category)
            code_points_ranges.append((
                int(code_point_range_from, 16),
                int(code_point_range_to or code_point_range_from, 16),
                iso_15924_aliases.index(alias), categories.index(category))
            )
    # Sorted ranges allow efficient range lookup at query time.
    code_points_ranges.sort()
    categories_data = {
        'iso_15924_aliases': iso_15924_aliases,
        'categories': categories,
        'code_points_ranges': code_points_ranges,
    }
    dump('categories.json', categories_data)
def generate_confusables():
    """Generates the confusables JSON data file from the unicode specification.

    Downloads confusables.txt from unicode.org and builds a symmetric
    "confusable with" mapping keyed by character.  Returns None (the data is
    written to confusables.json as a side effect).
    """
    url = 'http://www.unicode.org/Public/security/latest/confusables.txt'
    file = get(url)
    confusables_matrix = defaultdict(list)
    # Parse the human-readable comment of each mapping line, which contains
    # "( <char1> -> <char2> ) <name1> -> <name2>".
    match = re.compile(r'[0-9A-F ]+\s+;\s*[0-9A-F ]+\s+;\s*\w+\s*#'
                       r'\*?\s*\( (.+) → (.+) \) (.+) → (.+)\t#',
                       re.UNICODE)
    for line in file:
        p = re.findall(match, line)
        if p:
            char1, char2, name1, name2 = p[0]
            # Record the relation in both directions so lookups are symmetric.
            confusables_matrix[char1].append({
                'c': char2,
                'n': name2,
            })
            confusables_matrix[char2].append({
                'c': char1,
                'n': name1,
            })
    dump('confusables.json', dict(confusables_matrix))
| StarcoderdataPython |
3203581 | """ launcher program for the music player """
import music_player.music_player as mp
from RPi import GPIO
def main():
    """Create the music player and run it until stopped.

    GPIO pins are always released on exit (Ctrl-C, normal return, or any
    other exception).  Previously cleanup only ran on KeyboardInterrupt,
    leaving the pins configured when start() raised anything else.
    """
    try:
        music_player = mp.Music_player()
        music_player.start()
    except KeyboardInterrupt:
        # Ctrl-C is the normal way to stop the player; exit quietly.
        pass
    finally:
        GPIO.cleanup()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
87580 | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import json
import warnings
import pulumi
import pulumi.runtime
from . import utilities, tables
class GetCallerIdentityResult:
    """
    A collection of values returned by getCallerIdentity.

    Fields:
      account_id -- AWS Account ID number of the account owning the caller
      arn        -- AWS ARN associated with the calling entity
      user_id    -- unique identifier of the calling entity
      id         -- provider-assigned unique ID for this managed resource
    """
    def __init__(__self__, account_id=None, arn=None, user_id=None, id=None):
        # Validate then attach each field in declaration order.  A falsy
        # value (None, '') skips the type check, matching the original
        # generated code's behaviour.
        fields = (('account_id', account_id), ('arn', arn),
                  ('user_id', user_id), ('id', id))
        for name, value in fields:
            if value and not isinstance(value, str):
                raise TypeError("Expected argument '{}' to be a str".format(name))
            setattr(__self__, name, value)
async def get_caller_identity(opts=None):
    """
    Use this data source to get the access to the effective Account ID, User ID, and ARN in
    which Terraform is authorized.

    :param opts: optional invoke options forwarded to the pulumi runtime
                 (presumably pulumi.InvokeOptions -- confirm against callers).
    """
    # No arguments are forwarded to the provider invoke.
    __args__ = dict()
    __ret__ = await pulumi.runtime.invoke('aws:index/getCallerIdentity:getCallerIdentity', __args__, opts=opts)
    # Wrap the raw invoke payload in the typed result object.
    return GetCallerIdentityResult(
        account_id=__ret__.get('accountId'),
        arn=__ret__.get('arn'),
        user_id=__ret__.get('userId'),
        id=__ret__.get('id'))
| StarcoderdataPython |
3224339 | <gh_stars>1000+
import argparse
import pandas as pd
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description="Check for missing colors & locations",
        formatter_class=argparse.ArgumentDefaultsHelpFormatter
    )
    parser.add_argument('--metadata', type=str, nargs='+', required=True, help="input region adjusted metadata")
    parser.add_argument('--colors', type=str, nargs='+', required=True, help="input region specific color file")
    parser.add_argument('--latlong', type=str, required=True, help="input lat-long file")
    args = parser.parse_args()
    # Aggregate geography names (continents, cruise ships) that deliberately
    # have no color/lat-long entry; all matching below is lower-cased.
    things_to_exclude_orig = ['Africa', 'Asia', 'South America', 'Europe',
                               'North America', 'Oceania', 'Grand princess cruise ship',
                               'diamond princess']
    things_to_exclude = [x.lower() for x in things_to_exclude_orig]
    # Concatenate all metadata TSVs into one frame; colors/lat-longs are
    # headerless TSVs of (geo level, name, ...).
    all_metadatas = [pd.read_csv(met, delimiter='\t') for met in args.metadata]
    metadata = pd.concat(all_metadatas, sort=False)
    all_colors = [pd.read_csv(col, delimiter='\t', header=None) for col in args.colors]
    colors = pd.concat(all_colors, sort=False)
    latlong = pd.read_csv(args.latlong, delimiter='\t', header=None)
    # For each geographic resolution, report metadata values that lack a
    # color entry, and (except for countries) a lat-long entry.
    for geo_value in ['location', 'division', 'country']:
        locs_w_color_orig = colors.loc[colors[0]==geo_value,1].values
        locs_w_color = [x.lower() for x in locs_w_color_orig]
        locs_w_latlong_orig = latlong.loc[latlong[0]==geo_value,1].values
        locs_w_latlong = [x.lower() for x in locs_w_latlong_orig]
        locs_in_meta_orig = [x for x in metadata[geo_value].unique() if not pd.isna(x)]
        locs_in_meta = [x.lower() for x in locs_in_meta_orig]
        missing_color_locs = [loc for loc in locs_in_meta if loc not in locs_w_color and loc not in things_to_exclude]
        if missing_color_locs:
            print("The following {} are missing colors:".format(geo_value))
            print(missing_color_locs)
            print("\n")
        if geo_value != 'country':
            missing_latlong_locs = [loc for loc in locs_in_meta if loc not in locs_w_latlong and loc not in things_to_exclude]
            if missing_latlong_locs:
                print("The following {} are missing lat-long values:".format(geo_value))
                print(missing_latlong_locs)
                print("\n")
    print("Please remember this does *not* check lat-longs for countries!!")
| StarcoderdataPython |
3213182 | <reponame>izilly/izdvd<gh_stars>0
# -*- coding: utf-8 -*-
#
# Copyright (c) 2013 <NAME>
# Distributed under the terms of the Modified BSD License.
# The full license is in the file LICENSE, distributed with this software.
PROG_NAME = 'izdvd'
PROG_URL = 'https://github.com/izzilly/izdvd'

# External commands used to preview rendered video and menu images.
VIDEO_PLAYER = 'mplayer'
IMAGE_VIEWER = 'display'

# Regex fragments for recognising multi-disc volume markers in file names
# (e.g. a "cd1" or "d2a" token separated by space/underscore/dot/dash).
RE_PARTS_SEP = r'[ _.-]'                         # separators between name parts
RE_VOL_PREFIXES = r'cd|dvd|part|pt|disk|disc|d'  # volume prefix alternatives
RE_VOL_NUMS = r'[0-9]'                           # numeric volume suffix
RE_VOL_LETTERS = r'[a-d]'                        # alphabetic volume suffix

# Program name reported for each output mode.
MODE_NAMES = {'dvd': 'izdvd',
              'menu': 'izdvdmenu',
              'bg': 'izdvdbg'}
| StarcoderdataPython |
3389549 | """Proof of concept for a federated digital data exchange."""
# Package version string.  NOTE(review): '0.1.0-dev' is not canonical PEP 440
# ('0.1.0.dev0' would be); confirm downstream tooling accepts this form.
__version__ = '0.1.0-dev'
| StarcoderdataPython |
4842088 | <filename>PythonTKinter/LayoutPackSide.py
from tkinter import *
# Demonstrates Tk's pack() geometry manager: one label per `side` option.
janela = Tk()
# White labels so they stand out against the black window background.
lb1 = Label(janela,text='RIGHT',bg='white')
lb2 = Label(janela,text='BOTTOM',bg='white')
lb3 = Label(janela,text='LEFT',bg='white')
lb4 = Label(janela,text='TOP',bg='white')
# Pack order matters: each widget claims the remaining space on its side.
lb1.pack(side=RIGHT)
lb2.pack(side=BOTTOM)
lb3.pack(side=LEFT)
lb4.pack(side=TOP)
janela['bg'] = 'black'
# 400x300 window placed at screen offset (200, 200).
janela.geometry('400x300+200+200')
janela.mainloop()  # blocks until the window is closed
| StarcoderdataPython |
4832837 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""TIFFIOTensor"""
import tensorflow as tf
from tensorflow_io.core.python.ops import io_tensor_ops
from tensorflow_io.core.python.ops import core_ops
class TIFFIOTensor(
    io_tensor_ops._CollectionIOTensor
):  # pylint: disable=protected-access
    """TIFFIOTensor: a collection IOTensor over the pages of a TIFF file.

    Each page (IFD) of the file becomes one element of the collection,
    eagerly decoded to a dense RGBA tensor and addressed by integer index.
    """

    # =============================================================================
    # Constructor (private)
    # =============================================================================
    def __init__(self, filename, internal=False):
        with tf.name_scope("TIFFIOTensor"):
            # TIFF can fit into memory so load TIFF first
            data = tf.io.read_file(filename)
            # Per-page shapes and dtypes reported by the decoder.
            shapes, dtypes = core_ops.io_decode_tiff_info(data)
            # NOTE: While shapes returned correctly handles 3 or 4 channels
            # we can only handle RGBA so fix shape as 4 for now,
            # until decode_tiff is updated.
            spec = tuple(
                [
                    tf.TensorSpec(tf.TensorShape(shape.tolist()[0:2] + [4]), dtype)
                    for (shape, dtype) in zip(shapes.numpy(), dtypes.numpy())
                ]
            )
            # One column per TIFF page, addressed by its integer index.
            columns = [i for i, _ in enumerate(spec)]
            # Eagerly decode every page and wrap each as a TensorIOTensor.
            elements = [
                io_tensor_ops.TensorIOTensor(
                    core_ops.io_decode_tiff(data, i), internal=internal
                )
                for i in columns
            ]
            super().__init__(spec, columns, elements, internal=internal)
| StarcoderdataPython |
import copy
import csv
import warnings

import numpy as np
import pyarrow as pa

from mmcls.core import support, precision_recall_f1
from mmcls.datasets import BaseDataset, DATASETS
from mmcls.models import accuracy
@DATASETS.register_module()
class GoogleLandmarkDataset(BaseDataset):
    """Google Landmark Recognition dataset for mmcls.

    Each annotation line is ``<image_id> <landmark_id>`` (space separated);
    test-time annotations may omit the label.  Raw landmark ids are remapped
    to contiguous class indices via a precomputed array of unique ids.
    """

    def __init__(self, **kwargs):
        super(GoogleLandmarkDataset, self).__init__(**kwargs)
        # Array of the raw landmark ids; its position defines the contiguous
        # class index used for training/inference.
        self.unique_ids = np.load('configs/unique_landmark_ids.np.npy')  # 81311
        self.id_map = {raw: idx for idx, raw in enumerate(self.unique_ids)}

    def process_filename(self, x):
        """Expand an image id into its sharded path ``x[0]/x[1]/x[2]/x.jpg``."""
        return f'{x[0]}/{x[1]}/{x[2]}/{x}.jpg'

    def load_annotations(self):
        """Parse the annotation file into a pyarrow array of (id, label) pairs.

        pyarrow storage keeps the (very large) annotation list off the Python
        heap; entries are expanded into pipeline dicts lazily in
        :meth:`prepare_data`.
        """
        ann_file = self.ann_file
        with open(ann_file, 'r') as f:
            content = f.readlines()
        content = [x.strip().split(' ') for x in content]
        data_infos = []
        for data in content:
            if len(data) == 1:
                # Unlabeled (test) entry: assign the placeholder label 1.
                data = (data[0], 1)
            data_infos.append(data)
        return pa.array(data_infos)

    def prepare_data(self, idx):
        """Build and run the pipeline input dict for sample ``idx``."""
        data = copy.deepcopy(self.data_infos[idx].as_py())
        filename = self.process_filename(data[0])
        gt_label = self.id_map[int(data[1])]
        results = {}
        results['img_prefix'] = self.data_prefix
        results['gt_label'] = np.array(gt_label, dtype=np.int64)
        results['img_info'] = {'filename': filename}
        return self.pipeline(results)

    def format_results(self, results, thr):
        """Write a Kaggle ``submission.csv`` from (class-index, score) pairs.

        Predictions scoring below ``thr`` are submitted with an empty
        ``landmarks`` field.
        """
        # NOTE(review): this indexing assumes dict-style data_infos entries,
        # but load_annotations now stores plain (id, label) pairs -- confirm
        # whether this method is still in use / needs updating.
        keys = [x['img_info']['filename'][6:-4] for x in self.data_infos]
        values = [(x[0][0], x[1][0]) for x in results]
        with open('submission.csv', 'w', newline='') as submission_csv:
            csv_writer = csv.DictWriter(submission_csv, fieldnames=['id', 'landmarks'])
            csv_writer.writeheader()
            for image_id, prediction in zip(keys, values):
                # Map the contiguous class index back to the raw landmark id.
                label = self.unique_ids[prediction[0]]
                score = prediction[1]
                if score >= thr:
                    csv_writer.writerow({'id': image_id, 'landmarks': f'{label} {score}'})
                else:
                    csv_writer.writerow({'id': image_id, 'landmarks': ''})

    def get_gt_labels(self):
        """Get all ground-truth labels (categories).

        Returns:
            np.ndarray[int]: contiguous class indices for all images.
        """
        gt_labels = [self.id_map[int(x[1])] for x in self.data_infos]
        gt_labels = np.array(gt_labels)
        return gt_labels

    def evaluate(self,
                 results,
                 metric='accuracy',
                 metric_options=None,
                 logger=None):
        """Evaluate the dataset.

        Args:
            results (list): Testing results of the dataset.
            metric (str | list[str]): Metrics to be evaluated.
                Default value is `accuracy`.
            metric_options (dict, optional): Options for calculating metrics.
                Allowed keys are 'topk', 'thrs' and 'average_mode'.
                Defaults to None.
            logger (logging.Logger | str, optional): Logger used for printing
                related information during evaluation. Defaults to None.

        Returns:
            dict: evaluation results
        """
        if metric_options is None:
            metric_options = {'topk': (1, 5)}
        if isinstance(metric, str):
            metrics = [metric]
        else:
            metrics = metric
        allowed_metrics = [
            'accuracy', 'precision', 'recall', 'f1_score', 'support'
        ]
        eval_results = {}
        results = np.vstack(results)
        gt_labels = self.get_gt_labels()
        # NOTE(review): results appear to hold two stacked rows per image
        # (hence the halving below, e.g. flip TTA) -- confirm with the caller.
        num_imgs = len(results) // 2
        if len(gt_labels) != num_imgs:
            warnings.warn('Clip the length of test images')
            gt_labels = gt_labels[:num_imgs]
        invalid_metrics = set(metrics) - set(allowed_metrics)
        if len(invalid_metrics) != 0:
            # Fixed typo in the error message ("metirc" -> "metric").
            raise ValueError(f'metric {invalid_metrics} is not supported.')
        topk = metric_options.get('topk', (1, 5))
        thrs = metric_options.get('thrs')
        average_mode = metric_options.get('average_mode', 'macro')

        if 'accuracy' in metrics:
            if thrs is not None:
                acc = accuracy(results, gt_labels, topk=topk, thrs=thrs)
            else:
                acc = accuracy(results, gt_labels, topk=topk)
            if isinstance(topk, tuple):
                eval_results_ = {
                    f'accuracy_top-{k}': a
                    for k, a in zip(topk, acc)
                }
            else:
                eval_results_ = {'accuracy': acc}
            if isinstance(thrs, tuple):
                # One entry per threshold, e.g. 'accuracy_top-1_thr_0.50'.
                for key, values in eval_results_.items():
                    eval_results.update({
                        f'{key}_thr_{thr:.2f}': value.item()
                        for thr, value in zip(thrs, values)
                    })
            else:
                eval_results.update(
                    {k: v.item()
                     for k, v in eval_results_.items()})

        if 'support' in metrics:
            support_value = support(
                results, gt_labels, average_mode=average_mode)
            eval_results['support'] = support_value

        precision_recall_f1_keys = ['precision', 'recall', 'f1_score']
        if len(set(metrics) & set(precision_recall_f1_keys)) != 0:
            if thrs is not None:
                precision_recall_f1_values = precision_recall_f1(
                    results, gt_labels, average_mode=average_mode, thrs=thrs)
            else:
                precision_recall_f1_values = precision_recall_f1(
                    results, gt_labels, average_mode=average_mode)
            for key, values in zip(precision_recall_f1_keys,
                                   precision_recall_f1_values):
                if key in metrics:
                    if isinstance(thrs, tuple):
                        eval_results.update({
                            f'{key}_thr_{thr:.2f}': value
                            for thr, value in zip(thrs, values)
                        })
                    else:
                        eval_results[key] = values
        return eval_results
| StarcoderdataPython |
import sys
import random

from test_base import *
class TestStride2(TestBase):
    """Generate store/load traffic at strides of (2**k - 1) for k = 1..12."""

    def generate(self):
        self.clear_tag()
        for exp in range(1, 13):
            step = (1 << exp) - 1
            # First sweep: store a byte at every stride-th address.
            addr = 0
            while addr < self.MAX_ADDR:
                self.send_sb(addr)
                addr += step
            # Second sweep: load the same addresses back.
            addr = 0
            while addr < self.MAX_ADDR:
                self.send_lb(addr)
                addr += step
        # done
        self.tg.done()
# main(): removed dataset-separator junk that was fused onto the last line.
if __name__ == "__main__":
    t = TestStride2()
    t.generate()
from lixian_commands.util import *
from lixian_cli_parser import *
from lixian_encoding import default_encoding
import lixian_help
import lixian_query
@command_line_parser(help=lixian_help.readd)
@with_parser(parse_login)
@with_parser(parse_logging)
@command_line_option('deleted')
@command_line_option('expired')
@command_line_option('all')
def readd_task(args):
	# Re-add (restore) tasks that were deleted or have expired.
	# Exactly one of --deleted / --expired must be supplied.
	if args.deleted:
		status = 'deleted'
	elif args.expired:
		status = 'expired'
	else:
		raise NotImplementedError('Please use --expired or --deleted')
	client = create_client(args)
	if status == 'expired' and args.all:
		# Fast path: the remote API can restore every expired task at once.
		return client.readd_all_expired_tasks()
	to_readd = lixian_query.search_tasks(client, args)
	non_bt = []
	bt = []
	if not to_readd:
		return
	print "Below files are going to be re-added:"
	for x in to_readd:
		print x['name'].encode(default_encoding)
		# BT tasks are restored by info hash; everything else by original URL.
		if x['type'] == 'bt':
			bt.append((x['bt_hash'], x['id']))
		else:
			non_bt.append((x['original_url'], x['id']))
	if non_bt:
		# Non-BT tasks can be re-added in a single batch call.
		urls, ids = zip(*non_bt)
		client.add_batch_tasks(urls, ids)
	for hash, id in bt:
		client.add_torrent_task_by_info_hash2(hash, id)
| StarcoderdataPython |
from flask import Flask, request
import tensorflow as tf

from correct_text import create_model, DefaultMovieDialogConfig, decode
from text_corrector_data_readers import MovieDialogReader

# Paths baked into the serving image.
data_path = '/input/data/movie_dialog_train.txt'
model_path = '/input/model'

# Build the TF session and load the trained text-correction model once at
# startup so every request reuses the same graph.
tfs = tf.Session()
config = DefaultMovieDialogConfig()
print('Loading model from path: %s' % model_path)
model = create_model(tfs, True, model_path, config=config)
print('Using data from path: %s' % data_path)
data_reader = MovieDialogReader(config, data_path)

app = Flask(__name__)
@app.route('/', methods=['POST'])
def correct_handler():
    """Correct the POSTed whitespace-tokenized sentence and return it."""
    # Tokens the model is allowed to insert/remove while correcting text.
    corrective_tokens = data_reader.read_tokens(data_path)
    request.get_data()  # force-read the raw body so request.data is populated
    decodings = decode(tfs, model=model, data_reader=data_reader,
                       data_to_decode=[request.data.split()],
                       corrective_tokens=corrective_tokens)
    # decode() yields token lists; join the first decoding back into a string.
    return ' '.join(next(decodings))
if __name__ == '__main__':
    # Listen on all interfaces so the containerized service is reachable.
    app.run(host='0.0.0.0')
| StarcoderdataPython |
'''
This module will draw the national flag in inscape
'''
from pyautogui import *
from time import sleep
def draw_rectangle():
    """Draw the three horizontal colour bands (saffron, white, green).

    Each entry of `location` is [top, left, right, bottom, x, y, colour]:
    four drag deltas tracing the rectangle with the pen tool, the click
    position for the next band, and the band's RGBA fill colour.  All
    coordinates are absolute screen positions -- TODO confirm they match
    the target monitor layout.
    """
    press('s')
    click(568,300)
    location =[[775,-168,167,-775,568,534,"ff6600ff"],[775,-171,171,-775,568,705,"ffffff"],[775,-169,169,-775,568,300,"449900ff"]]
    click(568,367,duration=0.2) #locate
    press('p')
    for item in location:
        top,left,right,bottom,x,y, color = item
        sleep(0.5)
        # Trace the four edges of the band with the pen tool.
        drag(top,0,duration=1)
        # click()
        print("done top")
        drag(0, right,duration=1)
        # click()
        print("done right")
        drag(bottom,0,duration=1)
        # click()
        print("done bottom")
        drag(0, left,duration=1)
        print("done left")
        # click()
        # press('s')
        # click()
        sleep(1)
        # open the fill & stroke dialog
        hotkey('shift','ctrl','f')
        sleep(1)
        #flat color
        click(1534,223)
        sleep(1)
        #color select
        click(1791,477,clicks=2)
        sleep(1)
        #type color name
        typewrite(color,interval=0.01)
        sleep(1)
        #exit the color pallet
        click(1829,143)
        #for the next rectangle
        click(x,y)
def draw_outer_circle():
    """Draw the chakra's outer circle: white fill with a 2.5-unit blue stroke."""
    #draw outer circle
    press('e')
    mouseDown(911,561)
    sleep(1)
    drag(126,120)
    press('s')
    # click(955,570)
    #add color
    sleep(1)
    #enter color pallet
    hotkey('shift','ctrl','f')
    sleep(1)
    # flat color
    click(1517,181)
    sleep(1)
    click(1534,223)
    sleep(1)
    #color select
    click(1791,477,clicks=2)
    sleep(1)
    #type color name (white fill)
    typewrite("ffffff",interval=0.01)
    sleep(1)
    #stroke - color
    click(1615,183)
    sleep(0.3)
    click(1534,223)
    sleep(1)
    #color select
    click(1791,477,clicks=2)
    sleep(1)
    #type color name (navy-blue stroke)
    typewrite("1128fffc",interval=0.01)
    sleep(1)
    #stroke width tab
    click(1763,181)
    sleep(0.2)
    click(1596,225,clicks=2)
    hotkey('ctrl','a')
    typewrite('2.5')
    #exit the color pallet
    click(1829,143)
    sleep(1)
    print("outer circle - done")
    click(568,300)
def draw_lines():
    """Draw the six diameter spokes of the chakra in navy blue.

    Each entry in `lines` is a ((x1, y1), (x2, y2)) endpoint pair in absolute
    screen coordinates; every spoke is drawn with the pen tool and then given
    a 1.5-unit navy stroke via the fill & stroke dialog.
    """
    lines = [[(910,624),(1036,623)],[(919,594),(1027,651)],[(940,572),(1008,669)],[(974,562),(974,680)],
    [(1003,569),(945,674)],[(1025,589),(924,656)]]
    click(568,300)
    # (removed unused odd_index/even_index/count locals)
    for line in lines:
        press('p')
        start, end = line
        x1, y1 = start
        x2, y2 = end
        sleep(1)
        move(x1,y1)
        sleep(1)
        # Draw the spoke from start to end with the pen tool.
        click(x1,y1,duration=0.5)
        click(x2,y2,duration=0.4)
        # enter color pallet
        hotkey('shift','ctrl','f')
        sleep(1)
        # flat color
        click(1517,181)
        sleep(1)
        click(1534,223)
        sleep(1)
        #color select
        click(1791,477,clicks=2)
        sleep(1)
        #type color name
        typewrite("1128fffc",interval=0.01)
        sleep(1)
        #stroke - color
        click(1615,183)
        sleep(0.3)
        click(1534,223)
        sleep(1)
        #color select
        click(1791,477,clicks=2)
        sleep(1)
        #type color name
        typewrite("1128fffc",interval=0.01)
        sleep(1)
        # line - thickness
        click(1763,181)
        sleep(0.2)
        click(1596,225,clicks=2)
        hotkey('ctrl','a')
        typewrite('1.5')
        press('s')
        click(568,300)
def draw_inner_circle():
    """Draw the small solid navy-blue hub at the centre of the chakra."""
    # draw inner circle
    press('s')
    sleep(1)
    press('e')
    mouseDown(966,613)
    sleep(1)
    drag(18,17)
    press('s')
    # click(955,570)
    #add color
    sleep(0.51)
    #enter color pallet
    hotkey('shift','ctrl','f')
    sleep(1)
    #flat color
    click(1517,181)
    sleep(1)
    click(1534,223)
    sleep(1)
    #color select
    click(1791,477,clicks=2)
    sleep(1)
    #type color name (navy-blue fill)
    typewrite("1128ffff",interval=0.01)
    sleep(1)
    # stroke - color
    click(1615,183)
    sleep(0.3)
    click(1534,223)
    sleep(1)
    #color select
    click(1791,477,clicks=2)
    sleep(1)
    #type color name (matching stroke)
    typewrite("1128ffff",interval=0.01)
    sleep(1)
    #exit the color pallet
    click(1829,143)
def main():
    """Launch Inkscape via the desktop launcher, then draw the flag parts."""
    sleep(2)
    #open the terminal
    hotkey('winleft')
    sleep(2)
    typewrite("inkscape",interval=0.3)
    press('enter')
    sleep(2)
    # maximise the Inkscape window
    hotkey('winleft','up')
    sleep(2)
    draw_rectangle()
    draw_outer_circle()
    draw_lines()
    draw_inner_circle()
    press('s')
    click(568,300)
# Script entry point: user has ~2 s (the first sleep) to release the keyboard.
if __name__ == "__main__":
    print("Bot Started!!!")
    main()
'''
Created By SanthoshGoku
'''
| StarcoderdataPython |
from math import sin, cos, pi, tan, atan2, log
import math
from itertools import groupby
from operator import itemgetter

# NOTE: `tf` here is the ROS transform library, not TensorFlow.
import tf
import rospy
import numpy as np
from geometry_msgs.msg import TransformStamped
from geometry_msgs.msg import PointStamped
class distance_obj:
    """Wrapper holding the distance reading for a single grid cell."""

    def __init__(self, dis):
        # Fixed: original had `def __init__(self,dis)` (missing colon) and
        # `self.distance[] = dis` (invalid syntax); store the value directly.
        self.distance = dis
class localmap:
    """Distance grid covering `height` x `width` metres at `resolution` m/cell."""

    def __init__(self, height, width, resolution, morigin):
        # Map extents in metres and the cell size in metres per cell.
        self.height=height
        self.width=width
        self.resolution=resolution
        # Sentinel for cells with no information yet.
        self.punknown=-1.0
        # NOTE(review): this fills a flat 1-D list with the distance_obj
        # *class* itself (not instances) -- confirm intended initialisation.
        self.distancemap = [distance_obj]*int(self.width/self.resolution)*int(self.height/self.resolution)
        # self.localmap=[self.punknown]*int(self.width/self.resolution)*int(self.height/self.resolution)
        # Flattened (row-major) index of the map origin cell.
        self.origin=int(math.ceil(morigin[0]/resolution))+int(math.ceil(width/resolution)*math.ceil(morigin[1]/resolution))
        # self.pfree=log(0.3/0.7)
        # self.pocc=log(0.9/0.1)
        # self.prior=log(0.5/0.5)
        # self.max_logodd=100.0
        # self.max_logodd_belief=10.0
        # self.max_scan_range=1.0
        self.map_origin=morigin

    def updatemap(self, dis, pose):
        # Store the distance reading at the cell addressed by `pose`.
        x = pose[0]
        y = pose[1]
        # NOTE(review): distancemap is a flat 1-D list, so `[x][y]` does not
        # address a grid cell as written; a flattened index such as
        # x + int(width/resolution) * y is probably intended -- confirm.
        self.distancemap[x][y] = dis
| StarcoderdataPython |
# MAXUSBApp.py
#
# Contains class definition for MAXUSBApp.

import time

from ..core import FacedancerApp
from ..USB import *
from ..USBDevice import USBDeviceRequest
class MAXUSBApp(FacedancerApp):
    """Facedancer backend driving a MAXUSB (MAX342x) USB controller.

    Register numbers and IRQ bitmasks below follow the MAX342x register map.
    """

    # -- Register addresses -------------------------------------------------
    reg_ep0_fifo                    = 0x00
    reg_ep1_out_fifo                = 0x01
    reg_ep2_in_fifo                 = 0x02
    reg_ep3_in_fifo                 = 0x03
    reg_setup_data_fifo             = 0x04
    reg_ep0_byte_count              = 0x05
    reg_ep1_out_byte_count          = 0x06
    reg_ep2_in_byte_count           = 0x07
    reg_ep3_in_byte_count           = 0x08
    reg_ep_stalls                   = 0x09
    reg_clr_togs                    = 0x0a
    reg_endpoint_irq                = 0x0b
    reg_endpoint_interrupt_enable   = 0x0c
    reg_usb_irq                     = 0x0d
    reg_usb_interrupt_enable        = 0x0e
    reg_usb_control                 = 0x0f
    reg_cpu_control                 = 0x10
    reg_pin_control                 = 0x11
    reg_revision                    = 0x12
    reg_function_address            = 0x13
    reg_io_pins                     = 0x14

    # bitmask values for reg_endpoint_irq = 0x0b
    is_setup_data_avail             = 0x20  # SUDAVIRQ
    is_in3_buffer_avail             = 0x10  # IN3BAVIRQ
    is_in2_buffer_avail             = 0x08  # IN2BAVIRQ
    is_out1_data_avail              = 0x04  # OUT1DAVIRQ
    is_out0_data_avail              = 0x02  # OUT0DAVIRQ
    is_in0_buffer_avail             = 0x01  # IN0BAVIRQ

    # bitmask values for reg_usb_control = 0x0f
    usb_control_vbgate              = 0x40
    usb_control_connect             = 0x08

    # bitmask values for reg_pin_control = 0x11
    interrupt_level                 = 0x08
    full_duplex                     = 0x10

    # IN-endpoint NAK status bits (reg_pin_control).
    ep0_in_nak                      = (1 << 5)
    ep2_in_nak                      = (1 << 6)
    ep3_in_nak                      = (1 << 7)

    # TODO: Support a generic MaxUSB interface that doesn't
    # depend on any GoodFET details.

    @staticmethod
    def bytes_as_hex(b, delim=" "):
        """Render a byte sequence as delimiter-separated hex for logging."""
        return delim.join(["%02x" % x for x in b])

    # HACK: but given the limitations of the MAX chips, it seems necessary
    def send_on_endpoint(self, ep_num, data, blocking=False):
        """Transmit `data` on IN endpoint `ep_num` (0, 2 or 3).

        The MAX FIFOs hold only 64 bytes, so larger payloads are written in
        64-byte chunks.  Raises ValueError for unsupported endpoints.
        """
        if ep_num == 0:
            fifo_reg = self.reg_ep0_fifo
            bc_reg = self.reg_ep0_byte_count
        elif ep_num == 2:
            fifo_reg = self.reg_ep2_in_fifo
            bc_reg = self.reg_ep2_in_byte_count
        elif ep_num == 3:
            fifo_reg = self.reg_ep3_in_fifo
            bc_reg = self.reg_ep3_in_byte_count
        else:
            raise ValueError('endpoint ' + str(ep_num) + ' not supported')

        # FIFO buffer is only 64 bytes, must loop
        while len(data) > 64:
            self.write_bytes(fifo_reg, data[:64])
            self.write_register(bc_reg, 64, ack=True)
            data = data[64:]

        self.write_bytes(fifo_reg, data)
        self.write_register(bc_reg, len(data), ack=True)

        if self.verbose > 1:
            print(self.app_name, "wrote", self.bytes_as_hex(data), "to endpoint",
                  ep_num)

    # HACK: but given the limitations of the MAX chips, it seems necessary
    def read_from_endpoint(self, ep_num):
        """Drain and return available bytes from OUT endpoint 1.

        Only endpoint 1 has an OUT FIFO on this chip; every other endpoint
        returns an empty byte string.
        """
        if ep_num != 1:
            return b''

        byte_count = self.read_register(self.reg_ep1_out_byte_count)
        if byte_count == 0:
            return b''

        data = self.read_bytes(self.reg_ep1_out_fifo, byte_count)

        if self.verbose > 1:
            print(self.app_name, "read", self.bytes_as_hex(data), "from endpoint",
                  ep_num)

        return data

    def stall_endpoint(self, ep_number, direction=0):
        """
        Stalls an arbitrary endpoint.

        ep_number: The endpoint number to be stalled
        direction: 0 for out, 1 for in
        """
        if self.verbose > 0:
            print(self.app_name, "stalling endpoint {}".format(ep_number))

        # TODO: Verify our behavior, here. The original facedancer code stalls
        # EP0 both _in_ and out, as well as uses the special STALL SETUP bit.
        # Is this really what we want?
        if ep_number == 0:
            self.write_register(self.reg_ep_stalls, 0x23)
        elif ep_number < 4:
            # Fixed: was `self.write_writer(...)` with the undefined name
            # `ep_num`, which crashed for endpoints 1-3.
            self.write_register(self.reg_ep_stalls, 1 << (ep_number + 1))
        else:
            raise ValueError("Invalid endpoint for MAXUSB device!")

    def stall_ep0(self):
        """Convenience wrapper stalling the control endpoint."""
        return self.stall_endpoint(0)

    def get_version(self):
        """Read the chip revision register."""
        return self.read_register(self.reg_revision)

    def connect(self, usb_device, max_ep0_packet_size=64):
        """Assert the CONNECT bit to attach `usb_device` to the host."""
        # If we are already connected, drop off the bus briefly first.
        if self.read_register(self.reg_usb_control) & self.usb_control_connect:
            self.write_register(self.reg_usb_control, self.usb_control_vbgate)
            time.sleep(.1)

        self.write_register(self.reg_usb_control, self.usb_control_vbgate |
                            self.usb_control_connect)

        self.connected_device = usb_device

        if self.verbose > 0:
            print(self.app_name, "connected device", self.connected_device.name)

    def disconnect(self):
        """Drop the CONNECT bit and detach the current device."""
        self.write_register(self.reg_usb_control, self.usb_control_vbgate)

        if self.verbose > 0:
            print(self.app_name, "disconnected device", self.connected_device.name)
        self.connected_device = None

    def clear_irq_bit(self, reg, bit):
        """Acknowledge an IRQ: the MAX342x clears bits by writing a 1."""
        self.write_register(reg, bit)

    def service_irqs(self):
        """Poll the IRQ registers and dispatch pending USB events."""
        irq = self.read_register(self.reg_endpoint_irq)
        in_nak = self.read_register(self.reg_pin_control)

        if self.verbose > 3:
            print(self.app_name, "read endpoint irq: 0x%02x" % irq)
            print(self.app_name, "read pin control: 0x%02x" % in_nak)

        if self.verbose > 2:
            if irq & ~ (self.is_in0_buffer_avail \
                    | self.is_in2_buffer_avail | self.is_in3_buffer_avail):
                print(self.app_name, "notable irq: 0x%02x" % irq)

        if irq & self.is_setup_data_avail:
            self.clear_irq_bit(self.reg_endpoint_irq, self.is_setup_data_avail)

            b = self.read_bytes(self.reg_setup_data_fifo, 8)
            # For OUT (host-to-device) control transfers, also pull the data
            # stage from the EP0 FIFO before building the request.
            if (irq & self.is_out0_data_avail) and (b[0] & 0x80 == 0x00):
                data_bytes_len = b[6] + (b[7] << 8)
                b += self.read_bytes(self.reg_ep0_fifo, data_bytes_len)

            req = self.connected_device.create_request(b)
            self.connected_device.handle_request(req)

        if irq & self.is_out1_data_avail:
            data = self.read_from_endpoint(1)
            if data:
                self.connected_device.handle_data_available(1, data)
            self.clear_irq_bit(self.reg_endpoint_irq, self.is_out1_data_avail)

        if irq & self.is_in2_buffer_avail:
            self.connected_device.handle_buffer_available(2)

        if irq & self.is_in3_buffer_avail:
            self.connected_device.handle_buffer_available(3)

        # Check to see if we've NAK'd on either of our IN endpoints,
        # and generate the relevant events.
        if in_nak & self.ep2_in_nak:
            self.connected_device.handle_nak(2)
            self.clear_irq_bit(self.reg_pin_control, in_nak | self.ep2_in_nak)

        if in_nak & self.ep3_in_nak:
            self.connected_device.handle_nak(3)
            self.clear_irq_bit(self.reg_pin_control, in_nak | self.ep3_in_nak)

    def set_address(self, address, defer=False):
        """
        Sets the device address of the Facedancer. Usually only used during
        initial configuration.

        address: The address that the Facedancer should assume.
        """
        # The MAXUSB chip handles this for us, so we don't need to do anything.
        pass

    def configured(self, configuration):
        """
        Callback that's issued when a USBDevice is configured, e.g. by the
        SET_CONFIGURATION request. Allows us to apply the new configuration.

        configuration: The configuration applied by the SET_CONFIG request.
        """
        # For the MAXUSB case, we don't need to do anything, though it might
        # be nice to print a message or store the active configuration for
        # use by the USBDevice, etc. etc.
        pass
| StarcoderdataPython |
# Copyright (C) 2020-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
"""FedCurv Aggregation function module."""
import numpy as np

from .weighted_average import WeightedAverage
class FedCurvWeightedAverage(WeightedAverage):
    """Aggregation function of FedCurv algorithm.

    Applies weighted average aggregation to all tensors
    except Fisher matrices variables (u_t, v_t).
    These variables are summed without weights.

    FedCurv paper: https://arxiv.org/pdf/1910.07796.pdf
    """

    def call(self, local_tensors, db_iterator, tensor_name, fl_round, tags):
        """Apply aggregation."""
        # Fisher-information tensors (suffixes _u/_v/_w) are plain sums;
        # everything else falls back to the weighted average of the parent.
        if tensor_name.endswith(('_u', '_v', '_w')):
            stacked = [local_tensor.tensor for local_tensor in local_tensors]
            return np.sum(stacked, axis=0)
        return super().call(local_tensors)
| StarcoderdataPython |
import unittest

import pandas as pd

from itcc import ITCC
class TestITCC(unittest.TestCase):
    """Smoke tests for the ITCC pipeline (ingest, CXY matrix, artifact export)."""

    def test_itcc_ingest(self):
        # The path matrix must come back as a pandas DataFrame.
        i = ITCC()
        df = i.get_path_matrix()
        self.assertEqual(type(df), pd.DataFrame)
        print(df.describe())
        print("=" * 80)

    def test_getCXY(self):
        # Only prints shape/head -- no assertion beyond "does not raise".
        i = ITCC()
        df = i.getCXY()
        print(df.head())
        print(f"Shape: {df.shape}")
        print("=" * 80)

    def test_generate_artifact(self):
        # NOTE(review): the absolute /Users/... paths below tie this test to
        # one developer's machine -- consider making them configurable.
        i = ITCC()
        druggene_mappings = pd.read_csv(
            "/Users/mtaruno/Documents/DevZone/Stem-Away-group-5/data/artifacts/druggene_mappings.csv",
            index_col=0,
        )
        path_mappings = pd.read_csv(
            "/Users/mtaruno/Documents/DevZone/Stem-Away-group-5/data/artifacts/path_mappings.csv",
            index_col=0,
        )
        df = i.generate_artifact(
            druggene_mappings=druggene_mappings, path_mappings=path_mappings
        )
        print(df.head())
        # Persist the generated artifact next to its inputs.
        df.to_csv(
            "/Users/mtaruno/Documents/DevZone/Stem-Away-group-5/data/artifacts/ebc_artifact.csv"
        )
        print("=" * 80)
print("=" * 80)
# Guarded so importing this module doesn't immediately run the test suite.
if __name__ == "__main__":
    unittest.main()
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.