index int64 0 1,000k | blob_id stringlengths 40 40 | code stringlengths 7 10.4M |
|---|---|---|
986,000 | 36d39e067f8b1015207ff912ffa26bcd05fe5c93 | __author__ = 'kako'
from django import forms
from ..common.forms import OwnedEntitiesForm, OwnedEntityForm
from .models import Location
class LocationSearchForm(OwnedEntitiesForm):
    """
    Search form for the location list view.

    The single optional field is named after the Django ORM lookup
    ``name__icontains``, i.e. a case-insensitive substring match on the
    location name — presumably the view feeds cleaned_data straight into
    a queryset ``filter(**...)`` call (confirm against the view).
    """
    # min_length=4 keeps very short substrings from matching almost everything.
    name__icontains = forms.CharField(min_length=4, max_length=32, required=False, label='Location name')
class LocationForm(OwnedEntityForm):
    """
    Edit form for a single Location instance.

    ``owner`` is excluded from the editable fields — presumably it is
    assigned server-side by OwnedEntityForm / the view rather than by the
    user (confirm against ..common.forms).
    """
    class Meta:
        model = Location
        exclude = ('owner',)
|
986,001 | dfb4893363bfd18e0a9fc528d790b84a72592961 | a = input('First number:')
b = input('Second number:')
try:
answer = int(a) / int(b)
except ZeroDivisionError:
print("You can't devide by 0")
else:
print(answer) |
986,002 | cc97679ebce5dafd3f5d241f1b4cb1b34344da82 | """
How to send Attach Request by RNC to SGSN.
"""
def run(msg, ctxts):
    """Fill in SCCP/RANAP defaults on *msg* and create the related contexts.

    Steps (following the generic handler template):
      1. fetch context   (not needed here)
      2. update context  (not needed here)
      3. set message parameters
      4. create contexts (user -> NAS transaction -> SCCP connection)
      5. delete context  (not needed here)
      6. return the (mutated) message
    """
    # 3. set message parameters
    layer_defaults = {
        'layer1': {'protocol': 'SCCP',
                   'message': 'Connection Request',
                   'parameters': {}},
        'layer2': {'protocol': 'RANAP',
                   'message': 'Initial UE',
                   'parameters': {}},
    }
    for layer_name, default in layer_defaults.items():
        msg.setdefault(layer_name, default)
    msg['layer1']['parameters'].setdefault('sccp_id', '0')
    # NOTE(review): 'layer3' is assumed to already exist on the incoming
    # msg (it is read, never defaulted) — confirm against the caller.
    msg['layer3']['parameters'].setdefault('ti', '10000')

    # 4. create contexts, chained by their ids
    user_ctxt = ctxts.create_context('user_context',
                                     imsi=msg['layer3']['parameters']['imsi'])
    nas_ctxt = ctxts.create_context('nas_transaction_context',
                                    ti=msg['layer3']['parameters']['ti'],
                                    user_ctxt_id=user_ctxt['context_id'])
    ctxts.create_context('sccp_context',
                         sccp_id=msg['layer1']['parameters']['sccp_id'],
                         nas_ctxt_id=nas_ctxt['context_id'])

    # 6. return message
    return msg
|
986,003 | 6480e4784595bcd98c899ceea01f357c9367293c | import pandas as pd
import numpy as np
#To DO
# Columns with uniform distribution ??
# Interaction effects ???
# Balance click classes
def data_input(path, complete=False, nrows=10000):
    """
    Read the data file at *path* into a DataFrame.

    Arguments: path, complete, nrows
        path:     filepath to the data
        complete: read the whole dataset (True) or only *nrows* rows (False)
        nrows:    number of rows to read when complete is False (default: 10000)
    """
    row_limit = None if complete else nrows
    df = pd.read_csv(path, nrows=row_limit)
    # Parse the timestamp column up front so later code can use datetime ops.
    # (The exact time-of-day could later be dropped if it turns out unused,
    # e.g. via .apply(lambda x: x.date()).)
    df["date_time"] = pd.to_datetime(
        df["date_time"], format="%Y-%m-%d %H:%M:%S")
    return df
def drop_attributes(df, cutoff=25, extra_add=None):
    """
    Drop columns whose share of missing values exceeds *cutoff* percent.

    Arguments:
        df:        input DataFrame (not modified; a new frame is returned)
        cutoff:    percentage threshold — columns with MORE than this
                   percentage of missing values are dropped (default: 25)
        extra_add: optional list of additional column names to drop
                   manually (default: none)

    Returns a copy of *df* with the offending columns removed.
    """
    # FIX: the original used a mutable default argument (extra_add=[]);
    # None is the safe sentinel and passing [] still behaves identically.
    extra = list(extra_add) if extra_add else []
    attributes_drop = []
    for var in sorted(df.columns):
        series = df[var]
        # Percentage of missing entries in this column.
        perc_missing = 100 - series.count() / len(series) * 100
        if perc_missing > cutoff:
            attributes_drop.append(var)
    return df.copy().drop(attributes_drop + extra, axis=1)
def correlation_drop(df, threshold):
    """
    Remove one column out of every highly correlated pair.

    Arguments:
        df:        input DataFrame (a pruned copy is returned)
        threshold: correlation level (e.g. 0.5) at or above which the
                   later column of a correlated pair is dropped

    The set of dropped column names is printed for inspection.
    """
    result = df.copy()
    corr = result.corr()
    cols = corr.columns
    dropped = set()
    # Walk the lower triangle of the correlation matrix only.
    for i, col_i in enumerate(cols):
        for j in range(i):
            if corr.iloc[i, j] >= threshold and cols[j] not in dropped:
                dropped.add(col_i)
                if col_i in result.columns:
                    del result[col_i]
    print(dropped)
    return result
def impute(df, median=False, mean=False, negative=False, zero=False, list_missing=None):
    """
    Impute missing values with the median, mean, -1 or 0.

    Arguments:
        df:           input DataFrame (a modified copy is returned)
        median:       impute with the column median
        mean:         impute with the column mean
        negative:     impute with the constant -1
        zero:         impute with the constant 0
        list_missing: columns to impute; defaults to every column that
                      contains at least one NaN

    Attention: set exactly one strategy flag to True.  If none is set,
    nothing is imputed and the affected columns are reported.
    """
    df_copy = df.copy()
    # FIX: the original used a mutable default argument (list_missing=[]);
    # None (or an empty list) now means "auto-detect columns with NaNs".
    if not list_missing:
        list_missing = df_copy.columns[df_copy.isna().any()].tolist()
    if median:
        # Impute missing values with the per-column median.
        for col in list_missing:
            df_copy[col] = df_copy[col].fillna(df_copy[col].median())
        print("Imputation with median done")
    elif mean:
        # Impute missing values with the per-column mean.
        for col in list_missing:
            df_copy[col] = df_copy[col].fillna(df_copy[col].mean())
        print("Imputation with mean done")
    elif negative:
        for col in list_missing:
            df_copy[col] = df_copy[col].fillna(-1)
        print("Imputation with negative value done")
    elif zero:
        for col in list_missing:
            df_copy[col] = df_copy[col].fillna(0)
        print("Imputation with zero done")
    else:
        # FIX: typo "choosen" -> "chosen" in the diagnostic message.
        print("No method chosen: Missing values at: ", list_missing)
    return df_copy
def agg_competitors(df):
    """
    Collapse the per-competitor columns into three aggregate columns.

    Arguments:
        df: input DataFrame (a modified copy is returned)

    New columns (each the row-wise mean over all matching competitor columns):
        agg_comp_rate:      competitors' rate
        agg_comp_inv:       competitors' availability
        agg_comp_rate_perc: competitors' absolute percentage difference
    All original columns whose name starts with 'comp' are then dropped.
    """
    out = df.copy()
    # Regex per aggregate, applied in the same order as before.
    patterns = {
        "agg_comp_rate": "comp.*rate$",
        "agg_comp_inv": "comp.*inv",
        "agg_comp_rate_perc": "comp.*rate_perc",
    }
    for new_col, pattern in patterns.items():
        out[new_col] = out.filter(regex=pattern).mean(axis=1)
    keep = ~out.columns.str.startswith('comp')
    return out.loc[:, keep]
def test_impute_test(df):
"""
Imputation for different categories in different ways
"""
df_copy = df.copy()
# set missing original distances to max() for each searchquery and -1 if no info
df_copy[['srch_id', 'orig_destination_distance']].fillna(
df_copy[['srch_id', 'orig_destination_distance']].groupby('srch_id').transform('max').squeeze(), inplace=True)
df_copy.orig_destination_distance.fillna(-1, inplace=True)
# competitor info: aggregate with mean w.r.t searchquery and otherwise 0
df_copy[['srch_id', 'agg_comp_rate']].fillna(df_copy[['srch_id', 'agg_comp_rate']].groupby(
'srch_id').transform('mean').squeeze(), inplace=True)
df_copy[['srch_id', 'agg_comp_rate_perc']].fillna(df_copy[['srch_id', 'agg_comp_rate_perc']].groupby(
'srch_id').transform('mean').squeeze(), inplace=True)
df_copy[['srch_id', 'agg_comp_inv']].fillna(df_copy[['srch_id', 'agg_comp_inv']].groupby(
'srch_id').transform('mean').squeeze(), inplace=True)
df_copy[['agg_comp_rate', 'agg_comp_rate_perc', 'agg_comp_inv']] = df_copy[[
'agg_comp_rate', 'agg_comp_rate_perc', 'agg_comp_inv']].fillna(0)
return df_copy
|
986,004 | 4e757fa90d8af33e65d1990b90f6b355ffd4e477 | import random
#from random import randrange
# Set up counters and the score needed to win the match.
player_wins = 0
computer_wins = 0
winning_score = 2
# Play rounds until someone reaches winning_score (or the player quits).
while player_wins < winning_score and computer_wins < winning_score:
    print(f"Player score: {player_wins} Computer score: {computer_wins}")
    print(f"")
    print("...rock...")
    print("...paper...")
    print("...scissors!...")
    # Get player1 input ('quit' ends the match early).
    player = input("Player 1 enter choice: ").lower()
    if player == "quit":
        break
    # Pick the computer's move uniformly at random.
    computer = ("rock", "paper", "scissors")[random.randrange(3)]
    print(f"The computer plays {computer}")
    print("Shoot!")
    # Compare inputs based on rock-paper-scissors rules and print the round winner.
    if player == computer:
        print("It's a tie! Play again")
    elif player == "rock":
        if computer == "paper":
            print("Paper covers rock! computer wins!")
            computer_wins += 1
        elif computer == "scissors":
            print("Rock breaks scissors! player wins!")
            player_wins += 1
    elif player == "paper":
        if computer == "rock":
            print("Paper covers rock! Player wins!")
            player_wins += 1
        elif computer == "scissors":
            print("Scissors cut paper! Computer wins!")
            computer_wins += 1
    elif player == "scissors":
        if computer == "rock":
            print("Rock breaks scissors! Computer wins!")
            computer_wins += 1
        elif computer == "paper":
            print("Scissors cut paper! Player wins!")
            player_wins += 1
    else:
        # FIX: invalid input used to be ignored silently; tell the player.
        print("Invalid choice — enter rock, paper, scissors or quit.")
if player_wins > computer_wins:
    print("You win!")
elif computer_wins > player_wins:
    print("You suck!")
else:
    # BUG FIX: the original printed "You suck!" even when the score was
    # tied (e.g. quitting before any round was decided).
    print("It's a draw!")
|
986,005 | fb5ab25e87300645ee0fffbf46ce003b2129251d | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
import numpy as np
# In[2]:
df_final = pd.read_csv('textos_videos.csv')
# In[3]:
import nltk
# BUG FIX: download the corpora BEFORE using them; the original called
# stopwords.words('english') first, which fails on a machine that has
# not downloaded the NLTK data yet.
nltk.download('wordnet')
nltk.download('stopwords')
from nltk.corpus import stopwords
# NOTE: rebinding the name 'stopwords' from module to a set is kept for
# compatibility with get_tokens() below, which reads this global.
stopwords = set(stopwords.words('english'))
from nltk.tokenize import TweetTokenizer
from nltk.corpus import wordnet as wn
tknzr = TweetTokenizer()
def get_tokens(sentence):
    """Tokenize *sentence* with the tweet tokenizer, drop stop words and
    single-character tokens, and lemmatize what remains."""
    raw_tokens = tknzr.tokenize(sentence)
    kept = (tok for tok in raw_tokens
            if tok not in stopwords and len(tok) > 1)
    return [get_lemma(tok) for tok in kept]
def get_lemma(word):
    """Return the WordNet base form of *word*, or the word itself when
    WordNet does not know it (wn.morphy returns None)."""
    lemma = wn.morphy(word)
    return word if lemma is None else lemma
# Tokenize every text; token_list is a Series of token lists.
token_list = (df_final['texto'].apply(get_tokens))
# In[4]:
from keras.preprocessing.text import Tokenizer
from keras.layers import Embedding
# In[5]:
t = Tokenizer()
t.fit_on_texts(token_list)
vocab_size = len(t.word_index) + 1
# In[6]:
# The maximum number of words to be used. (most frequent)
MAX_NB_WORDS = 1000
# Max number of words in each complaint.
MAX_SEQUENCE_LENGTH = 50
# This is fixed (BERT-base hidden size).
EMBEDDING_DIM = 768
print('Found %s unique tokens.' % len(t.word_index))
# In[7]:
from bert_serving.client import BertClient
bc = BertClient(ip='150.165.75.172')
# In[ ]:
# Build the embedding matrix from the BERT service, one word at a time.
embedding_matrix = np.random.random((len(t.word_index) + 1, EMBEDDING_DIM))
for word, i in t.word_index.items():
    embedding_vector = bc.encode([word])
    if embedding_vector is not None:
        # words not found in embedding index will be all-zeros.
        embedding_matrix[i] = embedding_vector
# In[ ]:
embedding_layer = Embedding(len(t.word_index) + 1,
                            EMBEDDING_DIM, weights=[embedding_matrix],
                            input_length=MAX_SEQUENCE_LENGTH, trainable=True)
# In[ ]:
from sklearn.model_selection import train_test_split
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
# In[ ]:
# BUG FIX: the original referenced an undefined name 'tokenizer' (only a
# commented-out cell ever created one); the fitted Tokenizer is 't'.
X_1 = t.texts_to_sequences(df_final['texto'])
X_1 = pad_sequences(X_1, maxlen=MAX_SEQUENCE_LENGTH)
print('Shape of data tensor:', X_1.shape)
# In[ ]:
Y_1 = pd.get_dummies(df_final['classes_problema']).values
print('Shape of label tensor:', Y_1.shape)
# In[ ]:
X_train, X_test, Y_train, Y_test = train_test_split(X_1, Y_1, test_size=0.20, random_state=42)
print(X_train.shape, Y_train.shape)
print(X_test.shape, Y_test.shape)
# In[ ]:
from keras.layers import Dense, Dropout, Embedding, LSTM, Bidirectional, TimeDistributed, Flatten, Input
from keras.models import Model
# In[ ]:
# Renamed 'input' -> 'inputs' so the builtin input() is not shadowed.
inputs = Input(shape=(MAX_SEQUENCE_LENGTH,))
model = Embedding(vocab_size, EMBEDDING_DIM, weights=[embedding_matrix], input_length=MAX_SEQUENCE_LENGTH)(inputs)
model = Bidirectional(LSTM(100, return_sequences=True, dropout=0.50), merge_mode='concat')(model)
model = TimeDistributed(Dense(100, activation='relu'))(model)
model = Flatten()(model)
model = Dense(100, activation='relu')(model)
# Generalized: output size follows the number of label columns instead of
# the hard-coded 27, so the script survives a different label set.
output = Dense(Y_1.shape[1], activation='softmax')(model)
model = Model(inputs, output)
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[ ]:
model.fit(X_train, Y_train, validation_split=0.15, epochs=20, verbose=2)
# In[ ]:
# evaluate the model
loss, accuracy = model.evaluate(X_test, Y_test, verbose=2)
print('Accuracy: %f' % (accuracy * 100))
# In[66]:
from sklearn.metrics import classification_report, confusion_matrix
Y_pred = model.predict(X_test)
y_pred = np.array([np.argmax(pred) for pred in Y_pred])
# BUG FIX: classification_report needs class labels on both sides; the
# original passed one-hot Y_test against the integer y_pred.
y_true = np.argmax(Y_test, axis=1)
print(' Classification Report:\n', classification_report(y_true, y_pred), '\n')
|
986,006 | d09a0b9faa55c07d6f2de2edc355b8680881603b | '''
Created on 07-Jun-2018
@author: pritika sambyal
'''
class Alpha:
    """Demo base class whose pprint() identifies where the call landed."""

    def pprint(self):
        # Deliberately prints rather than returns, to trace MRO dispatch.
        print('Print from Alpha')
class Beta:
    """Second demo base class; its pprint() reports its own name."""

    def pprint(self):
        # Message kept byte-for-byte (lowercase 'beta') — it is runtime output.
        print('Print from beta')
class Charlie(Beta, Alpha):
    """Multiple-inheritance demo: Beta precedes Alpha in the MRO."""

    def pprint(self):
        # Explicitly call a specific base class, bypassing the MRO.
        Alpha.pprint(self)
        # super() follows the MRO, so this dispatches to Beta.pprint.
        super().pprint()
# Demo driver: shows the explicit base-class call vs. super() dispatch,
# then the class's method resolution order.
if __name__ == '__main__':
    Charlie().pprint()
    print()
    # Method resolution order: the order in which inheritance is searched.
    print(Charlie.mro())
|
986,007 | e2a740b0acbc66d25f167df36e266d15eba95fda | #!/bin/python
import sys
# Greedy "jumping on clouds": from position i, jump +2 whenever the cloud
# two ahead is safe (c[i+2] != 1), otherwise jump +1; count jumps needed
# to reach cloud n-1.  Python 2 (raw_input / print statement).
n = int(raw_input().strip())
c = map(int,raw_input().strip().split(' '))
i = 0
a = 0  # number of jumps taken so far
while i<n-1:
    if (i+2)>=n:
        # One final short jump reaches the last cloud.
        a += 1
        break
    elif c[i+2]==1:
        # Cloud two ahead is a thundercloud; take a single step instead.
        i += 1
        a += 1
    elif c[i+2]!=1:
        # Safe to take the double jump — greedy choice.
        i += 2
        a += 1
print a
|
986,008 | 0b68b20901864f5046c1d99c7d7a752f8a268aa3 | # coding: UTF-8
import math
import random
import numpy as np
from scipy import stats
def midiNoteToHz(midiNote):
    """Convert a MIDI note number to a frequency in Hz (A4 = 69 = 440 Hz).

    Values of -1 and below are passed through unchanged so callers can
    use them as "no note" sentinels.
    ref: https://drumimicopy.com/audio-frequency/
    """
    if midiNote > -1:
        return 440 * 2 ** ((midiNote - 69) / 12)
    return midiNote
def a16beatToSec(bpm):
    """Duration of one sixteenth note, in seconds, at tempo *bpm*."""
    quarter_note_sec = 60 / bpm
    return quarter_note_sec / 4
def toBytes(wave):
    """Quantize a float wave to raw signed 32-bit PCM bytes.

    NOTE(review): a sample of exactly 1.0 scales to 2**31, which is out
    of int32 range — callers appear to keep amplitudes strictly below
    1.0 (confirm against the normalization in add_stereo).
    """
    scale = float(2 ** 31)
    return (wave * scale).astype(np.int32).tobytes()
def add(wav_list, w_list):
    """Mix several mono waves with the given weights.

    All waves are truncated to the shortest one, summed with their
    per-wave weights, and normalized by the total weight.
    """
    shortest = min(len(w) for w in wav_list)
    mixed = np.zeros(shortest)
    for weight, wav in zip(w_list, wav_list):
        mixed += wav[:shortest] * weight
    return mixed / sum(w_list)
def softmax(a, t = 1):
    """Temperature-scaled softmax of the score sequence *a*.

    Arguments:
        a: sequence of scores
        t: temperature; higher values flatten the distribution (default 1)

    Returns a numpy array that sums to 1.  The maximum score is
    subtracted before exponentiation — mathematically a no-op, but it
    avoids the OverflowError the original per-element math.exp loop hit
    for large scores, and the vectorized np.exp replaces the manual loop.
    """
    scaled = np.asarray(a, dtype=float) / t
    exps = np.exp(scaled - scaled.max())
    return exps / exps.sum()
def add_stereo(wav_list, w_list):
    """Mix mono waves into one interleaved stereo buffer.

    Each w_list entry is a dict with keys 'vol' (overall weight) and
    'l'/'r' (pan weights, normalized by their sum).  Both channels are
    peak-normalized independently; samples are interleaved [L, R, L, R...].
    """
    shortest = min(len(w) for w in wav_list)
    right = np.zeros(shortest)
    left = np.zeros(shortest)
    for wav, params in zip(wav_list, w_list):
        pan_total = params['r'] + params['l']
        weighted = wav[:shortest] * params['vol']
        right += weighted * params['r'] / pan_total
        left += weighted * params['l'] / pan_total
    # Peak-normalize each channel, skipping silent channels.
    peak_r = max(np.abs(right))
    peak_l = max(np.abs(left))
    if peak_r > 0:
        right = right / peak_r
    if peak_l > 0:
        left = left / peak_l
    stereo = np.zeros(shortest * 2)
    stereo[0::2] = left
    stereo[1::2] = right
    return stereo
def dice(pkIn):
    """Draw one index from the discrete distribution *pkIn*.

    *pkIn* is a sequence of probabilities summing to 1; the result is an
    integer in range(len(pkIn)).
    """
    support = np.arange(len(pkIn))
    sampler = stats.rv_discrete(name='custm', values=(support, pkIn))
    return sampler.rvs(size=1)[0]
def throwSomeCoins(pOn, n):
    """Return 1 with probability min(1, pOn * n), otherwise 0.

    Approximates throwing *n* coins that each land "on" with probability
    *pOn* by scaling the single-coin probability and clamping at 1.
    """
    p = pOn * n
    if p > 1:
        return 1
    return 1 if dice([1 - p, p]) > 0 else 0
|
986,009 | 116e430f26c7e0ad64c118f97a3361428b15ca65 | t = [0.460858906320354, -1.84046012825185, -1.02533106640510, 2.59692421208403, -1.34897127280327, 2.68541539468966,
-0.418996215615209, -0.746488172616949, -5.30610555107729, -0.398473934941990, -1.01198684950968,
-1.53547279000320, -0.332241023752871, -0.911421770357180, -2.22433406454404, 0.235614717268163, -1.03373124948493,
-5.48872381500109, -0.912899619902788, -2.25544413747299, -5.03208832356098, -1.99759141054766, -1.44079411463581,
-2.78869612326865, -3.68391051390947, -4.30357467420981, -0.440795370692229, -0.140438444601244, 0.470407425211448,
-4.24081214242986, -0.398236196737486, -0.831575907158168, -0.759383042018243, -4.10758992379802,
-0.230056921257926, -1.23715260977835, 2.76897536983495, 2.64778983820726, -1.00736951810241, -1.39765240118463,
-4.00094782341222, -1.52886082983190, -0.423589534345624, 2.84466788307623, -1.46337864159720, -1.13496139482628,
0.314007850921882, 0.653675862724157, 0.853257410201366, 0.675416512100327, -0.590813906134542, -3.60950520510168,
-0.793894353436016, -2.65431048759474, -1.03752410023968, 2.92926603816925, -0.961939299966886, -2.08235760768240,
-1.51411456792111, -0.245938002573979, -2.37669687651177, 0.936675879412192, 2.64778983820726, 1.56117349855518,
0.367515067267050, -1.28969457363998, -1.60849047474345, -2.57460193451603, -0.850570362135997, 0.768629590234184,
-0.571413834350423, -0.236443591036860, -0.787573056531812, -0.890799117435151, -0.642876416071260,
-0.0351619306867690, -1.41452957323867, 0.768629590234184, -1.91195546999275, 0.847790610277741, 3.56034183208335,
0.591726150651908, 0.892508604017479, -2.00697060966462, -0.0202064708037000, -1.69089460131575, 0.310729974875898,
2.40092200827348, 0.668681222863583, -1.84665838067349, -0.554991453162486, -2.58417858848564, 2.23106369716531,
0.362290297870615, -2.50641792139484, -3.51651530394796, -2.71287279297334, -2.26437317520332, 0.310729974875898,
0.565694290872955, -3.84028378114921, 1.60508600950401, -3.59613366154012, 2.23013815060492, -3.21379642033006,
0.636304789453320, 3.75096374748695, -2.27999955120627, -0.814785405351017, -0.107783004660178, 1.13922155116786,
-0.366667706545712, 0.111537619993484, -0.675786528626460, -0.224870658635496, -2.92974408624640,
-3.31970471748109, -2.54604782326285, -0.936427674772020, -0.967916608790063, -1.06768182305926, 1.50813737018153,
-1.06872476742224, -0.0327226050096385, -3.79165098890756, 1.81436658081411, -1.82241962472904, -0.650641437466845,
-6.15204944271106, 0.0878313148523375, -1.46744703166898, -1.89100193048907, 2.08804453000725, -2.11260224840808,
0.727108884391086, -2.70350949118351, 0.116343183128436, -1.94036338456964, -1.35819564080727, -1.45202584029111,
-0.413035719707750, -1.40665288749761, -1.05930411588326, -1.37502827541744, -1.03516865387199, -0.565311172762861,
-0.258072324749506, -1.04704226869286, -1.04390561937578, 0.635469149372341, -0.413035719707750,
-0.895630452144602, -2.41195546999275, -3.53073888656824, -0.413035719707750, -0.216810269839343, 1.34020947480080,
1.05394622372961, 0.201750309320076, 0.688625492874124, 2.08804453000725, -1.39795936548065, -0.0648529569247671,
-5.93437158644701, -1.76358614772255, -1.14608287967549, -2.28147536280980, 2.05608267471152, 1.08804453000725,
-0.773900654006511, -2.91292582815646, -1.76358614772255, -1.16454548371030, -2.85755207031133, -1.78791623087528,
-2.71790083444662, -1.19383459543418, -2.26197032376778, -2.68868166609248, -1.19999871025166, -0.0292722318037022,
-0.946461800216767, 0.727108884391086, 0.177649006206252, 0.453147207001024, 0.0687586801899442,
-0.0236511590376267, 0.0906943052791718, -3.41195546999275, 0.371964280117410, -1.72183549030837,
-1.47362165597131, 0.326162311644424, -6.57778293095244, -1.85306143165533, -0.872180884950964, -0.105420347444735,
2.08804453000725, 1.58804453000725, 2.49608146255748, -2.41310414475731, 3.83335462228645, -2.31899859793819,
-7.16183518487144, -7.18718377879803, -3.62557795041626, -2.21254890242142, 2.08804453000725, -2.24017215307804,
0.0869642802922499, -1.45124364052610, -2.02443928780616, -0.416915601217582, -0.233075145794771,
-1.43711127286117, -1.75628084082712, -0.159002603807821, 1.20475774954336, -0.715755337260504, -1.92705125783741,
-0.801382438476239, 2.08804453000725, -0.149106440374695, 3.20766715987692, 2.08804453000725, 2.78405294105902,
-2.91195546999275, -1.53780599844605, 2.90442710025329, -0.150803237508910, 1.82348528494856, -2.91195546999275,
-1.58600039023422, 3.36515183620536, 1.63993754737790, 2.26635756087924, -0.321644973390647, 0.464190487755545,
-1.51248184297900, 3.20766715987692, -1.31781224410992, -0.746411206430452, -1.70633771769586, 1.81380632916358,
-2.91195546999275, -1.84730288485238, -2.79954759652007, -0.180728750827214, -2.12491540611890, -0.409063929494835,
-0.274978228014632, -3.16228898028329, -1.88675836733583, 0.584616038166004, -1.80848797554620, -1.52628028638287,
-1.90552238116591, -4.03391208411200, -4.06131509784440, -1.09823054870283, 1.05394622372961, -1.94473115637989,
-1.59460431483591, -1.28364849083835, -0.804538959525303, 2.08804453000725, -2.68342616231655, 0.688625492874124,
-2.41195546999275, -4.17931045038199, -0.911955469992748, -1.32937671137103, 2.08804453000725, -2.45710493544368,
-0.172848326443699, -3.63924851773712, -0.238150927152688, 0.688625492874124, -3.89067376766477, -3.05109865950419,
-5.12097382210997, -1.95662115687239, -2.88203059386288, -0.859724048524427, -0.447320527438379,
-0.338194565324969, -4.63274529597405, -0.504449318802159, -0.461033844129673, -1.57363006580651,
-0.939560320514005, -0.281409817220379, -3.17100418204043, -3.59062720358011, -2.02443300201261, -2.68364318378677,
-2.91195546999275, 2.08804453000725, -0.368521457301085, 0.124978961634397, 0.123968064544911, -0.320344955660005,
2.08804453000725, -0.339421768648862, -1.01508350890430, -1.11693805871356, 0.288391144265304, -1.18399832041595,
-0.384840870134724, -1.21004184548583, -1.20790428730038, -0.842570299557465, -0.433494282168908,
-1.12051377126447, -0.793497594220625, -0.983534335433618, -0.000135396707833024, -2.99340756321558,
-2.70120559513487, -0.622791909409173, -1.14608287967549, -0.218724745258257, -1.22091732409751, -2.15730182934499,
-1.75245620086741, -0.574278506690931, 1.04504382660983, -3.62527071040667, -1.76335096185190, -1.05205789854712,
-1.09191363065544, -1.15536310004657, 0.111883360399375, -1.48333358628101, -1.14608287967549, -0.707146519337996,
-0.263985030707886, -1.61045592614047, -2.48522716645140, -2.54025066930221, -1.14608287967549, -1.01508350890430,
-0.862578424665927, 0.597265112095130, -1.03225247476770, -4.11415497604402, -0.583153167747256,
-0.238277231867798, -1.53715892064197, -1.54398219007005, -0.514104290525665, -0.729416425796430,
-1.71093648923795, 0.183724678213352, -0.0256384813624777, -0.0556102721164820, 1.61934922405837,
0.102723821545936, 0.939565349139429, -0.894540019697203, -2.26893630283469, -1.96145405429459, -3.98597424856300,
-2.00417082861844, -1.95003263227045, -2.61760313182504, -3.54568187412498, -2.12274873835860, -3.11994318323661,
-1.42942268321319, -3.55583174620891, 0.0391824184428771, -4.91195546999275, 2.83504233033517, -0.0374889757164425,
-0.733713135823436, -3.85480941276795, -1.34841905003019, 0.438714908617641, -3.78369249960295, -0.302122960508768,
0.598801595605522, 0.392890038910338, 2.08804453000725, 2.08804453000725, -0.867258818117520, 0.325734725271971,
-1.68705381041630, 0.453147207001024, 1.08804453000725, 0.483693498538528, -2.01189389098933, 2.08804453000725,
-0.651256017427045, 4.87129893406535, -2.02116429588034, 2.74010830011171, 3.32850046105905, 1.22001402709462,
6.31095515829519, 2.08804453000725, -1.24988107774730, -3.48304543161365, -3.67770717461632, -0.911955469992748,
0.0880445300072523, 4.34054756114601, 0.789663334353421, 2.64062524772673, 0.588044530007252, 2.57154408877629,
-1.65685937415998, 0.128721382527175, -3.61704805102215, -0.434371586447014, -4.40637400912529, -2.93455579918715,
5.13261651028201, -4.25585025811978, -1.92550398873617, -0.482754634716557, -0.861056513268750, -1.50987052545436,
4.92098718474675, -4.46139888200730, 7.08804453000725, -1.41736153931667, 4.17098224439908, 4.89455145682116,
-0.519739447592443, 4.85527551187197, -1.91033361366769, 0.111883360399375, 0.336414026295685, 3.96648312243744,
7.46174616213813, 3.92931558291039, 5.63347586606363, 4.82962901253391, -3.03719366383989, 1.00816523942318,
0.247559693435093, 3.11995007538467, -4.01451583260889, 5.63307075698054, 5.62754158196681, 0.380711574772874,
0.344313060284208, -4.24229965367979, 11.4852768633533, 0.308883060557491, -0.0483920372436728, -0.270610862445594,
-4.43407899508898, 2.51337510108057, 0.623279179326303, 1.76692485420523, 1.50491811214904, -1.23441069690057,
12.4395031189417, 0.0213703263477214, -0.831575907158168, -0.927132166265665, 0.152123482996956,
-0.0789687238916059, -1.70215112668546, -0.433337571529378, 12.9917980794833, -0.623345606243433, 3.61027266690742,
13.1949956591007, 0.191495776718696, -0.131751680808307, -1.85915354754113, 0.0909856845462684, -0.885152318339877,
13.8180003587578, 12.7586436765299, -2.13331497149093, -0.427051975440691, 13.1312627662206, -3.08296100535260,
0.0949854120553653, -0.976581349216659, -2.99662996155261, 2.78161181178420, -0.271593732073796, 1.42643348545251,
-1.33957733006713, -0.664516899393107, -1.55985330423212, -0.882262658503663, -1.22991953663129, -4.49044832357784,
-3.91195546999275, -2.83705604631356, -2.51296366669217, -2.26393634489942, -1.19775673360593, 1.49723667357995,
0.677578437144234, -0.269751070132131, -0.202639110310272, -0.590292563538952, -4.30043888566215,
-1.87271971814350, -3.82179919305057, 0.174232224989609, -2.50114816875855, -2.02268792298358, 0.901626598293959,
-5.91195546999275, -1.50653001329438, 0.331257841253048, -4.41195546999275, -3.02356819601370, -0.731075645881930,
-3.66646796058181, -2.43909214228057, -4.48710684479298, 0.900877757320473, -2.31132181759247, 1.53431491896808,
-2.23899370256052, 0.864788508718576, -2.54898216465497, -0.671496398686756, -5.26089113849682, 0.199970746703130,
-3.32016783072825, -1.40882123239403, -1.55405411068296, -2.14867834173744, -3.30423260745577, -2.17067086620730,
-3.79135699411761, -0.786224663222200, -4.22138916363268, -1.87628995792079, -1.26242474557512, 2.08804453000725,
0.262801168195139, -1.95581297195692, 1.82942407514307, 1.70216084006984, -3.58513810071717, 1.57202668749511,
2.67516639393214, -0.843824738240883, -2.14593949454753, -1.88967861555828, -2.41195546999275, -1.87877889177149,
-1.65770662834099, -2.95994276603284, -2.04216301261310, -1.60323714708838, -2.51255809804361, 2.76279241130712,
-3.80955907389223, -1.79954115742007, 0.498863005067719, 0.588044530007252, 2.08804453000725, -2.91195546999275,
0.588044530007252, -0.843824738240883, -4.21484893434250, 0.508061338568353, -2.98336553197503, 2.99279457796467,
-0.306213641721854, -3.60660544644909, -1.46661284517091, -5.67869707384416, -2.54551204896049, -1.29346622333777,
1.04763475975346, -1.03737875689843, -2.54551204896049, 1.14442921435032, -0.117616545611611, -1.35798363135358,
-1.56263013190672, 0.0880445300072523, -3.23590585324236, 2.08804453000725, -0.315362756773965, -5.40882123239403,
0.526841797839950, -1.50903823738604, -3.75669397363706, -2.39807014985027, 2.08804453000725, -1.01510426672012,
-0.411955469992748, -2.27359251881502, -1.90235042280963, -3.16795903196167, -2.55388169100789, -3.17083083175604,
-0.652510248082962, -0.742082641569496, -5.61595375446207, -0.204059409165879, -0.499489482698223,
-3.95863656149018, -2.52267877442009, -0.283510615003749, -6.55650449679294, -0.696002793928798, -1.11614610634120,
-1.19857908897832, -3.68224471758062, 1.20141855642530, -1.70811463451982, -2.20881582790270, -0.533242314330295,
-0.415726752306881, -0.899308916331293, -2.29658071968294, 0.857589709061585, -0.145510234895383,
-3.40882123239403, -0.107156451360282, 0.0531900902884246, -1.53019032726061, -2.23899370256052, -2.55842333336209,
-1.70931378056478, -2.58249460010903, -0.309712218883092, -2.22151261662125, -6.41195546999275, -3.42945361467704,
0.987360811157018, -0.117554465199888, -0.784251897964921, -1.82849038359586, 3.27259493672987, -2.20329648886353,
3.82417212782451, -4.33534785600317, 4.82911780115241, -3.91195546999275, -0.829072219546859, -1.69208188635481,
-1.35048937185979, -0.246755794298863, -4.05204218575777, -2.25837219673139, -0.164614524475738, -2.23727051002895,
-2.41195546999275, -2.56263013190672, -1.92117622094683, -4.91195546999275, 4.88733536574529, -1.41195546999275,
2.40353275801776, 2.40353275801776, 2.14345115961383, -3.28862524559338, -0.411955469992748, 2.07202438500006,
-4.11328059257559, -1.94279200137541, -2.15999685986216, -2.23271971805245, 1.55452992405841, -2.83530387019864,
-2.41195546999275, -2.27231384418293, -4.41195546999275, -0.843824738240883, -3.61170429781700, -2.30889340150671,
-5.00936390214704, -0.911955469992748, -1.74237890667557, -3.27415361299039, -2.57277798917819, -2.27231384418293,
-3.70020923065619, 4.73250226601260, 3.75784258762047, 0.880976172649056, 0.142562203938681, 6.36919031850627,
2.26710312782591, 1.69110659849329, -1.87500694060203, -1.53220361678270, -1.61856150202495, 1.75754299589862,
-0.908235556788242, 4.23796759009083, -2.10881476770304, -2.64463022373909, 3.60939177305252, 0.375743245866346,
4.74335535043045, -0.946809909711575, 7.17586597440456, -4.77048204514070, 2.63705169695075, 4.60028157910264,
1.30162423633620, -1.71407836698944, 0.485923331420267, 2.08804453000725, 5.02358397054512, -4.91195546999275,
-1.39466535601412, 2.89673539139778, 4.79910685906523, 3.19028778111691, 3.90843535590108, 0.287899395873694,
3.28508202435090, 4.75585876283145, 3.10116967667311, -1.37543002951580, -4.77048204514070, 3.82417212782451,
3.96467123618068, 0.548645597605614, 2.10287508063888, -3.41195546999275, 0.430509663026243, 0.0880445300072523,
-3.46964068453674, 5.13498909374003, 2.05993150911401, 2.70125169629763, -1.37893861988698, 1.83091472911964,
-2.03218970865522, 15.3351053702517, 11.2651231438555, -1.39466535601412, 14.0084424561417, 5.21377533677780,
1.48789768875915, 2.08804453000725, -0.185384471462427, -1.39466535601412, -4.91195546999275, 16.0361567444385,
14.5832844369583, 12.9247470306743, -1.79469720778657, 0.0690632236145774, -0.634478791498665, 6.15749874084649,
-4.56640048756634, 3.21883871395805, -1.28476950738688, 0.0618559833117160, 13.4813926744449, 3.54864990953040,
-2.14071188117953, 1.78107655747395, -2.16755543287069, 14.1253112358102, 2.16398929430729, -2.64736998745832,
2.07052781260670, -4.15316273572054, -1.03135277002352, 2.57435926495356, -1.66152425646311, 13.9954957916907,
-0.429864156511428, 14.4779047573301, -1.74170999347196, 11.6364136926197, 7.01290101117915, 1.61499932356681,
0.164230321931669, 3.31608079868849, -3.74893828307729, -2.91195546999275, -4.30807910889806, -2.71135898975379,
7.08804453000725, -1.91195546999275, 12.5224874573593, 15.8277783427532, 12.3488409026905, 14.9190082360853,
-2.62493247066587, 16.8635392061751, 11.9598631393314, -2.91195546999275, 13.9793866388939, 12.0880445300073,
12.0852062774357, -1.72568691054441, 15.3542271383377, 1.50783221235773, 7.08804453000725, 2.36075649780921,
3.44663492415907, 9.77335604662214, 3.75682731687263, -0.412859857485145, 13.6079253675178, 13.9793866388939,
-0.631673152825599, 3.99343583174210, 4.02974784553562, 4.31814574610932, 4.16536519862609, 10.1844542401322,
3.50908741202714, 2.41567293625084, 1.50759098485668, 4.57422654838142, 3.17814777058136, 1.08804453000725,
3.93633677017783, 2.80459950987134, 3.08525659683069, 2.30334514801358, 2.18931561162464, -4.03168163763907,
-3.00134538863614, 9.11341323345674, 14.9819529542758, -0.747835924808015, 1.54213599332549, 17.8659767125671,
-0.631862056972551, 13.2090107524147]
# Report the 1-based positions of outlier scores: entries of t whose
# magnitude exceeds 3.  (Python 2 print statement.)
for i, e in enumerate(t):
    if abs(e) > 3:
        print i + 1
|
986,010 | 00fec62d3ee703ef7a42ff0fdda241a7a730530e | from django.urls import path
from .views import create_post, view_post, edit_post, delete_post
# Namespace used when reversing these routes, e.g. reverse('post:create').
app_name = 'post'
urlpatterns = [
    # Posts are addressed by their UUID primary key.
    path('create', create_post, name='create'),
    path('<uuid:pk>', view_post, name='view_post'),
    path('<uuid:pk>/edit', edit_post, name='edit_post'),
    path('<uuid:pk>/delete', delete_post, name='delete_post')
]
|
986,011 | 962c3726db0dea9f08fe532a1420748a084a3823 | def bi_sort(tree) :
    """Sift the most recently appended element up a 1-indexed min-heap.

    tree[0] is a sentinel zero, so for non-negative stored values the
    sift also terminates at the root without an explicit index check.
    """
    index = len(tree)-1
    while (tree[index] < tree[index//2]) :
        tree[index], tree[index//2] = tree[index//2], tree[index]
        index = index // 2
T = int(input())
# Multiple test cases are given, so process each one in turn.
for test_case in range(1, T + 1):
    # ///////////////////////////////////////////////////////////////////////////////////
    N = int(input())
    bi_tree = [0]  # index 0 is a sentinel; the heap itself is 1-indexed
    queue = list(map(int, input().split()))
    result = 0
    # Insert each value, restoring the min-heap property after every append.
    for num in queue :
        bi_tree.append(num)
        bi_sort(bi_tree)
    # Sum the ancestors of node N by walking parent links up to the root.
    index = N
    while (index//2 != 0) :
        index = index//2
        result += bi_tree[index]
    print("#{0} {1}".format(test_case, result))
    # /////////////////////////////////////////////////////////////////////////////////// |
986,012 | f07896e393847aa5c755a7c5b5153840105be6e4 | recl1 =int(input('enter first rectangle length:'))
recw1 =int(input('enter first rectangle width:'))
recw2 =int(input('enter second rectangle length:'))
recl2 =int(input('enter second rectangle width:'))
u1 = input('input your units:')
r1 = recl1 * recw1
r2 = recl2 * recw2
if r1 > r2:
print('rectangle one is larger')
print(r1, u1)
elif r1 < r2:
print('rectangle two is larger')
print(r2, u1)
|
# Intentional placeholder module: fails loudly on import/execution until
# a real implementation is written.
986,013 | 05827b6c5663215311385a86859ddd4a6a0a3faf | raise NotImplementedError("Not implemented you big dodo bird!")
|
986,014 | 87c0b367b73829a0192357744d1d3cc251dd3e5e | import logging
from flask import Blueprint
class NLPBlueprint(Blueprint):
    """Flask Blueprint that runs an optional init callback when registered.

    The callback (``init_func``) is invoked once at registration time;
    any exception it raises is logged but does not abort registration.
    """

    def __init__(self, name, import_name, init_func=None, url_prefix=None):
        # Zero-argument callable run at registration time; may be None.
        self.init_func = init_func
        self.logger = logging.getLogger(__name__)
        super().__init__(name, import_name, url_prefix=url_prefix)

    def register(self, app, options, first_registration=False):
        """Run ``init_func`` (best-effort), then defer to Blueprint.register.

        NOTE(review): Flask 2.x removed the ``first_registration`` parameter
        from Blueprint.register — confirm the Flask version pinned by this
        project before upgrading.
        """
        self.logger.debug('Initialize BluePrint {}'.format(self.name))
        try:
            if self.init_func is not None:
                self.init_func()
        except Exception as e:
            # Best-effort init: log the failure but still register the routes.
            self.logger.exception(
                'BluePrint {} initialization failed, Exception: {}'.format(self.name, e))
        super().register(app, options, first_registration)
|
986,015 | 0f045bf17bcd6416c3c2e639b9e6b2a1d7aef6a5 | import token_generator as token
import ast
import config
import urllib.request
def api_symptom_result(symptoms_ids, gender, year_of_birth):
    """Query the priaid /diagnosis endpoint and map accuracy -> issue name.

    Parameters
    ----------
    symptoms_ids : str
        Comma-separated symptom ids, spliced into the ``symptoms=[...]``
        query parameter.
    gender, year_of_birth : str
        Forwarded verbatim as query parameters.

    Returns
    -------
    dict
        ``{accuracy: issue_name}`` for each diagnosed issue.
        NOTE(review): issues that share the same accuracy value overwrite
        each other in this dict — verify this is acceptable to callers.
    """
    import json  # local import: parse the API's JSON payload

    authKey = token.tokenGen()
    api_results_dict = {}
    get_symptoms_result = (
        config.priaid_health_url + "/diagnosis?symptoms=[" + symptoms_ids + "]"
        + "&gender=" + gender + "&year_of_birth=" + year_of_birth
        + "&token=" + authKey + "&format=json&language=en-gb"
    )
    # Context manager closes the HTTP connection deterministically
    # (the original handle was never closed).
    with urllib.request.urlopen(get_symptoms_result) as response:
        # json.loads replaces ast.literal_eval: the payload is JSON, and
        # literal_eval fails on JSON-only tokens (true/false/null).
        api_results = json.loads(response.read().decode("utf-8"))
    for result in api_results:
        accuracy = result['Issue']['Accuracy']
        api_results_dict[accuracy] = result['Issue']['Name']
    return api_results_dict
|
986,016 | 252b9a0f12fb28f583aaf3257678494f5fd6e0b8 | #-*- coding: utf-8 -*-
# @Time : 2018/9/28 16:16
# @Author : Z
# @Email : S
# @File : 4.0dict.py
# Tutorial script (Python 2) demonstrating dict construction rules.
# Literal definition
d1={"apple":1,"banana":2}
print d1
# dict() over zip(): pair up parallel key/value sequences
d2=dict(zip(["apple","banana"],[1,2]))
print d2
# dict() with keyword arguments
d3=dict(apple=1,banana=2)
print d3
# Deleting a whole dict unbinds the name
# del d2
# print d2 #NameError: name 'd2' is not defined
# Clearing a dict empties it in place
d2.clear()
print d2
# Dict keys must be hashable, i.e. immutable types — a tuple works, a list does not
# print hash([1,2,3]) #TypeError: unhashable type: 'list'
# print hash((1,3,4))
# NOTE: the next line intentionally raises at runtime to demonstrate the error.
d3={["Apple"]:"pear",1:"pear"}
print d3 #TypeError: unhashable type: 'list'
d4={("apple","pear"):"pear",1:"pear"}
print d4
# Keys cannot repeat: the later value silently wins
d5={"apple":1,"pear":2,"apple":3}
print d5
986,017 | 9355fba9b1a5b50c548d4ee032bfb3f2195e2057 | # Copyright 2015–2020 Kullo GmbH
#
# This source code is licensed under the 3-clause BSD license. See LICENSE.txt
# in the root directory of this source tree for details.
# Development-only Django settings; layers dev overrides on top of the base config.
from WebConfig.settings.base import *

DEBUG = True

# Hard-coded key is acceptable only because this settings module is dev-only;
# production must supply its own secret.
SECRET_KEY = 'cnz1i$692*1$6lz(ik+*vc%yb+47dstfr_^lckd-8(+fm&)1_s'

# Print outgoing emails to stdout instead of sending them.
EMAIL_BACKEND = 'django.core.mail.backends.console.EmailBackend'

# Enable django-debug-toolbar in development.
INSTALLED_APPS += [
    'debug_toolbar',
]
MIDDLEWARE += [
    'debug_toolbar.middleware.DebugToolbarMiddleware',
]
# The toolbar only renders for these client IPs.
INTERNAL_IPS = [
    '127.0.0.1',
]
|
986,018 | 63ec2bfa51b83aee9f782b8dbe4ec8176cc633ef | # If you want MPE to log MPI calls, you have to add the two lines
# below at the very beginning of your main bootstrap script.
import mpi4py.rc
mpi4py.rc.profile('MPE', logfile='cpilog')
# Import the MPI extension module
from mpi4py import MPI
if 0: # <- use '1' to disable logging of MPI calls
MPI.Pcontrol(0)
# Import the MPE extension module
from mpi4py import MPE
if 1: # <- use '0' to disable user-defined logging
# This has to be explicitly called !
MPE.initLog(logfile='cpilog')
# Set the log file name (note: no extension)
MPE.setLogFileName('cpilog')
import numpy
from numpy import random
# User-defined MPE events
cpi_begin = MPE.newLogEvent("ComputePi-Begin", "yellow")
cpi_end = MPE.newLogEvent("ComputePi-End", "pink")
# User-defined MPE states
synchronization = MPE.newLogState("Synchronize", "orange")
communication = MPE.newLogState("Comunicate", "red")
computation = MPE.newLogState("Compute", "green")
cpi_begin
comm=MPI.COMM_WORLD
rank=comm.Get_rank()
size=comm.Get_size()
if(rank==0):
v=random.random((1000,4801))
if size>=len(v):
size=len(v)
slice_size=int(numpy.ceil(float(len(v))/float(size)))
slice_for_last_node=len(v)-(size-1)*slice_size
#xtra_slices=len(v)%size
rows=len(v)
print "slice_size:",slice_size
print "slice_for_last_node:",slice_for_last_node
else:
slice_size=slice_for_last_node=rows=None
size=comm.bcast(size,root=0)
slice_size=comm.bcast(slice_size,root=0)
slice_for_last_node=comm.bcast(slice_for_last_node,root=0)
rows=comm.bcast(rows,root=0)
def doSum(x):
return numpy.sum(x)
if rank==0:
print 'thats v_random:\n', v
count=1
cur_dest=0
for i in range(len(v)):
if(count>slice_size and cur_dest<size-1):
cur_dest+=1
count=1
if(cur_dest>=size-1):
cur_dest=size-1
comm.send(v[i],dest=cur_dest)
count+=1
if rank<size-1:
count=1
while count<=slice_size: #slices per proc
data=comm.recv(source=0)
count+=1
print 'my rank is {0} and my output is {1}\n'.format(rank,doSum(data))
elif rank==size-1:
count=1
while count<=slice_for_last_node:
data=comm.recv(source=0)
count+=1
print 'my rank is {0} and my output is {1}\n'.format(rank,doSum(data))
|
986,019 | 5cefc1917c814b5496d98d9a7cbdc063b41f5b96 | import socket
# Minimal one-shot TCP chat client: receive a greeting, send one message.
c1=socket.socket()
c1.connect(('localhost',9999))
# Read and display up to 1024 bytes sent by the server on connect.
print(c1.recv(1024).decode())
chat= input("enter your message : ")
c1.send(bytes(chat,'utf-8'))
#print("message send")
|
986,020 | 6a370578eaf176520231a707d6cc6da8599166b7 | class Stack:
def __init__(self):
self.array = []
def push(self, num):
self.array.append(num)
def pop(self):
if self.empty() is False:
return self.array.pop()
else:
return -1
def size(self):
return len(self.array)
def empty(self):
if self.size() > 0:
return False
else:
return True
def solution(str):
    """Return "YES" if the parenthesis string is balanced, else "NO".

    Every character other than '(' is treated as a closer and pops the
    stack; a closer with nothing left to match fails immediately.
    """
    opened = Stack()
    for ch in str:
        if ch == "(":
            opened.push(ch)
        elif opened.pop() == -1:
            # Closer arrived while the stack was already empty.
            return "NO"
    # Balanced only if no opener is left unmatched.
    return "YES" if opened.empty() else "NO"
|
986,021 | ce0102e2e8b54b35d287eb45a48fe93c24e4551c |
# This file was *autogenerated* from the file count_lattice.sage
from sage.all_cmdline import * # import sage library
_sage_const_2 = Integer(2); _sage_const_1 = Integer(1); _sage_const_0 = Integer(0)
import math
import numpy as np
import scipy
import scipy.optimize
#import matplotlib.pyplot as plt
import csv
def exist(pts, latt):
latt = np.array(latt)
for i in range(pts.shape[_sage_const_0 ]):
if pts[i][_sage_const_0 ]==latt[_sage_const_0 ]:
if pts[i][_sage_const_1 ]==latt[_sage_const_1 ]:
if pts[i][_sage_const_2 ]==latt[_sage_const_2 ]:
return _sage_const_1
return _sage_const_0
def dist(p1, p2):
return sqrt((p1[_sage_const_0 ]-p2[_sage_const_0 ])**_sage_const_2 +(p1[_sage_const_1 ]-p2[_sage_const_1 ])**_sage_const_2 +(p1[_sage_const_2 ]-p2[_sage_const_2 ])**_sage_const_2 )
def on_edge(latt, poly):
edges = poly.faces(_sage_const_1 )
num_edges = len(edges)
for i in range(num_edges):
pt1 = list(edges[i].vertices()[_sage_const_0 ])
pt2 = list(edges[i].vertices()[_sage_const_1 ])
if (dist(pt1, pt2) == (dist(pt1, latt) + dist(pt2, latt))):
return _sage_const_1
return _sage_const_0
def on_face(latt, poly):
faces = poly.faces(_sage_const_2 )
for face in poly.faces(_sage_const_2 ):
face_pts = [list(face.vertices()[i]) for i in range(len(face.vertices()))]
face_poly = Polyhedron(face_pts)
if face_poly.contains(latt) == _sage_const_1 :
return _sage_const_1
return _sage_const_0
def count_pts(pts):
# Count the number of corner points, edge points, face points, and body points
num_corner = len(pts)
num_edge = _sage_const_0
num_face = _sage_const_0
num_body = _sage_const_0
#edge = []
#face = []
#body = []
pts_max = int(max(np.amax(pts, axis=_sage_const_0 )))+_sage_const_1
pts_min = int(min(np.amin(pts, axis=_sage_const_0 )))-_sage_const_1
#print 'pts_max: ', pts_max
#print 'pts_min: ', pts_min
poly = Polyhedron(pts)
pts_new = pts
for i in range(pts_min, pts_max):
for j in range(pts_min, pts_max):
for k in range(pts_min, pts_max):
latt = [i,j,k]
if exist(np.array(pts), latt) == _sage_const_1 :
continue
if on_edge(latt, poly) == _sage_const_1 :
num_edge += _sage_const_1
#edge.append(latt)
elif on_face(latt, poly) == _sage_const_1 :
num_face += _sage_const_1
#face.append(latt)
elif poly.interior_contains(latt) == _sage_const_1 :
num_body += _sage_const_1
#body.append(latt)
#print 'edge: ', edge
#print 'face: ', face
#print 'body: ', body
return [num_corner, num_edge, num_face, num_body]
input_path = 'output/polygon/cube/'
output_path = 'output/topology/cube/'
file_path = ['1x1.txt', '2x2.txt', '3x3.txt', '4x4.txt', '5x5.txt']
for path in file_path:
input_file = open(input_path + path , 'r')
output_file = open(output_path + path, 'w')
for line in input_file:
pts = eval(line)
topo_list = count_pts(pts)
output_file.write('%s\n' % topo_list)
print('Done.')
|
986,022 | 97a26f7848d194325363da63fba99a8ca4114f0d | import sys
import numpy as np
import random
def main():
# Comprobacion de errores
if len(sys.argv) < 5:
print("Es necesario ejecutar el programa con los siguientes parametros:")
print(" - python3 Crear_alfabeto num_copias num_errores fich_entrada fich_salida")
return
# Almacenamos los parametros de entrada del programa
num_copias = int(sys.argv[1])
num_errores = int(sys.argv[2])
fichero_entrda = sys.argv[3]
fichero_salida = sys.argv[4]
# Lista con todas las letras del abecedario
# Comprobacion de errores
if (num_copias > 0) and (num_errores <= 0):
print("El parametro num_copias solo puede ser > 0 cuando num_errores > 0")
# Almacena como listas la informacion del fichero formateada
fichero_leido = []
# Leemos el fichero de entrada
with open (fichero_entrda, "r") as f_in:
# Leemos el fichero y lo almacenamos eliminando espacios y /n's
for linea in f_in:
salida = linea.split("\n")
salida.remove("")
fichero_leido.append(salida)
fichero_leido = [lista[0].split(" ") for lista in fichero_leido if lista != ['']]
# Generamos un diccionario con todas las letras del abecedario como clave y su valor como cadena de binarios
letras_dict = {}
for i, dato in enumerate(fichero_leido):
if("Letra" in dato[0]):
letra = dato[0].split(" ")
letra = letra[2].split(":")
letra_valor = []
else:
letra_valor += dato
if i != 0:
letras_dict[letra[0]] = letra_valor
# for value in letras_dict.values():
# for j, atrib in enumerate(value):
# if atrib == '0':
# value[j] = '-1'
# Generacion del fichero de salida en el caso de que num_copias sea <0
with open(fichero_salida, "w") as f_out:
# Escritura de la primera linea de todos los ficheros, n_entradas, n_salidas
f_out.write(str(len(list(letras_dict.values())[0])) +" " +str(len(list(letras_dict.values())[0])) +"\n")
for letra in letras_dict:
if num_copias != 0:
for i in range(num_copias):
# Copiamos la letra original para no modificarla entre iteraciones
letra_final = letras_dict[letra].copy()
# Para cada copia generamos una lista de num_errores con los indices entre el 0 y el 34
indices_errores = random.sample(range(35), num_errores)
# Modificamos esos valores
for indice in indices_errores:
if letra_final[indice] == '1':
letra_final[indice] = '0'
else:
letra_final[indice] = '1'
# Escribimos en el fichero de salida los atributos
for pixel in letra_final:
f_out.write(str(pixel) +" ")
# Escribimos en el fichero de salida las clases de salida
for i, pixel in enumerate(letras_dict[letra]):
if i != (len(list(letras_dict[letra])) - 1):
f_out.write((str(pixel)) +" ")
else:
f_out.write((str(pixel)))
f_out.write("\n")
else:
# Escribimos en el fichero de salida los atributos
for pixel in letras_dict[letra]:
f_out.write((str(pixel)) + " ")
# Escribimos en el fichero de salida las clases
for i, pixel in enumerate(letras_dict[letra]):
if i != (len(list(letras_dict[letra])) - 1):
f_out.write((str(pixel)) + " ")
else:
f_out.write((str(pixel)))
f_out.write("\n")
if __name__== "__main__":
main() |
986,023 | dcfd413e1b1863ffeb0fd179a1586fbbc37cf222 | from rest_framework import serializers
from .models import *
class ClusterAvailabilityLevelSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = ClusterAvailabilityLevel
fields = '__all__'
class ClusterCpuOversubscriptionSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = ClusterCpuOversubscription
fields = '__all__'
class ClusterMemoryOversubscriptionSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = ClusterMemoryOversubscription
fields = '__all__'
class CpuArchitectureSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = CpuArchitecture
fields = '__all__'
class CustomRamAngeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = CustomRamAnge
fields = '__all__'
class DatabaseTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = DatabaseType
fields = '__all__'
class DisasterRecoveryTenancySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = DisasterRecoveryTenancy
fields = '__all__'
class EncryptionStateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = EncryptionState
fields = '__all__'
class EsxHostCpuOversubscriptionSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = EsxHostCpuOversubscription
fields = '__all__'
class EsxHostMemoryOversubscriptionSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = EsxHostMemoryOversubscription
fields = '__all__'
class EventCategorySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = EventCategory
fields = '__all__'
class EventOriginSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = EventOrigin
fields = '__all__'
class EventourceSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Eventource
fields = '__all__'
class EventtatusSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Eventtatus
fields = '__all__'
class EventTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = EventType
fields = '__all__'
class HostStateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = HostState
fields = '__all__'
class Hp3parTierSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Hp3parTier
fields = '__all__'
class IpAddressAllocationStateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = IpAddressAllocationState
fields = '__all__'
class IpAddressAllocationPurposeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = IpAddressAllocationPurpose
fields = '__all__'
class LamaTemplateStateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LamaTemplateState
fields = '__all__'
class LamaTemplateStatusSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LamaTemplateStatus
fields = '__all__'
class LicenseEventTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LicenseEventType
fields = '__all__'
class LifecycleActionTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LifecycleActionType
fields = '__all__'
class LifecycleStateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LifecycleState
fields = '__all__'
class LvmLevelSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LvmLevel
fields = '__all__'
class LvmTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LvmType
fields = '__all__'
class MacAddressAllocationStateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = MacAddressAllocationState
fields = '__all__'
class MigrationStateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = MigrationState
fields = '__all__'
class SapOfferingSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = SapOffering
fields = '__all__'
class ServiceLevelSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = ServiceLevel
fields = '__all__'
class ServiceRoleSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = ServiceRole
fields = '__all__'
class SidOriginSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = SidOrigin
fields = '__all__'
class SidTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = SidType
fields = '__all__'
class StorageStatusSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = StorageStatus
fields = '__all__'
class StorageTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = StorageType
fields = '__all__'
class StorageWorkloadTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = StorageWorkloadType
fields = '__all__'
class SubscriptionStatusSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = SubscriptionStatus
fields = '__all__'
class SuitabilitySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Suitability
fields = '__all__'
class SupplierSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Supplier
fields = '__all__'
class TicketClosureCodeInterpretationSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = TicketClosureCodeInterpretation
fields = '__all__'
class TicketImplementationStatusSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = TicketImplementationStatus
fields = '__all__'
class TicketOriginSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = TicketOrigin
fields = '__all__'
class TickettateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Tickettate
fields = '__all__'
class TicketTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = TicketType
fields = '__all__'
class TopologySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Topology
fields = '__all__'
class VirtualizationSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Virtualization
fields = '__all__'
class VlanRoleSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = VlanRole
fields = '__all__'
class VlanTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = VlanType
fields = '__all__'
class VolumeGroupTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = VolumeGroupType
fields = '__all__'
class VolumeTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = VolumeType
fields = '__all__'
class WorkloadTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = WorkloadType
fields = '__all__'
class ZoneSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Zone
fields = '__all__'
class DataCenterSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = DataCenter
fields = '__all__'
class VcenterSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Vcenter
fields = '__all__'
class TenantSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Tenant
fields = '__all__'
class ClusterSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Cluster
fields = '__all__'
class Hp3parSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Hp3par
fields = '__all__'
class CpgSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Cpg
fields = '__all__'
class CustomCpuValueSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = CustomCpuValue
fields = '__all__'
class TierSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Tier
fields = '__all__'
class CustomSizeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = CustomSize
fields = '__all__'
class DatastoreSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Datastore
fields = '__all__'
class EsxHostSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = EsxHost
fields = '__all__'
class EventCodeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = EventCode
fields = '__all__'
class LvmSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Lvm
fields = '__all__'
class OrderTrackingSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = OrderTracking
fields = '__all__'
class SystemSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = System
fields = '__all__'
class TicketClosureCodeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = TicketClosureCode
fields = '__all__'
class TicketSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Ticket
fields = '__all__'
class EventSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Event
fields = '__all__'
class Hp3parCapacitySerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Hp3parCapacity
fields = '__all__'
class Hp3parPairSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Hp3parPair
fields = '__all__'
class OperatingSystemSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = OperatingSystem
fields = '__all__'
class ImageSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Image
fields = '__all__'
class SizeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Size
fields = '__all__'
class VlanSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Vlan
fields = '__all__'
class InstanceHostSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = InstanceHost
fields = '__all__'
class NicTypeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = NicType
fields = '__all__'
class IpAddressPoolSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = IpAddressPool
fields = '__all__'
class MacAddressPoolSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = MacAddressPool
fields = '__all__'
class InstanceHostNicConfigSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = InstanceHostNicConfig
fields = '__all__'
class LamaTemplateTargetSidInstanceHostSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LamaTemplateTargetSidInstanceHost
fields = '__all__'
class LamaTemplateTargetSidInstanceHostNicConfigSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LamaTemplateTargetSidInstanceHostNicConfig
fields = '__all__'
class LifecycleActionSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LifecycleAction
fields = '__all__'
class LamaTemplateSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LamaTemplate
fields = '__all__'
class LamaTemplateTargetSidSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LamaTemplateTargetSid
fields = '__all__'
class ServiceRoleDetailSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = ServiceRoleDetail
fields = '__all__'
class LamaTemplateTargetSidServiceSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LamaTemplateTargetSidService
fields = '__all__'
class LamaTemplateTargetSidVolumeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = LamaTemplateTargetSidVolume
fields = '__all__'
class ServiceSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Service
fields = '__all__'
class Service_vipSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Service_vip
fields = '__all__'
class SidLvmLicenseEventSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = SidLvmLicenseEvent
fields = '__all__'
class TenantUserGroupSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = TenantUserGroup
fields = '__all__'
class VlanTripletSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = VlanTriplet
fields = '__all__'
class VlaSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Vla
fields = '__all__'
class VmnetworkSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Vmnetwork
fields = '__all__'
class VolumeGroupSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = VolumeGroup
fields = '__all__'
class VolumeGroupFileSystemSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = VolumeGroupFileSystem
fields = '__all__'
class VolumeSerializer(serializers.ModelSerializer):
id = serializers.ReadOnlyField()
class Meta:
model = Volume
fields = '__all__'
|
986,024 | e52f23b3710f37d06a8f844f1cc89861a828330c | #!/usr/bin/env python
from __future__ import print_function
import rospy
from hal_protocol import HALProtocol, HALProtocolError
#from hal.hw.m4atxhw import M4AtxHw, M4AtxHwError
#from hal.hw.kinectauxhw import KinectAuxHw, KinectAuxHwError
class PCHardwareAbstractionLayerError(HALProtocolError):
    """Raised when the PC hardware abstraction layer cannot be constructed."""
    pass
class PCHardwareAbstractionLayer(HALProtocol):
    """HAL for the on-board PC.

    The concrete hardware backends (M4-ATX power supply, Kinect aux
    motor/LED/IMU) are disabled in this build — their wiring is kept
    below as commented-out scaffolding — so every accessor returns an
    inert stub value.
    """

    def __init__(self):
        HALProtocol.__init__(self, "PC HAL")
        #try:
        #    self._m4atx = M4AtxHw()
        #except M4AtxHwError:
        #    raise PCHardwareAbstractionLayerError("Unable to instantiate M4AtxHw")
        #try:
        #    self._kinectaux = KinectAuxHw()
        #except KinectAuxHwError:
        #    raise PCHardwareAbstractionLayerError("Unable to instantiate KinectAuxHw")
    def _init(self):
        """HALProtocol hook: nothing to initialize while hardware is disabled."""
        pass
    def _startup(self):
        """HALProtocol hook: would start the M4-ATX monitor when enabled."""
        #self._m4atx.Start()
        pass
    def _stop(self):
        """HALProtocol hook: would stop the M4-ATX monitor when enabled."""
        #self._m4atx.Stop()
        pass
    def _shutdown(self):
        """HALProtocol hook: nothing to tear down while hardware is disabled."""
        pass
    def GetVoltages(self):
        """Stub for M4AtxHw.GetVoltages(); returns an empty list."""
        #return self._m4atx.GetVoltages()
        return []
    def GetTemp(self):
        """Stub for M4AtxHw.GetTemp(); returns 0."""
        #return self._m4atx.GetTemp()
        return 0
    def GetSpeed(self):
        """Stub for KinectAuxHw.GetSpeed()."""
        #return self._kinectaux.GetSpeed()
        return None
    def GetAccel(self):
        """Stub for KinectAuxHw.GetAccel()."""
        #return self._kinectaux.GetAccel()
        return None
    def GetTilt(self):
        """Stub for KinectAuxHw.GetTilt()."""
        #return self._kinectaux.GetTilt()
        return None
    def SetTilt(self, angle):
        """Stub for KinectAuxHw.SetTilt(angle); the angle is ignored."""
        #return self._kinectaux.SetTilt(angle)
        return None
    def SetLed(self, value):
        """Stub for KinectAuxHw.SetLed(value); the value is ignored."""
        #return self._kinectaux.SetLed(value)
        return None
# Smoke test: bring the HAL up, poll one reading, and shut it back down.
if __name__ == "__main__":
    pc_hal = PCHardwareAbstractionLayer()
    pc_hal.Startup()
    # Busy-wait until the HAL reports ready (True = started).
    while not pc_hal.Ready(True): pass
    pc_hal.GetVoltages()
    pc_hal.Shutdown()
    # Busy-wait until the HAL reports stopped (False = shut down).
    while not pc_hal.Ready(False): pass
|
986,025 | 2427e130897dceccaa62fed6ea0ac27c39561ac4 | import csv
from sklearn.externals import joblib
import re
import itertools as iters
import numpy as np
import pandas as pd
def test(matrix):
    """Score protein sequences with the pretrained LDA+SVM pipeline.

    Parameters
    ----------
    matrix : str
        Path to a FASTA-like file; lines starting with '>' are headers
        and are skipped, all other lines are protein sequences.

    Returns
    -------
    numpy.ndarray
        ``predict_proba`` output of the pretrained SVM, one row per sequence.

    Pipeline: 800 gapped-tripeptide frequency features -> 6 rounds of
    pairwise LDA reduction -> scaler -> SVM. The ``matrix`` name is
    rebound from the input path to the feature matrix as it flows through.
    """
    with open(matrix, "r") as file:
        train_fdata = [line.strip() for line in file if '>' != line[0]]
    # 20 standard amino acids; DIPEPTIDE enumerates all 8000 tripeptides
    # (built but unused below — kept for parity with the training script).
    SAA = ('ACDEFGHIKLMNPQRSTVWY')
    DIPEPTIDE = []
    for dipeptide in iters.product(SAA, repeat=3):
        DIPEPTIDE.append(''.join(dipeptide))
    # Each selected feature is encoded as a string: residue, gap1 digit,
    # gap2 digit, then the remaining pattern characters (positions 3 and 6
    # hold the second and third residues) — e.g. the pattern "A12X..Y".
    df = pd.read_csv(r'800features.csv',index_col = 0)
    cols = list(df.iloc[:,0])
    matrix = np.zeros((len(train_fdata),800))
    for j in range(len(cols)):
        gap1 = int(cols[j][1])
        gap2 = int(cols[j][2])
        tripeptide = cols[j][0] + cols[j][3] + cols[j][6]
        for i in range(len(train_fdata)):
            protein = train_fdata[i]
            n = 0
            # Number of window positions where the gapped tripeptide fits.
            loops = len(train_fdata[i])- int(gap1) - int(gap2) -2
            for start in range(loops):
                dipeptide = protein[start] + protein[start + gap1 + 1] + protein[start + 2 + int(gap1) + int(gap2)]
                if dipeptide == tripeptide:
                    n += 1
            # Normalized occurrence frequency of this gapped tripeptide.
            matrix[i,j] = n/loops
    matrix=pd.DataFrame(matrix,columns = cols)
    # matrix.to_csv('first800dimension.csv')
    # matrix= pd.read_csv('first800dimension.csv',index_col = 0)
    # Six rounds of pairwise LDA dimensionality reduction.
    for number in range(6):
        filename = '%s%d%s' % ('/var/www/FlaskApp/FlaskApp/models/lda/order/lda_order__', number, '.csv') # per-round CSV recording which feature pairs were reduced
        reader = csv.reader(open(filename, 'r', errors='ignore'))
        transform_test_x = [] # collects each pair's reduced feature; reassembled into the next round's matrix
        for items in reader:
            for item in items:
                order=re.findall('\d+',item)
                first=int(order[0])
                try:
                    second=int(order[1])
                    LDA=joblib.load('%s%d%s%d%s%d%s' %('/var/www/FlaskApp/FlaskApp/models/lda/model/LDA__',number,'_',first,'_',second,'.model')) # load the pairwise LDA model for this pair
                    feature=LDA.transform(matrix.iloc[:,[first,second]])
                    c=[]
                    for x in feature: # flatten the nested list so it converts cleanly to a matrix row
                        c.extend(x)
                    transform_test_x.append(c)
                except:
                    # NOTE(review): bare except — intended to catch the IndexError
                    # when the entry holds a single (unpaired) index, but it will
                    # also swallow joblib/transform failures. Consider narrowing.
                    feature=np.array(matrix.iloc[:,[first]])
                    c = []
                    for x in feature: # flatten the nested list so it converts cleanly to a matrix row
                        c.extend(x)
                    transform_test_x.append(c)
        transform_matrix = pd.DataFrame(transform_test_x).T # reduced features become the input matrix for the next round
        # path ='%s%d%s' %('RawData/Cancerlectin/model_test_',number,'.csv') # save each round's reduced data
        # transform_matrix.to_csv(path)
        matrix=transform_matrix
    # Standardize with the scaler fitted at training time.
    scaler=joblib.load('/var/www/FlaskApp/FlaskApp/models/lda/model/scaler.model')
    matrix=np.array(matrix)
    matrix=scaler.transform(matrix)
    # SVM classification.
    clf=joblib.load('/var/www/FlaskApp/FlaskApp/models/lda/model/svm.model')
    score=clf.predict_proba(matrix)
    return score
def main():
    """Score the bundled sample file and print the class probabilities."""
    # Compact float formatting for the probability matrix.
    np.set_printoptions(precision=4)
    np.set_printoptions(suppress=True)
    sample_path = 'non-cancerlectin.txt'
    probabilities = test(sample_path)
    print(probabilities)
|
986,026 | 51bc9d27855a97fca445e6acbef23ff2f80e664e | #This imports the python3 print function if this code is being run in python2
from __future__ import print_function
import sys
sys.path.append(sys.argv[0].replace("CreateDendogram.py",""))
from ETFio import LoadETFCatalogue
from createPlotArrays import createPlotArrays
from ReadConfig import plotOptions
from plotDendogram import plotDendogram
import numpy as np
import os
if(len(sys.argv)<5):
raise SystemExit("Incorrect number of arguments parsed.\n \tUsage: CreateDendogram.py <ETF file> <Num plot> <output directory> plot_config.cfg\n")
try:
nPlot = int(sys.argv[2])
except ValueError:
raise SystemExit("Please parse a int for the number to be plotted")
#Make the output directory if it does to exist
outdir = sys.argv[3]
if(os.path.isdir(outdir)==False):
os.mkdir(outdir)
#Get the plot options
plotOpt = plotOptions(sys.argv[4],outdir)
#Load the data from the ETF catalogue
opt,halodata = LoadETFCatalogue(sys.argv[1],plotOpt)
#Get the indexes of the nPlot largest branches
endSnap = "Snap_%03d" %(opt.endSnap)
indexes=np.argsort(halodata[endSnap]["Mass"])[::-1][:nPlot][::-1]
#Loop over all the indexes producing dendograms
for SelIndex in indexes:
plotData,branchIndicator,depthIndicator,sortIndx,mainBranchIDs = createPlotArrays(opt,plotOpt,halodata,SelIndex)
#Check if there is anythin to plot
if(len(branchIndicator)==0):
continue
plotDendogram(plotOpt,plotData,depthIndicator,branchIndicator,sortIndx,halodata["Snap_%03d" %opt.endSnap]["HaloID"][SelIndex],mainBranchIDs) |
986,027 | dabbca90e41aa4eb8d14b95ca519397902a484fc | from __future__ import absolute_import, division, print_function
import libtbx.load_env
from six.moves import range
from six.moves import zip
if (libtbx.env.has_module("ccp4io")):
from iotbx import mtz
else:
mtz = None
from iotbx.option_parser import option_parser
from cctbx import sgtbx
from cctbx import uctbx
from cctbx.array_family import flex
from libtbx.test_utils import Exception_expected, approx_equal, show_diff
from itertools import count
from six.moves import cStringIO as StringIO
import sys, os
def exercise_read_corrupt():
  """Verify that mtz.object raises RuntimeError for truncated/corrupt files.

  Trial 0 writes an empty file; trials 1..4 write 40, 80, 120, 160 null
  bytes.  Every variant must be rejected with the exact cctbx error message.
  """
  for i_trial in range(5):
    # Use a context manager so the handle is closed even if write() fails.
    with open("tmp_iotbx_mtz_ext.mtz", "w") as f:
      if (i_trial > 0):
        f.write("\0"*(40*i_trial))
    try:
      mtz.object(file_name="tmp_iotbx_mtz_ext.mtz")
    except RuntimeError as e:
      assert str(e) == "cctbx Error: MTZ file read error: tmp_iotbx_mtz_ext.mtz"
    else:
      raise Exception_expected
def exercise_setting_nref_etc():
  """A fresh mtz.object holds zero reflections; after growing the column
  arrays the reflection count can be set explicitly and read back."""
  mtz_obj = mtz.object()
  assert mtz_obj.n_reflections() == 0
  # Storage must be grown before the new reflection count is declared.
  mtz_obj.adjust_column_array_sizes(10)
  mtz_obj.set_n_reflections(10)
  assert mtz_obj.n_reflections() == 10
def exercise_basic():
  """Exercise ccp4_liberr_verbosity, the accessors of an empty mtz.object,
  and -- when phenix_regression/reflection_files/dano.mtz is available --
  reading that file and extracting its data through every extract_* path.
  """
  # ccp4_liberr_verbosity(-1) queries the current level; 0/1 set it.
  assert mtz.ccp4_liberr_verbosity(-1) == 0
  assert mtz.ccp4_liberr_verbosity(1) == 1
  assert mtz.ccp4_liberr_verbosity(-1) == 1
  assert mtz.ccp4_liberr_verbosity(0) == 0
  assert mtz.ccp4_liberr_verbosity(-1) == 0
  # Defaults of a freshly constructed, empty mtz.object.
  mtz_object = mtz.object()
  assert mtz_object.title() == ""
  assert mtz_object.history().size() == 0
  assert mtz_object.space_group_name() == ""
  assert mtz_object.space_group_number() == 0
  assert mtz_object.n_symmetry_matrices() == 0
  assert mtz_object.space_group_confidence() == "\x00"
  assert mtz_object.space_group().order_z() == 1
  assert mtz_object.point_group_name() == ""
  assert mtz_object.lattice_centring_type() == "\0"
  assert mtz_object.n_batches() == 0
  assert mtz_object.batches().size() == 0
  assert mtz_object.n_reflections() == 0
  assert mtz_object.max_min_resolution() == (-1, -1)
  assert mtz_object.n_crystals() == 0
  assert mtz_object.n_active_crystals() == 0
  # Regression data file; the remainder of the test is skipped when the
  # phenix_regression repository is not checked out.
  file_name = libtbx.env.find_in_repositories(
    relative_path="phenix_regression/reflection_files/dano.mtz",
    test=os.path.isfile)
  if (file_name is None):
    print("Skipping dano.mtz test: input file not available")
  else:
    # Header-level metadata of dano.mtz.
    mtz_object = mtz.object(file_name=file_name)
    assert mtz_object.title() == "......"
    assert mtz_object.history().size() == 17
    assert mtz_object.space_group_name() == "P212121"
    assert mtz_object.space_group_number() == 19
    assert mtz_object.point_group_name() == "PG222"
    assert mtz_object.lattice_centring_type() == "P"
    assert mtz_object.n_symmetry_matrices() == 4
    assert mtz_object.space_group().type().lookup_symbol() == "P 21 21 21"
    assert mtz_object.n_batches() == 0
    assert mtz_object.batches().size() == 0
    assert mtz_object.n_reflections() == 165
    assert approx_equal(mtz_object.max_min_resolution(),
                        (19.869975507347792, 15.001543055390009))
    assert mtz_object.n_crystals() == 4
    assert mtz_object.n_active_crystals() == 3
    assert mtz_object.has_crystal("unknown")
    assert not mtz_object.has_crystal("abc")
    assert mtz_object.has_column("H")
    assert not mtz_object.has_column("abc")
    # Crystal accessors: rename round-trips and duplicate-name rejection.
    crystal = mtz.crystal(mtz_object=mtz_object, i_crystal=1)
    assert crystal.mtz_object().n_reflections() == 165
    assert crystal.i_crystal() == 1
    assert mtz_object.crystals().size() == mtz_object.n_crystals()
    assert crystal.id() == 2
    assert crystal.name() == "unknown"
    assert crystal.set_name(new_name="abc") is crystal
    assert crystal.name() == "abc"
    assert crystal.set_name("abc") is crystal
    assert crystal.name() == "abc"
    try: crystal.set_name("unknown3")
    except RuntimeError as e:
      assert str(e) == 'mtz::crystal::set_name(new_name="unknown3"):' \
        ' new_name is used already for another crystal.'
    else: raise Exception_expected
    assert crystal.name() == "abc"
    assert crystal.set_name("unknown") is crystal
    assert crystal.name() == "unknown"
    assert crystal.project_name() == "unknown"
    assert crystal.set_project_name(new_project_name="abc") is crystal
    assert crystal.project_name() == "abc"
    assert crystal.set_project_name(new_project_name="unknown") is crystal
    assert crystal.project_name() == "unknown"
    assert approx_equal(crystal.unit_cell_parameters(),
                        (84.511, 104.308, 174.103, 90, 90, 90))
    assert approx_equal(crystal.unit_cell().parameters(),
                        (84.511, 104.308, 174.103, 90, 90, 90))
    assert crystal.n_datasets() == 1
    # Dataset accessors: rename and wavelength round-trips.
    dataset = mtz.dataset(mtz_crystal=crystal, i_dataset=0)
    assert dataset.mtz_crystal().i_crystal() == 1
    assert dataset.i_dataset() == 0
    assert dataset.mtz_object().n_crystals() == mtz_object.n_crystals()
    assert dataset.id() == 1
    assert dataset.name() == "unknown230103:23:14:49"
    assert dataset.set_name(new_name="abc") is dataset
    assert dataset.name() == "abc"
    assert dataset.set_name(new_name="abc") is dataset
    assert dataset.name() == "abc"
    assert dataset.set_name("unknown230103:23:14:49") is dataset
    assert dataset.name() == "unknown230103:23:14:49"
    assert dataset.wavelength() == 0
    assert dataset.set_wavelength(new_wavelength=0.12) is dataset
    assert approx_equal(dataset.wavelength(), 0.12)
    assert dataset.set_wavelength(0) is dataset
    assert dataset.wavelength() == 0
    # Column accessors: label/type setters, validation, and value extraction.
    column = mtz.column(mtz_dataset=dataset, i_column=0)
    assert column.mtz_dataset().mtz_crystal().i_crystal() == 1
    assert column.i_column() == 0
    assert column.mtz_crystal().i_crystal() == 1
    assert column.mtz_object().n_reflections() == 165
    assert column.label() == "H"
    assert column.set_label(new_label="New") is column
    assert column.label() == "New"
    try: column.set_label("a,b,c")
    except RuntimeError as e:
      assert str(e) == 'mtz::column::set_label(new_label="a,b,c"):' \
        ' new_label must not include commas.'
    else: raise Exception_expected
    assert column.label() == "New"
    assert column.set_label(new_label="New") is column
    assert column.label() == "New"
    try: column.set_label(new_label="K")
    except RuntimeError as e:
      assert str(e) == 'mtz::column::set_label(new_label="K"):' \
        ' new_label is used already for another column.'
    else: raise Exception_expected
    assert column.set_label("H") is column
    assert column.label() == "H"
    assert column.type() == "H"
    assert column.set_type(new_type="Nw") is column
    assert column.type() == "Nw"
    assert column.set_type("H") is column
    assert column.type() == "H"
    assert column.is_active()
    # NOTE(review): source()/group_* metadata appear to be available only
    # with some ccp4io builds -- presumably why both branches exist; confirm.
    if (column.source() is None):
      assert column.group_name() is None
      assert column.group_type() is None
      assert column.group_position() == -1
    else:
      assert column.source() == ""
      assert column.set_source(new_source="NsRc") is column
      assert column.source() == "NsRc"
      assert column.set_source(new_source="") is column
      assert column.source() == ""
      assert column.group_name() == ""
      assert column.set_group_name(new_group_name="NgN") is column
      assert column.group_name() == "NgN"
      assert column.set_group_name(new_group_name="") is column
      assert column.group_name() == ""
      assert column.group_type() == ""
      assert column.set_group_type(new_group_type="NgT") is column
      assert column.group_type() == "NgT"
      assert column.set_group_type(new_group_type="") is column
      assert column.group_type() == ""
      assert column.group_position() == -1
      assert column.set_group_position(new_group_position=23) is column
      assert column.group_position() == 23
      assert column.set_group_position(new_group_position=-1) is column
      assert column.group_position() == -1
    assert column.array_size() == 165
    assert column.array_capacity() == 200
    assert column.path() == "/unknown/unknown230103:23:14:49/H"
    assert column.get_other("H").i_column() == 0
    assert column.get_other("K").i_column() == 1
    assert column.get_other("L").i_column() == 2
    assert column.n_valid_values() == 165
    valid_values = column.extract_valid_values()
    assert valid_values.size() == 165
    assert approx_equal(flex.min(valid_values), 0)
    assert approx_equal(flex.max(valid_values), 5)
    assert approx_equal(flex.mean(valid_values), 2.41818189621)
    assert column.selection_valid().count(True) == 165
    assert approx_equal(flex.mean(column.extract_values()), 2.41818189621)
    assert approx_equal(flex.mean(column.extract_values(
      not_a_number_substitute=10)), 2.41818189621)
    # Pattern lookup: "F*" resolves to "Frem" here; 2 of the 165
    # reflections are missing for this column.
    column = mtz_object.get_column("F*")
    assert column.label() == "Frem"
    assert column.n_valid_values() == 163
    valid_values = column.extract_valid_values()
    assert valid_values.size() == 163
    assert approx_equal(flex.min(valid_values), 32.5101776123)
    assert approx_equal(flex.max(valid_values), 2711.84350586)
    assert approx_equal(flex.mean(valid_values), 615.060852051)
    assert column.selection_valid().count(True) == 163
    # Missing values are substituted (default 0, then 13), shifting the mean.
    assert approx_equal(flex.mean(column.extract_values()),
                        615.060852051*163/165)
    assert approx_equal(flex.mean(column.extract_values(13)),
                        (615.060852051*163+2*13)/165)
    v,s = column.extract_values_and_selection_valid(
      not_a_number_substitute=-97).as_tuple()
    assert v.size() == 165
    assert s.count(True) == 163
    assert approx_equal(v.select(~s), [-97]*2)
    # Walk the full crystal/dataset/column hierarchy and compare against
    # the expected layout of dano.mtz.
    expected_dataset_ids = iter(range(4))
    expected_dataset_names = iter([
      "HKL_base",
      "unknown230103:23:14:49",
      "unknown230103:23:14:21",
      "unknown230103:23:13:49"])
    expected_n_columns = iter([0,8,5,5])
    expected_column_labels = iter([
      "H", "K", "L",
      "Frem", "SIGFrem", "DANOrem", "SIGDANOrem", "ISYMrem",
      "Finf", "SIGFinf", "DANOinf", "SIGDANOinf", "ISYMinf",
      "Fabs", "SIGFabs", "DANOabs", "SIGDANOabs", "ISYMabs"])
    expected_column_types = iter("HHHFQDQYFQDQYFQDQY")
    for i_crystal,crystal in enumerate(mtz_object.crystals()):
      assert crystal.mtz_object().n_reflections() == 165
      assert crystal.i_crystal() == i_crystal
      assert crystal.n_datasets() == 1
      for i_dataset,dataset in enumerate(crystal.datasets()):
        assert dataset.mtz_crystal().i_crystal() == i_crystal
        assert dataset.i_dataset() == i_dataset
        assert dataset.id() == next(expected_dataset_ids)
        assert dataset.name() == next(expected_dataset_names)
        assert dataset.wavelength() == 0
        assert dataset.n_columns() == next(expected_n_columns)
        for i_column,column in enumerate(dataset.columns()):
          assert column.mtz_dataset().i_dataset() == i_dataset
          assert column.i_column() == i_column
          assert column.label() == next(expected_column_labels)
          assert column.type() == next(expected_column_types)
          assert column.is_active()
          assert column.array_size() == 165
          assert column.array_capacity() == 200
          assert column.path().endswith(column.label())
          get_column = mtz_object.get_column(column.label())
          assert get_column.label() == column.label()
    # extract_* paths: each returns a group whose indices/data (and sigmas,
    # where present) arrays are all the same length.  Anomalous variants
    # expand each reflection into the +/- pair, hence the doubled sizes.
    group = mtz_object.extract_integers(
      column_label="ISYMabs")
    assert not group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 165
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    data = mtz_object.extract_integers(
      mtz_reflection_indices=group.mtz_reflection_indices,
      column_label="ISYMabs")
    assert data.all_eq(group.data)
    group = mtz_object.extract_integers_anomalous(
      column_label_plus="ISYMabs",
      column_label_minus="ISYMabs")
    assert group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 330
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    group = mtz_object.extract_reals(
      column_label="Frem")
    assert not group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 163
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    data = mtz_object.extract_reals(
      mtz_reflection_indices=group.mtz_reflection_indices,
      column_label="Frem")
    assert data.all_eq(group.data)
    group = mtz_object.extract_reals_anomalous(
      column_label_plus="Frem",
      column_label_minus="DANOrem")
    assert group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 326
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    group = mtz_object.extract_hendrickson_lattman(
      column_label_a="Frem",
      column_label_b="DANOrem",
      column_label_c="Frem",
      column_label_d="DANOrem")
    assert not group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 163
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    group = mtz_object.extract_hendrickson_lattman_ab_only(
      column_label_a="Frem",
      column_label_b="DANOrem")
    assert not group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 163
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    group = mtz_object.extract_hendrickson_lattman_anomalous(
      column_label_a_plus="Frem",
      column_label_b_plus="DANOrem",
      column_label_c_plus="Frem",
      column_label_d_plus="DANOrem",
      column_label_a_minus="Frem",
      column_label_b_minus="DANOrem",
      column_label_c_minus="Frem",
      column_label_d_minus="DANOrem")
    assert group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 326
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    group = mtz_object.extract_hendrickson_lattman_anomalous_ab_only(
      column_label_a_plus="Frem",
      column_label_b_plus="DANOrem",
      column_label_a_minus="Frem",
      column_label_b_minus="DANOrem")
    assert group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 326
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    group = mtz_object.extract_observations(
      column_label_data="Frem",
      column_label_sigmas="SIGFrem")
    assert not group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 163
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    assert group.sigmas.size() == group.indices.size()
    group = mtz_object.extract_observations_anomalous(
      column_label_data_plus="Frem",
      column_label_sigmas_plus="SIGFrem",
      column_label_data_minus="DANOrem",
      column_label_sigmas_minus="SIGDANOrem")
    assert group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 272
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    assert group.sigmas.size() == group.indices.size()
    group = mtz_object.extract_delta_anomalous(
      column_label_f_data="Frem",
      column_label_f_sigmas="SIGFrem",
      column_label_d_data="DANOrem",
      column_label_d_sigmas="SIGDANOrem",
      column_label_isym="ISYMrem")
    assert group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 272
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    assert group.sigmas.size() == group.indices.size()
    group = mtz_object.extract_complex(
      column_label_ampl="Frem",
      column_label_phi="SIGFrem")
    assert not group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 163
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
    group = mtz_object.extract_complex_anomalous(
      column_label_ampl_plus="Frem",
      column_label_phi_plus="SIGFrem",
      column_label_ampl_minus="DANOrem",
      column_label_phi_minus="SIGDANOrem")
    assert group.anomalous_flag
    assert group.mtz_reflection_indices.size() == 326
    assert group.indices.size() == group.mtz_reflection_indices.size()
    assert group.data.size() == group.mtz_reflection_indices.size()
class QuickStop(Exception): pass
class exercise_extract_any(object):
  """Callable applied to each MTZ file found during a directory walk.

  For every recognized run of column types it invokes the matching
  mtz.object extract_* method and counts how often each code path ran.
  With full=False the walk is cut short (via QuickStop) as soon as every
  path has run at least once.
  """
  def __init__(self, full=True):
    # full: when False, raise_if_all_tests_ran_at_least_once() aborts early.
    self.full = full
    # One counter per extract_* code path exercised in __call__.
    self.counters = {
      "extract_integers": 0,
      "extract_reals": 0,
      "extract_reals_anomalous": 0,
      "extract_hendrickson_lattman": 0,
      "extract_observations": 0,
      "extract_observations_anomalous": 0,
      "extract_delta_anomalous": 0,
      "extract_complex": 0,
      "extract_complex_anomalous": 0}
  def all_tests_ran_at_least_once(self):
    # True once every counter is non-zero.
    return min(self.counters.values()) > 0
  def raise_if_all_tests_ran_at_least_once(self):
    # In quick mode, stop the enclosing walk as soon as full coverage
    # of the extract_* paths has been reached.
    if (self.full): return
    if (self.all_tests_ran_at_least_once()): raise QuickStop
  def __call__(self, file_name, out):
    """Open file_name as an MTZ object and run every extract_* method for
    which a matching run of column types exists in the file."""
    mtz_object = mtz.object(file_name=file_name)
    mtz_object.show_summary(out=out)
    # One-letter MTZ column-type codes, concatenated in column order
    # (e.g. "H" index, "F" amplitude, "Q" standard deviation, "B" batch).
    types = "".join(mtz_object.column_types())
    for type_group in ["B", "H", "I", "Y"]:
      i = types.find(type_group)
      if (i >= 0):
        label = mtz_object.column_labels()[i]
        group = mtz_object.extract_integers(
          column_label=label)
        assert group.data.size() == group.indices.size()
        self.counters["extract_integers"] += 1
    # F/Q or J/Q: data column followed by its sigma column.
    for type_group in ["FQ", "JQ"]:
      i = types.find(type_group)
      if (i >= 0):
        labels = mtz_object.column_labels()[i:i+2]
        group = mtz_object.extract_reals(
          column_label=labels[0])
        assert group.data.size() == group.indices.size()
        self.counters["extract_reals"] += 1
        group = mtz_object.extract_observations(
          column_label_data=labels[0],
          column_label_sigmas=labels[1])
        assert group.data.size() == group.indices.size()
        assert group.sigmas.size() == group.indices.size()
        self.counters["extract_observations"] += 1
    # Two consecutive data/sigma pairs: treated as the +/- anomalous halves.
    for type_group in ["FQFQ", "JQJQ"]:
      i = types.find(type_group)
      if (i >= 0):
        labels = mtz_object.column_labels()[i:i+4]
        group = mtz_object.extract_observations_anomalous(
          column_label_data_plus=labels[0],
          column_label_sigmas_plus=labels[1],
          column_label_data_minus=labels[2],
          column_label_sigmas_minus=labels[3])
        assert group.data.size() == group.indices.size()
        assert group.sigmas.size() == group.indices.size()
        self.counters["extract_observations_anomalous"] += 1
    # F/sigF followed by D/sigD (anomalous difference); the next Y column
    # after position i+4 supplies the ISYM labels.
    i = types.find("FQDQ")
    if (i >= 0):
      labels = mtz_object.column_labels()[i:i+4]
      group = mtz_object.extract_delta_anomalous(
        column_label_f_data=labels[0],
        column_label_f_sigmas=labels[1],
        column_label_d_data=labels[2],
        column_label_d_sigmas=labels[3],
        column_label_isym=mtz_object.next_isym_column_starting_at(
          i_column=i+4, return_label=True))
      assert group.data.size() == group.indices.size()
      self.counters["extract_delta_anomalous"] += 1
    # F/P: amplitude plus phase -> complex map coefficients.
    i = types.find("FP")
    if (i >= 0):
      labels = mtz_object.column_labels()[i:i+2]
      group = mtz_object.extract_complex(
        column_label_ampl=labels[0],
        column_label_phi=labels[1])
      assert group.data.size() == group.indices.size()
      self.counters["extract_complex"] += 1
    # GLGL / KMKM: presumably anomalous amplitude/sigma (G/L) and
    # intensity/sigma (K/M) pairs -- confirm against the MTZ column-type
    # specification.
    for type_group in ["GLGL", "KMKM"]:
      i = types.find(type_group)
      if (i >= 0):
        labels = mtz_object.column_labels()[i:i+4]
        group = mtz_object.extract_reals_anomalous(
          column_label_plus=labels[0],
          column_label_minus=labels[2])
        assert group.data.size() == group.indices.size()
        self.counters["extract_reals_anomalous"] += 1
        group = mtz_object.extract_observations_anomalous(
          column_label_data_plus=labels[0],
          column_label_sigmas_plus=labels[1],
          column_label_data_minus=labels[2],
          column_label_sigmas_minus=labels[3])
        assert group.data.size() == group.indices.size()
        assert group.sigmas.size() == group.indices.size()
        self.counters["extract_observations_anomalous"] += 1
        # work around lack of FPFP in MTZ files available
        group = mtz_object.extract_complex_anomalous(
          column_label_ampl_plus=labels[0],
          column_label_phi_plus=labels[1],
          column_label_ampl_minus=labels[2],
          column_label_phi_minus=labels[3])
        assert group.data.size() == group.indices.size()
        self.counters["extract_complex_anomalous"] += 1
    # Four consecutive A columns: Hendrickson-Lattman coefficients.
    i = types.find("AAAA")
    if (i >= 0):
      labels = mtz_object.column_labels()[i:i+4]
      group = mtz_object.extract_hendrickson_lattman(
        column_label_a=labels[0],
        column_label_b=labels[1],
        column_label_c=labels[2],
        column_label_d=labels[3])
      assert group.data.size() == group.indices.size()
      self.counters["extract_hendrickson_lattman"] += 1
def walk_callback(arg, top, names):
  """Directory-walk callback: run the exercise function on each MTZ file.

  arg is an (exercise_function, out) pair.  Every name ending in ".mtz"
  (case-insensitive) is expanded to a normalized path under top, announced
  on out, and fed to exercise_function; after each file the function may
  abort the walk via raise_if_all_tests_ran_at_least_once().
  """
  exercise_function, out = arg
  mtz_paths = (os.path.normpath(os.path.join(top, name))
               for name in names if name.lower().endswith(".mtz"))
  for file_name in mtz_paths:
    print("Processing:", file_name, file=out)
    exercise_function(file_name=file_name, out=out)
    exercise_function.raise_if_all_tests_ran_at_least_once()
def exercise_walk(root_dir, full, verbose=False):
  """Recursively exercise every MTZ file below root_dir.

  full=False lets the walk stop (QuickStop) once every extract_* path has
  run at least once; verbose echoes progress and the final counters to
  stdout instead of discarding them.
  """
  out = sys.stdout if verbose else StringIO()
  exercise_function = exercise_extract_any(full=full)
  try:
    if sys.version_info.major == 3:
      # Python 3: os.path.walk was removed; drive walk_callback ourselves.
      for dir_path, dir_names, file_names in os.walk(root_dir):
        walk_callback(
          arg=(exercise_function, out),
          top=dir_path,
          names=file_names)
    else:
      os.path.walk(
        top=root_dir, func=walk_callback, arg=(exercise_function, out))
  except QuickStop:
    pass
  if verbose:
    print(exercise_function.counters)
def exercise_modifiers(verbose=0):
if (verbose):
out = sys.stdout
mtz_object = mtz.object()
mtz_object.set_title(title="012345678")
assert mtz_object.title() == "012345678"
mtz_object.set_title(title="012345678", append=True)
assert mtz_object.title() == "012345678 012345678"
mtz_object.set_title(title="0123456789"*10+"012345678", append=True)
assert mtz_object.title() == "012345678 "*2 + "0123456789"*5
mtz_object.set_title("0123456789"*100)
assert mtz_object.title() == "0123456789"*7
mtz_object.set_title(title="")
assert mtz_object.title() == ""
mtz_object.set_title(title="abc", append=True)
assert mtz_object.title() == "abc"
mtz_object.set_title(title="def", append=True)
assert mtz_object.title() == "abc def"
mtz_object.set_title(title="a")
assert mtz_object.title() == "a"
mtz_object.set_title(title="bc", append=True)
assert mtz_object.title() == "a bc"
mtz_object.set_title(title=" "*70)
mtz_object.set_title(title="abc", append=True)
assert mtz_object.title() == "abc"
mtz_object.set_title(title="z"*70)
mtz_object.set_title(title="abc", append=True)
assert mtz_object.title() == "z"*70
mtz_object.set_title(title="z"*69)
mtz_object.set_title(title="abc", append=True)
assert mtz_object.title() == "z"*69
mtz_object.set_title(title="z"*68)
mtz_object.set_title(title="abc", append=True)
assert mtz_object.title() == "z"*68 + " a"
mtz_object.set_title(title="z"*67)
mtz_object.set_title(title="abc", append=True)
assert mtz_object.title() == "z"*67 + " ab"
mtz_object.add_history(lines=flex.std_string(["a1", "a2"]))
assert list(mtz_object.history()) == ["a1", "a2"]
mtz_object.add_history(lines=flex.std_string(["b1", "b2"]))
assert list(mtz_object.history()) == ["b1", "b2", "a1", "a2"]
mtz_object.add_history(line="c1")
assert list(mtz_object.history()) == ["c1", "b1", "b2", "a1", "a2"]
mtz_object.set_space_group_name(name="sg"*100)
assert mtz_object.space_group_name() == "sgsgsgsgsgsgsgsgsgsg"
mtz_object.set_space_group_number(number=12)
assert mtz_object.space_group_number() == 12
mtz_object.set_point_group_name(name="pg"*100)
assert mtz_object.point_group_name() == "pgpgpgpgpg"
mtz_object.set_lattice_centring_type(symbol="C")
assert mtz_object.lattice_centring_type() == "C"
for space_group_symbols in sgtbx.space_group_symbol_iterator():
space_group = sgtbx.space_group(space_group_symbols)
mtz_object.set_space_group(space_group)
assert mtz_object.space_group() == space_group
assert mtz_object.n_symmetry_matrices() == space_group.order_z()
assert mtz_object.xml() is None
assert mtz_object.unknown_headers() is None
assert mtz_object.number_of_unknown_headers() == 0
mtz_object = mtz.object() \
.set_title(title="exercise") \
.add_history(lines=flex.std_string(["h2"])) \
.add_history(line="h1") \
.set_space_group_name("sg") \
.set_space_group_number(123) \
.set_point_group_name("pg") \
.set_space_group(sgtbx.space_group_info(number=123).group())
assert mtz_object.title() == "exercise"
assert list(mtz_object.history()) == ["h1", "h2"]
for stage in [0,1]:
for i_crystal in range(3):
if (stage == 0):
if (i_crystal % 2 == 0):
crystal = mtz_object.add_crystal(
name="crystal_%d"%i_crystal,
project_name="project_%d"%i_crystal,
unit_cell_parameters=(10+i_crystal,20,20,90,90,120))
else:
crystal = mtz_object.add_crystal(
name="crystal_%d"%i_crystal,
project_name="project_%d"%i_crystal,
unit_cell=uctbx.unit_cell((10+i_crystal,20,20,90,90,120)))
else:
crystal = mtz_object.crystals()[i_crystal]
assert crystal.i_crystal() == i_crystal
assert crystal.name() == "crystal_%d"%i_crystal
assert crystal.project_name() == "project_%d"%i_crystal
assert approx_equal(crystal.unit_cell_parameters(),
(10+i_crystal,20,20,90,90,120))
if (not verbose): out = StringIO()
assert mtz_object.show_summary(out=out) is mtz_object
if (not verbose):
assert not show_diff(out.getvalue(), """\
Title: exercise
Space group symbol from file: sg
Space group number from file: 123
Space group from matrices: P 4/m m m (No. 123)
Point group symbol from file: pg
Number of crystals: 3
Number of Miller indices: 0
History:
h1
h2
Crystal 1:
Name: crystal_0
Project: project_0
Id: 1
Unit cell: (10, 20, 20, 90, 90, 120)
Number of datasets: 0
Crystal 2:
Name: crystal_1
Project: project_1
Id: 2
Unit cell: (11, 20, 20, 90, 90, 120)
Number of datasets: 0
Crystal 3:
Name: crystal_2
Project: project_2
Id: 3
Unit cell: (12, 20, 20, 90, 90, 120)
Number of datasets: 0
""")
mtz_object.crystals()[1].set_unit_cell_parameters([13,21,23,81,82,83])
assert approx_equal(mtz_object.crystals()[1].unit_cell_parameters(),
[13,21,23,81,82,83])
mtz_object.crystals()[1].set_unit_cell_parameters([11,20,20,90,90,120])
assert approx_equal(mtz_object.crystals()[1].unit_cell_parameters(),
[11,20,20,90,90,120])
for stage in [0,1]:
for i_crystal,crystal in enumerate(mtz_object.crystals()):
for i_dataset in range(5-i_crystal):
if (stage == 0):
new_name = "dataset_%d" % i_dataset
assert not crystal.has_dataset(name=new_name)
dataset = crystal.add_dataset(name=new_name, wavelength=10-i_dataset)
assert crystal.has_dataset(name=new_name)
else:
dataset = crystal.datasets()[i_dataset]
assert dataset.name() == "dataset_%d"%i_dataset
assert approx_equal(dataset.wavelength(), 10-i_dataset)
if (not verbose): out = StringIO()
mtz_object.show_summary(out=out)
if (not verbose):
assert not show_diff(out.getvalue(), """\
Title: exercise
Space group symbol from file: sg
Space group number from file: 123
Space group from matrices: P 4/m m m (No. 123)
Point group symbol from file: pg
Number of crystals: 3
Number of Miller indices: 0
History:
h1
h2
Crystal 1:
Name: crystal_0
Project: project_0
Id: 1
Unit cell: (10, 20, 20, 90, 90, 120)
Number of datasets: 5
Dataset 1:
Name: dataset_0
Id: 1
Wavelength: 10
Number of columns: 0
Dataset 2:
Name: dataset_1
Id: 2
Wavelength: 9
Number of columns: 0
Dataset 3:
Name: dataset_2
Id: 3
Wavelength: 8
Number of columns: 0
Dataset 4:
Name: dataset_3
Id: 4
Wavelength: 7
Number of columns: 0
Dataset 5:
Name: dataset_4
Id: 5
Wavelength: 6
Number of columns: 0
Crystal 2:
Name: crystal_1
Project: project_1
Id: 2
Unit cell: (11, 20, 20, 90, 90, 120)
Number of datasets: 4
Dataset 1:
Name: dataset_0
Id: 6
Wavelength: 10
Number of columns: 0
Dataset 2:
Name: dataset_1
Id: 7
Wavelength: 9
Number of columns: 0
Dataset 3:
Name: dataset_2
Id: 8
Wavelength: 8
Number of columns: 0
Dataset 4:
Name: dataset_3
Id: 9
Wavelength: 7
Number of columns: 0
Crystal 3:
Name: crystal_2
Project: project_2
Id: 3
Unit cell: (12, 20, 20, 90, 90, 120)
Number of datasets: 3
Dataset 1:
Name: dataset_0
Id: 10
Wavelength: 10
Number of columns: 0
Dataset 2:
Name: dataset_1
Id: 11
Wavelength: 9
Number of columns: 0
Dataset 3:
Name: dataset_2
Id: 12
Wavelength: 8
Number of columns: 0
""")
#
dataset_0_0 = mtz_object.crystals()[0].datasets()[0]
assert dataset_0_0.name() == "dataset_0"
assert dataset_0_0.set_name(new_name="dataset_x") is dataset_0_0
assert dataset_0_0.name() == "dataset_x"
assert dataset_0_0.set_name(new_name="dataset_0") is dataset_0_0
assert dataset_0_0.name() == "dataset_0"
try: dataset_0_0.set_name(new_name="dataset_1")
except RuntimeError as e:
assert str(e) == 'mtz::dataset::set_name(new_name="dataset_1"):' \
' new_name is used already for another dataset.'
else: raise Exception_expected
assert dataset_0_0.name() == "dataset_0"
#
for stage in [0,1]:
i_seq_iter = count()
for i_crystal,crystal in enumerate(mtz_object.crystals()):
for i_dataset,dataset in enumerate(crystal.datasets()):
for i_column in range((i_crystal+i_dataset) % 3 + 1):
i_seq = next(i_seq_iter)
col_label = "column_%d"%i_seq
col_type = "FB?"[(i_crystal-i_dataset+i_column) % 3]
if (stage == 0):
column = dataset.add_column(label=col_label, type=col_type)
else:
column = dataset.columns()[i_column]
assert column.label() == col_label
assert column.type() == col_type
if (not verbose): out = StringIO()
mtz_object.show_summary(out=out)
if (not verbose):
assert not show_diff(out.getvalue(), """\
Title: exercise
Space group symbol from file: sg
Space group number from file: 123
Space group from matrices: P 4/m m m (No. 123)
Point group symbol from file: pg
Number of crystals: 3
Number of Miller indices: 0
History:
h1
h2
Crystal 1:
Name: crystal_0
Project: project_0
Id: 1
Unit cell: (10, 20, 20, 90, 90, 120)
Number of datasets: 5
Dataset 1:
Name: dataset_0
Id: 1
Wavelength: 10
Number of columns: 1
label #valid %valid min max type
column_0 0 0.00% None None F: amplitude
Dataset 2:
Name: dataset_1
Id: 2
Wavelength: 9
Number of columns: 2
label #valid %valid min max type
column_1 0 0.00% None None ?: *** UNDEFINED column type ***
column_2 0 0.00% None None F: amplitude
Dataset 3:
Name: dataset_2
Id: 3
Wavelength: 8
Number of columns: 3
label #valid %valid min max type
column_3 0 0.00% None None B: BATCH number
column_4 0 0.00% None None ?: *** UNDEFINED column type ***
column_5 0 0.00% None None F: amplitude
Dataset 4:
Name: dataset_3
Id: 4
Wavelength: 7
Number of columns: 1
label #valid %valid min max type
column_6 0 0.00% None None F: amplitude
Dataset 5:
Name: dataset_4
Id: 5
Wavelength: 6
Number of columns: 2
label #valid %valid min max type
column_7 0 0.00% None None ?: *** UNDEFINED column type ***
column_8 0 0.00% None None F: amplitude
Crystal 2:
Name: crystal_1
Project: project_1
Id: 2
Unit cell: (11, 20, 20, 90, 90, 120)
Number of datasets: 4
Dataset 1:
Name: dataset_0
Id: 6
Wavelength: 10
Number of columns: 2
label #valid %valid min max type
column_9 0 0.00% None None B: BATCH number
column_10 0 0.00% None None ?: *** UNDEFINED column type ***
Dataset 2:
Name: dataset_1
Id: 7
Wavelength: 9
Number of columns: 3
label #valid %valid min max type
column_11 0 0.00% None None F: amplitude
column_12 0 0.00% None None B: BATCH number
column_13 0 0.00% None None ?: *** UNDEFINED column type ***
Dataset 3:
Name: dataset_2
Id: 8
Wavelength: 8
Number of columns: 1
label #valid %valid min max type
column_14 0 0.00% None None ?: *** UNDEFINED column type ***
Dataset 4:
Name: dataset_3
Id: 9
Wavelength: 7
Number of columns: 2
label #valid %valid min max type
column_15 0 0.00% None None B: BATCH number
column_16 0 0.00% None None ?: *** UNDEFINED column type ***
Crystal 3:
Name: crystal_2
Project: project_2
Id: 3
Unit cell: (12, 20, 20, 90, 90, 120)
Number of datasets: 3
Dataset 1:
Name: dataset_0
Id: 10
Wavelength: 10
Number of columns: 3
label #valid %valid min max type
column_17 0 0.00% None None ?: *** UNDEFINED column type ***
column_18 0 0.00% None None F: amplitude
column_19 0 0.00% None None B: BATCH number
Dataset 2:
Name: dataset_1
Id: 11
Wavelength: 9
Number of columns: 1
label #valid %valid min max type
column_20 0 0.00% None None B: BATCH number
Dataset 3:
Name: dataset_2
Id: 12
Wavelength: 8
Number of columns: 2
label #valid %valid min max type
column_21 0 0.00% None None F: amplitude
column_22 0 0.00% None None B: BATCH number
""")
for column in mtz_object.columns():
assert column.array_size() == 2000
assert column.array_capacity() == 2402
mtz_object.reserve(5000)
for column in mtz_object.columns():
assert column.array_size() == 2000
assert column.array_capacity() == 5000
mtz_object.reserve(100)
for column in mtz_object.columns():
assert column.array_size() == 2000
assert column.array_capacity() == 5000
#
mtz_object = mtz.object() \
.set_title(title="exercise") \
.set_space_group_name("sg") \
.set_space_group_number(123) \
.set_point_group_name("pg") \
.set_lattice_centring_type("pg") \
.set_space_group(sgtbx.space_group_info(number=123).group())
unit_cell = uctbx.unit_cell((10,10,10,90,90,90))
mtz_object.set_hkl_base(unit_cell=unit_cell)
dataset = mtz_object.add_crystal(
name="crystal_1",
project_name="crystal_1",
unit_cell=unit_cell).add_dataset(
name="crystal_1",
wavelength=0)
try: dataset.add_column(label="a,b,c", type="H")
except RuntimeError as e:
assert str(e) == 'mtz::dataset::add_column(label="a,b,c", ...):' \
' label must not include commas.'
else: raise Exception_expected
for label in "HKL":
dataset.add_column(label=label, type="H")
column = dataset.add_column(label="F", type="F")
mtz_reflection_indices = column.set_reals(
miller_indices=flex.miller_index([(1,2,3),(2,3,4),(3,4,5)]),
data=flex.double([10,20,30]))
assert list(mtz_reflection_indices) == [0,1,2]
column = dataset.add_column(label="SigF", type="Q")
column.set_reals(
mtz_reflection_indices=mtz_reflection_indices,
data=flex.double([1,2,3]))
group = mtz_object.extract_observations(
column_label_data="F",
column_label_sigmas="SigF")
assert list(group.indices) == [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
assert approx_equal(group.data, [10, 20, 30])
assert approx_equal(group.sigmas, [1, 2, 3])
column = dataset.add_column(label="I", type="F")
mtz_reflection_indices = column.set_reals(
miller_indices=flex.miller_index([(2,3,5),(1,2,3),(3,4,5)]),
data=flex.double([11,21,31]))
assert list(mtz_reflection_indices) == [3, 0, 2]
column = dataset.add_column(label="SigI", type="Q")
column.set_reals(
mtz_reflection_indices=mtz_reflection_indices,
data=flex.double([4,5,6]))
group = mtz_object.extract_observations(
column_label_data="I",
column_label_sigmas="SigI")
assert list(group.indices) == [(1, 2, 3), (3, 4, 5), (2, 3, 5)]
assert approx_equal(group.data, [21, 31, 11])
assert approx_equal(group.sigmas, [5, 6, 4])
if (not verbose): out = StringIO()
mtz_object.show_summary(out=out)
if (not verbose):
assert not show_diff(out.getvalue(), """\
Title: exercise
Space group symbol from file: sg
Space group number from file: 123
Space group from matrices: P 4/m m m (No. 123)
Point group symbol from file: pg
Number of crystals: 2
Number of Miller indices: 4
Resolution range: 2.67261 1.41421
History:
Crystal 1:
Name: HKL_base
Project: HKL_base
Id: 0
Unit cell: (10, 10, 10, 90, 90, 90)
Number of datasets: 1
Dataset 1:
Name: HKL_base
Id: 0
Wavelength: 0
Number of columns: 0
Crystal 2:
Name: crystal_1
Project: crystal_1
Id: 1
Unit cell: (10, 10, 10, 90, 90, 90)
Number of datasets: 1
Dataset 1:
Name: crystal_1
Id: 1
Wavelength: 0
Number of columns: 7
label #valid %valid min max type
H 4 100.00% 1.00 3.00 H: index h,k,l
K 4 100.00% 2.00 4.00 H: index h,k,l
L 4 100.00% 3.00 5.00 H: index h,k,l
F 3 75.00% 10.00 30.00 F: amplitude
SigF 3 75.00% 1.00 3.00 Q: standard deviation
I 3 75.00% 11.00 31.00 F: amplitude
SigI 3 75.00% 4.00 6.00 Q: standard deviation
""")
if (not verbose): out = StringIO()
assert mtz_object.show_column_data(out=out) is mtz_object
if (not verbose):
assert not show_diff(out.getvalue(), """\
Column data:
-------------------------------------------------------------------------------
F SigF I SigI
1 2 3 10 1 21 5
2 3 4 20 2 None None
3 4 5 30 3 31 6
2 3 5 None None 11 4
-------------------------------------------------------------------------------
""")
mtz_object.write(file_name="tmp_iotbx_mtz_ext.mtz")
if (not verbose): out = StringIO()
mtz.object(file_name="tmp_iotbx_mtz_ext.mtz").show_summary(out=out)
if (not verbose):
assert not show_diff(out.getvalue(), """\
Title: exercise
Space group symbol from file: sg
Space group number from file: 123
Space group from matrices: P 4/m m m (No. 123)
Point group symbol from file: pg
Number of crystals: 2
Number of Miller indices: 4
Resolution range: 2.67261 1.41421
History:
Crystal 1:
Name: HKL_base
Project: HKL_base
Id: 0
Unit cell: (10, 10, 10, 90, 90, 90)
Number of datasets: 1
Dataset 1:
Name: HKL_base
Id: 0
Wavelength: 0
Number of columns: 0
Crystal 2:
Name: crystal_1
Project: crystal_1
Id: 2
Unit cell: (10, 10, 10, 90, 90, 90)
Number of datasets: 1
Dataset 1:
Name: crystal_1
Id: 1
Wavelength: 0
Number of columns: 7
label #valid %valid min max type
H 4 100.00% 1.00 3.00 H: index h,k,l
K 4 100.00% 2.00 4.00 H: index h,k,l
L 4 100.00% 3.00 5.00 H: index h,k,l
F 3 75.00% 10.00 30.00 F: amplitude
SigF 3 75.00% 1.00 3.00 Q: standard deviation
I 3 75.00% 11.00 31.00 F: amplitude
SigI 3 75.00% 4.00 6.00 Q: standard deviation
""")
#
original_miller_indices = mtz_object.extract_miller_indices()
assert list(original_miller_indices) \
== [(1, 2, 3), (2, 3, 4), (3, 4, 5), (2, 3, 5)]
new_miller_indices = flex.miller_index(
[(3, -1, 2), (-4, 2, -3), (5, -3, 4), (-5, 2, -3)])
assert not mtz_object.extract_miller_indices().all_eq(new_miller_indices)
mtz_object.replace_miller_indices(miller_indices=new_miller_indices)
assert mtz_object.extract_miller_indices().all_eq(new_miller_indices)
mtz_object.replace_miller_indices(miller_indices=original_miller_indices)
assert not mtz_object.extract_miller_indices().all_eq(new_miller_indices)
assert mtz_object.extract_miller_indices().all_eq(original_miller_indices)
#
c = mtz_object.get_column(label="F")
s = c.selection_valid()
v = c.extract_values(not_a_number_substitute=-1)
assert list(s) == [True, True, True, False]
assert approx_equal(v, [10.0, 20.0, 30.0, -1.0])
c.set_values(values=flex.float([5,9,3,7]), selection_valid=None)
v = c.extract_values(not_a_number_substitute=-1)
assert approx_equal(v, [5.0, 9.0, 3.0, 7.0])
c.set_values(values=flex.float([7,8,2,0]))
v = c.extract_values(not_a_number_substitute=-1)
assert approx_equal(v, [7.0, 8.0, 2.0, 0.0])
c.set_values(
values=flex.float([5,9,3,7]),
selection_valid=flex.bool([False]*4))
v = c.extract_values(not_a_number_substitute=-1)
assert approx_equal(v, [-1]*4)
for i_trial in range(10):
s = flex.random_bool(size=4, threshold=0.5)
v = flex.float(list(flex.random_double(size=4)*10-5))
c.set_values(values=v, selection_valid=s)
sx = c.selection_valid()
vx = c.extract_values(not_a_number_substitute=99)
assert list(s) == list(sx)
assert vx.select(s).all_eq(v.select(s))
assert vx.select(~s).all_ne(v.select(~s))
assert vx.select(~s).all_eq(99)
#
values_in = count()
values_out = count()
for i_batch in range(10):
batch = mtz_object.add_batch()
assert batch.num() == i_batch+1
assert batch.set_num(value=next(values_in)) is batch
assert batch.num() == next(values_out)
assert batch.set_num(value=i_batch+1) is batch
assert batch.title() == " "
assert batch.set_title("Hello MTZ") is batch
assert batch.title() == "Hello MTZ"
assert batch.set_title("Hello MTZ"*10) is batch
assert len(batch.title()) == 70
assert list(batch.gonlab()) == ["", "", ""]
assert batch.set_gonlab(
flex.std_string(["what", "ever", "this_is....."])) is batch
assert list(batch.gonlab()) == ["what", "ever", "this_is"]
assert batch.iortyp() == 0
assert batch.set_iortyp(value=next(values_in)) is batch
assert batch.iortyp() == next(values_out)
assert list(batch.lbcell()) == [0, 0, 0, 0, 0, 0]
assert batch.set_lbcell(flex.int(range(3,9))) is batch
assert list(batch.lbcell()) == list(range(3,9))
assert batch.misflg() == 0
assert batch.set_misflg(value=next(values_in)) is batch
assert batch.misflg() == next(values_out)
assert batch.jumpax() == 0
assert batch.set_jumpax(value=next(values_in)) is batch
assert batch.jumpax() == next(values_out)
assert batch.ncryst() == 0
assert batch.set_ncryst(value=next(values_in)) is batch
assert batch.ncryst() == next(values_out)
assert batch.lcrflg() == 0
assert batch.set_lcrflg(value=next(values_in)) is batch
assert batch.lcrflg() == next(values_out)
assert batch.ldtype() == 0
assert batch.set_ldtype(value=next(values_in)) is batch
assert batch.ldtype() == next(values_out)
assert batch.jsaxs() == 0
assert batch.set_jsaxs(value=next(values_in)) is batch
assert batch.jsaxs() == next(values_out)
assert batch.nbscal() == 0
assert batch.set_nbscal(value=next(values_in)) is batch
assert batch.nbscal() == next(values_out)
assert batch.ngonax() == 0
assert batch.set_ngonax(value=next(values_in)) is batch
assert batch.ngonax() == next(values_out)
assert batch.lbmflg() == 0
assert batch.set_lbmflg(value=next(values_in)) is batch
assert batch.lbmflg() == next(values_out)
assert batch.ndet() == 0
assert batch.set_ndet(value=next(values_in) % 3) is batch
assert batch.ndet() == next(values_out) % 3
assert batch.nbsetid() == 0
assert batch.set_nbsetid(value=next(values_in)) is batch
assert batch.nbsetid() == next(values_out)
assert list(batch.cell()) == [0]*6
assert batch.set_cell(flex.float(range(18,24))) is batch
assert list(batch.cell()) == list(range(18,24))
assert list(batch.umat()) == [0]*9
assert batch.set_umat(flex.float(range(16,25))) is batch
assert list(batch.umat()) == list(range(16,25))
assert list(batch.phixyz()) == [0]*6
assert batch.set_phixyz(flex.float(range(28,34))) is batch
assert list(batch.phixyz()) == list(range(28,34))
assert list(batch.crydat()) == [0]*12
assert batch.set_crydat(flex.float(range(26,38))) is batch
assert list(batch.crydat()) == list(range(26,38))
assert list(batch.datum()) == [0]*3
assert batch.set_datum(flex.float(range(26,29))) is batch
assert list(batch.datum()) == list(range(26,29))
assert batch.phistt() == 0
assert batch.set_phistt(value=next(values_in)) is batch
assert batch.phistt() == next(values_out)
assert batch.phiend() == 0
assert batch.set_phiend(value=next(values_in)) is batch
assert batch.phiend() == next(values_out)
assert list(batch.scanax()) == [0]*3
assert batch.set_scanax(flex.float(range(62,65))) is batch
assert list(batch.scanax()) == list(range(62,65))
assert batch.time1() == 0
assert batch.set_time1(value=next(values_in)) is batch
assert batch.time1() == next(values_out)
assert batch.time2() == 0
assert batch.set_time2(value=next(values_in)) is batch
assert batch.time2() == next(values_out)
assert batch.bscale() == 0
assert batch.set_bscale(value=next(values_in)) is batch
assert batch.bscale() == next(values_out)
assert batch.bbfac() == 0
assert batch.set_bbfac(value=next(values_in)) is batch
assert batch.bbfac() == next(values_out)
assert batch.sdbscale() == 0
assert batch.set_sdbscale(value=next(values_in)) is batch
assert batch.sdbscale() == next(values_out)
assert batch.sdbfac() == 0
assert batch.set_sdbfac(value=next(values_in)) is batch
assert batch.sdbfac() == next(values_out)
assert batch.phirange() == 0
assert batch.set_phirange(value=next(values_in)) is batch
assert batch.phirange() == next(values_out)
assert list(batch.e1()) == [0]*3
assert batch.set_e1(flex.float(range(71,74))) is batch
assert list(batch.e1()) == list(range(71,74))
assert list(batch.e2()) == [0]*3
assert batch.set_e2(flex.float(range(72,75))) is batch
assert list(batch.e2()) == list(range(72,75))
assert list(batch.e3()) == [0]*3
assert batch.set_e3(flex.float(range(73,76))) is batch
assert list(batch.e3()) == list(range(73,76))
assert list(batch.source()) == [0]*3
assert batch.set_source(flex.float(range(74,77))) is batch
assert list(batch.source()) == list(range(74,77))
assert list(batch.so()) == [0]*3
assert batch.set_so(flex.float(range(75,78))) is batch
assert list(batch.so()) == list(range(75,78))
assert batch.alambd() == 0
assert batch.set_alambd(value=next(values_in)) is batch
assert batch.alambd() == next(values_out)
assert batch.delamb() == 0
assert batch.set_delamb(value=next(values_in)) is batch
assert batch.delamb() == next(values_out)
assert batch.delcor() == 0
assert batch.set_delcor(value=next(values_in)) is batch
assert batch.delcor() == next(values_out)
assert batch.divhd() == 0
assert batch.set_divhd(value=next(values_in)) is batch
assert batch.divhd() == next(values_out)
assert batch.divvd() == 0
assert batch.set_divvd(value=next(values_in)) is batch
assert batch.divvd() == next(values_out)
assert list(batch.dx()) == [0]*2
assert batch.set_dx(flex.float(range(84,86))) is batch
assert list(batch.dx()) == list(range(84,86))
assert list(batch.theta()) == [0]*2
assert batch.set_theta(flex.float(range(85,87))) is batch
assert list(batch.theta()) == list(range(85,87))
assert list(batch.detlm()) == [0]*8
assert batch.set_detlm(flex.float(range(86,94))) is batch
assert list(batch.detlm()) == list(range(86,94))
if (not verbose): out = StringIO()
batch.show(out=out)
if (not verbose and i_batch == 3):
batch_3_show = out
assert not show_diff(out.getvalue(), """\
batch number: 4
batch title: Hello MTZHello MTZHello MTZHello MTZHello MTZHello MTZHello MTZHello M
names of the three axes: ['what', 'ever', 'this_is']
type of orientation block: 82
refinement flags for cell: [3, 4, 5, 6, 7, 8]
number of phixyz used (0, 1, or 2): 83
reciprocal axis closest to rotation axis: 84
crystal number: 85
mosaicity model: 0 = isotropic, 1 = anisotropic: 86
type of data: 2D (1), 3D (2), or Laue (3): 87
goniostat scan axis number: 88
number of batch scales & Bfactors (0 if unset): 89
number of goniostat axes: 90
flag for type of beam info: 91
0: for alambd, delamb; 1: also delcor, divhd, divvd
number of detectors (current maximum 2): 2
dataset id: 93
cell dimensions: [18.0, 19.0, 20.0, 21.0, 22.0, 23.0]
orientation matrix U: [16.0, 17.0, 18.0, 19.0, 20.0, 21.0, 22.0, 23.0, 24.0]
in Fortranic order, i.e. U(1,1), U(2,1) ...
missetting angles at beginning and end of oscillation: [28.0, 29.0, 30.0, 31.0, 32.0, 33.0]
mosaicity: [26.0, 27.0, 28.0, 29.0, 30.0, 31.0, 32.0, 33.0, 34.0, 35.0, 36.0, 37.0]
datum values of goniostat axes: [26.0, 27.0, 28.0]
start of phi relative to datum: 94.0
end of phi relative to datum: 95.0
rotation axis in lab frame: [62.0, 63.0, 64.0]
start time: 96.0
stop time: 97.0
batch scale: 98.0
batch temperature factor: 99.0
sd bscale: 100.0
sd bbfac: 101.0
phi range: 102.0
vectors ("Cambridge" laboratory axes) defining ngonax goniostat axes:
vector 1: [71.0, 72.0, 73.0]
vector 2: [72.0, 73.0, 74.0]
vector 3: [73.0, 74.0, 75.0]
idealised source vector: [74.0, 75.0, 76.0]
source vector: [75.0, 76.0, 77.0]
wavelength (A): 103.0
dispersion (deltalambda / lambda): 104.0
correlated component: 105.0
horizontal beam divergence: 106.0
vertical beam divergence: 107.0
xtal to detector distance: [84.0, 85.0]
detector tilt angle: [85.0, 86.0]
min & max values of detector coords (pixels): [86.0, 87.0, 88.0, 89.0, 90.0, 91.0, 92.0, 93.0]
""")
mtz_object.write(file_name="tmp_iotbx_mtz_ext.mtz")
restored = mtz.object(file_name="tmp_iotbx_mtz_ext.mtz")
assert restored.n_batches() == 10
if (not verbose): out = StringIO()
restored.batches()[3].show(out=out)
if (not verbose):
assert out.getvalue() == batch_3_show.getvalue()
for i_trial in range(10):
perm = flex.random_permutation(size=10)
for batch,new_num in zip(mtz_object.batches(), perm):
batch.set_num(value=new_num+1)
mtz_object.sort_batches()
assert [batch.num() for batch in mtz_object.batches()] \
== [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for batch,nbsetid in zip(mtz_object.batches(), [0,0,1,0,1,1,1,0,1,1]):
batch.set_nbsetid(value=nbsetid)
for crystal in mtz_object.crystals():
for dataset in crystal.datasets():
assert dataset.id() in [0,1]
assert dataset.n_batches() == [4,6][dataset.id()]
assert dataset.batches().size() == dataset.n_batches()
for batch in dataset.batches():
assert batch.nbsetid() == dataset.id()
batch = dataset.add_batch()
assert batch.nbsetid() == dataset.id()
assert dataset.n_batches() == [5,7][dataset.id()]
# quick test for delete_reflection
assert mtz_object.n_reflections() > 3
mx = mtz_object.extract_miller_indices()[1]
assert mx in mtz_object.extract_miller_indices()
mtz_object.delete_reflection(1)
assert mx not in mtz_object.extract_miller_indices()
# test for delete_reflections
isel = flex.size_t((1,0))
try: mtz_object.delete_reflections(isel)
except RuntimeError: pass
else: raise Exception_expected
isel = flex.size_t((0,2))
mx = [mtz_object.extract_miller_indices()[i] for i in isel]
for m in mx:
assert m in mtz_object.extract_miller_indices()
mtz_object.delete_reflections(isel)
for m in mx:
assert m not in mtz_object.extract_miller_indices()
def exercise():
    """Top-level test driver: run every MTZ exercise, honouring CLI flags."""
    if (mtz is None):
        # ccp4io is an optional build component; without it the mtz module is absent.
        print("Skipping iotbx/mtz/tst_ext.py: ccp4io not available")
        return
    # --verbose/--forever/--walk/--full select optional extra test passes.
    command_line = (option_parser()
        .option(None, "--verbose",
            action="store_true")
        .option(None, "--forever",
            action="store_true",
            help="Infinite loop, for detection of memory leaks")
        .option(None, "--walk",
            action="store",
            type="string",
            metavar="ROOT_DIR",
            help="Find and process all MTZ files under ROOT_DIR")
        .option(None, "--full",
            action="store_true",
            help="Visit all MTZ files")
    ).process(args=sys.argv[1:])
    # Core test suite, always run.
    exercise_read_corrupt()
    exercise_basic()
    exercise_setting_nref_etc()
    exercise_modifiers(verbose=command_line.options.verbose)
    # NOTE(review): exercise_extract_any() is invoked and its result is then
    # called with file_name -- presumably it returns a callable; confirm
    # against its definition.
    for file_name in command_line.args:
        exercise_extract_any()(file_name=file_name, out=sys.stdout)
    if (command_line.options.walk is not None):
        exercise_walk(
            root_dir=command_line.options.walk,
            full=command_line.options.full,
            verbose=command_line.options.verbose)
    # --forever: loop until interrupted, for leak hunting.
    while (command_line.options.forever):
        exercise_basic()
        exercise_modifiers()
def run():
    """Entry point: run the full exercise suite and report success."""
    exercise()
    print("OK")

if (__name__ == "__main__"):
    run()
|
986,028 | 2dd4abe97a3102038001e17ab1965f5d30e6aa61 | #!/Users/amansour/.brew/Cellar/python/3.7.4/bin/python3.7
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
def display_plot(data, x, y, cost_history, theta0_history, theta1_history):
    """Render a 2x2 dashboard: data + fitted line, and the cost / intercept /
    slope traces recorded during training.

    Parameters
    ----------
    data : ndarray, shape (n, 2)
        Raw (km, price) samples.
    x, y : array-like
        De-scaled inputs and model predictions for the fitted line.
    cost_history, theta0_history, theta1_history : list
        Values sampled every 10 iterations by learning().
    """
    # Configure the 2x2 figure.
    fig, axs = plt.subplots(2, 2, figsize=(8, 8))
    # Say, "the default sans-serif font is COMIC SANS"
    plt.rcParams['font.sans-serif'] = "Comic Sans MS"
    # Then, "ALWAYS use sans-serif fonts"
    plt.rcParams['font.family'] = "sans-serif"
    axs[0, 0].scatter(data[:, 0], data[:, 1], color='blue', label="data")
    axs[0, 0].plot(x, y, '-r', label="line")
    axs[0, 0].set_title("Price of cars based on their metrage", fontsize = 15, y=1.02, color = 'green', fontweight = 'bold')
    axs[0, 0].legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
    axs[0, 0].set_xlabel("Km", fontsize=14, fontweight = 'bold')
    axs[0, 0].set_ylabel("Price", fontsize=14, fontweight = 'bold')
    # Derive the iteration axis from the history length instead of assuming
    # exactly 10 samples (the original hard-coded np.arange(10)*10, which
    # breaks as soon as `iterations` is not 100). Histories are sampled
    # every 10 iterations, hence the *10.
    iteration_axis = np.arange(len(cost_history)) * 10
    # plot cost
    axs[0, 1].plot(iteration_axis, cost_history, '-r', label="cost")
    axs[0, 1].set_title("Cost per iteration", fontsize=15, y=1.02, color='green', fontweight='bold')
    axs[0, 1].legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
    axs[0, 1].set_xlabel("Iteration", fontsize=14, fontweight='bold')
    axs[0, 1].set_ylabel("Cost", fontsize=14, fontweight='bold')
    # plot theta0 (y-intercept) history
    axs[1, 0].plot(np.arange(len(theta0_history)) * 10, theta0_history, '-r', label="Y-intercept")
    axs[1, 0].set_title("Y-intercept per iteration", fontsize=15, y=1.02, color='green', fontweight='bold')
    axs[1, 0].legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
    axs[1, 0].set_xlabel("Iteration", fontsize=14, fontweight='bold')
    axs[1, 0].set_ylabel("Y-intercept", fontsize=14, fontweight='bold')
    # plot theta1 (slope) history
    axs[1, 1].plot(np.arange(len(theta1_history)) * 10, theta1_history, '-r', label="Slope")
    axs[1, 1].set_title("Slope per iteration", fontsize=15, y=1.02, color='green', fontweight='bold')
    axs[1, 1].legend(fancybox=True, framealpha=1, shadow=True, borderpad=1)
    axs[1, 1].set_xlabel("Iteration", fontsize=14, fontweight='bold')
    axs[1, 1].set_ylabel("Slope", fontsize=14, fontweight='bold')
    # The original called tight_layout() twice back to back; once is enough.
    plt.tight_layout()
    plt.show()
def rmse(size, y_pred, y_actual):
    """Root-mean-squared error between predictions and observations.

    *size* is the number of training examples and is used as the divisor.
    """
    total_squared_error = np.sum((y_pred - y_actual) ** 2)
    return np.sqrt(total_squared_error / size)
def determintion_coeff(y_pred, y_actual):
    """Coefficient of determination (R^2 score) of the predictions.

    R^2 = 1 - SS_residual / SS_total, where SS_total is taken about the
    mean of the observed values.
    """
    residual_sum_squares = np.sum((y_pred - y_actual) ** 2)
    total_sum_squares = np.sum((y_actual - np.mean(y_actual)) ** 2)
    return 1 - (residual_sum_squares / total_sum_squares)
def cost_function(theta, x, y, size):
    """Mean-squared-error cost (halved) for the linear model theta[0] + theta[1]*x."""
    # Divide each squared residual before summing, matching the original
    # element-wise order of operations.
    per_sample_cost = ((theta[0] + x * theta[1] - y) ** 2) / (2 * size)
    return np.sum(per_sample_cost)
def scale_data(x):
    """Standardize *x* to zero mean and unit standard deviation."""
    centered = x - np.mean(x)
    return centered / np.std(x)
def descale_data(x, x_ref):
    """Undo scale_data: map standardized *x* back to the units of *x_ref*."""
    spread = np.std(x_ref)
    center = np.mean(x_ref)
    return x * spread + center
def predict(x, theta):
    """Linear model prediction: intercept (theta[0]) plus slope (theta[1]) times x."""
    intercept = theta[0]
    slope = theta[1]
    return intercept + slope * x
def learning(x, y, size, alpha, iterations):
    """Fit a 1-D linear model by batch gradient descent.

    Returns the final theta (intercept, slope) plus the cost, intercept and
    slope values sampled every 10th iteration.
    """
    theta = np.zeros(2)
    cost_history, theta0_history, theta1_history = [], [], []
    for iteration in range(iterations):
        # Accumulate the batch gradient over all samples; the prediction
        # error is computed once per sample and reused for both components.
        grad_intercept = 0.0
        grad_slope = 0.0
        for j in range(size):
            error = predict(x[j], theta) - y[j]
            grad_intercept += error
            grad_slope += error * x[j]
        theta[0] -= (grad_intercept * alpha) / size
        theta[1] -= (grad_slope * alpha) / size
        # Sample the training curves every 10 iterations for plotting.
        if iteration % 10 == 0:
            cost_history.append(cost_function(theta, x, y, size))
            theta0_history.append(theta[0])
            theta1_history.append(theta[1])
    return theta, cost_history, theta0_history, theta1_history
if __name__ == '__main__':
    # Read data from file 'data.csv' (expected columns: km, price).
    data = pd.read_csv("data.csv")
    # convert dataframe to matrix
    data = np.array(data)
    size = np.shape(data)[0]
    # # plt.scatter(data[:, 0], data[:, 1], color='blue')
    # Train on standardized features so the fixed learning rate is stable.
    iterations = 100
    learning_rate = 0.3
    x = scale_data(data[:, 0])
    y = scale_data(data[:, 1])
    theta, cost_history, theta0_history, theta1_history = learning(x, y, size, learning_rate, iterations)
    # Predict in scaled space, then map both axes back to original units.
    y = predict(x, theta)
    x = descale_data(x, data[:, 0])
    y = descale_data(y, data[:, 1])
    # Performance metrics against the observed prices.
    # NOTE(review): this rebinding shadows the rmse() function; harmless here
    # only because rmse() is never called again afterwards.
    rmse = rmse(size, y, data[:, 1])
    print("RMSE PERFORMANCE = {:.3f}".format(rmse))
    print("R^2 = {:.3f}".format(determintion_coeff(data[:, 1], y)))
    # Plot
    display_plot(data, x, y, cost_history, theta0_history, theta1_history)
    # Reconstruct the de-scaled line's slope/intercept from two points on it
    # and persist them for the prediction program.
    m = (y[0] - y[1]) / (x[0] - x[1])
    b = m * x[0] * -1 + y[0]
    theta = [b, m]
    np.savetxt("theta.txt", theta)
986,029 | 69b54afc5ae3cd3a8b4374470189d0b9aa0f9dd6 | # External modules
import numpy as np
# Local modules
from .. import geo_utils
from .baseConstraint import GeometricConstraint
class GearPostConstraint(GeometricConstraint):
    """
    Constraint used to size and position a landing-gear post.

    Two scalar constraint values are produced per evaluation:

    - ``<name>_thick``: distance between the ``up`` and ``down`` points
      (optionally scaled by its initial value when ``thickScaled`` is True),
      bounded by ``thickLower``/``thickUpper``.
    - ``<name>_MAC``: chordwise post location expressed as a fraction of the
      mean aerodynamic chord, bounded by ``MACFracLower``/``MACFracUpper``.

    The parameter list is explained in the corresponding add-constraint
    method of the DVConstraints class (the original comment referenced
    addVolumeConstaint(); presumably addGearPostConstraint() -- verify).
    """

    def __init__(
        self,
        name,
        wimpressCalc,
        up,
        down,
        thickLower,
        thickUpper,
        thickScaled,
        MACFracLower,
        MACFracUpper,
        DVGeo,
        addToPyOpt,
        compNames,
    ):
        # The base class gets no point set of its own; this constraint
        # manages its two endpoint coordinates directly.
        super().__init__(name, None, None, None, None, DVGeo, addToPyOpt)
        self.wimpress = wimpressCalc  # provides 'MAC' and 'xLEMAC' values/sensitivities
        self.thickLower = thickLower
        self.thickUpper = thickUpper
        self.thickScaled = thickScaled  # if True, thickness is reported relative to D0
        self.MACFracLower = MACFracLower
        self.MACFracUpper = MACFracUpper
        # Row 0 is the 'up' endpoint, row 1 the 'down' endpoint of the post.
        self.coords = np.array([up, down])
        # First thing we can do is embed the coordinates into DVGeo
        # with the name provided:
        self.DVGeo.addPointSet(self.coords, self.name, compNames=compNames)
        # Compute the reference length (initial post length, used for scaling)
        self.D0 = np.linalg.norm(self.coords[0] - self.coords[1])

    def evalFunctions(self, funcs, config):
        """Compute the thickness and MAC-fraction values into *funcs*."""
        # Update the gear post locations
        self.coords = self.DVGeo.update(self.name, config=config)
        # Compute the thickness constraint
        D = np.linalg.norm(self.coords[0] - self.coords[1])
        if self.thickScaled:
            D = D / self.D0
        # Compute the values we need from the wimpress calc
        wfuncs = {}
        self.wimpress.evalFunctions(wfuncs)
        # Now the constraint value is the chordwise midpoint of the post,
        # expressed as a fraction of MAC aft of the MAC leading edge.
        postLoc = 0.5 * (self.coords[0, 0] + self.coords[1, 0])
        locCon = (postLoc - wfuncs["xLEMAC"]) / wfuncs["MAC"]
        # Final set of two constraints
        funcs[self.name + "_thick"] = D
        funcs[self.name + "_MAC"] = locCon

    def evalFunctionsSens(self, funcsSens, config):
        """
        Evaluate the sensitivity of the functions this object has and
        place in the funcsSens dictionary

        Parameters
        ----------
        funcsSens : dict
            Dictionary to place function values
        """
        nDV = self.DVGeo.getNDV()
        if nDV > 0:
            wfuncs = {}
            self.wimpress.evalFunctions(wfuncs)
            wSens = {}
            self.wimpress.evalFunctionsSens(wSens)
            # Accumulate the derivative into p1b and p2b
            # (reverse-mode seeds of the distance w.r.t. each endpoint)
            p1b, p2b = geo_utils.eDist_b(self.coords[0, :], self.coords[1, :])
            if self.thickScaled:
                p1b /= self.D0
                p2b /= self.D0
            funcsSens[self.name + "_thick"] = self.DVGeo.totalSensitivity(
                np.array([[p1b, p2b]]), self.name, config=config
            )
            # And now we need the sensitivity of the conLoc calc.
            # Only the x (chordwise) component of each endpoint enters postLoc.
            p1b[:] = 0
            p2b[:] = 0
            p1b[0] += 0.5 / wfuncs["MAC"]
            p2b[0] += 0.5 / wfuncs["MAC"]
            tmpSens = self.DVGeo.totalSensitivity(np.array([[p1b, p2b]]), self.name, config=config)
            # And we need the sensitivity of conLoc wrt 'xLEMAC' and 'MAC',
            # via the quotient rule on (postLoc - xLEMAC) / MAC.
            postLoc = 0.5 * (self.coords[0, 0] + self.coords[1, 0])
            for key in wSens["xLEMAC"]:
                tmpSens[key] -= wSens["xLEMAC"][key] / wfuncs["MAC"]
                tmpSens[key] += wfuncs["xLEMAC"] / wfuncs["MAC"] ** 2 * wSens["MAC"][key]
                tmpSens[key] -= postLoc / wfuncs["MAC"] ** 2 * wSens["MAC"][key]
            funcsSens[self.name + "_MAC"] = tmpSens

    def addConstraintsPyOpt(self, optProb):
        """
        Add the constraints to pyOpt, if the flag is set
        """
        if self.addToPyOpt:
            optProb.addCon(
                self.name + "_thick", lower=self.thickLower, upper=self.thickUpper, wrt=self.DVGeo.getVarNames()
            )
            optProb.addCon(
                self.name + "_MAC", lower=self.MACFracLower, upper=self.MACFracUpper, wrt=self.DVGeo.getVarNames()
            )

    def writeTecplot(self, handle):
        # Tecplot visualization is intentionally not provided for this constraint.
        raise NotImplementedError()
|
986,030 | 27f5ae9f403104800b20185a378421cb22a91265 | from django.shortcuts import render, redirect
from django.contrib.auth.models import User
from django.contrib.auth import login, logout, authenticate
from django.contrib.auth.forms import UserCreationForm
# Create your views here.
def home(request):
    """Index view: handles login, word-finder queries and logout on one page.

    The branch taken depends on which submit button was POSTed
    (``login``, ``wordsfinder`` or ``logout``); with no recognised button
    the bare index page is rendered.
    """
    if request.POST.get('login'):
        username = request.POST['username']
        password = request.POST['password']
        user = authenticate(username=username, password=password)
        if user is not None:
            login(request, user)
        else:
            # Failed logins are only reported to stdout; the page re-renders below.
            print('Error logging in')
    elif request.POST.get('wordsfinder'):
        # Use a context manager so the dictionary file handle is always closed
        # (the original relied on the garbage collector to close it).
        with open("wordsfinder/dictionary.txt", "r") as dict_file:
            dictionary = dict_file.read().split('\n')
        # The letter pool holds both cases of every input letter so matching
        # works regardless of how dictionary entries are cased.
        letters = list(request.POST['word'].lower()) + list(request.POST['word'].upper())
        wordInput = request.POST['word']
        wordList = []
        for word in dictionary:
            if len(word) > len(letters):
                continue
            # A word qualifies when the pool covers every character's count.
            if all(letters.count(char) >= word.count(char) for char in word):
                wordList.append(word)
        return render(request, 'wordsfinder/index.html', {
            'words': wordList,
            'word': wordInput
        })
    elif request.POST.get('logout'):
        logout(request)
    return render(request, 'wordsfinder/index.html', {
    })
def signup(request):
    """Sign-up view: register a new user and log them straight in.

    GET (or any non-signup POST) renders an empty form; a valid signup POST
    creates the user, authenticates with the submitted credentials, logs in
    and redirects to the index page; an invalid POST re-renders the bound
    form with its errors.
    """
    if not request.POST.get('signup'):
        return render(request, 'wordsfinder/signup.html', {'form': UserCreationForm()})
    form = UserCreationForm(request.POST)
    if form.is_valid():
        form.save()
        user = authenticate(
            username=form.cleaned_data.get('username'),
            password=form.cleaned_data.get('password1'),
        )
        login(request, user)
        return redirect('/')
    return render(request, 'wordsfinder/signup.html', {'form': form})
# def getwords(request):
|
986,031 | d9619ab8fc76089a68ed83345549f387d2800c2a | # -*- coding: utf-8 -*-
# mors alfabesi çözümü
"""
mors= {
'A': '.-', 'B': '-...', 'C': '-.-.', 'D': '-..', 'E': '.', 'F': '..-.',
'G': '--.', 'H': '....', 'I': '..', 'J': '.---', 'K': '-.-', 'L': '.-..',
'M': '--', 'N': '-.', 'O': '---', 'P': '.--.', 'Q': '--.-', 'R': '.-.',
'S': '...', 'T': '-', 'U': '..-', 'V': '...-', 'W': '.--', 'X': '-..-',
'Y': '-.--', 'Z': '--..', ' ': ' ', '0': '-----',
'1': '.----', '2': '..---', '3': '...--', '4': '....-', '5': '.....',
'6': '-....', '7': '--...', '8': '---..', '9': '----.',
'&': '.-...', "'": '.----.', '@': '.--.-.', ')': '-.--.-', '(': '-.--.',
':': '---...', ',': '--..--', '=': '-...-', '!': '-.-.--', '.': '.-.-.-',
'-': '-....-', '+': '.-.-.', '"': '.-..-.', '?': '..--..', '/': '-..-.'
}
word = input("enter a message: ").upper()
my_space = ""
for i in word:
if i != " ":
my_space += mors[i] + " "
else:
my_space += " "
print(my_space)
"""
# Write a string that contains only `(`, `)`, `{`, `}`, `[` and `]`:
"""
a=input("Write a string that contains only `(`, `)`, `{`, `}`, `[` and `]`:")
b=[]
c=("{[()]}")
left=set("{[(")
dict={"(" : ")", "[" : "]", "{" : "}"}
stack=[]
for i in a:
if i in c:
b.append(i)
print(b)
for i in b:
if i in left:
stack.append(i)
elif stack and i == dict[stack[-1]]:
stack.pop()
else:
print(False)
if stack==[]:
print(True)
# joseph hoca çözüm
s = "{[[()]]}"
def is_valid(s):
while "()" in s or "[]" in s or "{}" in s:
s = s.replace("()", "").replace("[]","").replace("{}","")
return s == ""
print(is_valid("{[]}"))
print(is_valid("{[[]}{"))
"""
# max profit
"""
def buy_and_sell(arr):
max_profit = 0
for i in range(len(arr) - 1):
for j in range(i, len(arr)):
buy_price, sell_price = arr[i], arr[j]
max_profit = max(max_profit, sell_price - buy_price)
return max_profit
print(buy_and_sell([1,15,30]))
"""
"""
def parrot_trouble(talking,hour):
if 0<hour<24:
if talking == True and (hour<6 or hour>21):
return True
else:
return False
else:
return f"your hour value is wrong, enter hour 0-23"
print(parrot_trouble(True, 15))
print(parrot_trouble(True, 32))
"""
# Squares of 0..23, built with a comprehension.
liste = [i ** 2 for i in range(24)]
print(liste)
986,032 | 5f34c5c9975ff30bf382455f800b64c35067ea9a | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def detectCycle(self, head: ListNode) -> ListNode:
        """Return the node where a cycle begins, or None if the list is acyclic.

        Floyd's algorithm: advance slow (x1) and fast (x2) pointers until
        they meet inside the cycle, then walk a third pointer from the head;
        it meets the fast pointer exactly at the cycle's entry node.
        """
        slow = head
        fast = head
        # Phase 1: look for a meeting point inside the cycle (if any).
        while True:
            # Reaching the end of the list proves there is no cycle.
            if fast is None or fast.next is None or fast.next.next is None:
                return None
            slow = slow.next
            fast = fast.next.next
            if fast == slow:
                break
        # Phase 2: the entry node is where a head-started pointer meets fast.
        finder = head
        while finder != fast:
            finder = finder.next
        return finder
|
986,033 | 36f0e1654b904ff60d290588997361d27ed96be1 | def main():
    """Demonstrate str.format(): positional reuse, alignment, padding, floats."""
    # Sample values used by every example below.
    first_name = "Anna"
    last_name = "Jones"
    age = 34
    # {0} can be reused: first_name fills both the greeting and the full name.
    msg = 'Hi {0}, your full name is {0} {1} and you are {2} years old'.format(first_name, last_name, age)
    print(msg)
    # We can also align text (right-align within a 10-character field).
    msg = '{:>10}'.format(first_name,)
    print(msg)
    # And we can pad (left-align, field padded to 10 characters).
    msg = '{:10}!'.format(first_name,)
    print(msg)
    # Format floating point numbers
    # In this case 6 characters padding with 0, two decimal places.
    msg = '{:06.2f}'.format(3.141592653589793,)
    print(msg)
    # You can also do things you can't do with the old style like
    # Center text (^ centers within a 16-character field).
    msg = '>{:^16}<'.format(first_name)
    print(msg)

if __name__ == '__main__':
    main()
986,034 | a836f1b68cd25d844966a13807f4814bce488630 | # aardvark (c) 2016 phreaklets
# Inspired by:
# - http://askldjd.com/2014/01/15/a-reasonably-fast-python-ip-sniffer/
# - http://www.binarytides.com/python-packet-sniffer-code-linux/
import getopt
import datetime
import time
import sys
import requests
import simplejson
from netaddr import IPAddress
import logging
import logging.handlers as handlers
# turn off those irritating IPv6 warning messages
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
from contextlib import contextmanager
import datetime
import sqlite3
import socket, struct, os, array
from scapy.all import ETH_P_ALL
from scapy.all import select
from scapy.all import MTU
from scapy.all import load_module
class SizedTimedRotatingFileHandler(handlers.TimedRotatingFileHandler):
    """
    Taken from here: http://www.blog.pythonlibrary.org/2014/02/11/python-how-to-create-rotating-logs/
    Handler for logging to a set of files, which switches from one file
    to the next when the current file reaches a certain size, or at certain
    timed intervals
    """
    def __init__(self, filename, mode='a', maxBytes=0, backupCount=0, encoding=None,
                 delay=0, when='h', interval=1, utc=False):
        # If rotation/rollover is wanted, it doesn't make sense to use another
        # mode. If for example 'w' were specified, then if there were multiple
        # runs of the calling application, the logs from previous runs would be
        # lost if the 'w' is respected, because the log file would be truncated
        # on each run.
        if maxBytes > 0:
            mode = 'a'
        # Delegate the time-based rotation entirely to the base class.
        handlers.TimedRotatingFileHandler.__init__(
            self, filename, when, interval, backupCount, encoding, delay, utc)
        self.maxBytes = maxBytes  # size threshold in bytes; 0 disables size rotation
    def shouldRollover(self, record):
        """
        Determine if rollover should occur.
        Basically, see if the supplied record would cause the file to exceed
        the size limit we have.
        """
        if self.stream is None:  # delay was set...
            self.stream = self._open()
        if self.maxBytes > 0:  # are we rolling over?
            msg = "%s\n" % self.format(record)
            self.stream.seek(0, 2)  # due to non-posix-compliant Windows feature
            if self.stream.tell() + len(msg) >= self.maxBytes:
                return 1
        # Fall back to the base class's time-based criterion.
        t = int(time.time())
        if t >= self.rolloverAt:
            return 1
        return 0
# logging
def log_setup(log_file=None):
    """Configure the root logger for the process.

    With *log_file* set, records go to a handler that rotates both by size
    (50 MB) and daily; otherwise they go to stdout. Timestamps are rendered
    in UTC with a syslog-like format. Side effect: mutates the global
    logging configuration.
    """
    formatter = logging.Formatter(
        '%(asctime)s aardvark [%(process)d]: %(message)s',
        '%b %d %H:%M:%S')
    formatter.converter = time.gmtime  # if you want UTC time
    logger = logging.getLogger()
    if log_file:
        # 52428800 bytes = 50 MB cap; interval=86400 s = daily; keep 5 backups.
        log_handler=SizedTimedRotatingFileHandler(
            log_file, maxBytes=52428800, backupCount=5,
            when='s',interval=86400,
            #encoding='bz2', # uncomment for bz2 compression
            )
    else:
        log_handler=logging.StreamHandler(sys.stdout)
    log_handler.setFormatter(formatter)
    logger.addHandler(log_handler)
    logger.setLevel(logging.INFO)
    # Silence chatty HTTP client libraries.
    logging.getLogger("requests").setLevel(logging.WARNING)
    logging.getLogger("urllib3").setLevel(logging.WARNING)
class DbConnector:
    """Thin wrapper around a SQLite database that stores observed hosts.

    A single ``aardvark`` table keys rows by source IP address and keeps the
    Ethernet source/vendor, destination IP, ports, TTL, p0f fingerprint and
    first/last-seen timestamps for each host.
    """

    # Shared default; __init__ replaces it with a real connection.
    # NOTE(review): this is a class attribute, so a second instance in the
    # same process would skip reconnecting -- confirm single-instance usage.
    conn = None

    @contextmanager
    def get_cursor(self):
        """Yield a cursor, committing on success and always closing it."""
        cursor = self.conn.cursor()
        try:
            yield cursor
            self.conn.commit()
        finally:
            cursor.close()

    def initDB(self, name):
        """Create the ``aardvark`` table in a freshly created database file."""
        logging.debug("Initializing DB: %s" % name)
        try:
            with self.get_cursor() as cur:
                cur.execute('''CREATE TABLE aardvark(srcip text, ethsrc text, dstip text, vendor text, srcport integer, dstport integer, ttl text, pof text, timefirstseen text, timelastseen text)''')
        # 'except E as e' replaces the Python-2-only 'except E, e' form;
        # it is valid on Python 2.6+ and required on Python 3.
        except sqlite3.Error as e:
            logging.debug("initDB failed: %s" % e)

    def __init__(self, dbname):
        """Open *dbname*, creating and initializing it if it does not exist."""
        logging.debug("Connecting to DB %s" % dbname)
        if (self.conn is None):
            try:
                if not os.path.isfile(dbname):
                    logging.debug("DB does not exist")
                    self.conn = sqlite3.connect(dbname)
                    self.initDB(dbname)
                else:
                    logging.debug("DB already exists")
                    self.conn = sqlite3.connect(dbname)
            except sqlite3.Error as e:
                logging.error("DB connection failed: %s" % e)

    def isipaddrindb(self, ipaddr):
        """Return True/False whether *ipaddr* is a known source IP.

        On a database error the exception is logged and None (falsy) is
        returned, preserving the original best-effort behaviour.
        """
        try:
            with self.get_cursor() as cur:
                cur.execute("select srcip from aardvark where srcip=:ipaddress", {"ipaddress":str(ipaddr)})
                row = cur.fetchone()
                if row is not None:
                    logging.debug("IP source address %s is in the database" % ipaddr)
                    return True
                else:
                    logging.debug("IP source address %s is not in the database" % ipaddr)
                    return False
        except sqlite3.Error as e:
            logging.debug("isipaddrindb failed: %s" % e)

    def addhost(self, ethsrc, vendor, srcip, dstip, sport, dport, pof):
        """Insert a newly observed host; both timestamps start at 'now'."""
        with self.get_cursor() as cur:
            cur.execute("INSERT INTO aardvark VALUES (?,?,?,?,?,?,?,?,?,?)", (str(srcip), str(ethsrc), str(dstip), str(vendor), sport, dport, "", str(pof), datetime.datetime.now(), datetime.datetime.now()))

    def addttl(self, ipaddr, ttl):
        """Record the observed IP TTL for an existing host row."""
        with self.get_cursor() as cur:
            cur.execute("UPDATE aardvark SET ttl=? WHERE srcip=?", (str(ttl), str(ipaddr)))

    def refreshtimestamp(self, ipaddr):
        """Bump the last-seen timestamp for *ipaddr* to 'now'."""
        with self.get_cursor() as cur:
            cur.execute("UPDATE aardvark SET timelastseen=? WHERE srcip=?", (datetime.datetime.now(), str(ipaddr)))

    def close_conn(self):
        """Close the underlying SQLite connection."""
        self.conn.close()
class IPSniff:
    """Promiscuous L2 sniffer dispatching IPv4/TCP frames to callbacks.

    Python 2 code: packet payloads are byte strings of type ``str``,
    hence the ord() calls in eth_addr().
    """

    def __init__(self, interface_name, on_ip_incoming, on_ip_outgoing):
        """Bind a raw AF_PACKET socket to *interface_name*.

        Both callbacks have the signature
        (src_eth_addr, ip_src, ip_dst, src_port, dst_port, ttl, frame)
        and may be None.
        """
        self.interface_name = interface_name
        self.on_ip_incoming = on_ip_incoming
        self.on_ip_outgoing = on_ip_outgoing
        # The raw in (listen) socket is a L2 raw socket that listens
        # for all packets going through a specific interface.
        self.ins = socket.socket(
            socket.AF_PACKET, socket.SOCK_RAW, socket.htons(ETH_P_ALL))
        # Large receive buffer so traffic bursts are not dropped.
        self.ins.setsockopt(socket.SOL_SOCKET, socket.SO_RCVBUF, 2**30)
        self.ins.bind((self.interface_name, ETH_P_ALL))

    def __process_ipframe(self, pkt_type, ip_header, src_eth_addr, payload):
        """Decode one IPv4 frame; for TCP, route it to the right callback."""
        # Extract the 20 bytes IP header, ignoring the IP options
        fields = struct.unpack("!BBHHHBBHII", ip_header)
        version_ihl = fields[0]
        version = version_ihl >> 4
        ihl = version_ihl & 0xF
        iph_length = ihl * 4  # IP header length in bytes
        dummy_hdrlen = fields[0] & 0xf
        iplen = fields[2]
        ttl = fields[5]
        protocol = fields[6]
        if protocol == 6:  # TCP only
            # payload starts at the IP header, so these offsets are
            # relative to the beginning of the IP packet.
            ip_src = payload[12:16]
            ip_dst = payload[16:20]
            ip_frame = payload[0:iplen]
            t = iph_length
            tcp_header = payload[t:t+20]
            #now unpack them :)
            tcph = struct.unpack('!HHLLBBHHH' , tcp_header)
            src_port = tcph[0]
            dst_port = tcph[1]
            if pkt_type == socket.PACKET_OUTGOING:
                if self.on_ip_outgoing is not None:
                    self.on_ip_outgoing(src_eth_addr, ip_src, ip_dst, src_port, dst_port, ttl, ip_frame)
            else:
                if self.on_ip_incoming is not None:
                    self.on_ip_incoming(src_eth_addr, ip_src, ip_dst, src_port, dst_port, ttl, ip_frame)

    #Convert a string of 6 characters of ethernet address into a colon separated hex string
    def eth_addr (self, a) :
        b = "%.2x:%.2x:%.2x:%.2x:%.2x:%.2x" % (ord(a[0]) , ord(a[1]) , ord(a[2]), ord(a[3]), ord(a[4]) , ord(a[5]))
        return b

    def recv(self):
        """Receive loop: p0f-fingerprint each packet, then decode IPv4."""
        # Presumably scapy's p0f module: prnp0f() prints the passive OS
        # fingerprint of each packet -- TODO confirm.
        load_module("p0f")
        while True:
            pkt, sa_ll = self.ins.recvfrom(MTU)
            prnp0f(pkt)
            # NOTE(review): `type` here is the *builtin*, not the packet
            # type from recvfrom (that is sa_ll[2]), so the first test can
            # never be true.  Net effect: when on_ip_outgoing is None every
            # packet is skipped; otherwise every packet falls through.
            if type == socket.PACKET_OUTGOING and self.on_ip_outgoing is None:
                continue
            elif self.on_ip_outgoing is None:
                continue
            if len(pkt) <= 0:
                break
            eth_header = struct.unpack("!6s6sH", pkt[0:14])
            dummy_eth_protocol = socket.ntohs(eth_header[2])
            if eth_header[2] != 0x800 :  # IPv4 ethertype only
                continue
            src_eth_addr = self.eth_addr(pkt[6:12])
            ip_header = pkt[14:34]
            payload = pkt[14:]
            self.__process_ipframe(sa_ll[2], ip_header, src_eth_addr, payload)
def getttl(dbconn, ttl, ip):
    """Record a crude OS guess for *ip* based on the observed IP TTL.

    Default initial TTLs are 64 (Linux) and 128 (Windows); up to 14
    router hops below the initial value are accepted.

    :param dbconn: object exposing addttl(ip, label)
    :param ttl: TTL value observed on the wire
    :param ip: source IP address string
    """
    # BUG FIX: the upper bounds were strict (`ttl < 64` / `ttl < 128`),
    # so a host on the local segment sending exactly its initial TTL was
    # classified "Unknown".  Use inclusive upper bounds.
    if 49 < ttl <= 64:
        logging.debug("pkt most likely from Linux-based system")
        dbconn.addttl(ip, "Linux")
    elif 113 < ttl <= 128:
        logging.debug("pkt most likely from Windows-based system")
        dbconn.addttl(ip, "Windows")
    else:
        logging.debug("pkt does not match")
        dbconn.addttl(ip, "Unknown")
def vendorlookup(ethsrc):
    """Look up the NIC vendor for a MAC address via macvendorlookup.com.

    :param ethsrc: colon-separated MAC address string
    :return: vendor/company name, or None when the lookup fails
    """
    vendor_eth = ethsrc.replace(":", "-")
    url = "https://www.macvendorlookup.com/api/v2/%s" % vendor_eth
    jsondata = ""
    try:
        headers = {'Accept': 'application/json'}
        response = requests.get(url, headers=headers)
        if response is not None:
            try:
                jsondata = response.json()
            except simplejson.decoder.JSONDecodeError:
                # Empty or garbled body: fall through and return None.
                pass
    # BUG FIX: the original `except A or B:` evaluated the `or` of two
    # classes and therefore only caught the first one.  RequestException
    # is the base class of ConnectionError, so a single class covers both.
    except requests.exceptions.RequestException:
        logging.error("Requests error occured")
    if jsondata:
        return jsondata[0]['company']
    else:
        return None
def test_incoming_callback(src_eth_addr, src, dst, src_port, dst_port, ttl, frame):
    """Handle an inbound TCP/IP frame.

    Private unicast sources are recorded in the DB (with vendor and TTL
    guess) on first sight, and only time-stamped afterwards.
    """
    ipsrc = socket.inet_ntoa(src)
    ipdst = socket.inet_ntoa(dst)
    addr = IPAddress(ipsrc)
    if addr.is_private() and addr.is_unicast():
        logging.debug("Processing IP packet from the private internal range")
        if dbconn.isipaddrindb(ipsrc):
            logging.debug("IP address %s already in DB, refreshing timestamp" % ipsrc)
            dbconn.refreshtimestamp(ipsrc)
        else:
            vendor = vendorlookup(src_eth_addr)
            logging.info("Looking up info on IP Address: %s MAC Address: %s Vendor: %s" % (ipsrc, src_eth_addr, vendor))
            dbconn.addhost(src_eth_addr, vendor, ipsrc, ipdst, src_port, dst_port, "")
            getttl(dbconn, ttl, ipsrc)
    else:
        logging.debug("Non-TCPIP packet")
    logging.debug("incoming - src_eth_addr=%s src=%s, dst=%s, frame len = %d"
                  %(src_eth_addr, socket.inet_ntoa(src), socket.inet_ntoa(dst), len(frame)))
def test_outgoing_callback(src_eth_addr, src, dst, src_port, dst_port, ttl, frame):
    """Handle an outbound TCP/IP frame (mirror of test_incoming_callback).

    Private unicast sources are recorded in the DB on first sight, and
    only time-stamped afterwards.
    """
    ipsrc = socket.inet_ntoa(src)
    ipdst = socket.inet_ntoa(dst)
    if (IPAddress(ipsrc).is_private() and IPAddress(ipsrc).is_unicast()):
        logging.debug("Processing IP packet from the private internal range")
        if not dbconn.isipaddrindb(ipsrc):
            vendor = vendorlookup(src_eth_addr)
            logging.info("Looking up info on IP Address: %s MAC Address: %s Vendor: %s" % (ipsrc, src_eth_addr, vendor))
            dbconn.addhost(src_eth_addr, vendor, ipsrc, ipdst, src_port, dst_port, "")
            getttl(dbconn, ttl, ipsrc)
        else:
            logging.debug("IP address %s already in DB, refreshing timestamp" % ipsrc)
            # BUG FIX: this passed src_eth_addr (a MAC), but the table is
            # keyed by srcip (see test_incoming_callback), so the UPDATE
            # never matched and timestamps were never refreshed.
            dbconn.refreshtimestamp(ipsrc)
    else:
        logging.debug("Non-TCPIP packet")
    logging.debug("outgoing - src_eth_addr=%s src=%s, dst=%s, frame len = %d"
                  %(src_eth_addr, socket.inet_ntoa(src), socket.inet_ntoa(dst), len(frame)))
def main(argv):
    """Entry point: parse -i/--interface and start the sniffer."""
    global dbconn
    log_setup("aardvark.log")
    print("Sniffer starting...")
    logging.info("Sniffer starting...")
    dbconn = DbConnector("aardvark.db")
    if len(sys.argv) < 2:
        print("Wrong number of arguments!")
        sys.exit()
    try:
        opts, args = getopt.getopt(argv, "hi:", ["interface="])
    except getopt.GetoptError:
        print('aardvark.py -i <interface>')
        sys.exit(2)
    iface = ""
    for opt, arg in opts:
        if opt == '-h':
            print('aardvark.py -i <interface>')
            sys.exit()
        elif opt in ("-i", "--interface"):
            iface = arg
        else:
            print("Error in response")
            sys.exit()
    ip_sniff = IPSniff(iface, test_incoming_callback, test_outgoing_callback)
    ip_sniff.recv()
# Script entry point: forward the CLI args (minus the program name).
if __name__ == "__main__":
    main(sys.argv[1:])
|
986,035 | 330dc2af339364c0be3d2080054ba3629ddfc916 | import keyword
# Check whether a sample string is a reserved Python keyword.
s = "valid"
print("Yes") if keyword.iskeyword(s) else print("No")
# str has no keyword test of its own -- the keyword module must be used;
# earlier (broken) attempts kept for reference:
#print ("Yes") if s.iskeyword() else print("No")
s = "gderre"
#print ("Yes") if s.iskeyword() else print("No")
#assert(isidentifier('foo'))
|
986,036 | 2501d4a2b47a39efb11e7ff9506eda0cdb8a10b1 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import random
# Rock-paper-scissors against the computer; type "end" to quit.
while True:
    # Computer's move: 1 = rock ("石头"), 2 = scissors ("剪刀"), 3 = paper ("布").
    s = int(random.randint(1, 3))
    if s == 1:
        ind = "石头"
    elif s == 2:
        ind = "剪刀"
    elif s == 3:
        ind = "布"
    # Prompt: enter rock/scissors/paper, or "end" to finish.
    m = input("请输入石头,剪刀,布, 输入end结束:")
    blist = ["石头", "剪刀", "布"]
    if m == "end":
        # "End the game."
        print("结束游戏")
        break
    elif m not in blist:
        # Invalid input.
        print("输入错误:")
    elif m == ind:
        # Draw.
        print("平局")
    elif (m == "石头" and ind == "剪刀") or (m == "剪刀" and ind == "布") or (m == "布" and ind == "石头"):
        # Player's move beats the computer's.
        print("电脑出:" + ind + "你赢了")
    elif (m == "剪刀" and ind == "石头") or (m == "布" and ind == "剪刀") or (m == "石头" and ind == "布"):
        # Computer's move beats the player's.
        print("电脑出:" + ind + "你输了")
986,037 | a719e7528b1f59c7f6c2a507c21ab76d7075d8fb | import sys
filename=sys.argv[1]
infile=open(filename,"r")
outfile=open("google.out","w")
dict={'a':'y','b':'h','c':'e',' ':' ','\n':'\n','d':'s','e':'o','f':'c','g':'v','h':'x','i':'d','j':'u','k':'i','l':'g','m':'l','n':'b','o':'k','p':'r','q':'z','r':'t','s':'n','t':'w','u':'j','v':'p','w':'f','x':'m','y':'a','z':'q'}
count=0
list=[]
line=''
for line in infile:
if count==0:
count+=1
continue
else:
count+=1
out=''
for i in line:
out+=dict[i]
outfile.write ("Case #"+str(count-1)+": "+str(out))
print "Done"
outfile.close()
|
986,038 | 75d79c6a4eb324a17862fe5bd216a9ab07a7d0d4 | import os.path
def get_urls(filename):
if not os.path.isfile(filename):
return None
try:
with open(filename, 'r') as file:
urls = file.readlines()
except:
print("exception occurred in getURLs().")
return None
return [url.strip() for url in urls]
|
986,039 | 9abe945aa23c69cdbfbc07d211319429bcc1cca8 | import pygame
import random
import sys
import time
# Window geometry for the visualizer.
WIDTH = 800
HEIGHT = 800
# Named pygame colors used for the bars and the background.
COLORS = {
    "white": pygame.Color("white"),
    "red": pygame.Color("red"),
    "green": pygame.Color("green"),
    "blue": pygame.Color("blue"),
    "black": pygame.Color("black"),
    "yellow": pygame.Color("yellow")
}
# NOTE: the window is created at import time.
screen = pygame.display.set_mode((WIDTH, HEIGHT))
pygame.display.set_caption("Sorting Algoritms Visualizer")
# Number of bars drawn/sorted.
ARR_SIZE = 200
class Element:
    """One bar of the visualization; its value doubles as pixel height."""

    def __init__(self, value):
        self.value = value
        self.color = COLORS["white"]   # unsorted bars start white
        self.width = WIDTH / ARR_SIZE  # bars tile the window exactly
        self.height = value
class Solver:
    """Sorting algorithms animated over a list of Element bars.

    Every algorithm sorts self.arr in place by Element.value and calls
    the module-level draw_arr() to redraw after each step/pass; elements
    are recolored green as they reach their final position.
    """

    def __init__(self, arr):
        self.arr = arr

    def bubble_sort(self):
        """Bubble sort; after pass i the last i+1 elements are settled."""
        for i in range(len(self.arr)):
            for j in range(0, len(self.arr) - i - 1):
                if (self.arr[j].value > self.arr[j + 1].value):
                    self.arr[j], self.arr[j + 1] = self.arr[j + 1], self.arr[j]
            self.arr[len(self.arr) - 1 - i].color = COLORS["green"]
            draw_arr(self.arr)

    def selection_sort(self):
        """Selection sort; position i is final after iteration i."""
        for i in range(len(self.arr)):
            min_idx = i
            for j in range(i + 1, len(self.arr)):
                if self.arr[min_idx].value > self.arr[j].value:
                    min_idx = j
            self.arr[i], self.arr[min_idx] = self.arr[min_idx], self.arr[i]
            self.arr[i].color = COLORS["green"]
            draw_arr(self.arr)

    def insertion_sort(self):
        """Insertion sort; each key is colored as soon as it is placed."""
        self.arr[0].color = COLORS["green"]
        for i in range(1, len(self.arr)):
            key = self.arr[i]
            key.color = COLORS["green"]
            j = i - 1
            while j >= 0 and key.value < self.arr[j].value:
                self.arr[j + 1] = self.arr[j]
                j -= 1
            self.arr[j + 1] = key
            draw_arr(self.arr)

    def merge_sort(self):
        """Bottom-up (iterative) merge sort, doubling run size each round."""
        current_size = 1
        while current_size < len(self.arr) - 1:
            left = 0
            while left < len(self.arr) - 1:
                mid = min((left + current_size - 1), (len(self.arr) - 1))
                # Tuple-indexing trick, equivalent to
                # right = min(2*current_size + left - 1, len(self.arr) - 1).
                right = ((2 * current_size + left - 1, len(self.arr) - 1)
                         [2 * current_size + left - 1 > len(self.arr) - 1])
                self.merge(left, mid, right)
                left = left + current_size * 2
                draw_arr(self.arr)
            current_size = 2 * current_size
        for elem in self.arr:
            elem.color = COLORS["green"]

    def merge(self, l, m, r):
        """Stable merge of self.arr[l..m] and self.arr[m+1..r] in place."""
        n1 = m - l + 1
        n2 = r - m
        L = [0] * n1
        R = [0] * n2
        for i in range(0, n1):
            L[i] = self.arr[l + i]
        for i in range(0, n2):
            R[i] = self.arr[m + i + 1]
        i, j, k = 0, 0, l
        while i < n1 and j < n2:
            if L[i].value > R[j].value:
                self.arr[k] = R[j]
                j += 1
            else:
                self.arr[k] = L[i]
                i += 1
            k += 1
        while i < n1:
            self.arr[k] = L[i]
            i += 1
            k += 1
        while j < n2:
            self.arr[k] = R[j]
            j += 1
            k += 1

    def bogo_sort(self):
        """Shuffle until sorted.

        NOTE(review): is_sorted() uses a strict '<', so an array holding
        duplicate values can never satisfy it and this loops forever;
        main() draws values with randint, which can repeat.
        """
        def is_sorted(arr):
            return all(arr[i].value < arr[i + 1].value for i in range(len(arr) - 1))
        while not is_sorted(self.arr):
            random.shuffle(self.arr)
            draw_arr(self.arr)
def draw_arr(arr):
    """Render the bars in *arr* and pump the pygame event queue.

    Handles window-close and ESC so the app stays responsive while a
    sort is running; the 100 ms delay makes individual steps visible.
    """
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            sys.exit()
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_ESCAPE:
                sys.exit()
    screen.fill(COLORS["black"])
    x = 0
    for elem in arr:
        # Bars are anchored to the bottom edge of the window.
        pygame.draw.rect(screen, elem.color,
                         (x, HEIGHT - elem.height, elem.width, elem.height))
        x += elem.width
    pygame.time.delay(100)
    pygame.display.update()
def main():
    """Build a shuffled array of random bars and bubble-sort it on screen."""
    bars = [Element(random.randint(1, HEIGHT)) for _ in range(1, ARR_SIZE + 1)]
    random.shuffle(bars)
    solver = Solver(bars)
    solver.bubble_sort()
    # Keep redrawing the finished state so the window stays responsive.
    while True:
        draw_arr(bars)
# Launch the visualizer when the module is executed.
main()
|
986,040 | 9ad353484bc754276253233100ae7e6f19cd95d1 | from random import randint
print("""
************************************************************
* - Magic number Rules - *
* A random number is taken between 1 and 10 and you've 4 *
* try to take the right number *
* *
************************************************************
""")
lifes = 4  # remaining guesses ("lives"; sic)
winning_number = randint(1, 10)
win = False
while lifes > 0 and not win:
    ok = False
    # Inner loop: re-prompt until the input is a valid in-range number.
    while not ok:
        try:
            user_number = input("What is the magic number : ")
            user_number = int(user_number)
        except ValueError:
            print("Error : Please enter a number")
            ok = False
            continue
        if user_number < 0 or user_number > 10:
            print("Error : The number should be between 1 and 10")
            ok = False
            continue
        else:
            # Valid guess: it costs one life and yields a hint.
            lifes -= 1
            if user_number > winning_number:
                print("smaller")
            elif user_number < winning_number:
                print("higher")
            else:
                win = True
            ok = True
    if lifes > 0 and not win:
        print(f"{ lifes } remaining left \n")
if win:
    print("Congratulations ! You win.")
else :
    print("Game over ! You lose.")
986,041 | 81b0ab075af00d71c568edfb3a708a0d0cea2bc6 | """Python3 implementation of CRAM.
To use macros, put the code in its own file and create a second file (the launcher) that activates MacroPy and then imports the file where the macros are used.
E. g. if you have a file target.py which contains your code, create a file run.py:
#!/usr/bin/env python
import macropy.activate
import target
Now launch run.py to start your program.
Modules:
designator -- implementation of designators.
fluent -- implementation of fluents and the whenever macro.
helper -- implementation of helper classes and functions for internal usage only.
language -- implementation of the CRAM language.
process_module -- implementation of process modules.
"""
|
986,042 | 39117c8097fd3885818922910141164887a75b4e | # Generated by Django 2.0.1 on 2018-09-09 12:00
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: deletes the Total model (and its student
    FK), drops Assignment.begin_date, and adds Schedule.term/year.

    NOTE: generated by Django -- prefer creating a follow-up migration
    over editing this one once it has been applied anywhere.
    """

    dependencies = [
        ('scores', '0024_auto_20180909_1939'),
    ]

    operations = [
        # The FK must be removed before the model itself can be deleted.
        migrations.RemoveField(
            model_name='total',
            name='student',
        ),
        migrations.RemoveField(
            model_name='assignment',
            name='begin_date',
        ),
        migrations.AddField(
            model_name='schedule',
            name='term',
            field=models.IntegerField(blank=True, choices=[(1, '第一学年上学期'), (2, '第一学年下学期'), (3, '第二学年上学期'), (4, '第二学年下学期'), (5, '第三学年上学期'), (6, '第三学年下学期')], null=True),
        ),
        migrations.AddField(
            model_name='schedule',
            name='year',
            field=models.DateField(blank=True, null=True),
        ),
        migrations.AlterField(
            model_name='assignment',
            name='create_date',
            field=models.DateField(auto_now_add=True),
        ),
        migrations.DeleteModel(
            name='Total',
        ),
    ]
|
986,043 | 0ea51c92884de9539c81f778774502f8cbf6ff08 | from . import dedicated, pooled
__all__ = ["dedicated", "pooled"]
|
986,044 | 1ad958ad7b494f71bce773c2641d7bf714ae9055 | import os, sys
import urllib, json
import requests
import boto3
import crypto_client
# Fetch API credentials (access key, secret) from the auto-ID service.
credentials = crypto_client.get_credentials("".join([os.environ["AUTO_ID_API_ENDPOINT"], "/Stage/verify"]), "".join(["secretsmanager,",os.environ["AUTO_ID_SECRET_NAME"]]))
parts = credentials.split(',')
# Assume the target role using the retrieved keys.
sts_client = boto3.client('sts', aws_access_key_id=parts[0], aws_secret_access_key=parts[1])
assumed_role_object = sts_client.assume_role(
    RoleArn=os.environ["ASSUME_ROLE_ARN"],
    RoleSessionName=os.environ["ASSUME_ROLE_SESSION_NAME"]
)
# Package the temporary credentials in the shape the AWS federation
# endpoint expects.
credentials = {
    "sessionId": assumed_role_object["Credentials"]["AccessKeyId"],
    "sessionKey": assumed_role_object["Credentials"]["SecretAccessKey"],
    "sessionToken": assumed_role_object["Credentials"]["SessionToken"]
}
json_string_with_temp_credentials = json.dumps(credentials)
# Exchange the temporary credentials for a console sign-in token
# (SessionDuration 43200 s = 12 h).
request_parameters = "?Action=getSigninToken"
request_parameters += "&SessionDuration=43200"
request_parameters += "&Session=" + urllib.parse.quote_plus(json_string_with_temp_credentials)
request_url = "https://signin.aws.amazon.com/federation" + request_parameters
r = requests.get(request_url)
signin_token = json.loads(r.text)
# Build the console login URL pointing at VISIT_URL.
request_parameters = "?Action=login"
request_parameters += "&Issuer=Example.org"
request_parameters += "&Destination=" + urllib.parse.quote_plus(os.environ["VISIT_URL"])
request_parameters += "&SigninToken=" + signin_token["SigninToken"]
request_url = "https://signin.aws.amazon.com/federation" + request_parameters
# NOTE(review): this URL grants console access -- printing it to stdout
# may leak it into logs.
print(request_url)
|
986,045 | 3efae291a45b67671f7a8c55cee9f86c008fe602 | """Utilities module.
"""
from importlib.machinery import SourceFileLoader
def import_from_file(module_name: str, filepath: str):
    """Imports a module from file.

    Uses the importlib spec API instead of ``SourceFileLoader.load_module``,
    which has been deprecated since Python 3.4 and removed in 3.12.

    Args:
        module_name (str): Assigned to the module's __name__ parameter (does not
            influence how the module is named outside of this function)
        filepath (str): Path to the .py file

    Returns:
        The loaded module object.

    Raises:
        ImportError: If no import spec/loader can be created for *filepath*.
    """
    import importlib.util
    import sys

    spec = importlib.util.spec_from_file_location(module_name, filepath)
    if spec is None or spec.loader is None:
        raise ImportError("cannot create an import spec for %r" % filepath)
    module = importlib.util.module_from_spec(spec)
    # load_module() also registered the module under module_name; keep
    # that behavior so code that looks up sys.modules keeps working.
    sys.modules[module_name] = module
    spec.loader.exec_module(module)
    return module
|
986,046 | 329a5e42fe6cacf09bd393c03d70843a1ab7a6a0 | class Solution:
def findKthPositive(self, arr: List[int], k: int) -> int:
present = [False]*4000
answer = -1
for i in range(0, len(arr)):
present[arr[i]] = True
curr = 1
for i in range(1, len(present)):
if present[i] == False and curr == k:
answer = i
break
elif present[i] == False and curr < k:
curr += 1
return answer
|
986,047 | 9217175f93e85ccac03414988ccb260ec083703e | #!/usr/bin/env python
import rospy
from std_msgs.msg import Float32
# ROS node that publishes a constant radius value on the 'radius' topic.
rospy.init_node('publish_radius', anonymous = True)
pub1 = rospy.Publisher('radius', Float32, queue_size = 10)
rate = rospy.Rate(10)  # publish at 10 Hz
r = 1  # constant radius value sent on every tick
while not rospy.is_shutdown():
    rospy.loginfo(r)
    pub1.publish(r)
    rate.sleep()
|
986,048 | 5cbdb308dfd9817a7b0e1f0eef213b1b8e7dc70c | from chainercv.extensions.evaluator.detection_coco_evaluator import DetectionCOCOEvaluator # NOQA
from chainercv.extensions.evaluator.detection_voc_evaluator import DetectionVOCEvaluator # NOQA
from chainercv.extensions.evaluator.instance_segmentation_coco_evaluator import InstanceSegmentationCOCOEvaluator # NOQA
from chainercv.extensions.evaluator.instance_segmentation_voc_evaluator import InstanceSegmentationVOCEvaluator # NOQA
from chainercv.extensions.evaluator.semantic_segmentation_evaluator import SemanticSegmentationEvaluator # NOQA
from chainercv.extensions.vis_report.detection_vis_report import DetectionVisReport # NOQA
|
986,049 | 4d7ad85ea16884c36daa8cc769b9e9d52dd7ac65 | import pandas as pd
def sample_first_name(first_name_file, num_samples):
    """Draw num_samples first names (with replacement) from an SSA-style
    baby-name file.

    @param first_name_file location of the headerless name,gender,count CSV.
    """
    frame = pd.read_csv(first_name_file, header=None,
                        names=["name", "gender", "count"])
    # Keep only names seen more than 10 times.
    popular = frame[frame["count"] > 10]
    chosen = popular["name"].sample(n=num_samples, random_state=2021,
                                    replace=True)
    return chosen.apply(str.title).tolist()
def sample_last_name(last_name_file, num_samples):
    """Sample num_samples distinct last names from a Census surname file.

    @param last_name_file location of the CSV (with a header row; must
        contain ``name`` and ``count`` columns).
    @return a list of title-cased surnames.  Sampling is without
        replacement, so the file needs at least num_samples rows with
        count > 400.  (The old docstring claimed a generator function was
        returned; a plain list always was.)
    """
    df = pd.read_csv(last_name_file)
    # Drop footer/placeholder rows with no name, then keep only
    # reasonably common surnames.
    df = df[~df.name.isna()]
    df = df[(df["count"] > 400)]
    # (Removed a leftover debug print of num_samples/len(df).)
    names = df["name"].sample(n=num_samples, random_state=2021).apply(str.title)
    return list(names.values)
def run(input_file, output_file, first_name_f, last_name_f):
    """Attach a random first/last name to every patient and write the
    result, sorted by SUBJECT_ID, to *output_file*."""
    patients = pd.read_csv(input_file)[["SUBJECT_ID", "GENDER"]]
    subject_ids = set(patients["SUBJECT_ID"].values)
    needed = len(subject_ids)
    # generate both first and last names
    last_names = sample_last_name(last_name_f, needed)
    first_names = sample_first_name(first_name_f, needed)
    names = list(zip(first_names, last_names))
    print(len(first_names), len(last_names), len(names))
    print("Unique Names", len(set(names)))
    # add to the data and save
    patients["FIRST_NAME"] = first_names
    patients["LAST_NAME"] = last_names
    patients.sort_values(by="SUBJECT_ID").to_csv(output_file, index=False)
from argparse import ArgumentParser

# CLI definition, at module level so --help reflects the defaults.
parser = ArgumentParser()
parser.add_argument("--input-file", required=True)
parser.add_argument("--output-file", required=True)
parser.add_argument("--first-name-f", default="data/yob1950.txt")
parser.add_argument("--last-name-f", default="data/Names_2010Census.csv")

if __name__ == "__main__":
    """
    Usage:
        - python subject_id_to_name.py --input-file PATIENTS.csv --output-file SUBJECT_ID_to_NAME.csv \
            --first-name-f ... --last-name-f ...
    Output Format:
        SUBJECT_ID,FIRST_NAME,LAST_NAME
        249,Eric,Lehman
        250,Sarthak,Jain
    """
    args = parser.parse_args()
    run(args.input_file, args.output_file, args.first_name_f, args.last_name_f)
    print(f"Saved Named to {args.output_file}")
|
986,050 | 3631520ba9932a4f1add4cf48b87851625c10012 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# vim:fenc=utf-8
#
# Copyright © Her Majesty the Queen in Right of Canada, as represented
# by the Minister of Statistics Canada, 2019.
#
# Distributed under terms of the license.
import pyarrow as pa
import pyfwfr as pf
import unittest
import warnings
from pyfwfr.tests.common import make_random_fwf, read_bytes
def ignore_numpy_warning(test_func):
    """
    Decorator that silences the "numpy.ufunc size changed" deprecation
    warning around a test method.  numpy's official fix is to ignore the
    warning, but unittest's warning settings override module-level
    filters, so the filter has to be applied per call.
    """
    def wrapper(self, *args, **kwargs):
        with warnings.catch_warnings():
            warnings.filterwarnings('ignore',
                                    message='numpy.ufunc size changed')
            test_func(self, *args, **kwargs)
    return wrapper
class TestPyfwfr(unittest.TestCase):
    """End-to-end tests for the pyfwfr fixed-width-file reader.

    Each test feeds raw bytes through read_bytes() and checks the
    resulting pyarrow Table, or exercises the option-holder classes
    (ParseOptions / ReadOptions / ConvertOptions) directly.
    """

    @ignore_numpy_warning
    def test_big(self):
        # 30 columns x 10000 rows of width-4 fields.
        field_widths = []
        for i in range(30):
            field_widths.append(4)
        parse_options = pf.ParseOptions(field_widths)
        fwf, expected = make_random_fwf(num_cols=30, num_rows=10000)
        table = read_bytes(fwf, parse_options)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    @ignore_numpy_warning
    def test_big_encoded(self):
        # Same as test_big but with Big5-encoded input.
        field_widths = []
        for i in range(30):
            field_widths.append(4)
        parse_options = pf.ParseOptions(field_widths)
        read_options = pf.ReadOptions(encoding='Big5')
        fwf, expected = make_random_fwf(num_cols=30, num_rows=10000,
                                        encoding='big5')
        table = read_bytes(fwf, parse_options, read_options=read_options)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    def test_cobol(self):
        # COBOL signed-overpunch numerics in columns a and c.
        rows = b'a b c \r\n1A ab 12\r\n33Jcde34\r\n6} fg56\r\n 3Dhij78'
        parse_options = pf.ParseOptions([3, 3, 2])
        convert_options = pf.ConvertOptions(is_cobol=True)
        table = read_bytes(rows, parse_options,
                           convert_options=convert_options)
        assert isinstance(table, pa.Table)
        assert table.to_pydict() == {'a': [11, -331, -60, 34],
                                     'b': ['ab', 'cde', 'fg', 'hij'],
                                     'c': [12, 34, 56, 78]}
        assert table.column(0).type == 'int64'

    def test_convert_options(self):
        # Getter/setter round-trips for every ConvertOptions attribute.
        cls = pf.ConvertOptions
        opts = cls()
        opts.column_types = {'a': pa.int64(), 'b': pa.float32()}
        assert opts.column_types == {'a': pa.int64(), 'b': pa.float32()}
        opts.column_types = {'c': 'int16', 'd': 'null'}
        assert opts.column_types == {'c': pa.int16(), 'd': pa.null()}
        schema = pa.schema([('a', pa.int32()), ('b', pa.string())])
        opts.column_types = schema
        assert opts.column_types == {'a': pa.int32(), 'b': pa.string()}
        opts.column_types = [('a', pa.binary())]
        assert opts.column_types == {'a': pa.binary()}
        assert opts.strings_can_be_null is False
        opts.strings_can_be_null = True
        assert opts.strings_can_be_null is True
        assert isinstance(opts.null_values, list)
        assert '' in opts.null_values
        assert 'N/A' in opts.null_values
        opts.null_values = ['a', 'b']
        assert opts.null_values == ['a', 'b']
        assert isinstance(opts.true_values, list)
        opts.true_values = ['a', 'b']
        assert opts.true_values == ['a', 'b']
        assert isinstance(opts.false_values, list)
        opts.false_values = ['a', 'b']
        assert opts.false_values == ['a', 'b']
        assert opts.is_cobol is False
        opts.is_cobol = True
        assert opts.is_cobol is True
        opts.pos_values = {'a': '1', 'b': '2'}
        assert opts.pos_values == {'a': '1', 'b': '2'}
        opts.neg_values = {'a': 'b', '3': '4'}
        assert opts.neg_values == {'a': 'b', '3': '4'}
        # All attributes settable through the constructor as well.
        opts = cls(column_types={'a': pa.null()}, is_cobol=True,
                   pos_values={'a': '1'}, neg_values={'b': '2'},
                   null_values=['N', 'nn'], true_values=['T', 'tt'],
                   false_values=['F', 'ff'], strings_can_be_null=True)
        assert opts.column_types == {'a': pa.null()}
        assert opts.is_cobol is True
        assert opts.pos_values == {'a': '1'}
        assert opts.neg_values == {'b': '2'}
        assert opts.null_values == ['N', 'nn']
        assert opts.true_values == ['T', 'tt']
        assert opts.false_values == ['F', 'ff']
        assert opts.strings_can_be_null is True

    def test_header(self):
        # Header-only input: names come from the single row, zero data rows.
        rows = b'abcdef'
        parse_options = pf.ParseOptions([2, 3, 1])
        table = read_bytes(rows, parse_options)
        assert isinstance(table, pa.Table)
        assert table.num_columns == 3
        assert table.column_names == ['ab', 'cde', 'f']
        assert table.num_rows == 0

    def test_no_header(self):
        # Explicit column_names means the first row is data, not a header.
        rows = b'123456789'
        parse_options = pf.ParseOptions([1, 2, 3, 3])
        read_options = pf.ReadOptions(column_names=['a', 'b', 'c', 'd'])
        table = read_bytes(rows, parse_options, read_options=read_options)
        assert table.to_pydict() == {'a': [1], 'b': [23],
                                     'c': [456], 'd': [789]}

    def test_nulls_bools(self):
        # Null markers and booleans are inferred per column.
        rows = b'a b \r\n null N/A \r\n123456 true'
        parse_options = pf.ParseOptions([6, 6])
        table = read_bytes(rows, parse_options)
        assert(table.column(0).type == 'int64')
        assert(table.column(1).type == 'bool')
        assert table.to_pydict() == {'a': [None, 123456], 'b': [None, True]}

    def test_parse_options(self):
        # field_widths is mandatory; ignore_empty_lines defaults to True.
        cls = pf.ParseOptions
        with self.assertRaises(Exception):
            opts = cls()
        opts = cls([1, 2])
        assert opts.field_widths == [1, 2]
        opts.field_widths = [1, 2, 3]
        assert opts.field_widths == [1, 2, 3]
        assert opts.ignore_empty_lines is True
        opts.ignore_empty_lines = False
        assert opts.ignore_empty_lines is False
        opts = cls([1, 2], ignore_empty_lines=False)
        assert opts.field_widths == [1, 2]
        assert opts.ignore_empty_lines is False

    def test_read_options(self):
        # Getter/setter round-trips for every ReadOptions attribute.
        cls = pf.ReadOptions
        opts = cls()
        assert opts.encoding == ""
        opts.encoding = 'cp1047,swaplfnl'
        assert opts.encoding == 'cp1047,swaplfnl'
        assert opts.use_threads is True
        opts.use_threads = False
        assert opts.use_threads is False
        assert opts.block_size > 0
        opts.block_size = 12345
        assert opts.block_size == 12345
        assert opts.skip_rows == 0
        opts.skip_rows = 5
        assert opts.skip_rows == 5
        assert opts.column_names == []
        opts.column_names = ['ab', 'cd']
        assert opts.column_names == ['ab', 'cd']
        opts = cls(encoding='abcd', use_threads=False, block_size=1234,
                   skip_rows=1, column_names=['a', 'b', 'c'])
        assert opts.encoding == 'abcd'
        assert opts.use_threads is False
        assert opts.block_size == 1234
        assert opts.skip_rows == 1
        assert opts.column_names == ['a', 'b', 'c']

    def test_serial_read(self):
        # Single-threaded read must match the expected table too.
        parse_options = pf.ParseOptions([4, 4])
        read_options = pf.ReadOptions(use_threads=False)
        fwf, expected = make_random_fwf()  # generate 2 col, width 4 by default
        table = read_bytes(fwf, parse_options, read_options=read_options)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    def test_skip_columns(self):
        # Columns 0 and 2 are parsed but excluded from the result.
        rows = b'a b c \r\n11 ab 123\r\n33 cde456\r\n-60 fg789'
        parse_options = pf.ParseOptions([3, 3, 3], skip_columns=[0, 2])
        table = read_bytes(rows, parse_options)
        assert isinstance(table, pa.Table)
        assert table.to_pydict() == {'b': ['ab', 'cde', 'fg']}

    def test_small(self):
        parse_options = pf.ParseOptions([4, 4])
        fwf, expected = make_random_fwf()  # generate 2 col, width 4 by default
        table = read_bytes(fwf, parse_options)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()

    def test_small_encoded(self):
        parse_options = pf.ParseOptions([4, 4])
        read_options = pf.ReadOptions(encoding='Big5')
        fwf, expected = make_random_fwf(encoding='big5')
        table = read_bytes(fwf, parse_options, read_options=read_options)
        assert table.schema == expected.schema
        assert table.equals(expected)
        assert table.to_pydict() == expected.to_pydict()
|
986,051 | b92aba1ec5497e21475b7be96d599ce9784cd9c1 | def bomberMan(n, grid):
configAtOneSecond = [list(word) for word in grid]
configAtEvenSeconds = [list('O' * len(grid[0])) for word in grid]
configAtThreeSeconds = GetConfigAtThreeSeconds(grid)
if(n % 4 == 1):
return ConvertListOfListToListOfString(configAtOneSecond)
if(n % 2 == 0):
return ConvertListOfListToListOfString(configAtEvenSeconds)
if(n % 3 == 0):
return ConvertListOfListToListOfString(configAtThreeSeconds)
def ConvertListOfListToListOfString(grid):
result = [''.join(l) for l in grid]
return result
def GetConfigAtThreeSeconds(grid):
result = [list('O' * len(grid[0])) for word in grid]
for i in range(0,len(grid)):
for j in range(0,len(grid[i])):
#if there is a bomb at i,j, detonate
if grid[i][j] == 'O':
result[i][j] = '.'
# detonate surrounding bombs as well
if i > 0:
result[i-1][j] = '.'
if i < len(grid) - 1:
result[i+1][j] = '.'
if j > 0:
result[i][j-1] = '.'
if j < len(grid[0]) - 1:
result[i][j+1] = '.'
return result
# Ad-hoc demo: 6x7 grid with two bombs up top and a 2x2 cluster below,
# simulated for 3 seconds.
grid = ['.......',
        '...O...',
        '....O..',
        '.......',
        'OO.....',
        'OO.....']
print(bomberMan(3, grid))
986,052 | c98c97df0bd90c649a2ad4ece7d047442f1badcf | from pickle import load
import sys

# Load a (truth, predicted) pair pickled by a training script and print
# the predictions, one per line.  File path comes from argv[1].
# NOTE(review): pickle.load executes code from the file -- only feed it
# trusted files.
with open(sys.argv[1], "rb") as file:
    predictions = load(file)

truth, predicted = predictions

for p in predicted:
    print(p)
986,053 | f2a604a01cb967f9678f0b8d237d858a82a25af6 | #!/usr/bin/python
import autosklearn.classification
import sklearn.model_selection
import sklearn.datasets
import sklearn.metrics

# Digits dataset, default 75/25 train/test split (fixed seed).
X, y = sklearn.datasets.load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = sklearn.model_selection.train_test_split(X, y, random_state=1)
# Keep auto-sklearn's scratch and output folders for later inspection.
automl = autosklearn.classification.AutoSklearnClassifier(tmp_folder="/tmp/autosklearn_tmp", output_folder="/tmp/autosklearn_output", delete_tmp_folder_after_terminate=False, delete_output_folder_after_terminate=False)
automl.fit(X_train, y_train)
y_hat = automl.predict(X_test)
print("Accuracy score", sklearn.metrics.accuracy_score(y_test, y_hat))
986,054 | 810fe90b121d08857f26bd174e8788a3145bb523 | from flask import Flask, request, Response, jsonify
import json
import os
import fnmatch
import tempfile
import shutil
import zipfile
from error import InvalidUsage
from shape_importer.shp2pgsql import shape2pgsql
from shape_importer.tab2pgsql import shape2pgsql as ogr2ogr
from db_utils.postgis import geojson_from_table
from gdal_utils.metadata_finder import get_srid_from_prj, get_encoding_from_dbf
from werkzeug import secure_filename
# Serve the SPA client directly from the Flask static machinery.
STATIC_FOLDER = '../client'
app = Flask(__name__, static_folder=STATIC_FOLDER, static_url_path='')
# DB settings come from the file named by $APP_CONFIG_FILE.
app.config.from_envvar('APP_CONFIG_FILE')
# psycopg2-style connection string for the PostGIS database.
CONN_STRING = 'dbname={dbname} user={user} password={password}'.format(
    dbname=app.config['DB_NAME'],
    user=app.config['DB_USER'],
    password=app.config['DB_PASSWORD']
)
# Single upload extension accepted by allowed_file().
ALLOWED_EXTENSIONS = 'zip'

def allowed_file(filename):
    """Return True when *filename* carries the accepted extension.

    BUG FIX: the original tested ``ext in ALLOWED_EXTENSIONS``; with a
    plain-string constant that is a *substring* test, so e.g. "a.z",
    "a.i" or "a.ip" were accepted.  Compare for equality instead.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1] == ALLOWED_EXTENSIONS
def get_shp_prj_dbf_shx_files_from_tree(temp_dir):
    """Walk *temp_dir* and locate one file for each shapefile part.

    :param temp_dir: directory tree to search
    :return: dict mapping 'shp'/'prj'/'dbf'/'shx' to a file path
    :raises InvalidUsage: when any of the four parts is missing
    """
    files_to_return = {}
    # First character alphabetic, then anything, ending in the extension.
    patterns = [
        '[a-zA-Z]*.shp',
        '[a-zA-Z]*.prj',
        '[a-zA-Z]*.dbf',
        '[a-zA-Z]*.shx'
    ]
    for path, dirs, files in os.walk(os.path.abspath(temp_dir)):
        for extension in patterns:
            for filename in fnmatch.filter(files, extension):
                # Keyed by the 3-letter extension at the pattern's tail.
                files_to_return[extension[-3:]] = os.path.join(path, filename)
    # BUG FIX: the original indexed files_to_return['prj'] etc. directly,
    # so a missing part raised KeyError instead of the intended
    # InvalidUsage.  Use .get() for a safe presence check.
    if any(not files_to_return.get(ext) for ext in ('shp', 'prj', 'dbf', 'shx')):
        raise InvalidUsage(
            'A shapefile must contain shp, prj, dbf, shx files',
            status_code=500
        )
    return files_to_return
def extract_zip(filestream, temp_dir):
    """Validate and unpack an uploaded zip, returning the shapefile parts.

    :param filestream: uploaded file object (must be a .zip)
    :param temp_dir: directory to extract into
    :raises InvalidUsage: for missing/non-zip uploads, unsafe member
        paths, or incomplete shapefiles
    """
    if not filestream or not allowed_file(filestream.filename):
        raise InvalidUsage(
            'No filestream or no zipfile',
            status_code=500
        )
    with zipfile.ZipFile(filestream, 'r') as z:
        # SECURITY: uploads are untrusted; reject members that would
        # escape temp_dir via '..' or absolute paths (zip-slip).
        root = os.path.realpath(temp_dir)
        for member in z.namelist():
            target = os.path.realpath(os.path.join(root, member))
            if target != root and not target.startswith(root + os.sep):
                raise InvalidUsage(
                    'Zip archive contains an unsafe path',
                    status_code=500
                )
        z.extractall(temp_dir)
    files_to_return = get_shp_prj_dbf_shx_files_from_tree(temp_dir)
    return files_to_return
def get_shape_srid_encoding(files, temp_dir):
    """Bundle the .shp path with the SRID (read from the .prj file) and
    the attribute encoding (read from the .dbf file).

    *temp_dir* is unused but kept for interface compatibility.
    """
    return {
        'shape': files['shp'],
        'srid': get_srid_from_prj(files['prj']),
        'encoding': get_encoding_from_dbf(files['dbf']),
    }
def get_data_from_zipfile(file, temp_dir):
    """Extract an uploaded zip into *temp_dir* and describe the
    shapefile it contains (shape path, SRID, encoding)."""
    parts = extract_zip(file, temp_dir)
    return get_shape_srid_encoding(parts, temp_dir)
def get_data_from_files(filestreams, temp_dir):
    """Save the individually-uploaded shapefile parts into *temp_dir*
    and describe the resulting shapefile (shape path, SRID, encoding)."""
    for stream in filestreams:
        if stream:
            safe_name = secure_filename(stream.filename)
            stream.save(os.path.join(temp_dir, safe_name))
    parts = get_shp_prj_dbf_shx_files_from_tree(temp_dir)
    return get_shape_srid_encoding(parts, temp_dir)
def get_geojson(request, data):
    """Import the shapefile into PostGIS and return it as GeoJSON.

    *request* is unused but kept for interface compatibility.
    """
    table_name = shape2pgsql(
        app.config, data['shape'], data['srid'], data['encoding']
    )
    return geojson_from_table(CONN_STRING, table_name)
def get_data_from_request(files_from_request, temp_dir):
    """Dispatch on the number of uploaded files: 1 => zip, 4 => loose parts.

    Raises InvalidUsage for any other count.  Uses ``==`` instead of the
    original ``is`` comparisons — identity checks against int literals rely
    on CPython's small-int cache, an implementation detail.
    """
    files = files_from_request.getlist('file')
    data = None
    if len(files) == 1:
        data = get_data_from_zipfile(files[0], temp_dir)
    if len(files) == 4:
        data = get_data_from_files(files, temp_dir)
    if not data:
        raise InvalidUsage(
            'Please upload a zip or 4 files(shp, dbf, shx, prj)',
            status_code=500
        )
    return data
@app.route('/api/import/shp2pgsql', methods=['POST'])
def import_shapefile_shp2pgsql():
    """POST endpoint: import an uploaded shapefile and answer with GeoJSON.

    The temporary extraction directory is now removed in a ``finally`` block,
    so it no longer leaks when get_data_from_request()/get_geojson() raise.
    """
    if request.method != 'POST':
        raise InvalidUsage('Method allowed is POST', status_code=405)
    temp_dir = tempfile.mkdtemp()
    try:
        data = get_data_from_request(request.files, temp_dir)
        if not data or not data['shape']:
            raise InvalidUsage('No data or no shp file found', status_code=500)
        geojson_data = get_geojson(request, data)
    finally:
        shutil.rmtree(os.path.abspath(temp_dir), ignore_errors=True)
    return Response(
        json.dumps([{'data': geojson_data}]), mimetype='application/json')
@app.errorhandler(InvalidUsage)
def handle_invalid_usage(error):
    """Serialise an InvalidUsage exception into a JSON error response."""
    response = jsonify(error.to_dict())
    response.status_code = error.status_code
    return response
# TODO: Make this work
@app.route('/api/import/ogr2ogr')
def import_shapefile_ogr2ogr():  # pragma: no cover
    """Experimental ogr2ogr-based import; input path is hard-coded (see TODO)."""
    zip_file = '/vagrant/shapefiles/streetshighways.zip'
    filename = create_shapefile(zip_file)
    ogr2ogr(app.config, filename)
    return '200'
if __name__ == '__main__':  # pragma: no cover
    app.run(port=4002, host='0.0.0.0', debug=True)
|
986,055 | 5150e3cd9ba4d45619e3f2ad2acef2705ee157c9 | # Copyright 2023 Matrix.org Foundation C.I.C.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import TYPE_CHECKING
from authlib.jose import JsonWebToken, JWTClaims
from authlib.jose.errors import BadSignatureError, InvalidClaimError, JoseError
from synapse.api.errors import Codes, LoginError
from synapse.types import JsonDict, UserID
if TYPE_CHECKING:
from synapse.server import HomeServer
class JwtHandler:
    """Validates JSON Web Tokens presented via the ``/login`` endpoint.

    Configuration (secret, algorithm, issuer, audiences and the claim that
    carries the user identifier) comes from ``hs.config.jwt``.
    """

    def __init__(self, hs: "HomeServer"):
        self.hs = hs
        # Shared secret / key used to verify token signatures.
        self.jwt_secret = hs.config.jwt.jwt_secret
        # Name of the claim holding the user identifier (e.g. "sub").
        self.jwt_subject_claim = hs.config.jwt.jwt_subject_claim
        self.jwt_algorithm = hs.config.jwt.jwt_algorithm
        self.jwt_issuer = hs.config.jwt.jwt_issuer
        self.jwt_audiences = hs.config.jwt.jwt_audiences

    def validate_login(self, login_submission: JsonDict) -> str:
        """
        Authenticates the user for the /login API

        Args:
            login_submission: the whole of the login submission
                (including 'type' and other relevant fields)

        Returns:
            The user ID that is logging in.

        Raises:
            LoginError if there was an authentication problem.
        """
        token = login_submission.get("token", None)
        if token is None:
            raise LoginError(
                403, "Token field for JWT is missing", errcode=Codes.FORBIDDEN
            )

        # Restrict decoding to the single configured algorithm; this guards
        # against algorithm-confusion downgrades.
        jwt = JsonWebToken([self.jwt_algorithm])
        claim_options = {}
        if self.jwt_issuer is not None:
            claim_options["iss"] = {"value": self.jwt_issuer, "essential": True}
        if self.jwt_audiences is not None:
            claim_options["aud"] = {"values": self.jwt_audiences, "essential": True}
        try:
            claims = jwt.decode(
                token,
                key=self.jwt_secret,
                claims_cls=JWTClaims,
                claims_options=claim_options,
            )
        except BadSignatureError:
            # We handle this case separately to provide a better error message
            raise LoginError(
                403,
                "JWT validation failed: Signature verification failed",
                errcode=Codes.FORBIDDEN,
            )
        except JoseError as e:
            # A JWT error occurred, return some info back to the client.
            raise LoginError(
                403,
                "JWT validation failed: %s" % (str(e),),
                errcode=Codes.FORBIDDEN,
            )

        try:
            claims.validate(leeway=120)  # allows 2 min of clock skew

            # Enforce the old behavior which is rolled out in productive
            # servers: if the JWT contains an 'aud' claim but none is
            # configured, the login attempt will fail
            if claims.get("aud") is not None:
                if self.jwt_audiences is None or len(self.jwt_audiences) == 0:
                    raise InvalidClaimError("aud")
        except JoseError as e:
            raise LoginError(
                403,
                "JWT validation failed: %s" % (str(e),),
                errcode=Codes.FORBIDDEN,
            )

        user = claims.get(self.jwt_subject_claim, None)
        if user is None:
            raise LoginError(403, "Invalid JWT", errcode=Codes.FORBIDDEN)

        return UserID(user, self.hs.hostname).to_string()
|
986,056 | 5e6980367e89e4e41595148ad714994da70aaf1b | import RPi.GPIO as GPIO
from time import sleep
# Alternate two outputs (BCM pins 20 and 21): each is driven high for five
# seconds and low for five seconds, forever, until Ctrl-C.
GPIO.setmode(GPIO.BCM)
GPIO.setup((20, 21), GPIO.OUT)
try:
    while True:
        GPIO.output(20, GPIO.HIGH)
        sleep(5.0)
        GPIO.output(20, GPIO.LOW)
        sleep(5.0)
        GPIO.output(21, GPIO.HIGH)
        sleep(5.0)
        GPIO.output(21, GPIO.LOW)
        sleep(5.0)
except KeyboardInterrupt:
    # Ctrl-C just ends the loop; fall through to release the pins.
    pass
GPIO.cleanup()
986,057 | ade5ac502cec6c68140792f309ab101c998f6f7c | from django.apps import AppConfig
class ExperimentsConfig(AppConfig):
    """Django app config for the experiments app; wires up signal handlers."""
    name = 'experiments'
    verbose_name = "Experiments"

    def ready(self):
        # Importing the module registers the signal receivers as a side
        # effect; the names themselves are unused (hence the noqa).
        from experiments.signals import (  # noqa
            new_experiment,
            experiment_pre_deleted,
            experiment_post_deleted,
            new_experiment_job,
            new_experiment_job_status,
            new_experiment_metric
        )
986,058 | 4e0a879982fa967632ee34645bfdafffb54df5ce | from django.db import models
class Piece(models.Model):
    """A named piece; primary key is the explicit ``piece_id`` AutoField."""
    class Meta:
        managed = True

    piece_id = models.AutoField(primary_key=True)
    piece_name = models.CharField(max_length=100)

    def __str__(self):
        return self.piece_name
def notas(*n, sit=False):
    """Summarise one or more grades.

    :param n: one or more numeric grades
    :param sit: when True, also report the pass/fail situation
    :return: dict with total count, highest, lowest, mean and (optionally)
             the situation string APROVADO / RECUPERAÇÃO / REPROVADO
    """
    resumo = dict(
        total=len(n),
        maior=max(n),
        menor=min(n),
        media=sum(n) / len(n),
    )
    if sit:
        media = resumo['media']
        if media >= 7:
            resumo['situacao'] = 'APROVADO'
        elif media >= 5:
            resumo['situacao'] = 'RECUPERAÇÃO'
        else:
            resumo['situacao'] = 'REPROVADO'
    return resumo
# Demo: summarise three grades with the situation flag, then show the help.
resp = notas(2.5, 1.8, 2.2, sit=True)
print(resp)
help(notas)
986,060 | aa843db942dd4c2f9c26c676cdaa93c9820b08b0 | """A web application for tracking projects, students, and student grades."""
from flask import Flask, request, render_template
import hackbright
app = Flask(__name__)


@app.route("/")
def display_home():
    """Render the landing page."""
    return render_template("home.html")


@app.route("/student-search")
def get_student_form():
    """Show the form for searching for a student."""
    return render_template("student_search.html")
@app.route("/student")
def get_student():
    """Show information about a student.

    The student's github handle comes from the ``github`` query parameter;
    grades are listed per project title.
    """
    github = request.args.get('github')

    first, last, github = hackbright.get_student_by_github(github)

    title_grade_list = hackbright.get_grades_by_github(github)

    html = render_template("student_info.html",
                           first=first,
                           last=last,
                           github=github,
                           title_grade_list=title_grade_list)
    return html
@app.route("/add-student")
def show_add_student_form():
    """Show the form for adding a student."""
    return render_template("add_student_form.html")


@app.route("/confirm-added", methods=['POST'])
def add_student():
    """Create a student record from the posted form and confirm it."""
    # NOTE(review): the route only accepts POST, so the guard below is always
    # true; if it were ever false, `html` would be referenced while unbound.
    if request.method == "POST":
        first = request.form.get('first_name')
        last = request.form.get('last_name')
        github = request.form.get('github')

        hackbright.make_new_student(first, last, github)

        html = render_template("added_student_confirmation.html",
                               first=first,
                               last=last,
                               github=github)
    return html
@app.route("/project", methods=['GET'])
def get_project_info():
    """Show the info for the project named by the ``project`` query parameter."""
    title = request.args.get('project')

    project_info_list = hackbright.get_project_by_title(title)

    html = render_template("project_info.html",
                           project_info_list=project_info_list)
    return html


if __name__ == "__main__":
    # Connect to the database before serving; debug mode is for development.
    hackbright.connect_to_db(app)
    app.run(debug=True)
|
986,061 | 8fa6eec37a6d5f081f192ac63f59e0f20e98273b | #!/usr/bin/python
from gstate import GState
from ball import Ball
from paddle import Paddle
import random as r
from graphics import *
import time
# --- Q-learning hyper-parameters and game constants ---
disc_div = 12  # number of buckets per discretised axis
Ne = 128 #72 -- exploration threshold: state-actions tried fewer than Ne times get an optimistic value
gamma = .72#0.373 (2/0.3)^gamma = 0.1
alpha_fac = float(771) #434 -- learning-rate decay constant
#Let Navg = game_ct*(2/0.3)*10/2/(12*12*12*2*3+1)
#Ne = Navg/4 (explore 1/3 of time)
#alpha_fac = Navg/9 (last learning should be weighted 1/10)
'''Good Result 1: 9.08 avg
Ne = 100
gamma = 0.75
alpha_fac = 1000
'''
windows_size = 640       # square window edge, in pixels
refresh_rate = 1.0/30.0  # seconds per rendered frame
pos_actions = [-1,0,1]   # paddle moves: up / stay / down
pos_vel_x = [-1,1]       # discretised horizontal ball velocity signs
pos_vel_y = [-1,0,1]     # discretised vertical ball velocity signs
def trainGame(game_ct = 100000):
    """Train a tabular Q-learning agent for `game_ct` games of 1-player pong.

    Returns (max_bounces, state_space, fail_state) where state_space is the
    5-D table of GState objects indexed by discretised ball x/y, velocity
    sign indices and paddle bucket, and fail_state is the absorbing state
    for the ball passing the paddle.
    """
    ct = 0
    bounces = 0
    max_bounces = 0
    ball = Ball()
    paddle = Paddle()
    pos_actions = [-1,0,1]
    state_space = [[[[[None for x in range(disc_div)] for y in pos_vel_y] for z in pos_vel_x] for w in range(disc_div)] for v in range(disc_div)]
    fail_state = GState(disc_div,0,0,0,0) #Ball x > 1 aka failure
    for x in range(disc_div):
        for y in range(disc_div):
            for velx in pos_vel_x:
                for vely in pos_vel_y:
                    for p_y in range(disc_div):
                        state_space[x][y][pos_vel_x.index(velx)][pos_vel_y.index(vely)][p_y] = GState(x,y,velx,vely,p_y)
    print game_ct
    prev_state = None
    prev_action = None
    prev_R = None
    while(ct < game_ct):
        cur_state = None
        disc_vals = DiscretizeCurrentState(ball,paddle)
        if(disc_vals[0]>=disc_div):
            cur_state = fail_state
        else:
            cur_state = state_space[disc_vals[0]][disc_vals[1]][disc_vals[2]][disc_vals[3]][disc_vals[4]]
        #cur_state = next_state
        if(prev_state!=None):
            # TD update with a decaying learning rate alpha_fac/(alpha_fac+N).
            prev_state.incrN(prev_action)
            newQ = prev_state.getQ(prev_action)
            newQ = newQ + (alpha_fac/(alpha_fac+prev_state.getN(prev_action)))*(prev_R+gamma*max([cur_state.getQ(ap) for ap in pos_actions])-newQ)
            prev_state.setQ(newQ,prev_action)
            #print newQ
        if(cur_state.isTerminal(disc_div)):
            # Episode over: reset the game and track the best bounce streak.
            prev_state = None
            prev_action = None
            prev_R = None
            ball.reset()
            paddle.reset()
            ct+=1
            if(bounces>max_bounces):
                max_bounces = bounces
            bounces = 0
        else:
            # Exploration function Fn gives an optimistic value to actions
            # tried fewer than Ne times; ties are broken at random.
            action_values = [Fn(cur_state.getQ(a),cur_state.getN(a)) for a in pos_actions] #[value for action -1, value for action 0, value for action 1]
            max_a = max(action_values)
            max_action_choices = []
            for a in pos_actions:
                if(action_values[a+1] == max_a):
                    max_action_choices.append(a)
            r.shuffle(max_action_choices)
            prev_action = max_action_choices[0]
            paddle.movePaddle(prev_action)
            prev_R = ball.updatePos(paddle.getY(),paddle.getHeight())
            prev_state = cur_state
            if(prev_R>0):
                bounces+=1
        # Print progress once per whole percent completed.
        if(float(ct)*100/game_ct == ct*100/game_ct and ct*100/game_ct>0):
            print str(ct*100/game_ct)+"%"
            #drawGame(ball,paddle,win,cir,pad)
    ret = (max_bounces,state_space,fail_state)
    return ret
def testGame(state_space, fail_state, test_ct = 1000):
    """Play `test_ct` greedy games with the learned Q-table, render the last
    ten, and return the average number of paddle bounces per game.
    """
    win = GraphWin("1P Pong",windows_size,windows_size)
    ball = Ball()
    paddle = Paddle()
    cir,pad = setUpDrawGame(ball,paddle,win)
    ct = 0
    total_bounces = 0
    prev_state = None
    prev_action = None
    prev_R = None
    while(ct < test_ct):
        cur_state = None
        disc_vals = DiscretizeCurrentState(ball,paddle)
        if(disc_vals[0]>=disc_div):
            cur_state = fail_state
        else:
            cur_state = state_space[disc_vals[0]][disc_vals[1]][disc_vals[2]][disc_vals[3]][disc_vals[4]]
        #cur_state = next_state
        if(cur_state.isTerminal(disc_div)):
            prev_state = None
            prev_action = None
            prev_R = None
            ball.reset()
            paddle.reset()
            ct+=1
        else:
            # Greedy action selection (no exploration bonus at test time).
            action_values = [cur_state.getQ(a) for a in pos_actions] #[value for action -1, value for action 0, value for action 1]
            max_a = max(action_values)
            max_action_choices = []
            for a in pos_actions:
                if(action_values[a+1] == max_a):
                    max_action_choices.append(a)
            r.shuffle(max_action_choices)
            prev_action = max_action_choices[0]
            paddle.movePaddle(prev_action)
            prev_R = ball.updatePos(paddle.getY(),paddle.getHeight())
            prev_state = cur_state
            if(prev_R>0):
                total_bounces+=1
            if(test_ct-ct<10):
                # Only render the last ten games to keep evaluation fast.
                drawGame(ball,paddle,win,cir,pad)
    win.getMouse()
    win.close()
    ret = float(total_bounces)/test_ct
    return ret
def Fn(u, n):
    """Exploration function: return an optimistic value (100) until the
    state-action pair has been tried at least Ne times, then its Q-value.
    """
    return 100 if n < Ne else u
def DiscretizeCurrentState(ball,paddle):
    """Map the continuous game state onto the discrete Q-table indices.

    Returns (ball_x, ball_y, vel_x_index, vel_y_index, paddle_y); ball_x is
    disc_div (the failure bucket) once the ball passes the paddle.
    """
    ball_x,ball_y = ball.getPos()
    if(ball_x>1):
        return (disc_div,0,0,0,0)
    if(ball_x == 1):
        ball_x = disc_div-1
    else:
        ball_x = (int)(ball_x * disc_div)
    if(ball_y == 1):
        ball_y = disc_div-1
    else:
        ball_y = (int)(ball_y * disc_div)
    vel_x,vel_y = ball.getVel()
    if(vel_x>0):
        vel_x = 1
    else:
        vel_x = -1
    # NOTE(review): vel_x is +/-1 at this point, so abs(vel_x) < 0.65 can
    # never be true -- this branch is dead code; it likely meant to test the
    # raw velocity before the sign collapse.  Confirm before removing.
    if(abs(vel_x)<0.65):
        vel_x*=0.5
    if(vel_y>0.015):
        vel_y = 1
    elif(vel_y<-0.015):
        vel_y = -1
    else:
        vel_y = 0
    paddle_y = paddle.getY()
    if(paddle_y == 1-paddle.getHeight()):
        paddle_y = disc_div-1
    else:
        paddle_y = (int)(disc_div*paddle_y/(1-paddle.getHeight()))
    return (ball_x,ball_y,pos_vel_x.index(vel_x), pos_vel_y.index(vel_y), paddle_y)
def saveStates(state_space,fail_state, avg):
    """Dump all Q-values to '<avg>_<Ne>_<gamma>_<alpha_fac>.txt' (dots become
    dashes): one state per line, three space-separated action values, with
    the fail state's values on the final line.
    """
    f_name = str(avg)+'_'+str(Ne)+'_'+str(gamma)+'_'+str(alpha_fac)
    f_name = f_name.replace('.','-')
    f_name+='.txt'
    f_out = open(f_name,'w')
    str_out = ''
    for x in range(disc_div):
        for y in range(disc_div):
            for velx in pos_vel_x:
                for vely in pos_vel_y:
                    for p_y in range(disc_div):
                        cur_state = state_space[x][y][pos_vel_x.index(velx)][pos_vel_y.index(vely)][p_y]
                        for a in pos_actions:
                            str_out+=str(cur_state.getQ(a))+' '
                        str_out = str_out[:-1]
                        str_out += '\n'
    for a in pos_actions:
        str_out+=str(fail_state.getQ(a))+' '
    str_out = str_out[:-1]
    f_out.write(str_out)
    f_out.close()
def loadStates(f_name):
    """Rebuild (state_space, fail_state) from a file written by saveStates;
    iteration order must match the save order exactly.
    """
    f_in = open(f_name,'r')
    str_in = []
    state_space = [[[[[None for x in range(disc_div)] for y in pos_vel_y] for z in pos_vel_x] for w in range(disc_div)] for v in range(disc_div)]
    fail_state = GState(disc_div,0,0,0,0) #Ball x > 1 aka failure
    for x in range(disc_div):
        for y in range(disc_div):
            for velx in pos_vel_x:
                for vely in pos_vel_y:
                    for p_y in range(disc_div):
                        str_in = f_in.readline().split(' ')
                        cur_state = GState(x,y,velx,vely,p_y)
                        for a in pos_actions:
                            cur_state.setQ(float(str_in[pos_actions.index(a)]),a)
                        state_space[x][y][pos_vel_x.index(velx)][pos_vel_y.index(vely)][p_y] = cur_state
    str_in = f_in.readline().split(' ')
    for a in pos_actions:
        fail_state.setQ(float(str_in[pos_actions.index(a)]),a)
    return state_space, fail_state
def setUpDrawGame(ball,paddle,win):
    """Draw the static scene (left wall, paddle, ball) and return the
    (cir, pad) shape handles that drawGame later moves incrementally.
    """
    left = Line(Point(0,0),Point(0,windows_size))
    left.setWidth(6)
    left.draw(win)
    pad = Line(Point(1*windows_size,paddle.getY()*windows_size),Point(windows_size,paddle.getY()*windows_size+paddle.getHeight()*windows_size))
    pad.setWidth(10)
    pad.draw(win)
    t = 0
    ball_x, ball_y = ball.getPos()
    cir = Circle(Point(ball_x*windows_size,ball_y*windows_size),windows_size/160)
    cir.draw(win)
    cir.setFill("black")
    return cir,pad
    #win.close()
def drawGame(ball,paddle,win,cir,pad):
    """Move the ball/paddle shapes to the current game coordinates and sleep
    one frame (refresh_rate) so the animation is visible.
    """
    ball_x, ball_y = ball.getPos()
    c_x, c_y = (cir.getCenter().getX(),cir.getCenter().getY())
    cir.move(ball_x*windows_size-c_x,ball_y*windows_size-c_y)
    p_y = pad.getP1().getY()
    pad.move(0,paddle.getY()*windows_size-p_y)
    time.sleep(refresh_rate)
# Train, evaluate, report timing and persist the learned Q-table.
t = time.time()
m_b,learned_states,fail_state = trainGame(100000)
time_run = time.time()-t;
t_b = testGame(learned_states,fail_state,2000)
print "Max Bounces: "+str(m_b)+"\nAverage Bounces/Game: "+str(t_b)
print "It took: "+str(time_run/60.0)+" min to complete"
saveStates(learned_states,fail_state,t_b)
#drawGame(None,None)
'''states, fail = loadStates('10-8105_128_0-72_771-0.txt')
t_b = testGame(states,fail,2000)
print "Average Bounces/Game: "+str(t_b)'''
|
986,062 | 4ae5d1b3fa7f7ee613003ab3b942b3682e68b489 | '''
自除数 是指可以被它包含的每一位数除尽的数。
例如,128 是一个自除数,因为 128 % 1 == 0,128 % 2 == 0,128 % 8 == 0。
还有,自除数不允许包含 0 。
给定上边界和下边界数字,输出一个列表,列表的元素是边界(含边界)内所有的自除数。
示例 1:
输入:
上边界left = 1, 下边界right = 22
输出: [1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 15, 22]
注意:
每个输入参数的边界满足 1 <= left <= right <= 10000。
来源:力扣(LeetCode)
链接:https://leetcode-cn.com/problems/self-dividing-numbers
著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。
'''
class Solution:
    def selfDividingNumbers(self, left: int, right: int) -> list:
        """Return all self-dividing numbers in [left, right] (LeetCode 728).

        A self-dividing number contains no digit 0 and is divisible by each
        of its digits.  Fixes the original ``List[int]`` annotation, which
        referenced ``typing.List`` without importing it and would raise
        NameError when the class body was executed.
        """
        result = []
        for num in range(left, right + 1):
            digits = []
            temp = num
            ok = True
            while temp > 0:
                digit = temp % 10
                if digit == 0:  # digit 0 disqualifies the number
                    ok = False
                    break
                digits.append(digit)
                temp //= 10
            if ok and all(num % d == 0 for d in digits):
                result.append(num)
        return result
|
986,063 | bfba533b7f64767beb5ba81c1c958e1ed669c923 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#----------------------------------------------------------------------
# Name: wx.tools.wxget
# Purpose: wx Based alternative to wget
#
# Author: Steve Barnes
#
# Created: 06-Aug-2017
# Copyright: (c) 2017-2018 by Steve Barnes
# Licence: wxWindows license
# Tags: phoenix-port, py3-port
#
# Module to allow cross platform downloads originally from answers to:
# https://stackoverflow.com/questions/22676/how-do-i-download-a-file-over-http-using-python
# by Stan and PabloG then converted to wx.
#----------------------------------------------------------------------
"""
wxget.py -- wx Version of wget utility for platform that don't have it already.
Usage:
wxget URL [DEST_DIR]
Where URL is a file URL and the optional DEST_DIR is a destination directory to
download to, (default is to prompt the user).
The --trusted option can be used to surpress certificate checks.
"""
from __future__ import (division, absolute_import, print_function, unicode_literals)
import sys
import os
import wx
import subprocess
import ssl
import pip
if sys.version_info >= (3,):
from urllib.error import (HTTPError, URLError)
import urllib.request as urllib2
import urllib.parse as urlparse
else:
import urllib2
from urllib2 import (HTTPError, URLError)
import urlparse
def get_docs_demo_url(demo=False):
    """Return the extras-download URL for the wxPython docs or demo tarball."""
    pkg = 'demo' if demo else 'docs'
    base_url = "https://extras.wxpython.org/wxPython4/extras/%s/wxPython-%s-%s.tar.gz"
    ver = wx.version().split(' ')[0]
    if ver.split('.')[0] != '4':
        raise ValueError("wx Versions before 4 not supported!")
    return base_url % (ver, pkg, ver)
def get_save_path(url, dest_dir, force=False):
    """ Get the file save location.

    Returns (url, filename); url comes back as None when the user cancelled
    the save dialog, or (with force) when *dest_dir* exists but is not a
    directory.  The current working directory is restored before returning.
    """
    old_dir = os.getcwd()
    if not dest_dir:
        dest_dir = os.getcwd()
    else:
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
    os.chdir(dest_dir)
    # Derive a default filename from the URL path component.
    filename = os.path.basename(urlparse.urlsplit(url)[2])
    if not filename:
        filename = 'downloaded.file'
    if not force:
        # Interactive: let the user confirm/adjust the destination.
        with wx.FileDialog(
                None, message="Save As ...", defaultDir=dest_dir,
                defaultFile=filename, style=wx.FD_SAVE | wx.FD_OVERWRITE_PROMPT
                ) as dlg:
            if dlg.ShowModal() == wx.ID_OK:
                dest_dir, filename = os.path.split(dlg.GetPath())
            else:
                url = None
    else:
        if not os.path.exists(dest_dir):
            os.makedirs(dest_dir)
        elif not os.path.isdir(dest_dir):
            url = None
    if dest_dir:
        filename = os.path.join(dest_dir, filename)
    os.chdir(old_dir)
    return (url, filename)
def download_wget(url, filename, trusted=False):
    """Attempt the download with an external ``wget`` process.

    Returns True on success, False when wget is missing or exits non-zero.
    """
    try:
        cmd = ["wget", url, '-O', filename]
        if trusted:
            # note some users may need to add "--no-check-certificate" on some sites
            cmd.append('--no-check-certificate')
        print("Trying:\n ", ' '.join(cmd))
        return subprocess.check_call(cmd) == 0
    except Exception:
        print("wget did not work or not installed - trying urllib")
        return False
def download_urllib(url, filename):
    """Download *url* to *filename* with urllib, showing a wx progress dialog.

    Returns True when the file was written and is non-empty.  Fixes two
    defects: the completion message concatenated the literal string
    "message+\\nDONE!" instead of the *message* variable, and (since the
    module imports __future__ division) the progress range/positions were
    floats where wx.ProgressDialog expects ints.
    """
    print("Trying to Download via urllib from:\n ", url)
    keep_going = True
    try:
        url_res = urllib2.urlopen(url)
    except (HTTPError, URLError, ssl.CertificateError) as err:
        print("Error: %s" % err)
        return False
    with open(filename, 'wb') as outfile:
        block_sz = 8192
        meta = url_res.info()
        # Python 2/3 compatibility for reading response headers.
        meta_func = meta.getheaders if hasattr(meta, 'getheaders') else meta.get_all
        meta_length = meta_func("Content-Length")
        file_size = None
        if meta_length:
            file_size = int(meta_length[0])
        message = "Downloading: {0}\nBytes: {1}\n".format(url, file_size)
        dstyle = wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_AUTO_HIDE
        if file_size:
            progress = wx.ProgressDialog('Downloading', message,
                                         maximum=1 + file_size // block_sz,
                                         style=dstyle)
        else:
            progress = wx.ProgressDialog('Downloading', message, style=dstyle)

        file_size_dl = 0
        while keep_going:
            read_buffer = url_res.read(block_sz)
            if not read_buffer:
                progress.Update(file_size_dl // block_sz, message + "\nDONE!")
                wx.Sleep(0.2)
                break

            file_size_dl += len(read_buffer)
            outfile.write(read_buffer)
            status = "{0:16}".format(file_size_dl)
            if file_size:
                status += " [{0:6.2f}%]".format(file_size_dl * 100 / file_size)
            (keep_going, dummy_skip) = progress.Update(file_size_dl // block_sz,
                                                       message + status)
            wx.Sleep(0.08)  # Give the GUI some update time
        progress.Destroy()
    result = os.path.exists(filename) and os.stat(filename).st_size > 0
    return result
def download_pip(url, filename, force=False, trusted=False):
    """Try to download *url* to *filename* by (ab)using ``pip download``.

    Returns True when the target file exists and is non-empty afterwards.
    Removes the original dead ``result`` assignments: success was always
    judged by the artefact on disk, never by pip's exit status.
    """
    download_dir = os.path.split(filename)[0]
    if len(download_dir) == 0:
        download_dir = '.'
    print("Trying to use pip to download From:\n ", url, 'To:\n ', filename)
    cmds = ['pip', 'download', url, '--dest', download_dir, "--no-deps",
            '--exists-action', 'i']
    if force:
        cmds.append('--no-cache-dir')
    if trusted:
        host = '/'.join(url.split('/')[:3])  # take up to http://something/ as host
        cmds.extend(['--trusted-host', host])
    if force and os.path.exists(filename):
        print("Delete Existing", filename)
        os.unlink(filename)
    print("Running pip", ' '.join(cmds))
    try:
        print("\nAbusing pip so expect possible error(s) in the next few lines.")
        result = subprocess.check_call(cmds)
        print(result)
    except (FileNotFoundError, subprocess.CalledProcessError) as Error:
        print("Download via pip may have Failed!")
        print(Error)
    # Success is judged by the artefact, not by pip's exit status.
    return os.path.exists(filename) and os.stat(filename).st_size > 0
def download_file(url, dest=None, force=False, trusted=False):
    """
    Download and save a file specified by url to dest directory, with force will
    operate silently and overwrite any existing file.

    Tries wget, then urllib, then pip, in order.  Returns the saved filename,
    'Aborted!' when the user cancelled the save dialog, or
    "FAILURE or Abort!" after showing an error dialog.
    """
    url, filename = get_save_path(url, dest, force)
    # NOTE(review): keep_going is never used in this function.
    keep_going = True
    success = False
    if url is None:
        return 'Aborted!'
    if url:
        success = download_wget(url, filename, trusted)  # Try wget
        if not success:
            success = download_urllib(url, filename)  # Try urllib
        if not success:
            success = download_pip(url, filename, force, trusted)  # Try pip
    if not success:
        split_url = url.split('/')
        msg = '\n'.join([
            "\n\nERROR in Web Access! - You may be behind a firewall!",
            "-" * 52,
            "You should be able to bybass this by using a browser to download:",
            "\t%s\nfrom:\t%s\nthen copying the download file to:\n\t%s" % (
                split_url[-1], '/'.join(split_url[:-1]), filename),
        ])
        print(msg, '\n')
        wx.MessageBox(msg, caption='WDOWNLOAD ERROR!',
                      style=wx.OK|wx.CENTRE|wx.ICON_ERROR)
        return "FAILURE or Abort!"
    return filename
def main(args=sys.argv):
    """ Entry point for wxget.

    Parses the ``--force`` and ``--trusted`` flags plus URL and optional
    destination directory, then downloads.  Fixes a bug where the
    ``--trusted`` branch called ``args.remove(force_flag)``: giving
    ``--trusted`` without ``--force`` raised ValueError, and with both flags
    the trusted flag was left in args to be mistaken for the URL/dest.
    """
    APP = wx.App()
    dest_dir = '.'
    force_flag = '--force'
    trusted_flag = '--trusted'
    force = False
    trusted = False
    if force_flag in args:
        force = True
        args.remove(force_flag)
    if trusted_flag in args:
        trusted = True
        args.remove(trusted_flag)  # was: args.remove(force_flag)
    if len(args) > 2:
        dest_dir = args[2]
    else:
        dest_dir = None
    if len(args) > 1:
        url = args[1]
    else:
        # No URL given: show usage and offer to run the self-test download.
        print(__doc__)
        yes_no = wx.MessageBox(__doc__+"\n\nRUN TEST?", "wxget",
                               wx.YES_NO|wx.CENTER)
        if yes_no == wx.YES:
            print("Testing with wxDemo")
            url = get_docs_demo_url()
        else:
            url = None
    if url:
        FILENAME = download_file(url=url, dest=dest_dir, force=force, trusted=trusted)
        print(FILENAME)

if __name__ == "__main__":  # Only run if this file is called directly
    main()
|
986,064 | 107ef05f775e7b69c64e56f80e3a23ac9402276d | # Generated by Django 2.0.3 on 2019-09-24 07:59
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated initial migration: creates the ``Logo`` model."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Logo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('Title', models.CharField(max_length=50)),
                ('Image', models.ImageField(blank=True, upload_to='logo/')),
                ('icon', models.ImageField(blank=True, upload_to='logo/')),
                ('Status', models.BooleanField(default=True)),
                ('header_image', models.ImageField(blank=True, upload_to='logo/')),
            ],
            options={
                'verbose_name': 'Logo',
                'verbose_name_plural': 'Logos',
            },
        ),
    ]
|
986,065 | 782e99246cf15a42b21b8d1d430578f84d8ca6ab | import requests
import pandas as pd
BASE_URL = 'https://query2.finance.yahoo.com'


def _make_request(url, response_field, **kwargs):
    """GET *url* with the standard Yahoo query params and unwrap the payload.

    Returns ``json[response_field]['result']``.  Recognised kwargs: lang,
    region, corsDomain (all defaulted).
    NOTE(review): no timeout or HTTP status check — a failed request will
    surface as a KeyError/JSON error rather than a clear HTTP error.
    """
    params = {
        'lang': kwargs.get('lang', 'en-US'),
        'region': kwargs.get('region', 'US'),
        'corsDomain': kwargs.get('corsDomain', 'finance.yahoo.com')
    }
    r = requests.get(url, params=params)
    json = r.json()
    return json[response_field]['result']
def get_currencies():
    """Get a list of currencies."""
    endpoint = BASE_URL + '/v1/finance/currencies'
    return _make_request(endpoint, 'currencies')
def get_exchanges():
    """Get a list of available exchanges and their suffixes.

    Scrapes the first HTML table from Yahoo's help page via pandas and
    returns it as a DataFrame.
    """
    url = 'https://help.yahoo.com/kb/finance-for-web/SLN2310.html?impressions=true'
    dataframes = pd.read_html(url)
    return dataframes[0]
def get_market_summary(**kwargs):
    """Get a market summary.

    Accepts the same optional kwargs as _make_request (lang, region,
    corsDomain).
    """
    url = '{}/v6/finance/quote/marketSummary'.format(BASE_URL)
    return _make_request(url, 'marketSummaryResponse', **kwargs)
def get_trending(region='US', **kwargs):
    """Get trending stocks for a specific region.

    Forwards **kwargs to the request — the original accepted lang/region/
    corsDomain overrides but silently dropped them, unlike the sibling
    helpers in this module.
    """
    url = '{}/v1/finance/trending/{}'.format(BASE_URL, region)
    return _make_request(url, 'finance', **kwargs)[0]
|
986,066 | 4d10f3dfa122fd8a1b77a53c86aa91b8f3fb97a5 | # Generated by Django 3.1.3 on 2021-01-06 11:27
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: makes the TeluguLevels name fields unique."""

    dependencies = [
        ('audio', '0024_telugulevels'),
    ]

    operations = [
        migrations.AlterField(
            model_name='telugulevels',
            name='name',
            field=models.CharField(max_length=20, unique=True),
        ),
        migrations.AlterField(
            model_name='telugulevels',
            name='nickname',
            field=models.CharField(max_length=6, unique=True),
        ),
        migrations.AlterField(
            model_name='telugulevels',
            name='shortname',
            field=models.CharField(max_length=2, unique=True),
        ),
    ]
|
986,067 | a2b24635acfdbcaef8ea8cbd91f2c51c4a2298b0 | t=int(input())
# For each test case: two runner positions (x1, x2), a target x3 between
# them, and their speeds v1, v2.  Whoever reaches x3 first wins.
for i in range(t):
    yoar = list(map(int, input().split()))
    x1 = yoar[0]
    x2 = yoar[1]
    x3 = yoar[2]
    v1 = yoar[3]
    v2 = yoar[4]
    # Travel times: Chef runs from x1 up to x3, Kefa from x2 down to x3.
    t1 = (x3 - x1) / v1
    t2 = (x2 - x3) / v2
    if (t1 < t2):
        print("Chef")
    elif (t2 < t1):
        print("Kefa")
    else:
        print("Draw")
986,068 | 842a9e6edbe03bc3414c7b6ba18f1b1b52ace0bd | """
该模块做测试使用,可以删除
"""
from flask import Blueprint, request, session
from flaskapp.create_flask import app
from mysql.create_db import db
from models.person import Person
# Blueprint used only for manual testing; routes live under /test.
blue_print_name = "/test"
user_blueprint = Blueprint(blue_print_name, __name__)


@user_blueprint.route('/create/')
def create():
    """Create all tables registered on the SQLAlchemy instance."""
    db.create_all()
    return '创建成功'
@user_blueprint.route('/insert/')
def insert():
    """Insert a fixed demo Person row."""
    person = Person(username="tt", password="ss")
    person.save()
    return '添加成功'
# NOTE: this registration must come last, after the blueprint and all of its
# routes above have been defined, so the blueprint can be attached here.
app.register_blueprint(blueprint=user_blueprint, url_prefix=blue_print_name)
|
986,069 | 650a108824f7eb1c4b8ef2e711b6291f51fc5fe4 | import gpt_2_simple as gpt2
# Load a fine-tuned GPT-2 checkpoint and print one generated sample.
checkpoint_dir = './data/checkpoint'

sess = gpt2.start_tf_sess()
gpt2.load_gpt2(sess, checkpoint_dir=checkpoint_dir)

# generate() returns a list when return_as_list=True; take the first sample.
single_text = gpt2.generate(sess, return_as_list=True, checkpoint_dir=checkpoint_dir)[0]
print('-----BEGIN GENERATED TEXT-----')
print(single_text)
print('-----END GENERATED TEXT-----')
|
986,070 | 1db02e04526229fff82957ec515445888a8fb5d6 | #!/usr/bin/env python3
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from PIL import Image
import io
import numpy as np
import getpass
import tkinter
import socket
import sys
# One-byte protocol opcodes shared with the server.
CODIGO_YES = bytearray([30])
CODIGO_NO = bytearray([31])
CODIGO_LOGOUT = bytearray([32])
CODIGO_ACK = bytearray([33])
def printPokemon():
    """Print the Pokemon ASCII-art logo.

    :returns: Nothing
    """
    print(" _ ")
    print(" _ __ ___ | | _____ _ __ ___ ___ _ __ ")
    print(" | '_ \ / _ \| |/ / _ \ '_ ` _ \ / _ \| '_ \ ")
    print(" | |_) | (_) | < __/ | | | | | (_) | | | |")
    print(" | .__/ \___/|_|\_\___|_| |_| |_|\___/|_| |_|")
    print(" |_| ")
def login(soc):
    """Send credentials to the server for validation; exit if they are invalid.

    :param soc: connection socket
    :type soc: socket
    :returns: Nothing
    """
    try:
        print("Ingrese el nombre de usuario con el que está registrado")
        user = input(" >> ")
        print("Ingrese la contraseña")
        psswd = getpass.getpass(" >> ")
        soc.send(user.encode(encoding='UTF-8'))
        soc.recv(1)
        soc.send(psswd.encode(encoding='UTF-8'))
        access = soc.recv(1)
        access = int.from_bytes(access, "big")
        # The server replies with 51 when the credentials are rejected.
        if access == 51:
            print("Datos incorrectos")
            sys.exit(1)
    except BrokenPipeError:
        terminarConexion()
def playPokemon(soc):
    """Run one capture interaction of Pokemon Go with the server.

    :param soc: connection socket
    :type soc: socket
    :returns: Nothing
    """
    try:
        # Opcode 10 asks the server for a pokemon to capture.
        soc.send(bytearray([10]))
        mensaje = soc.recv(2)
        idPokemon = mensaje[1]
        nombrePokemon = soc.recv(50).decode("utf-8")
        print("¿Capturar al Pokemon " + nombrePokemon + "?")
        print("Sí [S] o No [N]")
        message = input(" >> ")
        if message == 'S':
            soc.send(CODIGO_YES)
            jugando = True
            while jugando:
                mensaje = soc.recv(10)
                respuesta = mensaje[0]
                if respuesta == 21:  # you still have attempts left
                    print("¿Intentar captura de nuevo? Quedan " + str(mensaje[2]+1) + " intentos")
                    print("Sí [S] o No [N]")
                    message = input(" >> ")
                    mensaje_correcto = False
                    while mensaje_correcto == False:
                        if message == 'S' or message == 'N':
                            mensaje_correcto = True
                        else:
                            print("Opción inválida >:(")
                            print("¿Intentar captura de nuevo? Quedan " + str(mensaje[2]+1) + " intentos")
                            print("Sí [S] o No [N]")
                            message = input(" >> ")
                    if message == 'S':
                        soc.send(CODIGO_YES)
                    else:
                        jugando = False
                else:
                    if respuesta == 22:  # you captured the pokemon
                        print("Capturaste al pokemon...")
                        soc.send(CODIGO_ACK)
                        # Image transfer: 4-byte size, then the raw bytes.
                        img_size = int.from_bytes(soc.recv(4), "big")
                        soc.send(CODIGO_ACK)
                        img_bytes = soc.recv(img_size)
                        muestraPokemon(img_bytes)
                        jugando = False
                    if respuesta == 23:  # you ran out of attempts
                        print("Te quedaste sin intentos :(")
                        jugando = False
            cerrarSesion(soc)
        else:
            cerrarSesion(soc)
    except socket.timeout:  # no reply from the server
        terminarConTimeout(soc)
    except IndexError:  # the server signalled a timeout
        terminarConexion()
def muestraPokemon(bytes):
    """Display the assigned pokemon's image in a matplotlib window.

    :param bytes: raw image bytes to display (NOTE(review): the parameter
        name shadows the ``bytes`` builtin inside this function)
    :type bytes: bytearray
    :returns: Nothing
    """
    image = Image.open(io.BytesIO(bytes))
    data = np.array(image)
    plt.imshow(data)
    plt.axis('off')
    plt.show()
def muestraPokedex(soc):
    """Request and print the requesting user's Pokedex.

    :param soc: connection socket
    :type soc: socket
    :returns: Nothing
    """
    try:
        print("Mostrando Pokedex...")
        # Opcode 11 requests the pokedex; 24 acknowledges it.
        soc.send(bytearray([11]))
        respuesta = soc.recv(1)[0]
        if respuesta == 24:
            soc.send(CODIGO_ACK)
            size_pokedex = int.from_bytes(soc.recv(4), "big")
            soc.send(CODIGO_ACK)
            pokedex = []
            # Each entry is length-prefixed (1 byte); ACK after every read.
            for i in range(size_pokedex):
                pokemon_size = int.from_bytes(soc.recv(1), "big")
                soc.send(CODIGO_ACK)
                modelo = soc.recv(pokemon_size)
                pokemon = modelo.decode("utf-8")
                pokedex.append(pokemon)
            print(pokedex)
            #displayPokedex(pokedex)
    except socket.timeout:  # no reply from the server
        terminarConTimeout(soc)
    except IndexError:  # the server signalled a timeout
        terminarConexion()
def displayPokedex(pokedex):
    """Pretty-print the pokedex, two names per line.

    The original zip-based pairing silently dropped the last entry whenever
    the pokedex had an odd number of names (zip truncates at the shortest
    iterable); stepping through the list two at a time fixes that.

    :param pokedex: pokemon names to display
    :type pokedex: list of str
    :returns: Nothing
    """
    for start in range(0, len(pokedex), 2):
        print(" ".join(name + "," for name in pokedex[start:start + 2]))
def muestraCatalogo(soc):
    """Request and display the catalogue of available pokemon.

    :param soc: connection socket
    :type soc: socket
    :returns: Nothing
    """
    try:
        print("Mostrando catálogo...")
        # Opcode 12 requests the catalogue; 25 acknowledges it.
        soc.send(bytearray([12]))
        respuesta = soc.recv(1)[0]
        if respuesta == 25:
            soc.send(CODIGO_ACK)
            size_catalogo = int.from_bytes(soc.recv(4), "big")
            soc.send(CODIGO_ACK)
            catalogo = []
            # Each entry is length-prefixed (1 byte); ACK after every read.
            for i in range(size_catalogo):
                pokemon_size = int.from_bytes(soc.recv(1), "big")
                soc.send(CODIGO_ACK)
                modelo = soc.recv(pokemon_size)
                pokemon = modelo.decode("utf-8")
                catalogo.append(pokemon)
            displayCatalogo(catalogo)
    except socket.timeout:  # no reply from the server
        terminarConTimeout(soc)
    except IndexError:  # the server signalled a timeout
        terminarConexion()
def displayCatalogo(catalogo):
    """Pretty-print the catalogue, six names per line.

    The original zip-based grouping dropped the trailing entries (up to five
    of them) whenever len(catalogo) was not a multiple of six (zip truncates
    at the shortest iterable); slicing in strides of six fixes that.

    :param catalogo: available pokemon names
    :type catalogo: list of str
    :returns: Nothing
    """
    for start in range(0, len(catalogo), 6):
        print(" ".join(name + "," for name in catalogo[start:start + 6]))
def cerrarSesion(soc):
    """Perform a normal user logout by sending the logout opcode.

    :param soc: connection socket
    :type soc: Socket
    :returns: None
    """
    print("Terminando conexión...")
    soc.send(CODIGO_LOGOUT)
def terminarConexion():
    """Abort the client: the server reported that the response
    window (10 seconds) was exceeded.

    :returns: None (the process exits with status 1)
    """
    avisos = ("Tiempo de respuesta excedido: 10 segundos",
              "Terminando conexión...")
    for linea in avisos:
        print(linea)
    sys.exit(1)
def terminarConTimeout(soc):
    """Close the socket and abort: the server stopped answering in time.

    :param soc: connection socket
    :type soc: Socket
    :returns: None (the process exits with status 1)
    """
    avisos = ("Falló la conexión con el servidor...",
              "Terminando conexión...")
    for linea in avisos:
        print(linea)
    soc.close()
    sys.exit(1)
def main():
    """Main client entry point: connect to the server given on the command
    line, log in, then dispatch one menu action (play/pokedex/catalogue/quit).
    """
    soc = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        host = sys.argv[1]
        port = int(sys.argv[2])
    except IndexError:
        # NOTE(review): execution continues with host/port unbound here, so
        # the connect below would raise NameError - confirm intent.
        print("Debe ingresar los parametros")
    try:
        soc.connect((host, port))
        soc.settimeout(20)
    except:
        # NOTE(review): bare except also swallows the NameError above.
        print("Connection Error")
        sys.exit()
    login(soc)
    printPokemon()
    opcion_correcta = False
    try:
        # Re-prompt until a valid menu letter is entered.
        while opcion_correcta == False:
            print("Bienvenido a Pokemon Go! ¿Deseas capturar un Pokémon [P], revisar el Pokedex [X], revisar el catálogo? [C] o salir [S]?")
            message = input(" >> ")
            if message == 'S' or message == 'P' or message == 'C' or message == 'X' :
                opcion_correcta = True
    except socket.timeout: # no reply from the server
        terminarConTimeout(soc)
    except IndexError : # server signalled a timeout
        terminarConexion()
    if message != 'S':
        if message == 'P':
            playPokemon(soc)
        if message == 'X':
            muestraPokedex(soc)
        if message == 'C':
            muestraCatalogo(soc)
    else:
        cerrarSesion(soc)
# Script entry point.
if __name__ == "__main__":
    main()
|
986,071 | 973ccc81540e7697d6877d340d1bd7aedb68dbd4 | from PIL import Image
import glob
# Inclusive per-channel RGB band that identifies watermark pixels; these
# defaults are overwritten from user input in the __main__ block below.
r_left_border = 255
r_right_border = 255
g_left_border = 255
g_right_border = 255
b_left_border = 255
b_right_border = 255
# Colour used to paint over pixels that fall inside the band.
r_fill = 255
g_fill = 255
b_fill = 255
def restore_images(directory_path):
    """Restore every ``.jpg``/``.jpeg`` image found directly inside
    *directory_path*, overwriting each file in place.

    :param directory_path: folder holding the images to process
    """
    extensions = (".jpg", ".jpeg")
    # ``ext`` instead of the original ``type``, which shadowed the builtin;
    # a forward slash works on both Windows and POSIX, unlike the previous
    # hard-coded backslash pattern.
    for ext in extensions:
        for filename in glob.glob(directory_path + "/*" + ext):
            image = Image.open(filename)
            restore_image(image)
            image.save(filename)
def restore_image(image, borders=None, fill=None):
    """Replace, in place, every pixel whose RGB channels all fall inside the
    configured band with the fill colour.

    :param image: PIL image (any object with ``size``/``getpixel``/``putpixel``)
    :param borders: optional ``(r_lo, r_hi, g_lo, g_hi, b_lo, b_hi)`` band;
        defaults to the module-level ``*_border`` globals
    :param fill: optional ``(r, g, b)`` replacement colour; defaults to the
        module-level ``*_fill`` globals
    """
    if borders is None:
        borders = (r_left_border, r_right_border,
                   g_left_border, g_right_border,
                   b_left_border, b_right_border)
    if fill is None:
        fill = (r_fill, g_fill, b_fill)
    r_lo, r_hi, g_lo, g_hi, b_lo, b_hi = borders
    width, height = image.size
    for i in range(width):
        for j in range(height):
            # Fetch the pixel once instead of three separate getpixel calls.
            r, g, b = image.getpixel((i, j))[:3]
            if r_lo <= r <= r_hi and g_lo <= g <= g_hi and b_lo <= b <= b_hi:
                image.putpixel((i, j), fill)
# Interactive entry point: read the RGB band and fill colour from the user,
# then process every image under ./images_for_restore in place.
if __name__ == '__main__':
    r_left_border = int(input("Enter bottom value for Red (0-255): "))
    r_right_border = int(input("Enter top value for Red (0-255): "))
    g_left_border = int(input("Enter bottom value for Green (0-255): "))
    g_right_border = int(input("Enter top value for Green (0-255): "))
    b_left_border = int(input("Enter bottom value for Blue (0-255): "))
    b_right_border = int(input("Enter top value for Blue (0-255): "))
    r_fill = int(input("\nEnter Red value to paint the watermark (0-255): "))
    g_fill = int(input("Enter Green value to paint the watermark (0-255): "))
    b_fill = int(input("Enter Blue value to paint the watermark (0-255): "))
    # Destructive operation: require an explicit confirmation first.
    print("\nAll images placed in the folder will be changed. Save a copy"
          " of them before starting recovery! Are you sure you want to change the images?\n(y/n)")
    confirmation = input()
    if confirmation == "y":
        directory_path = '.\images_for_restore'
        restore_images(directory_path)
        print("\nRestore successful")
|
986,072 | 45f192ba89aeb8705a0ca78789fbb1017e6fda52 | from django.urls import path
from . import views
urlpatterns = [
    path('home/', views.index, name="index"),
    # NOTE(review): this route duplicates 'home/' above, so Django always
    # dispatches to views.index and homeview is unreachable by URL - confirm
    # the intended path for homeview.
    path('home/', views.homeview, name="homeview"),
    path(r'produkt/<int:id>/', views.produkt, name='produkt'),
    path(r'dyqan/<int:id>/', views.dyqan, name='dyqan'),
    path(r'dyqan/<int:id>/inventar/', views.inventar, name='inventar'),
    path('accounts/sign_up/', views.sign_up, name='sign_up'),
    path('accounts/login/', views.login_request, name='login_request'),
    path('accounts/logged_out/', views.logout_request, name="logout_request"),
    path(r'home/KerkoDyqan/', views.kerko_dyqan, name='kerko_dyqan'),
    path(r'home/KerkoProdukt/', views.kerko_produkt, name='kerko_produkt'),
    path(r'dyqan/bli/', views.shto_ne_shporte, name='shto_ne_shporte'),
    path(r'dyqan/sukses/', views.blerje, name='blerje'),
]
986,073 | 3c348856243702b6cf8328228bb01953f9b33153 | #!/usr/bin/env python
import os
import re
import here
import tornado.httpserver
import tornado.ioloop
import tornado.options
import tornado.web
import redis.client
from tornado.options import define, options
from tornado_utils.routes import route
import handlers
import settings
# Command-line flags (tornado.options); values are read via ``options.<name>``.
define("debug", default=False, help="run in debug mode", type=bool)
define("database_name", default=settings.DATABASE_NAME, help="db name")
define("port", default=8000, help="run on the given port", type=int)
class Application(tornado.web.Application):
    """Tornado application: auto-discovers UI modules, wires routed handlers,
    and attaches Redis and MongoDB connections. (Python 2 code.)
    """
    def __init__(self, database_name=None):
        # Import ui_modules and collect every tornado.web.UIModule subclass
        # whose name looks like a class (CamelCase).
        _ui_modules = __import__('ui_modules', globals(), locals(), ['ui_modules'], -1)
        ui_modules_map = {}
        for name in [x for x in dir(_ui_modules) if re.findall('[A-Z]\w+', x)]:
            thing = getattr(_ui_modules, name)
            try:
                if issubclass(thing, tornado.web.UIModule):
                    ui_modules_map[name] = thing
            except TypeError:  # pragma: no cover
                # most likely a builtin class or something
                pass
        # Handlers registered via the @route decorator elsewhere.
        routed_handlers = route.get_routes()
        app_settings = dict(
            title=settings.PROJECT_TITLE,
            template_path=os.path.join(os.path.dirname(__file__), "templates"),
            static_path=os.path.join(os.path.dirname(__file__), "static"),
            cookie_secret=settings.COOKIE_SECRET,
            debug=options.debug,
            # Console email backend in debug, pickle backend otherwise.
            email_backend=options.debug and \
                'tornado_utils.send_mail.backends.console.EmailBackend' \
                or 'tornado_utils.send_mail.backends.pickle.EmailBackend',
            admin_emails=settings.ADMIN_EMAILS,
            ui_modules=ui_modules_map,
            twitter_consumer_key=settings.TWITTER_CONSUMER_KEY,
            twitter_consumer_secret=settings.TWITTER_CONSUMER_SECRET,
        )
        # NOTE(review): ``if 1 or ...`` is always true, so the catch-all 404
        # handler is registered even in debug mode - confirm intent.
        if 1 or not options.debug:
            routed_handlers.append(
                tornado.web.url('/.*?', handlers.PageNotFoundHandler,
                                name='page_not_found')
            )
        super(Application, self).__init__(routed_handlers, **app_settings)
        # Shared connections, reachable from handlers as self.application.*
        self.redis = redis.client.Redis(settings.REDIS_HOST,
                                        settings.REDIS_PORT)
        from models import connection
        self.db = connection[database_name or settings.DATABASE_NAME]
def main(): # pragma: no cover
    """Parse CLI flags, start the HTTP server, and run the IO loop until
    interrupted. (Python 2 code: uses the print statement.)
    """
    tornado.options.parse_command_line()
    http_server = tornado.httpserver.HTTPServer(Application())
    print "Starting tornado on port", options.port
    http_server.listen(options.port)
    try:
        tornado.ioloop.IOLoop.instance().start()
    except KeyboardInterrupt:
        # Ctrl-C is a normal shutdown, not an error.
        pass
# Script entry point.
if __name__ == "__main__": # pragma: no cover
    main()
|
986,074 | 053fb34edc42a69458256bf839e767499cd7cf4f | import numpy as np
import tensorflow as tf
class MlpPolicy:
    """Gaussian MLP policy: a 3-hidden-layer tanh network producing action
    means, with learned (state-independent) log-variances for sampling.
    """
    def __init__(self, obs_dim, act_dim, hid1_mult, policy_logvar, obs_ph):
        # obs_dim/act_dim: observation and action sizes; hid1_mult scales the
        # first hidden layer; policy_logvar offsets the learned log-variances;
        # obs_ph is the observation placeholder fed at sampling time.
        self.obs_dim = obs_dim
        self.act_dim = act_dim
        self.hid1_mult = hid1_mult
        self.policy_logvar = policy_logvar
        self.obs_ph = obs_ph
    def build_network(self,):
        """Build the policy graph; returns (means, log_vars, lr)."""
        # hidden layer sizes determined by obs_dim and act_dim (hid2 is geometric mean)
        hid1_size = self.obs_dim * self.hid1_mult  # 10 empirically determined
        hid3_size = self.act_dim * 10  # 10 empirically determined
        hid2_size = int(np.sqrt(hid1_size * hid3_size))
        # heuristic to set learning rate based on NN size (tuned on 'Hopper-v1')
        self.lr = 9e-4 / np.sqrt(hid2_size)  # 9e-4 empirically determined
        # 3 hidden layers with tanh activations
        out = tf.compat.v1.layers.dense(self.obs_ph, hid1_size, tf.tanh,
                                        kernel_initializer=tf.compat.v1.random_normal_initializer(
                                            stddev=np.sqrt(1 / self.obs_dim)), name="h1_actor")
        out = tf.compat.v1.layers.dense(out, hid2_size, tf.tanh,
                                        kernel_initializer=tf.compat.v1.random_normal_initializer(
                                            stddev=np.sqrt(1 / hid1_size)), name="h2_actor")
        out = tf.compat.v1.layers.dense(out, hid3_size, tf.tanh,
                                        kernel_initializer=tf.compat.v1.random_normal_initializer(
                                            stddev=np.sqrt(1 / hid2_size)), name="h3_actor")
        # NOTE(review): fixed scale factor applied before the output layer;
        # presumably matches the environment's action range - confirm.
        out = out*250
        self.means = tf.compat.v1.layers.dense(out, self.act_dim,
                                               kernel_initializer=tf.compat.v1.random_normal_initializer(
                                                   stddev=np.sqrt(1 / hid3_size)), name="means")
        # logvar_speed is used to 'fool' gradient descent into making faster updates
        # to log-variances. heuristic sets logvar_speed based on network size.
        logvar_speed = (10 * hid3_size) // 48
        log_vars = tf.compat.v1.get_variable('logvars', (logvar_speed, self.act_dim), tf.float32,
                                             tf.compat.v1.constant_initializer(0.0))
        self.log_vars = tf.reduce_sum(input_tensor=log_vars, axis=0) + self.policy_logvar
        """ Sample from distribution, given observation """
        self.sampled_act = (self.means +
                            tf.exp(self.log_vars / 2.0) *
                            tf.random.normal(shape=(self.act_dim,)))
        print('Policy Params -- h1: {}, h2: {}, h3: {}, lr: {:.3g}, logvar_speed: {}'
              .format(hid1_size, hid2_size, hid3_size, self.lr, logvar_speed))
        return self.means, self.log_vars, self.lr
    def step(self, sess, obs):
        """Sample one action for observation *obs* in session *sess*."""
        feed_dict = {self.obs_ph: obs}
        return sess.run(self.sampled_act, feed_dict=feed_dict)
|
986,075 | 789642ef8f29079a4f0e234193d75ee25732c7e4 | import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import LabelEncoder
from sklearn.linear_model import LogisticRegression
import seaborn as sns
le = LabelEncoder()

# Text/categorical columns that must be label-encoded before modelling
# (includes the 'Churn' target).
CATEGORICAL_COLUMNS = [
    'gender', 'SeniorCitizen', 'Partner', 'Dependents', 'tenure',
    'PhoneService', 'MultipleLines', 'InternetService', 'OnlineSecurity',
    'OnlineBackup', 'DeviceProtection', 'TechSupport', 'StreamingTV',
    'StreamingMovies', 'Contract', 'PaperlessBilling', 'PaymentMethod',
    'Churn',
]

# Predictors used to fit/score the model: every categorical except the
# 'Churn' target, plus the two numeric charge columns (no encoding needed).
FEATURE_COLUMNS = CATEGORICAL_COLUMNS[:-1] + ['MonthlyCharges', 'TotalCharges']


def _load_and_encode(path):
    """Read a churn CSV, drop rows with missing values, and label-encode
    every categorical column in place.

    NOTE(review): fitting a fresh LabelEncoder per column *and* per file
    means train and test encodings are only consistent when both files
    contain identical category sets - confirm before trusting the score.
    """
    frame = pd.read_csv(path)
    frame.dropna(inplace=True)
    for column in CATEGORICAL_COLUMNS:
        frame[column] = le.fit_transform(frame[column])
    return frame


df = _load_and_encode('telecom_churn.csv')

Logistic = LogisticRegression(solver='newton-cg')
Logistic.fit(df[FEATURE_COLUMNS], df['Churn'])

test = _load_and_encode('telecom_churn_test.csv')
score = Logistic.score(test[FEATURE_COLUMNS], test['Churn'])
print(score)
plt.show()  # NOTE(review): nothing is plotted before this call
986,076 | 6712a109b4fd996cd5d2d4088732109b21d2eae9 | __author__ = 'sergejyurskyj'
D = {'a': 1, 'b': 2, 'c': 3}
print(D)
Ks = D.keys() # Sorting a view object doesn't work!
# Ks.sort() # AttributeError: 'dict_keys' object has no attribute 'sort'
print('#' * 52 + ' Force it to be a list and then sort')
Ks = list(Ks) # Force it to be a list and then sort
Ks.sort()
for k in Ks: print(k, D[k])
print('#' * 52 + ' Or you can use sorted() on the keys. orted() accepts any iterable , sorted() returns its result')
print(D)
Ks = D.keys() # Or you can use sorted() on the keys
for k in sorted(Ks): print(k, D[k]) # sorted() accepts any iterable , sorted() returns its result
print('#' * 52 + ' Better yet, sort the dict directly. dict iterators return keys')
print(D) # Better yet, sort the dict directly
for k in sorted(D): print(k, D[k]) # dict iterators return keys
|
986,077 | 69d789b7daf3aed9bbde4783427ad0792bcb870f | import csv
def cargar_datosCSV(ruta):
    """Read the CSV file at *ruta* and print its header, every record, and
    the type of the reader object, wrapped in banner lines."""
    with open(ruta) as fuente:
        lector = csv.reader(fuente)
        cabecera = next(lector)
        print("******------DATOS EN CSV-------*****")
        print(cabecera)
        print()
        # Remaining rows come straight from the reader, one list per line.
        for fila in lector:
            print(fila)
        print("La clase de la estructura es: ")
        print(type(lector))
        print("******------FIN DE DATOS EN CSV-------*****")
986,078 | 8719c87d728f4932067c299df2a9185dff81b609 | # -*- coding: utf-8 -*-
"""
Django rest_framework ViewSet filters.
"""
from uuid import UUID
from django.contrib.auth import get_user_model
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ObjectDoesNotExist
from django.db.models import Q
from django.http import Http404
from django.shortcuts import get_object_or_404
import six
from django_filters import rest_framework as django_filter_filters
from rest_framework import filters
from rest_framework_guardian.filters import ObjectPermissionsFilter
from onadata.apps.api.models import OrganizationProfile, Team
from onadata.apps.logger.models import Instance, Project, XForm, DataView, MergedXForm
from onadata.apps.api.viewsets.dataview_viewset import get_filter_kwargs
from onadata.apps.viewer.models import Export
from onadata.libs.permissions import exclude_items_from_queryset_using_xform_meta_perms
from onadata.libs.utils.common_tags import MEDIA_FILE_TYPES
from onadata.libs.utils.numeric import int_or_parse_error
# pylint: disable=invalid-name
User = get_user_model()
def _public_xform_id_or_none(export_id: int):
    """Return the XForm id behind *export_id* when the form (or its data)
    is publicly shared; otherwise return None."""
    export = Export.objects.filter(pk=export_id).first()
    if export is None:
        return None
    xform = export.xform
    if xform.shared_data or xform.shared:
        return export.xform_id
    return None
# pylint: disable=too-few-public-methods
class AnonDjangoObjectPermissionFilter(ObjectPermissionsFilter):
    """Anonymous user permission filter class."""

    def filter_queryset(self, request, queryset, view):
        """
        Anonymous user has no object permissions, return queryset as it is.
        """
        # Form may be addressed by the view's lookup field (pk/uuid) or by an
        # explicit ``xform_pk`` URL kwarg.
        form_id = view.kwargs.get(view.lookup_field, view.kwargs.get("xform_pk"))
        lookup_field = view.lookup_field
        # Soft-deleted forms are never visible, regardless of permissions.
        queryset = queryset.filter(deleted_at=None)
        if request.user.is_anonymous:
            return queryset
        if form_id:
            if lookup_field == "pk":
                int_or_parse_error(
                    form_id, "Invalid form ID. It must be a positive integer"
                )
            try:
                if lookup_field == "uuid":
                    # Match either the 32-char hex or the hyphenated form.
                    form_id = UUID(form_id)
                    form = queryset.get(Q(uuid=form_id.hex) | Q(uuid=str(form_id)))
                else:
                    xform_kwargs = {lookup_field: form_id}
                    form = queryset.get(**xform_kwargs)
            except ObjectDoesNotExist as non_existent_object:
                raise Http404 from non_existent_object
            # Check if form is public and return it
            if form.shared:
                if lookup_field == "uuid":
                    return queryset.filter(Q(uuid=form_id.hex) | Q(uuid=str(form_id)))
                return queryset.filter(Q(**xform_kwargs))
        # Fall back to the object-permission check for private forms.
        return super().filter_queryset(request, queryset, view)
# pylint: disable=too-few-public-methods
class EnketoAnonDjangoObjectPermissionFilter(AnonDjangoObjectPermissionFilter):
    """EnketoAnonDjangoObjectPermissionFilter
    Same as AnonDjangoObjectPermissionFilter but checks 'report_xform'
    permission when the view 'enketo' is accessed.
    """

    def filter_queryset(self, request, queryset, view):
        """Check report_xform permission when requesting for Enketo URL."""
        if view.action == "enketo":
            # Swap the permission template so the parent filter checks the
            # 'report' permission instead of the default change permission.
            # noqa pylint: disable=attribute-defined-outside-init
            self.perm_format = "%(app_label)s.report_%(model_name)s"
        return super().filter_queryset(request, queryset, view)
# pylint: disable=too-few-public-methods
class XFormListObjectPermissionFilter(AnonDjangoObjectPermissionFilter):
    """XFormList permission filter with using [app].report_[model] form."""

    # Permission template consumed by ObjectPermissionsFilter.
    perm_format = "%(app_label)s.report_%(model_name)s"
class XFormListXFormPKFilter:
    """Filter forms via 'xform_pk' param."""

    def filter_queryset(self, request, queryset, view):
        """Returns an XForm queryset filtered by the 'xform_pk' param."""
        xform_pk = view.kwargs.get("xform_pk")
        if xform_pk:
            try:
                xform_pk = int(xform_pk)
            except ValueError:
                # NOTE(review): a non-numeric xform_pk silently leaves the
                # queryset unfiltered - confirm this is intentional.
                pass
            else:
                queryset = queryset.filter(pk=xform_pk)
                # 404 rather than an empty list when the form does not exist.
                if queryset.count() == 0:
                    raise Http404
        return queryset
class FormIDFilter(django_filter_filters.FilterSet):
    """formID filter using the XForm.id_string."""

    # pylint: disable=invalid-name
    # OpenRosa clients send ``formID``; it maps onto XForm.id_string.
    formID = django_filter_filters.CharFilter(field_name="id_string")  # noqa

    # pylint: disable=missing-class-docstring
    class Meta:
        model = XForm
        fields = ["formID"]
# pylint: disable=too-few-public-methods
class OrganizationPermissionFilter(ObjectPermissionsFilter):
    """Organization profiles filter
    Based on the organization the profile is added to.
    """

    def filter_queryset(self, request, queryset, view):
        """Return a filtered queryset or all profiles if a getting a specific
        profile."""
        # Retrieving a single profile bypasses filtering entirely.
        if view.action == "retrieve" and request.method == "GET":
            return queryset.model.objects.all()
        filtered_queryset = super().filter_queryset(request, queryset, view)
        # Union of: organizations reachable via the user's team memberships,
        # and owners of profiles the permission filter already allowed.
        org_users = set(
            [group.team.organization for group in request.user.groups.all()] +
            [o.user for o in filtered_queryset]
        )
        return queryset.model.objects.filter(user__in=org_users, user__is_active=True)
# pylint: disable=too-few-public-methods
class XFormOwnerFilter(filters.BaseFilterBackend):
    """Restrict XForms to those owned by the ``owner`` query parameter."""

    owner_prefix = "user"

    # pylint: disable=unused-argument
    def filter_queryset(self, request, queryset, view):
        """Apply a case-insensitive username match when ``owner`` is given."""
        username = request.query_params.get("owner")
        if not username:
            return queryset
        lookup = {f"{self.owner_prefix}__username__iexact": username}
        return queryset.filter(**lookup)
# pylint: disable=too-few-public-methods
class DataFilter(ObjectPermissionsFilter):
    """Limit anonymous users to publicly shared data."""

    # pylint: disable=unused-argument
    def filter_queryset(self, request, queryset, view):
        """Return only ``shared_data`` forms for anonymous requests."""
        if not request.user.is_anonymous:
            return queryset
        return queryset.filter(Q(shared_data=True))
class InstanceFilter(django_filter_filters.FilterSet):
    """
    Instance FilterSet implemented using django-filter
    """

    # Allow filtering by submitter via either numeric id or username; both
    # resolve onto the ``user`` foreign key.
    submitted_by__id = django_filter_filters.ModelChoiceFilter(
        field_name="user",
        queryset=User.objects.all(),
        to_field_name="id",
    )
    submitted_by__username = django_filter_filters.ModelChoiceFilter(
        field_name="user",
        queryset=User.objects.all(),
        to_field_name="username",
    )
    media_all_received = django_filter_filters.BooleanFilter()

    # pylint: disable=missing-class-docstring
    class Meta:
        model = Instance
        # Lookups offered for each date field (exact/range plus calendar parts).
        date_field_lookups = [
            "exact",
            "gt",
            "lt",
            "gte",
            "lte",
            "year",
            "year__gt",
            "year__lt",
            "year__gte",
            "year__lte",
            "month",
            "month__gt",
            "month__lt",
            "month__gte",
            "month__lte",
            "day",
            "day__gt",
            "day__lt",
            "day__gte",
            "day__lte",
        ]
        generic_field_lookups = ["exact", "gt", "lt", "gte", "lte"]
        fields = {
            "date_created": date_field_lookups,
            "date_modified": date_field_lookups,
            "last_edited": date_field_lookups,
            "media_all_received": ["exact"],
            "status": ["exact"],
            "survey_type__slug": ["exact"],
            "user__id": ["exact"],
            "user__username": ["exact"],
            "uuid": ["exact"],
            "version": generic_field_lookups,
        }
# pylint: disable=too-few-public-methods
class ProjectOwnerFilter(filters.BaseFilterBackend):
    """Project `owner` filter."""

    owner_prefix = "organization"

    # pylint: disable=unused-argument
    def filter_queryset(self, request, queryset, view):
        """Match the ``owner`` parameter, always including the owner's
        public (non-deleted) projects."""
        owner = request.query_params.get("owner")
        if not owner:
            return queryset
        lookup = {f"{self.owner_prefix}__username__iexact": owner}
        owned = queryset.filter(**lookup)
        public = Project.objects.filter(
            shared=True, deleted_at__isnull=True, **lookup
        )
        return owned | public
# pylint: disable=too-few-public-methods
class AnonUserProjectFilter(ObjectPermissionsFilter):
    """Anonymous user project filter."""

    owner_prefix = "organization"

    def filter_queryset(self, request, queryset, view):
        """
        Anonymous user has no object permissions, return queryset as it is.
        """
        user = request.user
        project_id = view.kwargs.get(view.lookup_field)
        # Anonymous users only ever see public projects.
        if user.is_anonymous:
            return queryset.filter(Q(shared=True))
        if project_id:
            int_or_parse_error(
                project_id,
                "Invalid value for project_id. It must be a positive integer.",
            )
            # check if project is public and return it
            try:
                project = queryset.get(id=project_id)
            except ObjectDoesNotExist as non_existent_object:
                raise Http404 from non_existent_object
            if project.shared:
                return queryset.filter(Q(id=project_id))
        # Private projects fall through to the object-permission check.
        return super().filter_queryset(request, queryset, view)
# pylint: disable=too-few-public-methods
class TagFilter(filters.BaseFilterBackend):
    """Restrict a queryset to objects carrying the requested tags."""

    # pylint: disable=unused-argument
    def filter_queryset(self, request, queryset, view):
        """Apply a comma-separated ``tags`` query-parameter filter, if any."""
        raw_tags = request.query_params.get("tags", None)
        # Only act on a non-empty string value; anything else is ignored.
        if not (raw_tags and isinstance(raw_tags, six.string_types)):
            return queryset
        wanted = raw_tags.split(",")
        return queryset.filter(tags__name__in=wanted)
# pylint: disable=too-few-public-methods
class XFormPermissionFilterMixin:
    """XForm permission filter."""

    def _add_instance_prefix_to_dataview_filter_kwargs(self, filter_kwargs):
        # Dataview queries are expressed against Instance fields; prefix them
        # with ``instance__`` so they apply through the relation.
        prefixed_filter_kwargs = {}
        for kwarg in filter_kwargs:
            prefixed_filter_kwargs["instance__" + kwarg] = filter_kwargs[kwarg]
        return prefixed_filter_kwargs

    def _xform_filter(self, request, view, keyword):
        """Use XForm permissions"""
        # Exactly one of xform / dataview / merged_xform narrows the form set;
        # with none given, all (non-deleted) forms are candidates.
        xform = request.query_params.get("xform")
        dataview = request.query_params.get("dataview")
        merged_xform = request.query_params.get("merged_xform")
        public_forms = XForm.objects.none()
        dataview_kwargs = {}
        if dataview:
            int_or_parse_error(
                dataview,
                "Invalid value for dataview ID. It must be a positive integer."
            )
            self.dataview = get_object_or_404(DataView, pk=dataview)
            # filter with fitlered dataset query
            dataview_kwargs = self._add_instance_prefix_to_dataview_filter_kwargs(
                get_filter_kwargs(self.dataview.query))
            xform_qs = XForm.objects.filter(pk=self.dataview.xform.pk)
        elif merged_xform:
            int_or_parse_error(
                merged_xform,
                "Invalid value for Merged Dataset ID. It must be a positive integer.")
            self.merged_xform = get_object_or_404(MergedXForm, pk=merged_xform)
            xform_qs = self.merged_xform.xforms.all()
        elif xform:
            int_or_parse_error(
                xform, "Invalid value for formid. It must be a positive integer."
            )
            self.xform = get_object_or_404(XForm, pk=xform)
            xform_qs = XForm.objects.filter(pk=self.xform.pk)
            # Publicly shared data stays visible regardless of permissions.
            public_forms = XForm.objects.filter(pk=self.xform.pk, shared_data=True)
        else:
            xform_qs = XForm.objects.all()
        xform_qs = xform_qs.filter(deleted_at=None)
        if request.user.is_anonymous:
            xforms = xform_qs.filter(shared_data=True)
        else:
            xforms = super().filter_queryset(request, xform_qs, view) | public_forms
        # Caller filters its own queryset with ``<keyword>__in`` plus any
        # dataview-derived instance filters.
        return {
            **{f"{keyword}__in": xforms},
            **dataview_kwargs
        }

    def _xform_filter_queryset(self, request, queryset, view, keyword):
        kwarg = self._xform_filter(request, view, keyword)
        return queryset.filter(**kwarg)
# pylint: disable=too-few-public-methods
class ProjectPermissionFilterMixin:
    """Project permission filter."""

    def _project_filter(self, request, view, keyword):
        # Narrow to one project when ``project`` is supplied, then let the
        # permission backend decide which of them the requester may see.
        project_id = request.query_params.get("project")
        if project_id:
            int_or_parse_error(
                project_id,
                "Invalid value for projectid. It must be a positive integer.",
            )
            project = get_object_or_404(Project, pk=project_id)
            project_qs = Project.objects.filter(pk=project.id)
        else:
            project_qs = Project.objects.all()
        projects = super().filter_queryset(request, project_qs, view)
        return {f"{keyword}__in": projects}

    def _project_filter_queryset(self, request, queryset, view, keyword):
        """Use Project Permissions"""
        kwarg = self._project_filter(request, view, keyword)
        return queryset.filter(**kwarg)
# pylint: disable=too-few-public-methods
class InstancePermissionFilterMixin:
    """Instance permission filter."""

    # pylint: disable=too-many-locals
    def _instance_filter(self, request, view, keyword):
        # Builds filter kwargs pinning the content type to Instance; the
        # instance itself is only included when instance, project AND xform
        # parameters are all present and mutually consistent.
        instance_kwarg = {}
        instance_content_type = ContentType.objects.get_for_model(Instance)
        instance_kwarg["content_type"] = instance_content_type
        instance_id = request.query_params.get("instance")
        project_id = request.query_params.get("project")
        xform_id = request.query_params.get("xform")
        if instance_id and project_id and xform_id:
            for object_id in [instance_id, project_id]:
                int_or_parse_error(
                    object_id,
                    "Invalid value for instanceid. It must be a positive integer.",
                )
            instance = get_object_or_404(Instance, pk=instance_id)
            # test if user has permissions on the project
            if xform_id:
                xform = get_object_or_404(XForm, pk=xform_id)
                # Truthy-queryset trick: ``parent`` is the xform only when the
                # instance actually belongs to it, else an empty queryset.
                parent = xform.instances.filter(id=instance.id) and xform
            else:
                # NOTE(review): unreachable - the outer condition already
                # guarantees xform_id is truthy here.
                return {}
            project = get_object_or_404(Project, pk=project_id)
            project_qs = Project.objects.filter(pk=project.id)
            if parent and parent.project == project:
                projects = super().filter_queryset(request, project_qs, view)
                instances = [instance.id] if projects else []
                instance_kwarg[f"{keyword}__in"] = instances
                return instance_kwarg
            return {}
        return instance_kwarg

    def _instance_filter_queryset(self, request, queryset, view, keyword):
        kwarg = self._instance_filter(request, view, keyword)
        return queryset.filter(**kwarg)
# pylint: disable=too-few-public-methods
class RestServiceFilter(XFormPermissionFilterMixin, ObjectPermissionsFilter):
    """Rest service filter."""

    def filter_queryset(self, request, queryset, view):
        # Restrict rest services to the forms the requester may access.
        return self._xform_filter_queryset(request, queryset, view, "xform_id")
# pylint: disable=too-few-public-methods
class MetaDataFilter(
    ProjectPermissionFilterMixin,
    InstancePermissionFilterMixin,
    XFormPermissionFilterMixin,
    ObjectPermissionsFilter,
):
    """Meta data filter."""

    def filter_queryset(self, request, queryset, view):
        """Filter metadata by instance, xform or project (in that priority),
        or return the union of all three scopes when none is specified."""
        keyword = "object_id"
        xform_id = request.query_params.get("xform")
        project_id = request.query_params.get("project")
        instance_id = request.query_params.get("instance")
        # generate queries
        xform_content_type = ContentType.objects.get_for_model(XForm)
        xform_kwarg = self._xform_filter(request, view, keyword)
        xform_kwarg["content_type"] = xform_content_type
        project_content_type = ContentType.objects.get_for_model(Project)
        project_kwarg = self._project_filter(request, view, keyword)
        project_kwarg["content_type"] = project_content_type
        instance_kwarg = self._instance_filter(request, view, keyword)
        # return instance specific metadata
        if instance_id:
            # NOTE(review): the failure branch returns a plain list, not a
            # queryset - confirm downstream callers accept both.
            return (
                queryset.filter(Q(**instance_kwarg))
                if (xform_id and instance_kwarg)
                else []
            )
        if xform_id:
            # return xform specific metadata
            return queryset.filter(Q(**xform_kwarg))
        # return project specific metadata
        if project_id:
            return queryset.filter(Q(**project_kwarg))
        # return all project,instance and xform metadata information
        return queryset.filter(
            Q(**xform_kwarg) | Q(**project_kwarg) | Q(**instance_kwarg)
        )
# pylint: disable=too-few-public-methods
class AttachmentFilter(XFormPermissionFilterMixin, ObjectPermissionsFilter):
    """Attachment filter."""

    def filter_queryset(self, request, queryset, view):
        """Scope attachments to accessible forms, apply each form's meta
        permissions, and optionally narrow to one submission."""
        queryset = self._xform_filter_queryset(
            request, queryset, view, "instance__xform"
        )
        # Ensure queryset is filtered by XForm meta permissions
        # NOTE(review): this loop issues one XForm query per distinct form
        # (N+1 pattern) - consider prefetching if it shows up in profiles.
        xform_ids = set(queryset.values_list("instance__xform", flat=True))
        for xform_id in xform_ids:
            xform = XForm.objects.get(id=xform_id)
            user = request.user
            queryset = exclude_items_from_queryset_using_xform_meta_perms(
                xform, user, queryset
            )
        instance_id = request.query_params.get("instance")
        if instance_id:
            int_or_parse_error(
                instance_id,
                "Invalid value for instance_id. It must be a positive integer.",
            )
            instance = get_object_or_404(Instance, pk=instance_id)
            queryset = queryset.filter(instance=instance)
        return queryset
# pylint: disable=too-few-public-methods
class AttachmentTypeFilter(filters.BaseFilterBackend):
    """Narrow attachments by media category via the ``type`` parameter."""

    def filter_queryset(self, request, queryset, view):
        """Keep only attachments whose mimetype maps to the requested type."""
        requested = request.query_params.get("type")
        allowed = MEDIA_FILE_TYPES.get(requested)
        if not allowed:
            return queryset
        return queryset.filter(mimetype__in=allowed)
# pylint: disable=too-few-public-methods
class TeamOrgFilter(filters.BaseFilterBackend):
    """Team organization filter using `org` query parameter"""

    def filter_queryset(self, request, queryset, view):
        """Return all teams of the given organization, if one is supplied."""
        # The organization may arrive in the request body or the query string.
        org = request.data.get("org") or request.query_params.get("org")
        if not org:
            return queryset
        lookup = {"organization__username__iexact": org}
        return Team.objects.filter(**lookup)
# pylint: disable=too-few-public-methods
class UserNoOrganizationsFilter(filters.BaseFilterBackend):
    """Filter by ``orgs`` query parameter."""

    def filter_queryset(self, request, queryset, view):
        """Exclude organization accounts when ``orgs=false`` is passed."""
        wants_orgs = str(request.query_params.get("orgs")).lower()
        if wants_orgs != "false":
            return queryset
        org_ids = OrganizationProfile.objects.values_list(
            "user__id", flat=True
        )
        return queryset.exclude(id__in=org_ids)
# pylint: disable=too-few-public-methods
class OrganizationsSharedWithUserFilter(filters.BaseFilterBackend):
    """Filters by ``shared_with`` query parameter."""

    def filter_queryset(self, request, queryset, view):
        """
        This returns a queryset containing only organizations to which
        the passed user belongs.
        """
        username = request.query_params.get("shared_with")
        if username:
            try:
                # The Team model extends the built-in Django Group model
                # Groups a User belongs to are available as a queryset property
                # of a User object, which this code takes advantage of
                organization_user_ids = (
                    User.objects.get(username=username)
                    .groups.all()
                    .values_list("team__organization", flat=True)
                    .distinct()
                )
                filtered_queryset = queryset.filter(user_id__in=organization_user_ids)
                return filtered_queryset
            except ObjectDoesNotExist as non_existent_object:
                # Unknown username -> 404 rather than an empty list.
                raise Http404 from non_existent_object
        return queryset
# pylint: disable=too-few-public-methods
class WidgetFilter(XFormPermissionFilterMixin, ObjectPermissionsFilter):
    """Filter to return forms shared with user."""

    def filter_queryset(self, request, queryset, view):
        """Filter to return forms shared with user when ``view.action == "list"``."""
        if view.action == "list":
            # Return widgets from xform user has perms to
            return self._xform_filter_queryset(request, queryset, view, "object_id")
        # Detail views fall back to plain object permissions.
        return super().filter_queryset(request, queryset, view)
# pylint: disable=too-few-public-methods
class UserProfileFilter(filters.BaseFilterBackend):
    """Filter profiles by the ``users`` query parameter."""

    def filter_queryset(self, request, queryset, view):
        """For list views, return only the requested usernames; with no
        ``users`` parameter, return the requester's own profile (or nothing
        for anonymous users)."""
        if view.action != "list":
            return queryset
        requested = request.GET.get("users")
        if requested:
            usernames = requested.split(",")
            return queryset.filter(user__username__in=usernames)
        if request.user.is_anonymous:
            return queryset.none()
        return queryset.filter(user__username=request.user.username)
# pylint: disable=too-few-public-methods
class NoteFilter(filters.BaseFilterBackend):
    """Notes filter by the query parameter ``instance``."""

    def filter_queryset(self, request, queryset, view):
        """Limit notes to a single submission when ``instance`` is given."""
        instance_pk = request.query_params.get("instance")
        if not instance_pk:
            return queryset
        int_or_parse_error(
            instance_pk,
            "Invalid value for instance_id. It must be a positive integer",
        )
        submission = get_object_or_404(Instance, pk=instance_pk)
        return queryset.filter(instance=submission)
# pylint: disable=too-few-public-methods
class ExportFilter(XFormPermissionFilterMixin, ObjectPermissionsFilter):
    """
    ExportFilter class uses permissions on the related xform to filter Export
    querysets. Also filters submitted_by a specific user.
    """
    def filter_queryset(self, request, queryset, view):
        """Filter by xform permissions and submitted by user."""
        # One-element tuple of Q objects (note the trailing comma) so it can be
        # splatted into filter()/exclude().  Matches exports whose options hold
        # a query containing a "_submitted_by" key, i.e. submitter-scoped exports.
        has_submitted_by_key = (
            Q(options__has_key="query") & Q(options__query__has_key="_submitted_by"),
        )
        if request.user.is_anonymous:
            # Anonymous users never see submitter-scoped exports.
            return self._xform_filter_queryset(
                request, queryset, view, "xform_id"
            ).exclude(*has_submitted_by_key)
        public_xform_id = _public_xform_id_or_none(view.kwargs.get("pk"))
        if public_xform_id:
            # Public form: the caller's own submitter-scoped exports plus all
            # exports that are not scoped to any submitter.
            form_exports = queryset.filter(xform_id=public_xform_id)
            current_user_form_exports = (
                form_exports.filter(*has_submitted_by_key)
                .filter(options__query___submitted_by=request.user.username)
            )
            other_form_exports = form_exports.exclude(*has_submitted_by_key)
            return current_user_form_exports | other_form_exports
        old_perm_format = getattr(self, "perm_format")
        # only if request.user has access to all data
        # noqa pylint: disable=attribute-defined-outside-init
        self.perm_format = old_perm_format + "_all"
        all_qs = self._xform_filter_queryset(
            request, queryset, view, "xform_id"
        ).exclude(*has_submitted_by_key)
        # request.user has access to own submitted data
        self.perm_format = old_perm_format + "_data"
        submitter_qs = (
            self._xform_filter_queryset(request, queryset, view, "xform_id")
            .filter(*has_submitted_by_key)
            .filter(options__query___submitted_by=request.user.username)
        )
        # NOTE(review): perm_format is left as "<old>_data" after this method
        # returns -- confirm no later call relies on it being restored.
        return all_qs | submitter_qs
# pylint: disable=too-few-public-methods
class PublicDatasetsFilter:
    """Restrict anonymous requests to publicly shared datasets."""
    # pylint: disable=unused-argument
    def filter_queryset(self, request, queryset, view):
        """Narrow ``queryset`` to ``shared=True`` records for anonymous users."""
        is_anonymous_request = bool(request) and request.user.is_anonymous
        if not is_anonymous_request:
            # Authenticated (or request-less) callers see everything.
            return queryset
        return queryset.filter(shared=True)
|
986,079 | b0fe188b2a22fc804b5e21982d7302a8c9d29c0e | TOKEN = "" # Telegram Bot Token |
986,080 | 4d2ddcc0d91322e41567f25d7cbe5235a229299b | metadata = """
summary @ X.Org initialisation program
homepage @ http://xorg.freedesktop.org/
license @ custom
src_url @ http://xorg.freedesktop.org/archive/individual/app/xinit-$version.tar.bz2
arch @ ~x86_64
"""
depends = """
runtime @ x11-libs/libX11 x11-apps/xauth
"""
def prepare():
    # Apply the distribution patch set before building.
    patch("06_move_serverauthfile_into_tmp.diff", level=1)
    patch("fs25361.patch", level=1)
    # Strip the ".sh" suffix from the XSLASHGLOB placeholder in xinitrc.cpp.
    sed("-i -e 's/XSLASHGLOB.sh/XSLASHGLOB/' xinitrc.cpp")
def configure():
    # Install xinitrc scripts under the system-wide /etc/X11/xinit directory.
    conf("--with-xinitdir=/etc/X11/xinit")
def install():
    # Stage the build into the package image, then ship the license file.
    raw_install("DESTDIR=%s" % install_dir)
    insdoc("COPYING")
|
986,081 | 32c306368c09eb0105c7ca750e8e879a1aadb1eb | # coding=UTF-8
from AES import AES
import numpy as np
# 128-bit test key for the known-answer test below.
key = 0x3220db6534d687f844c41b6de5a4c737
aes = AES(key, 1, 0)  # NOTE(review): meaning of the (1, 0) ctor args is not visible here -- check AES module
# One 16-byte plaintext block, given as per-byte integers.
inp_row = np.array([172,47,117,192,67,251,195,103,9,211,21,242,36,87,70,216])
cipher_text, trace = aes.encrypt(inp_row)
# Known-answer test: ciphertext must match the expected reference vector byte-for-byte.
assert ([173,205,44,52,32,86,75,184,193,231,36,82,28,6,44,234] == cipher_text).all()
print("AES Test PASS")
|
986,082 | ce629ed32e2b925bf3f6a99fea22a684bf3152e2 | def canFinish(numCourses, prerequisites):
"""
:type numCourses: int
:type prerequisites: List[List[int]]
:rtype: bool
"""
graph = [[] for _ in range(numCourses)]
visited = [0] * numCourses
for course, pre in prerequisites:
graph[course].append(pre)
def dfs(i):
if visited[i] == -1:
return False
if visited[i] == 1:
return True
visited[i] = -1
for j in graph[i]:
if not dfs(j):
return False
visited[i] = 1
return True
for i in range(numCourses):
if not dfs(i):
return False
return True
numCourses = 2
prerequisites = [[1, 0]]
# 0 is a prerequisite of 1
print(canFinish(numCourses, prerequisites))
|
986,083 | f81917d172a0b222e1625607333f34fdcc3cced0 | from datetime import date
hoje = date.today().year
maior = 0
menor = 0
for c in range(0,7):
n = int(input('Digite a data de Nacimento: '))
idade = hoje - n
if idade >= 18:
maior += 1
else:
menor += 1
print(f'{maior} são Maiores de IDADE')
print(f'{menor} são Menores de IDADE') |
986,084 | 46bdf02509d3e53edc1e48593d9e775dfb2adda1 |
import os
import numpy as np
import pandas as pd
import cv2
import panZoom
DATAHOME = '/home/ctorney/data/tz-2017/'
CODEHOME = '/home/ctorney/workspace/wildGradient/'
inputname = CODEHOME + '/irMovieList.csv'
#dfMovies = pd.read_csv(inputname,index_col=0)
# initialize the list of points for the rectangle bbox,
# the temporaray endpoint of the drawing rectangle
# the list of all bounding boxes of selected rois
# and boolean indicating wether drawing of mouse
# is performed or not
rect_endpoint_tmp = []
rect_bbox = []
bbox_list_rois = []
drawing = False
def select_rois(img):
"""
Interactive select rectangle ROIs and store list of bboxes.
Parameters
----------
img :
image 3-dim.
Returns
-------
bbox_list_rois : list of list of int
List of bboxes of rectangle rois.
"""
# mouse callback function
def draw_rect_roi(event, x, y, flags, param):
# grab references to the global variables
global rect_bbox, rect_endpoint_tmp, drawing
# if the left mouse button was clicked, record the starting
# (x, y) coordinates and indicate that drawing is being
# performed. set rect_endpoint_tmp empty list.
if event == cv2.EVENT_LBUTTONDOWN:
rect_endpoint_tmp = []
rect_bbox = [(x, y)]
drawing = True
# check to see if the left mouse button was released
elif event == cv2.EVENT_LBUTTONUP:
# record the ending (x, y) coordinates and indicate that
# drawing operation is finished
rect_bbox.append((x, y))
drawing = False
# draw a rectangle around the region of interest
p_1, p_2 = rect_bbox
cv2.rectangle(img, p_1, p_2, color=(0, 255, 0),thickness=1)
cv2.imshow('image', img)
# for bbox find upper left and bottom right points
p_1x, p_1y = p_1
p_2x, p_2y = p_2
lx = min(p_1x, p_2x)
ty = min(p_1y, p_2y)
rx = max(p_1x, p_2x)
by = max(p_1y, p_2y)
# add bbox to list if both points are different
if (lx, ty) != (rx, by):
cx = 0.5*(lx+rx)/float(nx//2)
cy = 0.5*(by+ty)/float(ny//2)
wx = (rx -lx)/float(nx//2)
wy = (by -ty)/float(ny//2)
bbox = [cx, cy, wx, wy]
bbox_list_rois.append(bbox)
# if mouse is drawing set tmp rectangle endpoint to (x,y)
elif event == cv2.EVENT_MOUSEMOVE and drawing:
rect_endpoint_tmp = [(x, y)]
# clone image img and setup the mouse callback function
img_copy = img.copy()
cv2.namedWindow('image',cv2.WINDOW_NORMAL)
cv2.resizeWindow('image', 1600,1200)
cv2.setMouseCallback('image', draw_rect_roi)
# keep looping until the 'c' key is pressed
while True:
# display the image and wait for a keypress
if not drawing:
cv2.imshow('image', img)
elif drawing and rect_endpoint_tmp:
rect_cpy = img.copy()
start_point = rect_bbox[0]
end_point_tmp = rect_endpoint_tmp[0]
cv2.rectangle(rect_cpy, start_point, end_point_tmp,(0,255,0),1)
cv2.imshow('image', rect_cpy)
key = cv2.waitKey(1) & 0xFF
# if the 'c' key is pressed, break from the loop
if key == ord('c'):
break
# close all open windows
cv2.destroyAllWindows()
#cv2.waitKey(1)
return bbox_list_rois
imName = 'test.png'
frame = cv2.imread(imName)
ny,nx,_ = frame.shape
for x in range(2):
for y in range(2):
f = frame[y*ny//2:(y+1)*ny//2,x*nx//2:(x+1)*nx//2,:]
fr = f.copy()
rois = select_rois(fr)
outFile = os.path.splitext(os.path.basename(imName))[0] + str(x) + '_' + str(y) + '.txt'
outIm = os.path.splitext(os.path.basename(imName))[0] + str(x) + '_' + str(y) + '.png'
output = open(outFile, "w")
cv2.imwrite(outIm,f)
for roi in rois:
s = '0 ' + str(roi[0]) + ' ' + str(roi[1]) + ' ' + str(roi[2]) + ' ' + str(roi[3]) + '\n'
output.write(s)
output.close()
|
986,085 | 68f5fac3d032b0c54385057952aa3be7d4f4dac6 | # Cody Hancock | Student ID: #001087330
class Package:
def __init__(self,pkgID, pkgAddress, pkgCity, pkgState, pkgZip, pkgTime, pkgValue, pkgNote):
self.pkgID = pkgID
self.pkgAddress = pkgAddress
self.pkgCity = pkgCity
self.pkgState = pkgState
self.pkgZip = pkgZip
self.pkgTime = pkgTime
self.pkgValue = pkgValue
self.pkgNote = pkgNote
self.pkgStatus = "At hub"
self.timeDelivered = "n/a"
self.timeLeftHub = "n/a"
@property
def timeLeftHub(self):
return self.__timeLeftHub
@timeLeftHub.setter
def timeLeftHub(self,timeLeftHub):
self.__timeLeftHub = timeLeftHub
@property
def timeDelivered(self):
return self.__timeDelivered
@timeDelivered.setter
def timeDelivered(self,deliveryTime):
self.__timeDelivered = deliveryTime
@property
def pkgStatus(self):
return self.__pkgStatus
@pkgStatus.setter
def pkgStatus(self,status):
self.__pkgStatus = status
@property
def pkgID(self):
return self.__pkgID
@pkgID.setter
def pkgID(self,pkgID):
self.__pkgID = pkgID
@property
def pkgAddress(self):
return self.__pkgAddress
@pkgAddress.setter
def pkgAddress(self, pkgAddress):
self.__pkgAddress = pkgAddress
@property
def pkgCity(self):
return self.__pkgCity
@pkgCity.setter
def pkgCity(self, pkgCity):
self.__pkgCity = pkgCity
@property
def pkgState(self):
return self.__pkgState
@pkgState.setter
def pkgState(self, pkgState):
self.__pkgState = pkgState
@property
def pkgZip(self):
return self.__pkgZip
@pkgZip.setter
def pkgZip(self, pkgZip):
self.__pkgZip = pkgZip
@property
def pkgTime(self):
return self.__pkgTime
@pkgTime.setter
def pkgTime(self, pkgTime):
self.__pkgTime = pkgTime
@property
def pkgValue(self):
return self.__pkgValue
@pkgValue.setter
def pkgValue(self,pkgValue):
self.__pkgValue = pkgValue
@property
def pkgNote(self):
return self.__pkgNote
@pkgNote.setter
def pkgNote(self,pkgNote):
self.__pkgNote = pkgNote
|
986,086 | 03a0bc619da080e137df3870bc346dd6e062f6f4 | '''
Perform basic database operations (CRUD)
'''
def insert(connection, table: str):
    """Insert one placeholder row into *table* and commit.

    NOTE: identifiers cannot be bound as DB-API parameters, so the table
    name is interpolated into the SQL string; *table* must therefore come
    from trusted code, never from user input.
    """
    with connection.cursor() as cursor:
        sql = 'INSERT INTO {} (text) VALUES (%s)'.format(table)
        # Parameters must be a sequence: ('test') is just a parenthesised
        # string, so use an explicit 1-tuple.
        cursor.execute(sql, ('test',))
    connection.commit()
def update(connection, table: str, record: int):
    """Overwrite the ``text`` column of row *record* in *table* and commit."""
    with connection.cursor() as cursor:
        sql = 'UPDATE {} SET {} = %s WHERE id = %s'.format(table, 'text')
        cursor.execute(sql, ('test-test', record))
    connection.commit()
def select(connection, table: str):
    """Return all rows of *table* as a sequence of tuples."""
    with connection.cursor() as cursor:
        sql = 'SELECT * FROM {}'.format(table)
        cursor.execute(sql)
        result = cursor.fetchall()
    return result
def delete(connection, table: str, record: int):
    """Delete row *record* from *table* and commit."""
    with connection.cursor() as cursor:
        # Removed the stray second .format() argument ('text'), which was
        # silently ignored because the template has only one placeholder.
        sql = 'DELETE FROM {} WHERE id = %s'.format(table)
        # 1-tuple, not a bare parenthesised value.
        cursor.execute(sql, (record,))
    connection.commit()
986,087 | f0cc78b282101ea3701e47a7c64b197c54dee153 | """
Form Validation Module
"""
# local Django
from app.modules.validation.extensions import *
from app.modules.validation.validator import Validator
from app.modules.validation.sanitizer import Sanitizer
from app.exceptions.sanitization_rule_not_found import Sanitization_Rule_Not_Found
from app.exceptions.validation_rule_not_found import Validation_Rule_Not_Found
class Form():
    """Validates and sanitizes a dict of named inputs.

    ``inputs`` maps an input name to a dict holding a ``value`` key plus
    optional ``validate``/``sanitize`` rule dicts.  Rule names are resolved
    on the built-in Validator/Sanitizer first and then on any extensions
    registered via ``add_validator``/``add_sanitizer``.
    """

    def __init__(self, inputs=None):
        # All state is per-instance.  These were previously class attributes,
        # so __errors/__validators/__sanitizers were shared (and mutated)
        # across every Form instance in the process.
        self.__inputs = {} if inputs is None else inputs
        self.__errors = {}
        self.__vstatus = False
        self.__sstatus = False
        self.__validators = []
        self.__sanitizers = []
        self.__validator = Validator()
        self.__sanitizer = Sanitizer()

    def add_inputs(self, inputs={}):
        """Replace the form's inputs."""
        self.__inputs = inputs

    def get_inputs(self):
        """Return the (possibly rule-annotated) inputs dict."""
        return self.__inputs

    def get_input_value(self, input_key, sanitized=True):
        """Return the sanitized value of *input_key* when available, else the raw value."""
        return self.__inputs[input_key]["value"] if not sanitized or not "svalue" in self.__inputs[input_key] else self.__inputs[input_key]["svalue"]

    def get_errors(self, with_type=False):
        """Return validation errors, optionally flattened into typed messages."""
        if with_type:
            errors = []
            for input_key, error_list in self.__errors.items():
                for error in error_list:
                    errors.append({"type": "error", "message": error})
            return errors
        else:
            return self.__errors

    def is_passed(self):
        """Return True when no input collected any validation error."""
        for input in self.__inputs:
            # .get(): inputs without a 'validate' rule never receive an
            # error list, so a plain [] lookup would raise KeyError here.
            if len(self.__errors.get(input, [])) > 0:
                return False
        return True

    def get_vstatus(self):
        """Return the overall validation status."""
        # Fixed: previously read self._vstatus, an attribute that is never
        # set anywhere (the real, name-mangled attribute is __vstatus), so
        # this method always raised AttributeError.
        return self.__vstatus

    def get_sstatus(self):
        """Return the overall sanitization status."""
        # Fixed: same _sstatus/__sstatus typo as get_vstatus.
        return self.__sstatus

    def process(self, direction=('sanitize', 'validate')):
        """Run sanitization and/or validation, in the order given by *direction*."""
        if direction[0] == 'sanitize':
            if 'sanitize' in direction:
                self.__sanitize()
            if 'validate' in direction:
                self.__validate()
        else:
            if 'validate' in direction:
                self.__validate()
            if 'sanitize' in direction:
                self.__sanitize()

    def add_validator(self, val_instance):
        """Register an extension validator consulted for unknown rule names."""
        self.__validators.append(val_instance)

    def add_sanitizer(self, san_instance):
        """Register an extension sanitizer consulted for unknown rule names."""
        self.__sanitizers.append(san_instance)

    def __validate(self):
        """Run every input's 'validate' rules, collecting errors per input."""
        status = True
        for current_input, validation_rule in self.__inputs.items():
            self.__validator.set_input(self.__inputs[current_input]['value'])
            if 'validate' in validation_rule:
                self.__errors[current_input] = []
                for rule_name, rule_args in validation_rule['validate'].items():
                    self.__update_validator(rule_name)
                    # Check if param exist and pass them to the method
                    if 'param' in rule_args.keys() and len(rule_args['param']) > 0:
                        current_status = getattr(self.__validator, rule_name)(*rule_args['param'])
                    else:
                        current_status = getattr(self.__validator, rule_name)()
                    # An 'optional' rule lets empty values pass every check.
                    if "optional" in validation_rule['validate'] and self.__inputs[current_input]['value'] == "":
                        current_status = True
                    self.__inputs[current_input]['status'] = current_status
                    status &= current_status
                    if not current_status and 'error' in rule_args.keys():
                        self.__errors[current_input].append(rule_args['error'])
        self.__vstatus = status
        return status

    def __sanitize(self):
        """Run every input's 'sanitize' rules; record whether values changed."""
        status = True
        for current_input, sanitization_rule in self.__inputs.items():
            self.__sanitizer.set_input(self.__inputs[current_input]['value'])
            self.__sanitizer.set_sinput(None)
            if 'sanitize' in sanitization_rule:
                for rule_name, rule_args in sanitization_rule['sanitize'].items():
                    self.__update_sanitizer(rule_name)
                    # Check if param provided and pass them to the method
                    if 'param' in rule_args.keys() and len(rule_args['param']) > 0:
                        sanitized_value = getattr(self.__sanitizer, rule_name)(*rule_args['param'])
                    else:
                        sanitized_value = getattr(self.__sanitizer, rule_name)()
                    self.__inputs[current_input]['svalue'] = sanitized_value
                # 'is_exact' flags inputs whose sanitized value equals the raw one.
                self.__inputs[current_input]['is_exact'] = True if self.__inputs[current_input]['value'] == self.__sanitizer.get_sinput() else False
                status &= self.__inputs[current_input]['is_exact']
        self.__sstatus = status
        return status

    def __update_validator(self, rule_name):
        """Point __validator at an object implementing *rule_name* (built-in first)."""
        if hasattr(self.__validator, rule_name):
            return True
        for validator in self.__validators:
            if hasattr(validator, rule_name):
                self.__validator = validator
                return True
        raise Validation_Rule_Not_Found('Non existent validation rule %s' % rule_name)

    def __update_sanitizer(self, rule_name):
        """Point __sanitizer at an object implementing *rule_name*, chaining
        the previous sanitized output into the next rule's input."""
        if hasattr(self.__sanitizer, rule_name):
            if self.__sanitizer.get_sinput() is None:
                self.__sanitizer.set_input(self.__sanitizer.get_input())
                self.__sanitizer.set_sinput(None)
            else:
                self.__sanitizer.set_input(self.__sanitizer.get_sinput())
            return True
        for sanitizer in self.__sanitizers:
            if hasattr(sanitizer, rule_name):
                if self.__sanitizer.get_sinput() is None:
                    sanitizer.set_input(self.__sanitizer.get_input())
                    sanitizer.set_sinput(None)
                else:
                    sanitizer.set_input(self.__sanitizer.get_sinput())
                self.__sanitizer = sanitizer
                return True
        raise Sanitization_Rule_Not_Found('Non existent sanitization rule %s' % rule_name)
986,088 | 20055f6c21daa6a4e073f6bbbb166d976362715a | #ReverseLookup\
def reverseLookup(data, value):
    """Return every key in *data* whose mapped value equals *value*,
    in the dict's iteration order."""
    return [key for key, mapped in data.items() if mapped == value]
def main():
    """Exercise reverseLookup with known and unknown values."""
    # Keys are words, values are their antonyms; reverseLookup(d, w) then
    # answers "which words have w as their antonym?".  The original dict
    # repeated the key "sad" (later entries silently overwrite earlier
    # ones) and mapped the pairs in the opposite direction, so every
    # lookup below returned [] instead of the expected results.
    antonyms = {"happy": "sad", "cheerful": "sad", "right": "left",
                "go": "stop", "sour": "sweet"}
    print("The antonym for 'left' is: ", reverseLookup(antonyms, "left"))
    print("[right]")
    print("The antonyms for 'sad' are: ", reverseLookup(antonyms, "sad"))
    print("[happy, cheerful]")
    print("The antonym for 'stop' is: ", reverseLookup(antonyms, "stop"))
    print("[go]")
    # Fixed: the call was outside print() in a discarded tuple expression,
    # so the lookup result was never shown.
    print("The antonym for 'kevin' is: ", reverseLookup(antonyms, "kevin"))
    print(" [--]")
if __name__ == "__main__":
    main()
986,089 | 6d6bf59888b28e765212aa53f4e1587b518d35a6 | from django.db import models
# Create your models here.
class TodoItem(models.Model):
    """A single to-do entry."""

    content = models.CharField(max_length=1024)
    created_time = models.DateTimeField(auto_now_add=True)  # set once on insert
    finished_time = models.DateTimeField(auto_now=True)     # refreshed on every save
    finished = models.BooleanField()

    def __unicode__(self):
        # Format every field through str.format(): created_time/finished_time
        # are datetimes and finished is a bool, so the previous "+"
        # concatenation raised TypeError.  Also fixes the "finised" typo.
        return ("created_time: {0}\n"
                "finished: {1}\n"
                "finished_time: {2}\n"
                "content: {3}").format(self.created_time, self.finished,
                                       self.finished_time, self.content)
986,090 | 68ebaf50f15af44e2b78aebd2902201c140d688a | import pytest
from api_test_utils.api_test_session_config import APITestSessionConfig
from api_test_utils import poll_until, PollTimeoutError
from api_test_utils.api_session_client import APISessionClient
@pytest.fixture
def api_test_config() -> APITestSessionConfig:
yield APITestSessionConfig(base_uri="https://httpbin.org")
@pytest.mark.asyncio
async def test_wait_for_poll_does_timeout(api_client: APISessionClient):
with pytest.raises(PollTimeoutError) as exec_info:
await poll_until(lambda: api_client.get('status/404'), timeout=1, sleep_for=0.3)
error = exec_info.value # type: PollTimeoutError
assert len(error.responses) > 0
assert error.responses[0][0] == 404
@pytest.mark.asyncio
async def test_wait_for_200_bytes(api_client: APISessionClient):
responses = await poll_until(lambda: api_client.get('bytes/100'), timeout=5)
assert len(responses) == 1
status, headers, body = responses[0]
assert status == 200
assert headers.get('Content-Type').split(';')[0] == 'application/octet-stream'
assert isinstance(body, bytes)
@pytest.mark.asyncio
async def test_wait_for_200_json(api_client: APISessionClient):
responses = await poll_until(lambda: api_client.get('json'), timeout=5)
assert len(responses) == 1
status, headers, body = responses[0]
assert status == 200
assert headers.get('Content-Type').split(';')[0] == 'application/json'
assert body['slideshow']['title'] == 'Sample Slide Show'
@pytest.mark.asyncio
async def test_wait_for_200_html(api_client: APISessionClient):
responses = await poll_until(lambda: api_client.get('html'), timeout=5)
assert len(responses) == 1
status, headers, body = responses[0]
assert status == 200
assert headers.get('Content-Type').split(';')[0] == 'text/html'
assert isinstance(body, str)
assert body.startswith('<!DOCTYPE html>')
@pytest.mark.asyncio
async def test_wait_for_200_json_gzip(api_client: APISessionClient):
responses = await poll_until(lambda: api_client.get('gzip'), timeout=5)
assert len(responses) == 1
status, headers, body = responses[0]
assert status == 200
assert headers.get('Content-Type').split(';')[0] == 'application/json'
assert body['gzipped'] is True
@pytest.mark.asyncio
async def test_wait_for_200_json_deflate(api_client: APISessionClient):
responses = await poll_until(lambda: api_client.get('deflate'), timeout=5)
assert len(responses) == 1
status, headers, body = responses[0]
assert status == 200
assert headers.get('Content-Type').split(';')[0] == 'application/json'
assert body['deflated'] is True
@pytest.mark.skip('we probably do not need brotli support just yet, but if we do .. add brotlipy')
@pytest.mark.asyncio
async def test_wait_for_200_json_brotli(api_client: APISessionClient):
responses = await poll_until(lambda: api_client.get('brotli'), timeout=5)
assert len(responses) == 1
status, headers, body = responses[0]
assert status == 200
assert headers.get('Content-Type').split(';')[0] == 'application/json'
assert body['brotli'] is True
|
986,091 | 22ffbf4d38a8cc81e19b3535ccec8f6e8b570a4f | from django.contrib.auth import get_user_model
from django.db import models
from creator.ingest_runs.common.model import IngestProcess
from creator.files.models import Version
DELIMITER = "-"
NAME_PREFIX = "INGEST_RUN"
INGEST_QUEUE_NAME = "ingest"
User = get_user_model()
class IngestRun(IngestProcess):
    """
    Request to ingest file(s) into a target data service
    """
    class Meta(IngestProcess.Meta):
        permissions = [
            ("list_all_ingestrun", "Show all ingest_runs"),
            ("cancel_ingestrun", "Cancel an ingest_run"),
        ]
    name = models.TextField(
        blank=True,
        null=True,
        help_text=(
            "The name of the ingest run. Autopopulated on save with the "
            "concatenation of the IngestRun's file version IDs"
        ),
    )
    versions = models.ManyToManyField(
        Version,
        related_name="ingest_runs",
        help_text="List of files to ingest in the ingest run",
    )
    @property
    def study(self):
        # Derived from the first version's root file; implicitly returns
        # None while the run has no versions attached.
        if self.versions.count() > 0:
            return self.versions.first().root_file.study
    def compute_name(self):
        """
        Compute the name from the IngestRun's file version ids
        """
        # Sort the ids so the name is stable regardless of version ordering.
        version_id_str = DELIMITER.join(
            sorted(v.kf_id for v in self.versions.all())
        )
        return DELIMITER.join([NAME_PREFIX, version_id_str])
    def __str__(self):
        # Fall back to the primary key before the name is autopopulated.
        if self.name:
            return self.name
        else:
            return str(self.id)
986,092 | bc39c2c13968dbaba59680ab20d270b32327b42f | import webapp2
from numpy import *
from math import *
from google.appengine.ext import db
from models import *
from classification import *
class Test(webapp2.RequestHandler):
    """
    Script to test classification algorithm
    """
    def get(self):
        # Plain-text report: classify every stored patient with the most
        # recent weight vector and print the result under their name.
        self.response.headers['Content-Type'] = 'text/plain'
        self.w_vector = ClassWeights.get_recent_weights()
        all_patients = Patient.all()
        self.response.write('Testing classificaton algorithm...\n')
        for p in all_patients:
            data = get_feature_matrix(p)
            # Patients without an extractable feature matrix are skipped.
            if data is not None:
                prob = classify(data, self.w_vector)
                self.response.write(p.first_name + ' ' + p.last_name + ':\n')
                self.response.write(prob)
                self.response.write('\n')
APPLICATION = webapp2.WSGIApplication([
('/test', Test),
], debug=True) |
986,093 | b4ef03c2d9a50ae6acc4de18036533f4ba3f9eea | # vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import re
import Logger
import Packager
from Component import (PkgUninstallComponent, PkgInstallComponent,
ComponentBase, RuntimeComponent)
from Util import (DB,
get_host_ip,
execute_template)
from Exceptions import (StartException, StopException,
StatusException, RestartException)
from Shell import (execute)
from Trace import (TraceReader,
IN_TRACE)
LOG = Logger.getLogger("install.db")
TYPE = DB
MYSQL = 'mysql'
DB_ACTIONS = {
MYSQL: {
#hopefully these are distro independent, these should be since they are invoking system init scripts
'start': ["service", "mysql", 'start'],
'stop': ["service", 'mysql', "stop"],
'status': ["service", 'mysql', "status"],
'restart': ["service", 'mysql', "status"],
#
'create_db': ['mysql', '--user=%USER%', '--password=%PASSWORD%', '-e', 'CREATE DATABASE %DB%;'],
'drop_db': ['mysql', '--user=%USER%', '--password=%PASSWORD%', '-e', 'DROP DATABASE IF EXISTS %DB%;'],
'grant_all': [
"mysql",
"--user=%USER%",
"--password=%PASSWORD%",
"-e \"GRANT ALL PRIVILEGES ON *.* TO '%USER%'@'%' identified by '%PASSWORD%';\"",
],
#we could do this in python directly, but executing allows us to not have to sudo the whole program
'host_adjust': ['perl', '-p', '-i', '-e'] + ["'s/127.0.0.1/0.0.0.0/g'", '/etc/mysql/my.cnf'],
},
}
BASE_ERROR = 'Currently we do not know how to %s for database type [%s]'
class DBUninstaller(PkgUninstallComponent):
def __init__(self, *args, **kargs):
PkgUninstallComponent.__init__(self, TYPE, *args, **kargs)
class DBInstaller(PkgInstallComponent):
def __init__(self, *args, **kargs):
PkgInstallComponent.__init__(self, TYPE, *args, **kargs)
self.runtime = DBRuntime(*args, **kargs)
def _get_download_location(self):
return (None, None)
def _get_param_map(self, fn=None):
#this dictionary will be used for parameter replacement
#in pre-install and post-install sections
out = dict()
out['PASSWORD'] = self.cfg.getpw("passwords", "sql")
out['BOOT_START'] = str(True).lower()
out['USER'] = self.cfg.get("db", "sql_user")
hostip = get_host_ip(self.cfg)
out['SERVICE_HOST'] = hostip
out['HOST_IP'] = hostip
return out
def install(self):
pres = PkgInstallComponent.install(self)
#extra actions to ensure we are granted access
dbtype = self.cfg.get("db", "type")
dbactions = DB_ACTIONS.get(dbtype)
if(dbactions and dbactions.get('grant_all')):
#update the DB to give user 'USER'@'%' full control of the all databases:
grant_cmd = dbactions.get('grant_all')
params = self._get_param_map()
cmds = list()
cmds.append({
'cmd': grant_cmd,
'run_as_root': False,
})
#shell seems to be needed here
#since python escapes this to much...
execute_template(*cmds, params=params, shell=True)
#special mysql actions
if(dbactions and dbtype == MYSQL):
cmd = dbactions.get('host_adjust')
if(cmd):
execute(*cmd, run_as_root=True, shell=True)
#restart it to make sure all good
self.runtime.restart()
return pres
class DBRuntime(ComponentBase, RuntimeComponent):
def __init__(self, *args, **kargs):
ComponentBase.__init__(self, TYPE, *args, **kargs)
self.tracereader = TraceReader(self.tracedir, IN_TRACE)
def _gettypeactions(self, act, exception_cls):
pkgsinstalled = self.tracereader.packages_installed()
if(len(pkgsinstalled) == 0):
msg = "Can not %s %s since it was not installed" % (act, TYPE)
raise exception_cls(msg)
#figure out how to do it
dbtype = self.cfg.get("db", "type")
typeactions = DB_ACTIONS.get(dbtype)
if(typeactions == None or not typeactions.get(act)):
msg = BASE_ERROR % (act, dbtype)
raise NotImplementedError(msg)
return typeactions.get(act)
def start(self):
if(self.status().find('start') == -1):
startcmd = self._gettypeactions('start', StartException)
execute(*startcmd, run_as_root=True)
return None
def stop(self):
if(self.status().find('stop') == -1):
stopcmd = self._gettypeactions('stop', StopException)
execute(*stopcmd, run_as_root=True)
return None
def restart(self):
restartcmd = self._gettypeactions('restart', RestartException)
execute(*restartcmd, run_as_root=True)
return None
def status(self):
statuscmd = self._gettypeactions('status', StatusException)
(sysout, stderr) = execute(*statuscmd, run_as_root=True)
return sysout.strip()
def _run_db_command(cfg, dbname, action, verb):
    """Run the configured db type's *action* command with USER/PASSWORD/DB
    substituted from config; raise NotImplementedError when the db type
    has no such action.  (Shared body of drop_db/create_db, which were
    previously two near-identical copies of this code.)"""
    dbtype = cfg.get("db", "type")
    dbactions = DB_ACTIONS.get(dbtype)
    cmd = dbactions.get(action) if dbactions else None
    if(not cmd):
        msg = BASE_ERROR % (verb, dbtype)
        raise NotImplementedError(msg)
    params = dict()
    params['PASSWORD'] = cfg.getpw("passwords", "sql")
    params['USER'] = cfg.get("db", "sql_user")
    params['DB'] = dbname
    cmds = list()
    cmds.append({
        'cmd': cmd,
        'run_as_root': False,
    })
    execute_template(*cmds, params=params)


def drop_db(cfg, dbname):
    """Drop *dbname* (if it exists) using the configured sql credentials."""
    _run_db_command(cfg, dbname, 'drop_db', 'drop')


def create_db(cfg, dbname):
    """Create *dbname* using the configured sql credentials."""
    _run_db_command(cfg, dbname, 'create_db', 'create')
|
986,094 | d77e3e7687cce527f6b63672f27de0df55e03c64 | from django.db import models
from django.contrib.auth.models import User
class Medico(models.Model):
ESPECIALIDADES = (
('Cardiologista', 'Cardiologista'),
('Clínico Geral', 'Clínico Geral'),
('Dermatologista', 'Dermatologista'),
('Ortopedista', 'Ortopedista'),
)
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
crm = models.CharField(max_length=15, null=True)
endereco = models.CharField(max_length=100, null=True)
email = models.CharField(max_length=100, null=True)
fone = models.CharField(max_length=20, null=True)
especialidade = models.CharField(max_length=50, choices=ESPECIALIDADES, null=True)
status = models.BooleanField(default=True)
@property
def get_name(self):
return self.user.first_name + " " + self.user.last_name
@property
def get_id(self):
return self.user.id
def __str__(self):
return "{} ({})".format(self.user.first_name, self.especialidade)
class Paciente(models.Model):
user = models.OneToOneField(User, on_delete=models.CASCADE, null=True)
cpf = models.CharField(max_length=15, null=True)
endereco = models.CharField(max_length=40, null=True)
email = models.CharField(max_length=100, null=True)
fone = models.CharField(max_length=20, null=True)
sintomas = models.CharField(max_length=100, null=True)
status = models.BooleanField(default=True)
@property
def get_name(self):
return self.user.first_name + " " + self.user.last_name
@property
def get_id(self):
return self.user.id
def __str__(self):
return self.user.first_name + " (" + self.sintomas + ")"
class Consulta(models.Model):
id_paciente = models.PositiveIntegerField(null=True)
id_medico = models.PositiveIntegerField(null=True)
nome_paciente = models.CharField(max_length=40, null=True)
nome_medico = models.CharField(max_length=40, null=True)
data = models.DateTimeField(null=True)
descricao = models.TextField(max_length=500, null=True)
status = models.BooleanField(default=True)
|
986,095 | 428991ebcfe5288e9e62b2371a899331075dd133 | class Peca():
def __init__(self,x,y):
self.x = x
self.y = y
self.w = 100
self.h = 100
self.dx = 0 |
986,096 | 43ea1ed104990b0980e98af462b6999bc0facd1d | # pylint: disable=C0103
# pylint: disable=C0111
# pylint: disable=C0301
import argparse
import json
import sys
import os
import threading
import logging
import time
import curses
import ConfigParser
from shutil import rmtree
from shutil import move
from getpass import getpass
from time import sleep
from pprint import pformat
from jobs.Transfer import Transfer
from jobs.Ncclimo import Climo
from jobs.Timeseries import Timeseries
from jobs.AMWGDiagnostic import AMWGDiagnostic
from jobs.CoupledDiagnostic import CoupledDiagnostic
from jobs.JobStatus import JobStatus
from lib.Monitor import Monitor
from lib.YearSet import YearSet
from lib.YearSet import SetStatus
from lib.mailer import Mailer
from lib.util import *
parser = argparse.ArgumentParser()
parser.add_argument('-c', '--config', help='Path to configuration file.')
parser.add_argument('-v', '--debug', help='Run in debug mode.', action='store_true')
parser.add_argument('-d', '--daemon', help='Run in daemon mode.', action='store_true')
parser.add_argument('-n', '--no-ui', help='Turn off the GUI.', action='store_true')
parser.add_argument('-r', '--dry-run', help='Do all setup, but dont submit jobs.', action='store_true')
parser.add_argument('-l', '--log', help='Path to logging output file.')
parser.add_argument('-u', '--no-cleanup', help='Don\'t perform pre or post run cleanup. This will leave all run scripts in place.', action='store_true')
parser.add_argument('-m', '--no-monitor', help='Don\'t run the remote monitor or move any files over globus.', action='store_true')
parser.add_argument('-V', '--viewer', help='Turn on generation for output_viewer style web pages.', action='store_true')
parser.add_argument('-s', '--size', help='The maximume size in gigabytes of a single transfer, defaults to 100. Must be larger then the largest single file.')
if not os.environ.get('NCARG_ROOT'):
print 'No NCARG_ROOT found in environment variables, is NCL installed on the machine? Check /usr/local/src/NCL-6.3.0/'
sys.exit()
def setup(parser):
    """
    Setup the config, file_list, and job_sets variables from either the config file passed from the parser
    of from the previously saved state.

    Reads the INI-style config named by --config, prompting interactively
    (getpass for anything with 'pass' in the option name) for empty values,
    creates the data-cache / output directory tree, configures logging, and
    activates the Globus endpoints.

    Returns the populated config dict, or -1 on failure.
    """
    global debug
    global config
    global file_list
    global job_sets
    global from_saved_state
    args = parser.parse_args()
    if args.debug:
        debug = True
        print_message('Running in debug mode', 'ok')
    # read through the config file and setup the config dict
    config = {}
    if not args.config:
        parser.print_help()
        sys.exit()
    else:
        try:
            confParse = ConfigParser.ConfigParser()
            confParse.read(args.config)
            for section in confParse.sections():
                config[section] = {}
                for option in confParse.options(section):
                    opt = confParse.get(section, option)
                    if not opt:
                        # empty value: prompt the user; hide input for passwords
                        if 'pass' in option and not args.no_monitor:
                            opt = getpass('>> ' + option + ': ')
                        else:
                            opt = raw_input('>> ' + option + ': ')
                    # values that look like JSON lists/objects are decoded
                    if opt.startswith('[') or opt.startswith('{'):
                        opt = json.loads(opt)
                    config[section][option] = opt
        except Exception as e:
            msg = 'Unable to read config file, is it properly formatted json?'
            print_message(msg)
            print_debug(e)
            return -1

    # map the remaining CLI flags onto the global config section
    if args.no_ui:
        config['global']['ui'] = False
    else:
        debug = False
        config['global']['ui'] = True

    if args.dry_run:
        config['global']['dry_run'] = True
    else:
        config['global']['dry_run'] = False

    if args.no_cleanup:
        config['global']['no_cleanup'] = True
    else:
        config['global']['no_cleanup'] = False

    if args.no_monitor:
        config['global']['no_monitor'] = True
        print "Turning off remote monitoring"
    else:
        config['global']['no_monitor'] = False

    if args.size:
        config['transfer']['size'] = args.size
    else:
        # default maximum transfer size in gigabytes
        config['transfer']['size'] = 100

    if args.viewer:
        print 'Turning on output_viewer mode'
        config['global']['viewer'] = True
    else:
        config['global']['viewer'] = False

    # setup config for file type directories: one cache subdirectory per
    # output pattern, with well-known patterns also recorded under a
    # dedicated config key for later lookup
    for key, val in config.get('global').get('output_patterns').items():
        new_dir = os.path.join(
            config['global']['data_cache_path'],
            key)
        if not os.path.exists(new_dir):
            os.makedirs(new_dir)
        if val == 'mpaso.hist.am.timeSeriesStatsMonthly':
            config['global']['mpas_dir'] = new_dir
        elif val == 'mpascice.hist.am.timeSeriesStatsMonthly':
            config['global']['mpas_cice_dir'] = new_dir
        elif val == 'cam.h0':
            config['global']['atm_dir'] = new_dir
        elif val == 'mpaso.rst.0':
            config['global']['mpas_rst_dir'] = new_dir
        elif val == 'rpointer':
            config['global']['rpt_dir'] = new_dir
        elif val == 'mpas-o_in':
            config['global']['mpas_o-in_dir'] = new_dir
        elif val == 'mpas-cice_in':
            config['global']['mpas_cice-in_dir'] = new_dir
        elif 'stream' in val:
            config['global']['streams_dir'] = new_dir

    if not os.path.exists(config['global']['output_path']):
        os.makedirs(config['global']['output_path'])
    if not os.path.exists(config['global']['data_cache_path']):
        os.makedirs(config['global']['data_cache_path'])

    # setup run_scipts_path
    config['global']['run_scripts_path'] = os.path.join(
        config['global']['output_path'],
        'run_scripts')
    # setup tmp_path
    config['global']['tmp_path'] = os.path.join(
        config['global']['output_path'],
        'tmp')

    # setup logging (file-only; overwritten each run via filemode='w')
    if args.log:
        log_path = args.log
    else:
        log_path = os.path.join(
            config.get('global').get('output_path'),
            'workflow.log')
    logging.basicConfig(
        format='%(asctime)s:%(levelname)s: %(message)s',
        datefmt='%m/%d/%Y %I:%M:%S %p',
        filename=log_path,
        filemode='w',
        level=logging.DEBUG)

    # activate both Globus endpoints before any transfers are attempted
    endpoints = [config['transfer']['source_endpoint'], config['transfer']['destination_endpoint']]
    if not setup_globus(endpoints):
        return -1
    print 'Globus setup complete'
    return config
def add_jobs(year_set):
    """
    Initializes and adds all the jobs to the year_set.

    Each job type (climo, timeseries, coupled_diagnostic, amwg_diagnostic)
    is added at most once per year_set: required_jobs maps each type to
    True when it is either disabled in config['global']['set_jobs'] or
    already present on the year_set.

    Returns the same year_set with jobs attached.
    """
    # each required job is a key, the value is if its in the job list already or not
    # this is here in case the jobs have already been added
    run_coupled = 'coupled_diag' not in config.get('global').get('set_jobs', True)
    patterns = config.get('global').get('output_patterns')
    # the coupled diagnostic needs all four of these input streams; if any
    # is missing, mark it "already added" so it is skipped below
    if not patterns.get('STREAMS') or \
       not patterns.get('MPAS_AM') or \
       not patterns.get('MPAS_O_IN') or \
       not patterns.get('MPAS_CICE_IN'):
        run_coupled = True
    required_jobs = {
        'climo': 'ncclimo' not in config.get('global').get('set_jobs', True),
        'timeseries': 'timeseries' not in config.get('global').get('set_jobs', True),
        'uvcmetrics': 'uvcmetrics' not in config.get('global').get('set_jobs', True),
        'coupled_diagnostic': run_coupled,
        'amwg_diagnostic': 'amwg' not in config.get('global').get('set_jobs', True)
    }
    year_set_str = 'year_set_{}'.format(year_set.set_number)
    dataset_name = '{time}_{set}_{start}_{end}'.format(
        time=time.strftime("%d-%m-%Y"),
        set=year_set.set_number,
        start=year_set.set_start_year,
        end=year_set.set_end_year)
    # create a temp directory full of just symlinks to the regridded output we need for this diagnostic job
    diag_temp_dir = os.path.join(
        config.get('global').get('tmp_path'),
        'diag',
        year_set_str)
    if not os.path.exists(diag_temp_dir):
        os.makedirs(diag_temp_dir)
    # mark job types already attached to this year_set so they are not duplicated
    for job in year_set.jobs:
        if not required_jobs[job.get_type()]:
            required_jobs[job.get_type()] = True
    # create a temp directory, and fill it with symlinks to the actual data
    key_list = []
    for year in range(year_set.set_start_year, year_set.set_end_year + 1):
        for month in range(1, 13):
            key_list.append('{0}-{1}'.format(year, month))
    climo_file_list = [file_name_list['ATM'].get(x) for x in key_list if file_name_list['ATM'].get(x)]
    climo_temp_dir = os.path.join(config['global']['tmp_path'], 'climo', year_set_str)
    create_symlink_dir(
        src_dir=config.get('global').get('atm_dir'),
        src_list=climo_file_list,
        dst=climo_temp_dir)
    g_config = config.get('global')

    # first initialize the climo job
    if not required_jobs['climo']:
        required_jobs['climo'] = True
        climo_output_dir = os.path.join(
            config.get('global').get('output_path'),
            'climo')
        if not os.path.exists(climo_output_dir):
            os.makedirs(climo_output_dir)
        # NOTE(review): regrid_output_dir is only bound inside this branch,
        # but the coupled and amwg configs below also reference it — verify
        # that the climo branch always runs before those jobs are added.
        regrid_output_dir = os.path.join(
            config.get('global').get('output_path'),
            'regrid')
        if not os.path.exists(regrid_output_dir):
            os.makedirs(regrid_output_dir)
        # create the configuration object for the climo job
        climo_config = {
            'run_scripts_path': config.get('global').get('run_scripts_path'),
            'start_year': year_set.set_start_year,
            'end_year': year_set.set_end_year,
            'caseId': config.get('global').get('experiment'),
            'annual_mode': 'sdd',
            'regrid_map_path': config.get('ncclimo').get('regrid_map_path'),
            'input_directory': climo_temp_dir,
            'climo_output_directory': climo_output_dir,
            'regrid_output_directory': regrid_output_dir,
            'year_set': year_set.set_number,
            'ncclimo_path': config.get('ncclimo').get('ncclimo_path'),
        }
        climo = Climo(climo_config, event_list=event_list)
        msg = 'Adding Ncclimo job to the job list: {}'.format(str(climo))
        logging.info(msg)
        year_set.add_job(climo)

    if not required_jobs['timeseries']:
        required_jobs['timeseries'] = True
        timeseries_output_dir = os.path.join(
            config.get('global').get('output_path'),
            'timeseries',
            'year_set_{}'.format(year_set.set_number))
        if not os.path.exists(timeseries_output_dir):
            msg = 'Creating timeseries output directory'
            logging.info(msg)
            os.makedirs(timeseries_output_dir)
        # create temp directory of symlinks to history files
        # we can reuse the input directory for the climo generation
        timeseries_config = {
            'run_scripts_path': config.get('global').get('run_scripts_path'),
            'annual_mode': 'sdd',
            'caseId': config.get('global').get('experiment'),
            'year_set': year_set.set_number,
            'var_list': config.get('ncclimo').get('var_list'),
            'start_year': year_set.set_start_year,
            'end_year': year_set.set_end_year,
            'input_directory': climo_temp_dir,
            'output_directory': timeseries_output_dir,
        }
        timeseries = Timeseries(timeseries_config, event_list=event_list)
        timeseries.depends_on = []
        msg = 'Adding Timeseries job to the job list: {}'.format(str(timeseries))
        logging.info(msg)
        year_set.add_job(timeseries)

    if not required_jobs['coupled_diagnostic']:
        required_jobs['coupled_diagnostic'] = True
        coupled_project_dir = os.path.join(
            config.get('global').get('output_path'),
            'coupled_diags',
            'year_set_' + str(year_set.set_number))
        if not os.path.exists(coupled_project_dir):
            os.makedirs(coupled_project_dir)
        host_prefix = os.path.join(
            config.get('global').get('img_host_server'),
            config.get('coupled_diags').get('host_prefix'))
        c_config = config.get('coupled_diags')
        coupled_diag_config = {
            'rpt_dir': g_config.get('rpt_dir'),
            'mpas_regions_file': g_config.get('mpas_regions_file'),
            'run_scripts_path': config.get('global').get('run_scripts_path'),
            'output_base_dir': coupled_project_dir,
            'mpas_am_dir': g_config.get('mpas_dir'),
            'mpas_cice_dir': g_config.get('mpas_cice_dir'),
            'mpas_cice_in_dir': g_config.get('mpas_cice-in_dir'),
            'mpas_o_dir': g_config.get('mpas_o-in_dir'),
            'mpas_rst_dir': g_config.get('mpas_rst_dir'),
            'streams_dir': g_config.get('streams_dir'),
            'host_prefix': host_prefix,
            'host_directory': c_config.get('host_directory'),
            'run_id': config.get('global').get('run_id'),
            'dataset_name': dataset_name,
            'year_set': year_set.set_number,
            'climo_tmp_dir': climo_temp_dir,
            'regrid_path': regrid_output_dir,
            'start_year': year_set.set_start_year,
            'end_year': year_set.set_end_year,
            'nco_path': config.get('ncclimo').get('ncclimo_path'),
            'coupled_project_dir': coupled_project_dir,
            'test_casename': g_config.get('experiment'),
            'test_native_res': c_config.get('test_native_res'),
            'test_archive_dir': diag_temp_dir,
            'test_begin_yr_climo': year_set.set_start_year,
            'test_end_yr_climo': year_set.set_end_year,
            'test_begin_yr_ts': year_set.set_start_year,
            'test_end_yr_ts': year_set.set_end_year,
            'ref_case': c_config.get('ref_case'),
            'ref_archive_dir': c_config.get('ref_archive_dir'),
            'mpas_meshfile': c_config.get('mpas_meshfile'),
            'mpas_remapfile': c_config.get('mpas_remapfile'),
            'pop_remapfile': c_config.get('pop_remapfile'),
            'remap_files_dir': c_config.get('remap_files_dir'),
            'GPCP_regrid_wgt_file': c_config.get('gpcp_regrid_wgt_file'),
            'CERES_EBAF_regrid_wgt_file': c_config.get('ceres_ebaf_regrid_wgt_file'),
            'ERS_regrid_wgt_file': c_config.get('ers_regrid_wgt_file'),
            'coupled_diags_home': c_config.get('coupled_diags_home'),
            'coupled_template_path': os.path.join(os.path.abspath(os.path.dirname(__file__)), 'resources', 'run_AIMS_template.csh'),
            'rendered_output_path': os.path.join(coupled_project_dir, 'run_AIMS.csh'),
            'obs_ocndir': c_config.get('obs_ocndir'),
            'obs_seaicedir': c_config.get('obs_seaicedir'),
            'obs_sstdir': c_config.get('obs_sstdir'),
            'depends_on': [],  # 'climo'
            'yr_offset': c_config.get('yr_offset')
        }
        coupled_diag = CoupledDiagnostic(coupled_diag_config, event_list)
        msg = 'Adding CoupledDiagnostic job to the job list: {}'.format(str(coupled_diag))
        logging.info(msg)
        year_set.add_job(coupled_diag)

    if not required_jobs['amwg_diagnostic']:
        required_jobs['amwg_diagnostic'] = True
        amwg_project_dir = os.path.join(
            config.get('global').get('output_path'),
            'amwg_diags',
            'year_set_{}'.format(year_set.set_number))
        if not os.path.exists(amwg_project_dir):
            os.makedirs(amwg_project_dir)
        host_prefix = os.path.join(
            config.get('global').get('img_host_server'),
            config.get('amwg').get('host_prefix'))
        amwg_temp_dir = os.path.join(config['global']['tmp_path'], 'amwg', year_set_str)
        # NOTE(review): this existence check is on diag_temp_dir, not the
        # amwg_temp_dir computed just above — looks like a copy/paste slip;
        # verify before relying on amwg_temp_dir existing on disk.
        if not os.path.exists(diag_temp_dir):
            os.makedirs(diag_temp_dir)
        template_path = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'resources', 'amwg_template.csh')
        amwg_config = {
            'run_scripts_path': config.get('global').get('run_scripts_path'),
            'run_id': config.get('global').get('run_id'),
            'host_directory': config.get('amwg').get('host_directory'),
            'host_prefix': host_prefix,
            'dataset_name': dataset_name,
            'diag_home': config.get('amwg').get('diag_home'),
            'test_path': amwg_project_dir + os.sep,
            'test_casename': g_config.get('experiment'),
            'test_path_history': climo_temp_dir + os.sep,
            'regrided_climo_path': regrid_output_dir,
            'test_path_climo': amwg_temp_dir,
            'test_path_diag': amwg_project_dir,
            'start_year': year_set.set_start_year,
            'end_year': year_set.set_end_year,
            'year_set': year_set.set_number,
            'run_directory': amwg_project_dir,
            'template_path': template_path,
            'depends_on': ['climo']
        }
        amwg_diag = AMWGDiagnostic(amwg_config, event_list)
        msg = 'Adding AMWGDiagnostic job to the job list: {}'.format(str(amwg_config))
        logging.info(msg)
        year_set.add_job(amwg_diag)
    return year_set
def monitor_check(monitor, config, file_list, event_list, display_event):
    """
    Check the remote directory for new files that match the given pattern,
    if there are any new files, create new transfer jobs. If they're in a new job_set,
    spawn the jobs for that set.

    inputs:
        monitor: a monitor object setup with a remote directory and an SSH session
        config: the global configuration dict
        file_list: maps file type -> file key -> SetStatus
        event_list: list of display/status strings
        display_event: threading.Event used to pause/resume the curses UI
    """
    global job_sets
    global active_transfers
    global transfer_list
    # if there are already three or more transfers in progress
    # hold off on starting any new ones until they complete
    if active_transfers >= 2:
        return
    event_list = push_event(event_list, "Running check for remote files")
    monitor.check()
    new_files = monitor.new_files
    patterns = config.get('global').get('output_patterns')
    # tag each new file with the pattern folder (its file type) it matches
    for file_info in new_files:
        for folder, file_type in patterns.items():
            if file_type in file_info['filename']:
                file_info['type'] = folder
                break
    # filter down to files we don't already have a valid local copy of
    checked_new_files = []
    for new_file in new_files:
        file_type = new_file.get('type')
        if not file_type:
            event_list = push_event(event_list, "Failed accessing remote directory, do you have access permissions?")
            continue
        # map the remote filename onto its key in file_list
        file_key = ""
        if file_type in ['ATM', 'MPAS_AM', 'MPAS_CICE', 'MPAS_RST']:
            file_key = filename_to_file_list_key(new_file['filename'])
        elif file_type == 'MPAS_CICE_IN':
            file_key = 'mpas-cice_in'
        elif file_type == 'MPAS_O_IN':
            file_key = 'mpas-o_in'
        elif file_type == 'STREAMS':
            file_key = 'streams.cice' if 'cice' in new_file['filename'] else 'streams.ocean'
        elif file_type == 'RPT':
            if 'ocn' in new_file['filename']:
                file_key = 'rpointer.ocn'
            elif 'atm' in new_file['filename']:
                file_key = 'rpointer.atm'
            else:
                continue
        try:
            status = file_list[file_type][file_key]
        except KeyError:
            # file doesn't map onto anything we track; ignore it
            continue
        if not status:
            continue
        if status == SetStatus.DATA_READY:
            local_path = os.path.join(
                config.get('global').get('data_cache_path'),
                new_file['type'],
                new_file['filename'].split('/')[-1])
            if not os.path.exists(local_path):
                checked_new_files.append(new_file)
                continue
            # size mismatch means a partial/corrupt copy: re-transfer it
            if not int(os.path.getsize(local_path)) == int(new_file['size']):
                os.remove(local_path)
                checked_new_files.append(new_file)
        if status == SetStatus.NO_DATA:
            checked_new_files.append(new_file)

    # if there are no new files, nothing to do
    if not checked_new_files:
        return

    # find which year set the data belongs to and spawn its jobs
    frequencies = config.get('global').get('set_frequency')
    for file_info in checked_new_files:
        if file_info['type'] != 'ATM':
            continue
        for freq in frequencies:
            year_set = filename_to_year_set(file_info['filename'], freq)
            for job_set in job_sets:
                if job_set.set_number == year_set and job_set.status == SetStatus.NO_DATA:
                    job_set.status = SetStatus.PARTIAL_DATA
                    # Spawn jobs for that yearset
                    job_set = add_jobs(job_set)

    t_config = config.get('transfer')
    g_config = config.get('global')
    m_config = config.get('monitor')
    transfer_config = {
        'size': t_config.get('size'),
        'file_list': checked_new_files,
        'globus_username': t_config.get('globus_username'),
        'globus_password': t_config.get('globus_password'),
        'source_username': m_config.get('compute_username'),
        'source_password': m_config.get('compute_password'),
        'destination_username': t_config.get('processing_username'),
        'destination_password': t_config.get('processing_password'),
        'source_endpoint': t_config.get('source_endpoint'),
        'destination_endpoint': t_config.get('destination_endpoint'),
        'source_path': t_config.get('source_path'),
        'destination_path': g_config.get('data_cache_path') + '/',
        'recursive': 'False',
        'pattern': config.get('global').get('output_patterns'),
        'ncclimo_path': config.get('ncclimo').get('ncclimo_path')
    }

    # Check if the user is logged in, and all endpoints are active;
    # if activation fails, drop out of the UI so the user can re-auth
    endpoints = [config['transfer']['source_endpoint'], config['transfer']['destination_endpoint']]
    client = get_client()
    for endpoint in endpoints:
        r = client.endpoint_autoactivate(endpoint, if_expires_in=3600)
        if r["code"] == "AutoActivationFailed":
            display_event.set()
            sleep(3)
            while not setup_globus(endpoints):
                sleep(1)
            display_event.clear()
            display_thread = threading.Thread(target=start_display, args=(config, display_event))
            display_thread.start()

    transfer = Transfer(transfer_config, event_list)
    # mark everything in the new transfer as in transit
    for item in transfer.config.get('file_list'):
        item_name = item['filename'].split('/').pop()
        item_type = item['type']
        if item_type in ['ATM', 'MPAS_AM']:
            file_key = filename_to_file_list_key(item_name)
        elif item_type == 'MPAS_CICE':
            file_key = 'mpas-cice_in'
        elif item_type == 'MPAS_O':
            file_key = 'mpas-o_in'
        elif item_type == 'MPAS_RST':
            file_key = '0002-01-01'
        elif item_type == 'RPT':
            file_key = 'rpointer.ocn' if 'ocn' in item_name else 'rpointer.atm'
        elif item_type == 'STREAMS':
            # was 'file_key ==' (a no-op comparison), which left file_key
            # stale from the previous iteration; fixed to an assignment
            file_key = 'streams.cice' if 'cice' in item_name else 'streams.ocean'
        file_list[item_type][file_key] = SetStatus.IN_TRANSIT

    # build a human readable "<start> to <end>" range from the year-month
    # portion surrounding the first '-' in the first/last filenames
    start_file = transfer.config.get('file_list')[0]['filename']
    end_file = transfer.config.get('file_list')[-1]['filename']
    index = start_file.find('-')
    start_readable = start_file[index - 4: index + 3]
    index = end_file.find('-')
    end_readable = end_file[index - 4: index + 3]
    message = 'Found {0} new remote files, creating transfer job from {1} to {2}'.format(
        len(checked_new_files),
        start_readable,
        end_readable)
    event_list = push_event(event_list, message)
    logging.info('## ' + message)

    if not config.get('global').get('dry_run', False):
        # retry thread creation until it succeeds (e.g. thread limit hit)
        while True:
            try:
                thread = threading.Thread(target=handle_transfer, args=(transfer, checked_new_files, thread_kill_event, event_list))
            except Exception:
                sleep(1)
            else:
                thread_list.append(thread)
                thread.start()
                break
def handle_transfer(transfer_job, f_list, event, event_list):
    """
    Wrapper around the transfer.execute() method, meant to be run inside a thread

    inputs:
        transfer_job: the transfer job to execute and monitor
        f_list: the list of files being transfered
        event: a thread event to handle shutting down from keyboard exception, not used in this case
            but it needs to be there for any threads handlers
        event_list: list of display/status strings
    """
    global active_transfers
    active_transfers += 1
    # start the transfer job (blocks until the transfer finishes or fails)
    transfer_job.execute(event, event_list)
    # the transfer is complete, so we can decrement the active_transfers counter
    active_transfers -= 1

    if transfer_job.status != JobStatus.COMPLETED:
        print_message("File transfer failed")
        message = "## Transfer {uuid} has failed".format(uuid=transfer_job.uuid)
        logging.error(message)
        # NOTE: the 'Tranfer' misspelling is load-bearing — display() hides
        # event lines containing the word 'Transfer' from the event pane
        event_list = push_event(event_list, 'Tranfer failed')
        return
    else:
        message = "## Transfer {uuid} has completed".format(uuid=transfer_job.uuid)
        logging.info(message)

    # mark every transferred file as ready in the global file_list
    for item in transfer_job.config['file_list']:
        item_name = item['filename'].split('/').pop()
        item_type = item['type']
        if item_type in ['ATM', 'MPAS_AM', 'MPAS_RST']:
            file_key = filename_to_file_list_key(item_name)
        elif item_type == 'MPAS_CICE':
            file_key = 'mpas-cice_in'
        elif item_type == 'MPAS_O':
            file_key = 'mpas-o_in'
        elif item_type == 'STREAMS':
            # was 'file_key ==' (a no-op comparison), which left file_key
            # stale from the previous iteration; fixed to an assignment
            file_key = 'streams.cice' if 'cice' in item_name else 'streams.ocean'
        elif item_type == 'RPT':
            file_key = 'rpointer.ocn' if 'ocn' in item_name else 'rpointer.atm'
        file_list[item_type][file_key] = SetStatus.DATA_READY
def is_all_done():
    """
    Return True only when every job_set has reached SetStatus.COMPLETED,
    i.e. all processing for the run has finished.
    """
    return all(job_set.status == SetStatus.COMPLETED for job_set in job_sets)
def cleanup():
    """
    Remove the run's temp directories and move the run scripts into a
    timestamped archive directory. Skipped entirely when the user passed
    --no-cleanup (config['global']['no_cleanup']).
    """
    if config.get('global').get('no_cleanup'):
        return
    logging.info('Cleaning up temp directories')

    # best effort: log but don't abort the run on cleanup failure
    try:
        temp_dir = config.get('global').get('tmp_path')
        if os.path.exists(temp_dir):
            rmtree(temp_dir)
    except Exception as err:
        logging.error(format_debug(err))
        print_message('Error removing temp directories')

    try:
        archive_dir = os.path.join(
            config.get('global').get('output_path'),
            'script_archive',
            time.strftime("%Y-%m-%d-%I-%M"))
        if not os.path.exists(archive_dir):
            os.makedirs(archive_dir)
        scripts_dir = config.get('global').get('run_scripts_path')
        if os.path.exists(scripts_dir):
            move(scripts_dir, archive_dir)
    except Exception as err:
        logging.error(format_debug(err))
        logging.error('Error archiving run_scripts directory')
def xy_check(x, y, hmax, wmax):
    """
    Return -1 when the cursor position (x, y) falls outside the drawable
    pad area bounded by wmax columns and hmax rows, otherwise 0.
    """
    out_of_bounds = y >= hmax or x >= wmax
    return -1 if out_of_bounds else 0
def write_line(pad, line, x, y, color):
    """
    Write a colored string to the curses pad at row y, column x.

    curses raises curses.error when the text would land outside the pad
    (e.g. after a resize); that is expected here and silently ignored.
    The original used a bare `except:` which also hid real bugs — narrowed
    to the specific curses error.
    """
    try:
        pad.addstr(y, x, line, color)
    except curses.error:
        pass
def display(stdscr, event, config):
    """
    Display current execution status via curses.

    Runs until the user presses 'w' (drops back to console mode) or the
    supplied threading.Event is set by the main thread. Redraws once per
    second: year-set/job statuses, the last 10 event lines, active
    transfer progress, hosted-output URLs, and a spinner.
    """
    initializing = True
    height, width = stdscr.getmaxyx()
    # leave margins: 3 rows / 5 columns around the pad
    hmax = height - 3
    wmax = width - 5
    spinner = ['\\', '|', '/', '-']
    spin_index = 0
    spin_len = 4
    try:
        stdscr.nodelay(True)
        curses.curs_set(0)
        # color pairs: 1 header, 3 red/failed, 4 plain, 5 green/ok,
        # 6 magenta/running, 7 yellow/pending, 8 background
        curses.init_pair(1, curses.COLOR_BLACK, curses.COLOR_WHITE)
        curses.init_pair(2, curses.COLOR_GREEN, curses.COLOR_WHITE)
        curses.init_pair(3, curses.COLOR_RED, curses.COLOR_BLACK)
        curses.init_pair(4, curses.COLOR_WHITE, curses.COLOR_BLACK)
        curses.init_pair(5, curses.COLOR_GREEN, curses.COLOR_BLACK)
        curses.init_pair(6, curses.COLOR_MAGENTA, curses.COLOR_BLACK)
        curses.init_pair(7, curses.COLOR_YELLOW, curses.COLOR_BLACK)
        curses.init_pair(8, curses.COLOR_BLACK, curses.COLOR_BLACK)
        stdscr.bkgd(curses.color_pair(8))
        pad = curses.newpad(hmax, wmax)
        last_y = 0
        while True:
            # non-blocking key read: handle resize and the 'w' quit key
            c = stdscr.getch()
            if c == curses.KEY_RESIZE:
                height, width = stdscr.getmaxyx()
                hmax = height - 3
                wmax = width - 5
                pad.resize(hmax, wmax)
            elif c == ord('w'):
                config['global']['ui'] = False
                pad.clear()
                del pad
                curses.endwin()
                return
            if len(job_sets) == 0:
                sleep(1)
                continue
            pad.clrtobot()
            y = 0
            x = 0
            for year_set in job_sets:
                line = 'Year_set {num}: {start} - {end}'.format(
                    num=year_set.set_number,
                    start=year_set.set_start_year,
                    end=year_set.set_end_year)
                #pad.addstr(y, x, line, curses.color_pair(1))
                write_line(pad, line, x, y, curses.color_pair(1))
                pad.clrtoeol()
                y += 1
                # if xy_check(x, y, hmax, wmax) == -1:
                #     sleep(1)
                #     break
                color_pair = curses.color_pair(4)
                if year_set.status == SetStatus.COMPLETED:
                    color_pair = curses.color_pair(5)
                elif year_set.status == SetStatus.FAILED:
                    color_pair = curses.color_pair(3)
                elif year_set.status == SetStatus.RUNNING:
                    color_pair = curses.color_pair(6)
                line = 'status: {status}'.format(
                    status=year_set.status)
                #pad.addstr(y, x, line, color_pair)
                write_line(pad, line, x, y, color_pair)
                # during the first pass, refresh after each line so the
                # screen appears to "roll in"
                if initializing:
                    sleep(0.01)
                    pad.refresh(0, 0, 3, 5, hmax, wmax)
                pad.clrtoeol()
                y += 1
                # if xy_check(x, y, hmax, wmax) == -1:
                #     sleep(1)
                #     break
                # if y >= (hmax/3):
                #     last_y = y
                #     y = 0
                #     x += (wmax/2)
                #     if x >= wmax:
                #         break
                # per-job detail only for sets that are actively processing
                if year_set.status == SetStatus.COMPLETED \
                        or year_set.status == SetStatus.NO_DATA \
                        or year_set.status == SetStatus.PARTIAL_DATA:
                    continue
                for job in year_set.jobs:
                    line = ' > {type} -- {id} '.format(
                        type=job.get_type(),
                        id=job.job_id)
                    # pad.addstr(y, x, line, curses.color_pair(4))
                    write_line(pad, line, x, y, curses.color_pair(4))
                    color_pair = curses.color_pair(4)
                    if job.status == JobStatus.COMPLETED:
                        color_pair = curses.color_pair(5)
                    elif job.status in [JobStatus.FAILED, 'CANCELED', JobStatus.INVALID]:
                        color_pair = curses.color_pair(3)
                    elif job.status == JobStatus.RUNNING:
                        color_pair = curses.color_pair(6)
                    elif job.status == JobStatus.SUBMITTED or job.status == JobStatus.PENDING:
                        color_pair = curses.color_pair(7)
                    line = '{status}'.format(status=job.status)
                    # two-arg addstr appends at the current cursor position
                    pad.addstr(line, color_pair)
                    pad.clrtoeol()
                    if initializing:
                        sleep(0.01)
                        pad.refresh(0, 0, 3, 5, hmax, wmax)
                    y += 1
                    # if y >= (hmax/3):
                    #     last_y = y
                    #     y = 0
                    #     x += (wmax/2)
                    #     if x >= wmax:
                    #         break
                x = 0
                if last_y:
                    y = last_y
                # pad.refresh(0, 0, 3, 5, hmax, wmax)
                pad.clrtobot()
                y += 1
                # if xy_check(x, y, hmax, wmax) == -1:
                #     sleep(1)
                #     continue
            # event pane: last 10 events, skipping transfer/hosted lines
            # which get their own sections below
            for line in event_list[-10:]:
                if 'Transfer' in line:
                    continue
                if 'hosted' in line:
                    continue
                if 'failed' in line or 'FAILED' in line:
                    prefix = '[-]  '
                    pad.addstr(y, x, prefix, curses.color_pair(3))
                else:
                    prefix = '[+]  '
                    pad.addstr(y, x, prefix, curses.color_pair(5))
                pad.addstr(line, curses.color_pair(4))
                pad.clrtoeol()
                if initializing:
                    sleep(0.01)
                    pad.refresh(0, 0, 3, 5, hmax, wmax)
                #pad.refresh(0, 0, 3, 5, hmax, wmax)
                y += 1
                if xy_check(x, y, hmax, wmax) == -1:
                    sleep(1)
                    break
            pad.clrtobot()
            y += 1
            if xy_check(x, y, hmax, wmax) == -1:
                sleep(1)
                continue
            # per-year file availability pane (currently disabled)
            file_start_y = y
            file_end_y = y
            file_display_list = []
            current_year = 1
            year_ready = True
            partial_data = False
            # for line in sorted(file_list, cmp=file_list_cmp):
            #     index = line.find('-')
            #     year = int(line[:index])
            #     month = int(line[index + 1:])
            #     if month == 1:
            #         year_ready = True
            #         partial_data = False
            #     if file_list[line] != SetStatus.DATA_READY:
            #         year_ready = False
            #     else:
            #         partial_data = True
            #     if month == 12:
            #         if year_ready:
            #             status = SetStatus.DATA_READY
            #         else:
            #             if partial_data:
            #                 status = SetStatus.PARTIAL_DATA
            #             else:
            #                 status = SetStatus.NO_DATA
            #         file_display_list.append('Year {year} - {status}'.format(
            #             year=year,
            #             status=status))
            # line_length = len(file_display_list[0])
            # num_cols = wmax/line_length
            # for line in file_display_list:
            #     if x + len(line) >= wmax:
            #         diff = wmax - (x + len(line))
            #         line = line[:diff]
            #     pad.addstr(y, x, line, curses.color_pair(4))
            #     pad.clrtoeol()
            #     y += 1
            #     if y >= (hmax-10):
            #         y = file_start_y
            #         x += line_length + 5
            #         if x >= wmax:
            #             break
            #     if y > file_end_y:
            #         file_end_y = y
            y = file_end_y + 1
            x = 0
            msg = 'Active transfers: {}'.format(active_transfers)
            pad.addstr(y, x, msg, curses.color_pair(4))
            pad.clrtoeol()
            if active_transfers:
                # show progress lines ("... NN%") for in-flight transfers
                for line in event_list:
                    if 'Transfer' in line:
                        index = line.find('%')
                        if index:
                            s_index = line.rfind(' ', 0, index)
                            percent = float(line[s_index: index])
                            if percent < 100:
                                y += 1
                                pad.addstr(y, x, line, curses.color_pair(4))
                                pad.clrtoeol()
            # list URLs of hosted diagnostic output
            for line in event_list:
                if 'hosted' in line:
                    y += 1
                    pad.addstr(y, x, line, curses.color_pair(4))
            # heartbeat spinner so the user can tell the UI is alive
            spin_line = spinner[spin_index]
            spin_index += 1
            if spin_index == spin_len:
                spin_index = 0
            y += 1
            pad.addstr(y, x, spin_line, curses.color_pair(4))
            pad.clrtoeol()
            pad.clrtobot()
            y += 1
            if event and event.is_set():
                return
            pad.refresh(0, 0, 3, 5, hmax, wmax)
            initializing = False
            sleep(1)
    except KeyboardInterrupt as e:
        raise
def sigwinch_handler(n, frame):
    """
    SIGWINCH (terminal resize) handler: tear down and reinitialize
    curses so it picks up the new terminal dimensions.
    """
    curses.endwin()
    curses.initscr()
def start_display(config, event):
    """
    Run the curses UI (display) inside curses.wrapper, which restores the
    terminal on exit. A Ctrl-C simply ends the UI thread quietly.
    """
    try:
        curses.wrapper(display, event, config)
    except KeyboardInterrupt:
        return
if __name__ == "__main__":
    # ---- global run state ----
    # A list of all the expected files
    file_list = {}
    # A list of all the file names
    file_name_list = {}
    # Describes the state of each job jet
    job_sets = []
    # The master configuration object
    config = {}
    # A list of all the threads
    thread_list = []
    # An event to kill the threads on terminal exception
    thread_kill_event = threading.Event()
    display_event = threading.Event()
    debug = False
    from_saved_state = False
    # The number of active globus transfers
    active_transfers = 0
    # A flag to tell if we have all the data locally
    all_data = False
    # Read in parameters from config
    config = setup(parser)
    # A list of strings for holding display info
    event_list = []
    # A list of files that have been transfered
    transfer_list = []
    state = []
    if config == -1:
        print "Error in setup, exiting"
        sys.exit(1)
    # check that all netCDF files exist
    path_exists(config)
    # cleanup any temp directories from previous runs
    cleanup()
    if not os.path.exists(config['global']['run_scripts_path']):
        os.makedirs(config['global']['run_scripts_path'])
    if not os.path.exists(config['global']['tmp_path']):
        os.makedirs(config['global']['tmp_path'])

    # start the curses UI in its own thread unless --no-ui was given
    if config.get('global').get('ui', False):
        try:
            sys.stdout.write('Turning on the display')
            for i in range(8):
                sys.stdout.write('.')
                sys.stdout.flush()
                sleep(0.1)
            print '\n'
            diaplay_thread = threading.Thread(target=start_display, args=(config, display_event))
            diaplay_thread.start()
        except KeyboardInterrupt as e:
            print 'keyboard'
            display_event.set()
            sys.exit()

    # compute number of expected year_sets
    sim_start_year = int(config.get('global').get('simulation_start_year'))
    sim_end_year = int(config.get('global').get('simulation_end_year'))
    number_of_sim_years = sim_end_year - (sim_start_year - 1)
    frequencies = config.get('global').get('set_frequency')
    if not from_saved_state:
        job_sets = []
        line = 'Initializing year sets'
        event_list = push_event(event_list, line)
        # one YearSet per <freq>-year window, for every configured frequency
        for freq in frequencies:
            freq = int(freq)
            # NOTE: Python 2 integer division — truncates partial final sets
            year_set = number_of_sim_years / freq
            # initialize the job_sets dict
            for i in range(1, year_set + 1):
                set_start_year = sim_start_year + ((i - 1) * freq)
                set_end_year = set_start_year + freq - 1
                new_set = YearSet(
                    set_number=len(job_sets) + 1,
                    start_year=set_start_year,
                    end_year=set_end_year)
                job_sets.append(new_set)

    # initialize the file_list: every expected file starts as NO_DATA
    line = 'Initializing file list'
    event_list = push_event(event_list, line)
    for key, val in config.get('global').get('output_patterns').items():
        file_list[key] = {}
        file_name_list[key] = {}
        if key in ['ATM', 'MPAS_AM', 'MPAS_CICE']:
            # monthly history files keyed 'year-month'
            for year in range(1, number_of_sim_years + 1):
                for month in range(1, 13):
                    file_key = str(year) + '-' + str(month)
                    file_list[key][file_key] = SetStatus.NO_DATA
                    file_name_list[key][file_key] = ''
        elif key == 'MPAS_CICE_IN':
            file_list[key]['mpas-cice_in'] = SetStatus.NO_DATA
        elif key == 'MPAS_O_IN':
            file_list[key]['mpas-o_in'] = SetStatus.NO_DATA
        elif key == 'RPT':
            file_list[key]['rpointer.ocn'] = SetStatus.NO_DATA
            file_list[key]['rpointer.atm'] = SetStatus.NO_DATA
        elif key == 'MPAS_RST':
            # restart files start at year 2
            for year in range(2, number_of_sim_years + 1):
                file_key = '{year}-1'.format(year=year)
                file_list[key][file_key] = SetStatus.NO_DATA
        elif key == 'STREAMS':
            file_list[key]['streams.ocean'] = SetStatus.NO_DATA
            file_list[key]['streams.cice'] = SetStatus.NO_DATA

    # Check for any data already on the System
    all_data = check_for_inplace_data(
        file_list=file_list,
        file_name_list=file_name_list,
        job_sets=job_sets,
        config=config)
    check_year_sets(
        job_sets=job_sets,
        file_list=file_list,
        sim_start_year=config.get('global').get('simulation_start_year'),
        sim_end_year=config.get('global').get('simulation_end_year'),
        debug=debug,
        add_jobs=add_jobs)
    state_path = os.path.join(
        config.get('global').get('output_path'),
        'run_state.txt')

    # dry-run mode: write the human-readable state file and exit cleanly
    if config.get('global').get('dry_run', False):
        event_list = push_event(event_list, 'Running in dry-run mode')
        write_human_state(event_list, job_sets, state_path)
        if not config.get('global').get('no-ui', False):
            sleep(50)
        display_event.set()
        for t in thread_list:
            thread_kill_event.set()
            t.join()
        sys.exit()

    if all_data:
        # print_message('All data is local, disabling remote monitor', 'ok')
        line = 'All data is local, disabling remote monitor'
        event_list = push_event(event_list, line)
    else:
        # print_message('More data needed, enabling remote monitor', 'ok')
        line = 'More data needed, enabling remote monitor'
        event_list = push_event(event_list, line)

    # If all the data is local, dont start the monitor
    if all_data or config.get('global').get('no_monitor', False):
        monitor = None
    else:
        output_pattern = config.get('global').get('output_patterns')
        patterns = [v for k, v in config.get('global').get('output_patterns').items()]
        monitor_config = {
            'source_endpoint': config.get('transfer').get('source_endpoint'),
            'remote_dir': config.get('transfer').get('source_path'),
            'username': config.get('monitor').get('compute_username'),
            'patterns': patterns,
            'file_list': file_list
        }
        monitor = Monitor(monitor_config)
        if not monitor:
            line = 'error setting up monitor'
            event_list = push_event(event_list, line)
            sys.exit()
        line = 'Attempting connection to {}'.format(config.get('monitor').get('source_endpoint'))
        event_list = push_event(event_list, line)
        status, message = monitor.connect()
        event_list = push_event(event_list, message)
        if not status:
            # can't reach the remote side: shut down UI/threads and bail
            line = "Unable to connect to globus service, exiting"
            logging.error(line)
            event_list = push_event(event_list, line)
            sleep(4)
            display_event.set()
            for t in thread_list:
                thread_kill_event.set()
                t.join()
            sleep(1)
            print line
            sleep(1)
            sys.exit(1)
        else:
            line = 'Connected'
            logging.info(line)
            event_list = push_event(event_list, line)

    # Main loop
    try:
        # start at 6 so the first iteration runs a monitor check immediately
        loop_count = 6
        while True:
            # only check the monitor once a minute, but check for jobs every loop
            if monitor and \
               not all_data and \
               not config.get('global').get('no_monitor', False) and \
               loop_count >= 6:
                monitor_check(monitor, config, file_list, event_list, display_event)
                loop_count = 0
            all_data = check_for_inplace_data(
                file_list=file_list,
                file_name_list=file_name_list,
                job_sets=job_sets,
                config=config)
            check_year_sets(
                job_sets=job_sets,
                file_list=file_list,
                sim_start_year=config.get('global').get('simulation_start_year'),
                sim_end_year=config.get('global').get('simulation_end_year'),
                debug=debug,
                add_jobs=add_jobs)
            start_ready_job_sets(
                job_sets=job_sets,
                thread_list=thread_list,
                debug=debug,
                event=thread_kill_event,
                upload_config=config.get('upload_diagnostic'),
                event_list=event_list)
            write_human_state(event_list, job_sets, state_path)
            if is_all_done():
                if not config.get('global').get('no-cleanup', False):
                    cleanup()
                message = ' ---- All processing complete ----'
                # send a completion email with the hosted-output URLs, if
                # an address was configured; failure here is non-fatal
                emailaddr = config.get('global').get('email')
                if emailaddr:
                    try:
                        msg = '''
Processing job {id} has completed successfully
You can view your diagnostic output here:\n'''.format(
                            id= config.get('global').get('run_id'))
                        for event in event_list:
                            if 'hosted' in event:
                                msg += event + '\n'
                        m = Mailer(src=emailaddr, dst=emailaddr)
                        m.send(
                            status=message,
                            msg=msg)
                    except Exception as e:
                        logging.error(format_debug(e))
                event_list = push_event(event_list, message)
                sleep(5)
                display_event.set()
                sleep(2)
                print_message(message, 'ok')
                logging.info("## All processes complete")
                sys.exit(0)
            sleep(10)
            loop_count += 1
    except KeyboardInterrupt as e:
        print_message('----- KEYBOARD INTERUPT -----')
        print_message('cleaning up threads', 'ok')
        display_event.set()
        for t in thread_list:
            thread_kill_event.set()
            t.join()
986,097 | 984a31ef042251aee044a9ac8e696e9bfd527629 | # maze using Q-learning algorithm.
import numpy as np
from maze_recursive_division import generateMaze
def init(t_nrow, t_ncol, t_nAction=4):
    r"""
    Build the Q-learning working arrays for a t_nrow x t_ncol grid.

    Returns
    -------
    Qtable : (nState, t_nAction) float32 array, zero-initialised
        One row per cell, one column per action (DOWN, RIGHT, LEFT, UP).
    movements : (4, 2) int16 array
        action index -> (row, col) position increment.
    States : (nState, 2) int16 array
        state index -> (row, col) pixel position, row-major order.
    rewards : (4,) int16 array
        maze pixel value -> reward (barrier, start, end, road).
    """
    n_states = t_nrow * t_ncol
    # Q table starts with no knowledge at all
    q_table = np.zeros((n_states, t_nAction), dtype=np.float32)
    # row-major "state index -> pixel position" lookup
    states = np.empty((n_states, 2), np.int16)
    for state_idx in range(n_states):
        states[state_idx] = divmod(state_idx, t_ncol)
    # "choice -> position increment": DOWN, RIGHT, LEFT, UP
    moves = np.array([[+1, 0], [0, +1], [0, -1], [-1, 0]], np.int16)
    # "maze pixel value -> reward"
    rewards = np.array([-100, -1000, +1000, +1], np.int16)
    return q_table, moves, states, rewards
def move(QTable_row, t_pos, t_movements, eps=0.9):
    r"""
    Pick an action for the state whose Q-values are *QTable_row*, advance
    *t_pos* in place by the matching movement vector, and return the choice.

    Greedy with respect to the Q-values; an all-zero row (a state never
    updated yet) falls back to a uniformly random action.  ``eps`` is
    accepted for an eps-greedy policy but is currently unused.
    """
    if QTable_row.any():
        # at least one informative Q-value: exploit the best-known action
        choice = QTable_row.argmax()
    else:
        # untouched state: explore uniformly at random
        choice = np.random.randint(low=0, high=QTable_row.size, size=1)[0]
    # advance the caller's position in place
    t_pos += t_movements[choice, :]
    return choice
def playAGame(t_Param, t_Qtable, t_Movements, t_States, t_Rewards, t_Maze, t_line=None, t_point=None):
    r"""
    Run one Q-learning episode through the maze, updating *t_Qtable* in place.

    Maze pixel values and their rewards (see ``t_Rewards``):

            |environment  reward
    --------|-----------------------------
    barrier |     0        -100
    start   |     1        -1000
    end     |     2        +1000
    road    |     3        +1

    The walk starts at (1, 1), the cell next to the maze entrance.

    Parameters
    ----------
    t_Param : dict with at least "ncol", "nStep_Max", "alpha", "gamma".
    t_Qtable, t_Movements, t_States, t_Rewards : arrays produced by init().
    t_Maze : 2-D array of pixel values (see table above).
    t_line, t_point : optional matplotlib Line2D artists; when both are given
        the walked path is animated (requires a module-level ``plt``).

    Returns
    -------
    (env, step, pos)
        Final pixel value (4 when the step budget ran out), number of steps
        taken, and the final (row, col) position.

    Fixes vs. the original version:
    * the Q-update reads alpha/gamma from ``t_Param`` instead of the
      module-global ``Param`` (which only exists when run as a script);
    * ``env`` and ``step`` are initialised so an immediate break (or
      nStep_Max == 0) cannot raise NameError;
    * the point artist is given a sequence ``[pos[0]]`` rather than a scalar.
    """
    # start from the position next to the entrance of the maze
    pos = np.array([1, 1], np.int16)
    # rolling memory of the last two visited state indices (turn-back penalty)
    path = [0, 0]
    animate = t_line is not None and t_point is not None
    if animate:
        xdata = [pos[1]]
        ydata = [pos[0]]
        t_line.set_xdata(xdata); t_line.set_ydata(ydata)
        t_point.set_xdata([pos[1]]); t_point.set_ydata([pos[0]])
        plt.pause(0.01)
    env = 4   # sentinel: "step budget exhausted"
    step = 0
    for k in range(t_Param["nStep_Max"]):
        state_idx = t_Param["ncol"] * pos[0] + pos[1]
        # keep only the two most recent states
        path.append(state_idx)
        path.pop(0)
        # pick an action (greedy / random) and advance pos in place
        choice = move(t_Qtable[state_idx, :], pos, t_Movements)
        step = k + 1
        if animate:
            xdata.append(pos[1]); ydata.append(pos[0])
            t_line.set_xdata(xdata); t_line.set_ydata(ydata)
            t_point.set_xdata([pos[1]]); t_point.set_ydata([pos[0]])
            plt.pause(0.01)
        state_idx_new = t_Param["ncol"] * pos[0] + pos[1]
        env = t_Maze[pos[0], pos[1]]
        # punish immediately turning back; otherwise reward by pixel value
        if state_idx_new in path:
            R = -2
        else:
            R = t_Rewards[env]
        # standard Q-learning update (was: module-global Param — bug)
        try:
            t_Qtable[state_idx, choice] = (
                (1 - t_Param["alpha"]) * t_Qtable[state_idx, choice]
                + t_Param["alpha"] * (R + t_Param["gamma"] * t_Qtable[state_idx_new, :].max())
            )
        except IndexError:
            # pos walked off the grid on the high side; abandon the episode
            print(pos[0], pos[1])
            break
        # anything but plain road ends the episode
        if env != 3:
            break
    # preserve the original convention: a full-length episode reports env = 4
    if step == t_Param["nStep_Max"]:
        env = 4
    return env, step, tuple(pos)
if __name__ == "__main__":
    # Hyper-parameters for the maze geometry and the Q-learning run.
    Param = {
        "nrow": 5*3,        # maze height in pixels (3 pixels per maze cell)
        "ncol": 5*3,        # maze width in pixels
        "nGame": 100,       # maximum number of training episodes
        "nStep_Max": 100,   # step budget per episode
        "alpha": 0.8,       # weight of the new estimate in the Q-update
        "gamma": 0.2        # discount on the next state's best Q-value
    }
    # generate Maze
    # NOTE(review): generateMaze appears to take the size in 3x3-pixel maze
    # cells (nrow//3, ncol//3) — confirm against maze_recursive_division.
    Maze= generateMaze(Param["nrow"]//3, Param["ncol"]//3, seed=24)
    # Display the image
    # NOTE: plt is imported here inside the __main__ guard; playAGame refers
    # to the same module-global name when t_line/t_point are supplied.
    from matplotlib import pyplot as plt
    plt.imshow(Maze, interpolation='none')
    # Artists updated in place by playAGame: walked path and current position.
    line, = plt.plot([], [], "-", linewidth=1, color="red")
    point, = plt.plot([], [], "o", linewidth=1, color="red")
    plt.pause(0.1)
    #print(set(Maze.reshape(-1).tolist()))
    Qtable, Movements, States, Rewards = init(Param["nrow"], Param["ncol"], t_nAction=4)
    nstep_old = 0
    for g in range(Param["nGame"]):
        #break # turn off learning
        env, step_collapse, final_pos = playAGame(Param, Qtable, Movements,
                States, Rewards, Maze, t_line=line, t_point=point)
        # print out the final state, collapsed step and final position
        print(env, step_collapse, final_pos)
        # if reaching goal with a constant number of step, quit learning.
        # env == 2 means the goal pixel was reached; an unchanged step count
        # from the previous episode is taken as evidence of convergence.
        if env == 2 and step_collapse == nstep_old:
            break
        else:
            nstep_old = step_collapse
    plt.show()
|
def isValid(s):
    """Return "YES" if *s* is a "valid" string, else "NO".

    A string is valid when every character occurs the same number of times,
    or becomes so after removing exactly one occurrence of one character
    (which may delete that character entirely).

    Fix vs. the original version: the original only tried decrementing the
    single most frequent character, so it rejected strings like "aabbccd",
    which become valid by deleting the lone 'd' outright.
    """
    if not s:
        # nothing to balance
        return "YES"
    # character -> occurrence count
    char_counts = {}
    for ch in s:
        char_counts[ch] = char_counts.get(ch, 0) + 1
    # occurrence count -> how many characters have that count
    count_of_counts = {}
    for c in char_counts.values():
        count_of_counts[c] = count_of_counts.get(c, 0) + 1
    if len(count_of_counts) == 1:
        # all characters already share one frequency
        return "YES"
    if len(count_of_counts) != 2:
        # three or more distinct frequencies can never be fixed by one removal
        return "NO"
    (low, low_n), (high, high_n) = sorted(count_of_counts.items())
    # case 1: a single character occurs exactly once — delete it entirely
    if low == 1 and low_n == 1:
        return "YES"
    # case 2: a single character exceeds the common frequency by exactly one —
    # drop one of its occurrences
    if high - low == 1 and high_n == 1:
        return "YES"
    return "NO"
986,099 | 3fc0e0bb9fb3d98d87b77d05b00016c5baa00cae | from django.contrib import admin
from models import Tipo_grupo_especialidad, Grupo_especialidad, Especialidad
from models import Doctor, Doctor_horario, Paciente
from models import Cita
# from models import Tipo_grupo, Tipo, Doctor, Paciente
class DoctorAdmin(admin.ModelAdmin):
    """Admin options for Doctor: show name and phone columns in the changelist."""
    list_display = ('nombres', 'apellidos', 'telefono_fijo', 'telefono_celular')
# Register the clinic models with the default admin site.
# NOTE(review): the two commented-out registrations below match the
# commented-out import above — presumably obsolete models; confirm.
# admin.site.register(Tipo_grupo)
# admin.site.register(Tipo)
admin.site.register(Paciente)
admin.site.register(Tipo_grupo_especialidad)
admin.site.register(Grupo_especialidad)
admin.site.register(Especialidad)
# Doctor gets the customised changelist defined above.
admin.site.register(Doctor, DoctorAdmin)
admin.site.register(Doctor_horario)
admin.site.register(Cita) |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.