max_stars_repo_path stringlengths 4 286 | max_stars_repo_name stringlengths 5 119 | max_stars_count int64 0 191k | id stringlengths 1 7 | content stringlengths 6 1.03M | content_cleaned stringlengths 6 1.03M | language stringclasses 111 values | language_score float64 0.03 1 | comments stringlengths 0 556k | edu_score float64 0.32 5.03 | edu_int_score int64 0 5 |
|---|---|---|---|---|---|---|---|---|---|---|
utils/NNspecifications.py | webclinic017/time-series-pipeline | 3 | 6623751 | <filename>utils/NNspecifications.py
import tensorflow as tf
from tensorflow import keras as k
from keras_tuner import HyperModel
from matplotlib import pyplot as plt
class NNmodel(HyperModel):
def __init__(self, input_shape, num_classes):
self.input_shape = input_shape
self.num_classes = num_classes
def build(self, hp):
# Hyperparameter search space
learning_rate = hp.Float(
"learning_rate",
min_value=1e-6,
max_value=1e-4,
default=5e-5,
sampling="linear",
)
optimizer = hp.Choice("optimizer", values=["adam", "adagrad"])
# activation_i=hp.Choice('hidden_activation_i',values=['relu', 'tanh', 'softmax'],default='relu')
clipnorm = hp.Float("clipnorm", min_value=0.5, max_value=10.0, default=1.0)
clipvalue = hp.Float("clipvalue", min_value=0.1, max_value=0.3, default=0.2)
# # Initial hidden layers
units_i = hp.Int(
"units_i", min_value=10, max_value=100, default=15, sampling="linear"
)
batch_norm = hp.Boolean("bacht_norm")
# activation_i=hp.Choice('hidden_activation_i',values=['relu', 'tanh', 'softmax'],default='relu')
# l2regularization_i= hp.Float('l2regularization_i',min_value=0.0001,max_value=0.1,sampling='log')
# gaussianNoise_i= hp.Float('gaussianNoise_i',min_value=0.001,max_value=2,sampling='log')
# # Intermediate hidden layers
units = hp.Int(
"units", min_value=10, max_value=100, default=40, sampling="linear"
)
# max_value_ihl = 2
# num_ihl = hp.Int(
# "num_intermediate_hidden_layers",
# min_value=0,
# max_value=max_value_ihl,
# default=1,
# )
activation = hp.Choice(
"hidden_activation", values=["relu", "tanh"], default="relu"
)
# l2regularization= hp.Float('l2regularization',min_value=0.0001,max_value=0.1,sampling='log')
# gaussianNoise = hp.Float('gaussianNoise',min_value=0.001,max_value=2.0,sampling='log')
# # Final hidden layers
units_f = hp.Int(
"units_f", min_value=10, max_value=100, default=20, sampling="linear"
)
dropout_f = hp.Float(
"dropout_f", min_value=0.1, max_value=0.7, sampling="linear"
)
# activation_f=hp.Choice('hidden_activation_f',values=['relu', 'tanh', 'softmax'],default='relu')
# l2regularization_f= hp.Float('l2regularization_f',min_value=0.0001,max_value=0.1,sampling='log')
# gaussianNoise_f = hp.Float('gaussianNoise_f',min_value=0.001,max_value=2.0,sampling='log')
# Model
model = k.Sequential()
# Sequential() infers the input layer
# # Initial hidden layers
model.add(k.layers.Dense(units_i, activation=activation))
model.add(k.layers.Dropout(0.1))
if batch_norm == True:
model.add(k.layers.BatchNormalization())
# model.add( k.layers.GaussianNoise( gaussianNoise_i ) )
# model.add(
# k.layers.Dense(
# units=units_i,
# activation=activation_i,
# activity_regularizer= k.regularizers.l2(l2regularization_i)
# )
# )
# # Intermediate hidden layers
model.add(k.layers.Dense(units, activation=activation))
model.add(k.layers.Dropout(0.1))
if batch_norm == True:
model.add(k.layers.BatchNormalization())
# for i in range(num_ihl):
# with hp.conditional_scope(
# "num_intermediate_hidden_layers", list(range(i + 1, max_value_ihl + 1))
# ):
# model.add(
# k.layers.Dense(
# units=hp.Int(
# "units_" + str(i + 1), min_value=32, max_value=512, step=32
# ),
# activation="relu",
# # activity_regularizer= k.regularizers.l2(l2regularization)
# )
# )
# model.add(k.layers.Dropout(0.1))
# model.add(k.layers.BatchNormalization())
# model.add(k.layers.GaussianNoise(gaussianNoise))
# # Final hidden layers
model.add(k.layers.Dense(units_f, activation=activation))
model.add(k.layers.Dropout(dropout_f))
# model.add(tf.keras.layers.Reshape((-1,1)))
# model.add( k.layers.LSTM(16))
# # model.add( k.layers.GRU(16))
# # model.add( k.layers.SimpleRNN(16))
# model.add(
# k.layers.Dense(
# units=units_f,
# activation=activation_f,
# activity_regularizer= k.regularizers.l2(l2regularization_f)
# )
# )
# model.add( k.layers.Dropout( dropout_f ) )
# model.add( k.layers.GaussianDropout( 0.5 ) )
# model.add( k.layers.ActivityRegularization(l1=0.1, l2=0.1 ) )
# model.add( k.layers.LayerNormalization() )
# model.add( k.layers.BatchNormalization() )
# model.add( k.layers.GaussianNoise( gaussianNoise_f ) )
# Output layer
model.add(k.layers.Dense(self.num_classes, activation="softmax"))
# Compile
loss_fn = k.losses.CategoricalCrossentropy(name="loss")
if optimizer == "adam":
with hp.conditional_scope("optimizer", "adam"):
optimizer = k.optimizers.Adam(
learning_rate=learning_rate, clipnorm=clipnorm, clipvalue=clipvalue
)
elif optimizer == "adagrad":
with hp.conditional_scope("optimizer", "adagrad"):
optimizer = k.optimizers.Adagrad(
learning_rate=learning_rate, clipnorm=clipnorm, clipvalue=clipvalue
)
model.compile(
optimizer=optimizer,
loss=loss_fn,
metrics=[acurracy],
# metrics = [ acurracy, recall, precission, sensatspecf, specfatsens, auc_roc, auc_pr ]
)
return model
# Classification metrics
acurracy = k.metrics.CategoricalAccuracy(name="acurracy")
# Plot model history
def plotHistory(history):
fig, ((ax1, ax2)) = plt.subplots(2, 1, sharex=True, figsize=(7, 7))
# fig, ( (ax1, ax2), (ax3 , ax4), (ax5,ax6), (ax7,ax8)) = plt.subplots(4, 2, sharex=True, figsize= (10,10))
fig.text(0.5, 0.05, "Epochs", ha="center")
x = range(1, len(history.history["loss"]) + 1)
ax1.plot(x, history.history["loss"], label="train")
ax1.plot(x, history.history["val_loss"], label="validation")
ax1.set_title("Loss function")
ax2.plot(x, history.history["acurracy"], label="train")
ax2.plot(x, history.history["val_acurracy"], label="validation")
ax2.set_title("CategoricalAcurracy")
plt.legend()
plt.show()
| <filename>utils/NNspecifications.py
import tensorflow as tf
from tensorflow import keras as k
from keras_tuner import HyperModel
from matplotlib import pyplot as plt
class NNmodel(HyperModel):
def __init__(self, input_shape, num_classes):
self.input_shape = input_shape
self.num_classes = num_classes
def build(self, hp):
# Hyperparameter search space
learning_rate = hp.Float(
"learning_rate",
min_value=1e-6,
max_value=1e-4,
default=5e-5,
sampling="linear",
)
optimizer = hp.Choice("optimizer", values=["adam", "adagrad"])
# activation_i=hp.Choice('hidden_activation_i',values=['relu', 'tanh', 'softmax'],default='relu')
clipnorm = hp.Float("clipnorm", min_value=0.5, max_value=10.0, default=1.0)
clipvalue = hp.Float("clipvalue", min_value=0.1, max_value=0.3, default=0.2)
# # Initial hidden layers
units_i = hp.Int(
"units_i", min_value=10, max_value=100, default=15, sampling="linear"
)
batch_norm = hp.Boolean("bacht_norm")
# activation_i=hp.Choice('hidden_activation_i',values=['relu', 'tanh', 'softmax'],default='relu')
# l2regularization_i= hp.Float('l2regularization_i',min_value=0.0001,max_value=0.1,sampling='log')
# gaussianNoise_i= hp.Float('gaussianNoise_i',min_value=0.001,max_value=2,sampling='log')
# # Intermediate hidden layers
units = hp.Int(
"units", min_value=10, max_value=100, default=40, sampling="linear"
)
# max_value_ihl = 2
# num_ihl = hp.Int(
# "num_intermediate_hidden_layers",
# min_value=0,
# max_value=max_value_ihl,
# default=1,
# )
activation = hp.Choice(
"hidden_activation", values=["relu", "tanh"], default="relu"
)
# l2regularization= hp.Float('l2regularization',min_value=0.0001,max_value=0.1,sampling='log')
# gaussianNoise = hp.Float('gaussianNoise',min_value=0.001,max_value=2.0,sampling='log')
# # Final hidden layers
units_f = hp.Int(
"units_f", min_value=10, max_value=100, default=20, sampling="linear"
)
dropout_f = hp.Float(
"dropout_f", min_value=0.1, max_value=0.7, sampling="linear"
)
# activation_f=hp.Choice('hidden_activation_f',values=['relu', 'tanh', 'softmax'],default='relu')
# l2regularization_f= hp.Float('l2regularization_f',min_value=0.0001,max_value=0.1,sampling='log')
# gaussianNoise_f = hp.Float('gaussianNoise_f',min_value=0.001,max_value=2.0,sampling='log')
# Model
model = k.Sequential()
# Sequential() infers the input layer
# # Initial hidden layers
model.add(k.layers.Dense(units_i, activation=activation))
model.add(k.layers.Dropout(0.1))
if batch_norm == True:
model.add(k.layers.BatchNormalization())
# model.add( k.layers.GaussianNoise( gaussianNoise_i ) )
# model.add(
# k.layers.Dense(
# units=units_i,
# activation=activation_i,
# activity_regularizer= k.regularizers.l2(l2regularization_i)
# )
# )
# # Intermediate hidden layers
model.add(k.layers.Dense(units, activation=activation))
model.add(k.layers.Dropout(0.1))
if batch_norm == True:
model.add(k.layers.BatchNormalization())
# for i in range(num_ihl):
# with hp.conditional_scope(
# "num_intermediate_hidden_layers", list(range(i + 1, max_value_ihl + 1))
# ):
# model.add(
# k.layers.Dense(
# units=hp.Int(
# "units_" + str(i + 1), min_value=32, max_value=512, step=32
# ),
# activation="relu",
# # activity_regularizer= k.regularizers.l2(l2regularization)
# )
# )
# model.add(k.layers.Dropout(0.1))
# model.add(k.layers.BatchNormalization())
# model.add(k.layers.GaussianNoise(gaussianNoise))
# # Final hidden layers
model.add(k.layers.Dense(units_f, activation=activation))
model.add(k.layers.Dropout(dropout_f))
# model.add(tf.keras.layers.Reshape((-1,1)))
# model.add( k.layers.LSTM(16))
# # model.add( k.layers.GRU(16))
# # model.add( k.layers.SimpleRNN(16))
# model.add(
# k.layers.Dense(
# units=units_f,
# activation=activation_f,
# activity_regularizer= k.regularizers.l2(l2regularization_f)
# )
# )
# model.add( k.layers.Dropout( dropout_f ) )
# model.add( k.layers.GaussianDropout( 0.5 ) )
# model.add( k.layers.ActivityRegularization(l1=0.1, l2=0.1 ) )
# model.add( k.layers.LayerNormalization() )
# model.add( k.layers.BatchNormalization() )
# model.add( k.layers.GaussianNoise( gaussianNoise_f ) )
# Output layer
model.add(k.layers.Dense(self.num_classes, activation="softmax"))
# Compile
loss_fn = k.losses.CategoricalCrossentropy(name="loss")
if optimizer == "adam":
with hp.conditional_scope("optimizer", "adam"):
optimizer = k.optimizers.Adam(
learning_rate=learning_rate, clipnorm=clipnorm, clipvalue=clipvalue
)
elif optimizer == "adagrad":
with hp.conditional_scope("optimizer", "adagrad"):
optimizer = k.optimizers.Adagrad(
learning_rate=learning_rate, clipnorm=clipnorm, clipvalue=clipvalue
)
model.compile(
optimizer=optimizer,
loss=loss_fn,
metrics=[acurracy],
# metrics = [ acurracy, recall, precission, sensatspecf, specfatsens, auc_roc, auc_pr ]
)
return model
# Classification metrics
acurracy = k.metrics.CategoricalAccuracy(name="acurracy")
# Plot model history
def plotHistory(history):
fig, ((ax1, ax2)) = plt.subplots(2, 1, sharex=True, figsize=(7, 7))
# fig, ( (ax1, ax2), (ax3 , ax4), (ax5,ax6), (ax7,ax8)) = plt.subplots(4, 2, sharex=True, figsize= (10,10))
fig.text(0.5, 0.05, "Epochs", ha="center")
x = range(1, len(history.history["loss"]) + 1)
ax1.plot(x, history.history["loss"], label="train")
ax1.plot(x, history.history["val_loss"], label="validation")
ax1.set_title("Loss function")
ax2.plot(x, history.history["acurracy"], label="train")
ax2.plot(x, history.history["val_acurracy"], label="validation")
ax2.set_title("CategoricalAcurracy")
plt.legend()
plt.show()
| en | 0.193159 | # Hyperparameter search space # activation_i=hp.Choice('hidden_activation_i',values=['relu', 'tanh', 'softmax'],default='relu') # # Initial hidden layers # activation_i=hp.Choice('hidden_activation_i',values=['relu', 'tanh', 'softmax'],default='relu') # l2regularization_i= hp.Float('l2regularization_i',min_value=0.0001,max_value=0.1,sampling='log') # gaussianNoise_i= hp.Float('gaussianNoise_i',min_value=0.001,max_value=2,sampling='log') # # Intermediate hidden layers # max_value_ihl = 2 # num_ihl = hp.Int( # "num_intermediate_hidden_layers", # min_value=0, # max_value=max_value_ihl, # default=1, # ) # l2regularization= hp.Float('l2regularization',min_value=0.0001,max_value=0.1,sampling='log') # gaussianNoise = hp.Float('gaussianNoise',min_value=0.001,max_value=2.0,sampling='log') # # Final hidden layers # activation_f=hp.Choice('hidden_activation_f',values=['relu', 'tanh', 'softmax'],default='relu') # l2regularization_f= hp.Float('l2regularization_f',min_value=0.0001,max_value=0.1,sampling='log') # gaussianNoise_f = hp.Float('gaussianNoise_f',min_value=0.001,max_value=2.0,sampling='log') # Model # Sequential() infers the input layer # # Initial hidden layers # model.add( k.layers.GaussianNoise( gaussianNoise_i ) ) # model.add( # k.layers.Dense( # units=units_i, # activation=activation_i, # activity_regularizer= k.regularizers.l2(l2regularization_i) # ) # ) # # Intermediate hidden layers # for i in range(num_ihl): # with hp.conditional_scope( # "num_intermediate_hidden_layers", list(range(i + 1, max_value_ihl + 1)) # ): # model.add( # k.layers.Dense( # units=hp.Int( # "units_" + str(i + 1), min_value=32, max_value=512, step=32 # ), # activation="relu", # # activity_regularizer= k.regularizers.l2(l2regularization) # ) # ) # model.add(k.layers.Dropout(0.1)) # model.add(k.layers.BatchNormalization()) # model.add(k.layers.GaussianNoise(gaussianNoise)) # # Final hidden layers # model.add(tf.keras.layers.Reshape((-1,1))) # model.add( k.layers.LSTM(16)) # 
# model.add( k.layers.GRU(16)) # # model.add( k.layers.SimpleRNN(16)) # model.add( # k.layers.Dense( # units=units_f, # activation=activation_f, # activity_regularizer= k.regularizers.l2(l2regularization_f) # ) # ) # model.add( k.layers.Dropout( dropout_f ) ) # model.add( k.layers.GaussianDropout( 0.5 ) ) # model.add( k.layers.ActivityRegularization(l1=0.1, l2=0.1 ) ) # model.add( k.layers.LayerNormalization() ) # model.add( k.layers.BatchNormalization() ) # model.add( k.layers.GaussianNoise( gaussianNoise_f ) ) # Output layer # Compile # metrics = [ acurracy, recall, precission, sensatspecf, specfatsens, auc_roc, auc_pr ] # Classification metrics # Plot model history # fig, ( (ax1, ax2), (ax3 , ax4), (ax5,ax6), (ax7,ax8)) = plt.subplots(4, 2, sharex=True, figsize= (10,10)) | 2.746726 | 3 |
lambda.py | jessedeveloperinvestor/Multiple-Jesse-Projects | 0 | 6623752 | <reponame>jessedeveloperinvestor/Multiple-Jesse-Projects<filename>lambda.py
x=lambda a, b: a*b
print(x(5,6))
items=range(1,8)
multiples_of_two=list(map(lambda var: var*2, items))
print(multiples_of_two) | x=lambda a, b: a*b
print(x(5,6))
items=range(1,8)
multiples_of_two=list(map(lambda var: var*2, items))
print(multiples_of_two) | none | 1 | 3.413563 | 3 | |
python/.ipynb_checkpoints/Data Cleaner-checkpoint.py | EricParapini/fifaoptimization | 0 | 6623753 | <reponame>EricParapini/fifaoptimization
#!/usr/bin/env python
# coding: utf-8
# # The Data Cleaning Notebook
#
# This notebook documents the cleaning process for the Fifa 2019 Data. It creates a new csv file in ./data/out/clean.csv
# ## Import necessary libraries
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from collections import Counter as counter
# ## Load Data to a data table
# In[2]:
df_fifa = pd.read_csv("../data/data.csv")
# # Manipulation
# ## Convert the value and wage into proper currency
# In[3]:
def value_to_int(df_value):
try:
value = float(df_value[1:-1]) # This return 110.5 from €110.5M
suffix = df_value[-1:] # This return M or K
if suffix == 'M':
value = value * 1000000
elif suffix == 'K':
value = value * 1000
except:
value = 0
return value
df_fifa['Value'] = df_fifa['Value'].apply(value_to_int)
df_fifa['Wage'] = df_fifa['Wage'].apply(value_to_int)
df_fifa['Release Clause'] = df_fifa['Release Clause'].apply(value_to_int)
# ## Convert the height to CM
# In[4]:
# Inch = 2.54 CM
# Foot = 2.54*12 = 30.48
def convert_to_cm(df_value):
height = 0
try:
feet,inches = str(df_value).split("'",)
feet = eval(feet)
inches = eval(inches)
height = 30.48*feet + 2.54*inches
except:
pass #do nothing
return int(height)
df_fifa['Height'] = df_fifa['Height'].apply(convert_to_cm)
# ## Clean weight data
# In[5]:
def remove_lbs(df_value):
try:
weight = int(df_value[0:-3])
except:
weight = 0
return weight
df_fifa['Weight'] = df_fifa['Weight'].apply(remove_lbs)
# ## Cycle through skill columns and add them up
# In[6]:
def evaluate_the_row(x):
try:
return eval(x)
except:
return 0
# 26 Positions need addition
for i in range(28,54):
df_fifa.iloc[:,i] = df_fifa.iloc[:,i].apply(evaluate_the_row)
# ## Remove Cells where key items are 0
# In[7]:
df_fifa = df_fifa[df_fifa.Value != 0]
df_fifa = df_fifa[df_fifa.Overall != 0]
df_fifa = df_fifa[df_fifa.Height != 0]
df_fifa = df_fifa[df_fifa.Weight != 0]
# ## Add new column: Create a variable with a classified position
# In[8]:
def classify_position(df_value):
if(df_value == 'GK'):
return 1
elif(df_value in ['RCB', 'CB', 'LCB', 'LB', 'RB', 'RWB', 'LWB']):
return 2
elif(df_value in ['RCM', 'LCM', 'LDM', 'CDM', 'CAM', 'RM', 'LAM', 'LM', 'RDM', 'CM', 'RAM']):
return 3
elif(df_value in ['RF', 'LF', 'ST', 'LW', 'RS', 'LS', 'RW', 'CF']):
return 4
return 0
df_fifa['PositionCode'] = df_fifa['Position'].apply(classify_position)
# # Error Checking
# ## Reviewing Value
# In[9]:
df_fifa['Value'].describe().apply(lambda x: format(x, 'f'))
# ## Reviewing Wage
# In[10]:
df_fifa['Wage'].describe().apply(lambda x: format(x, 'f'))
# ## Check Positions were added correctly
# In[11]:
df_fifa.iloc[:,28:54]
# # Write to CSV
# In[12]:
export_csv = df_fifa.to_csv(r'../out/clean.csv', index=None, header=True)
| #!/usr/bin/env python
# coding: utf-8
# # The Data Cleaning Notebook
#
# This notebook documents the cleaning process for the Fifa 2019 Data. It creates a new csv file in ./data/out/clean.csv
# ## Import necessary libraries
# In[1]:
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from collections import Counter as counter
# ## Load Data to a data table
# In[2]:
df_fifa = pd.read_csv("../data/data.csv")
# # Manipulation
# ## Convert the value and wage into proper currency
# In[3]:
def value_to_int(df_value):
try:
value = float(df_value[1:-1]) # This return 110.5 from €110.5M
suffix = df_value[-1:] # This return M or K
if suffix == 'M':
value = value * 1000000
elif suffix == 'K':
value = value * 1000
except:
value = 0
return value
df_fifa['Value'] = df_fifa['Value'].apply(value_to_int)
df_fifa['Wage'] = df_fifa['Wage'].apply(value_to_int)
df_fifa['Release Clause'] = df_fifa['Release Clause'].apply(value_to_int)
# ## Convert the height to CM
# In[4]:
# Inch = 2.54 CM
# Foot = 2.54*12 = 30.48
def convert_to_cm(df_value):
height = 0
try:
feet,inches = str(df_value).split("'",)
feet = eval(feet)
inches = eval(inches)
height = 30.48*feet + 2.54*inches
except:
pass #do nothing
return int(height)
df_fifa['Height'] = df_fifa['Height'].apply(convert_to_cm)
# ## Clean weight data
# In[5]:
def remove_lbs(df_value):
try:
weight = int(df_value[0:-3])
except:
weight = 0
return weight
df_fifa['Weight'] = df_fifa['Weight'].apply(remove_lbs)
# ## Cycle through skill columns and add them up
# In[6]:
def evaluate_the_row(x):
try:
return eval(x)
except:
return 0
# 26 Positions need addition
for i in range(28,54):
df_fifa.iloc[:,i] = df_fifa.iloc[:,i].apply(evaluate_the_row)
# ## Remove Cells where key items are 0
# In[7]:
df_fifa = df_fifa[df_fifa.Value != 0]
df_fifa = df_fifa[df_fifa.Overall != 0]
df_fifa = df_fifa[df_fifa.Height != 0]
df_fifa = df_fifa[df_fifa.Weight != 0]
# ## Add new column: Create a variable with a classified position
# In[8]:
def classify_position(df_value):
if(df_value == 'GK'):
return 1
elif(df_value in ['RCB', 'CB', 'LCB', 'LB', 'RB', 'RWB', 'LWB']):
return 2
elif(df_value in ['RCM', 'LCM', 'LDM', 'CDM', 'CAM', 'RM', 'LAM', 'LM', 'RDM', 'CM', 'RAM']):
return 3
elif(df_value in ['RF', 'LF', 'ST', 'LW', 'RS', 'LS', 'RW', 'CF']):
return 4
return 0
df_fifa['PositionCode'] = df_fifa['Position'].apply(classify_position)
# # Error Checking
# ## Reviewing Value
# In[9]:
df_fifa['Value'].describe().apply(lambda x: format(x, 'f'))
# ## Reviewing Wage
# In[10]:
df_fifa['Wage'].describe().apply(lambda x: format(x, 'f'))
# ## Check Positions were added correctly
# In[11]:
df_fifa.iloc[:,28:54]
# # Write to CSV
# In[12]:
export_csv = df_fifa.to_csv(r'../out/clean.csv', index=None, header=True) | en | 0.603733 | #!/usr/bin/env python # coding: utf-8 # # The Data Cleaning Notebook # # This notebook documents the cleaning process for the Fifa 2019 Data. It creates a new csv file in ./data/out/clean.csv # ## Import necessary libraries # In[1]: # ## Load Data to a data table # In[2]: # # Manipulation # ## Convert the value and wage into proper currency # In[3]: # This return 110.5 from €110.5M # This return M or K # ## Convert the height to CM # In[4]: # Inch = 2.54 CM # Foot = 2.54*12 = 30.48 #do nothing # ## Clean weight data # In[5]: # ## Cycle through skill columns and add them up # In[6]: # 26 Positions need addition # ## Remove Cells where key items are 0 # In[7]: # ## Add new column: Create a variable with a classified position # In[8]: # # Error Checking # ## Reviewing Value # In[9]: # ## Reviewing Wage # In[10]: # ## Check Positions were added correctly # In[11]: # # Write to CSV # In[12]: | 3.359031 | 3 |
Sheller.py | bantya/Sheller | 3 | 6623754 | import sublime_plugin
import subprocess
import sublime
import shlex
import os
class ShellerCommand(sublime_plugin.TextCommand):
def __init__ (self, *args, **kwargs):
super(ShellerCommand, self).__init__(*args, **kwargs)
def run (self, *args, **kwargs):
command = kwargs.get('command', None)
file_name = self.view.file_name()
if file_name is None:
file_name = ''
if command == 'sheller_folder':
self.on_folder()
elif command == 'sheller_file':
self.on_file(file_name)
elif command == 'sheller_reveal_file':
self.reveal_file(file_name)
return
elif command == 'sheller_reveal_folder':
self.reveal_folder()
return
elif command == 'sheller_open_shell_file':
self.open_shell_file(file_name)
return
elif command == 'sheller_open_shell_folder':
self.open_shell_folder()
return
file_path = os.path.join(self.PROJECT_PATH, file_name)
self.show_menu_label = kwargs.get('show_menu_lable', 'Command: ')
self.args = []
self.on_command()
if not os.path.isfile(file_name):
self.PROJECT_PATH = self.view.window().folders()[0]
def folder_paras (self, path):
path = path.split("\\")
self.current_drive = path[0]
path.pop()
self.current_directory = "\\".join(path)
def on_folder (self):
self.check_dir_exist()
self.PROJECT_PATH = self.view.window().folders()[0]
self.show_status(self.PROJECT_PATH)
def on_file (self, file_name):
self.folder_paras(file_name)
self.PROJECT_PATH = self.current_directory
self.show_status(self.PROJECT_PATH)
def open_shell_file (self, file_name):
self.folder_paras(file_name)
directory = self.current_directory
command = "cd " + directory + " & " + self.current_drive + " & start cmd"
os.system(command)
self.show_status(directory)
def open_shell_folder (self):
self.check_dir_exist()
path = self.view.window().folders()[0]
self.folder_paras(path)
self.current_directory = path
command = "cd " + self.current_directory + " & " + self.current_drive + " & start cmd"
os.system(command)
self.show_status(path)
def reveal_file (self, file_name):
self.folder_paras(file_name)
directory = self.current_directory
self.args = []
self.view.window().run_command(
"open_dir", {
"dir": directory
}
)
self.show_status(directory)
def reveal_folder (self):
self.check_dir_exist()
directory = self.view.window().folders()[0]
self.args = []
self.view.window().run_command(
"open_dir",
{"dir": directory}
)
self.show_status(directory)
def on_command (self):
self.view.window().show_input_panel(
self.show_menu_label, '', self.on_show_menu, None, None
)
def on_show_menu (self, show_menu):
self.args.extend(
shlex.split(str(show_menu))
)
self.on_done()
def show_status(self, message):
sublime.status_message('Directory: ' + message + os.sep)
def check_dir_exist(self):
if self.view.window().folders() == []:
sublime.error_message("Project root directory not found!")
def on_done (self):
if os.name != 'posix':
self.args = subprocess.list2cmdline(self.args)
try:
self.view.window().run_command("exec", {
"cmd": self.args,
"shell": os.name == 'nt',
"working_dir": self.PROJECT_PATH
}
)
sublime.status_message('Command executed succesfully!')
except IOError:
sublime.status_message('IOError - Error occured')
| import sublime_plugin
import subprocess
import sublime
import shlex
import os
class ShellerCommand(sublime_plugin.TextCommand):
def __init__ (self, *args, **kwargs):
super(ShellerCommand, self).__init__(*args, **kwargs)
def run (self, *args, **kwargs):
command = kwargs.get('command', None)
file_name = self.view.file_name()
if file_name is None:
file_name = ''
if command == 'sheller_folder':
self.on_folder()
elif command == 'sheller_file':
self.on_file(file_name)
elif command == 'sheller_reveal_file':
self.reveal_file(file_name)
return
elif command == 'sheller_reveal_folder':
self.reveal_folder()
return
elif command == 'sheller_open_shell_file':
self.open_shell_file(file_name)
return
elif command == 'sheller_open_shell_folder':
self.open_shell_folder()
return
file_path = os.path.join(self.PROJECT_PATH, file_name)
self.show_menu_label = kwargs.get('show_menu_lable', 'Command: ')
self.args = []
self.on_command()
if not os.path.isfile(file_name):
self.PROJECT_PATH = self.view.window().folders()[0]
def folder_paras (self, path):
path = path.split("\\")
self.current_drive = path[0]
path.pop()
self.current_directory = "\\".join(path)
def on_folder (self):
self.check_dir_exist()
self.PROJECT_PATH = self.view.window().folders()[0]
self.show_status(self.PROJECT_PATH)
def on_file (self, file_name):
self.folder_paras(file_name)
self.PROJECT_PATH = self.current_directory
self.show_status(self.PROJECT_PATH)
def open_shell_file (self, file_name):
self.folder_paras(file_name)
directory = self.current_directory
command = "cd " + directory + " & " + self.current_drive + " & start cmd"
os.system(command)
self.show_status(directory)
def open_shell_folder (self):
self.check_dir_exist()
path = self.view.window().folders()[0]
self.folder_paras(path)
self.current_directory = path
command = "cd " + self.current_directory + " & " + self.current_drive + " & start cmd"
os.system(command)
self.show_status(path)
def reveal_file (self, file_name):
self.folder_paras(file_name)
directory = self.current_directory
self.args = []
self.view.window().run_command(
"open_dir", {
"dir": directory
}
)
self.show_status(directory)
def reveal_folder (self):
self.check_dir_exist()
directory = self.view.window().folders()[0]
self.args = []
self.view.window().run_command(
"open_dir",
{"dir": directory}
)
self.show_status(directory)
def on_command (self):
self.view.window().show_input_panel(
self.show_menu_label, '', self.on_show_menu, None, None
)
def on_show_menu (self, show_menu):
self.args.extend(
shlex.split(str(show_menu))
)
self.on_done()
def show_status(self, message):
sublime.status_message('Directory: ' + message + os.sep)
def check_dir_exist(self):
if self.view.window().folders() == []:
sublime.error_message("Project root directory not found!")
def on_done (self):
if os.name != 'posix':
self.args = subprocess.list2cmdline(self.args)
try:
self.view.window().run_command("exec", {
"cmd": self.args,
"shell": os.name == 'nt',
"working_dir": self.PROJECT_PATH
}
)
sublime.status_message('Command executed succesfully!')
except IOError:
sublime.status_message('IOError - Error occured')
| none | 1 | 2.461685 | 2 | |
rally/rally-plugins/subnet-router-create/subnet-router-create.py | jtaleric/browbeat | 23 | 6623755 | from rally.task import atomic
from rally.task import scenario
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
from rally.task import types
from rally.task import utils as task_utils
from rally.task import validation
class NeutronPlugin(neutron_utils.NeutronScenario,
scenario.Scenario):
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["neutron"]})
def create_router_and_net(self,num_networks=1,network_create_args=None,
subnet_create_args=None,**kwargs):
router = self._create_router({})
subnets = []
if num_networks == 1 :
network = self._create_network(network_create_args or {})
subnet = self._create_subnet(network, subnet_create_args or {})
subnets.append(subnet)
self._add_interface_router(subnet['subnet'],router['router'])
else :
for net in range(1,num_networks):
network = self._create_network(network_create_args or {})
subnet = self._create_subnet(network, subnet_create_args or {})
subnets.append(subnet)
self._add_interface_router(subnet['subnet'],router['router'])
for subnet in subnets :
self._remove_interface_router(subnet['subnet'],router['router'])
| from rally.task import atomic
from rally.task import scenario
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
from rally.task import types
from rally.task import utils as task_utils
from rally.task import validation
class NeutronPlugin(neutron_utils.NeutronScenario,
scenario.Scenario):
@types.set(image=types.ImageResourceType,
flavor=types.FlavorResourceType)
@validation.required_openstack(users=True)
@scenario.configure(context={"cleanup": ["neutron"]})
def create_router_and_net(self,num_networks=1,network_create_args=None,
subnet_create_args=None,**kwargs):
router = self._create_router({})
subnets = []
if num_networks == 1 :
network = self._create_network(network_create_args or {})
subnet = self._create_subnet(network, subnet_create_args or {})
subnets.append(subnet)
self._add_interface_router(subnet['subnet'],router['router'])
else :
for net in range(1,num_networks):
network = self._create_network(network_create_args or {})
subnet = self._create_subnet(network, subnet_create_args or {})
subnets.append(subnet)
self._add_interface_router(subnet['subnet'],router['router'])
for subnet in subnets :
self._remove_interface_router(subnet['subnet'],router['router'])
| none | 1 | 1.93647 | 2 | |
tests/fgnhg_test.py | sg893052/sonic-utilities | 0 | 6623756 | <reponame>sg893052/sonic-utilities<filename>tests/fgnhg_test.py
import os
import traceback
from click.testing import CliRunner
import config.main as config
import show.main as show
from utilities_common.db import Db
show_fgnhg_hash_view_output="""\
FG NHG Prefix Next Hop Hash buckets
--------------- ------------------ ------------------------------
192.168.127.12/32 172.16.17.32 0 1 2 3 4 5 6 7
192.168.127.12/32 172.16.31.10 8 9 10 11 12 13 14 15
fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b 0 1 2 3 4 5 6 7
fc:5::/128 fc00:db20:35b:7399::5 8 9 10 11 12 13 14 15
"""
show_fgnhgv4_hash_view_output="""\
FG NHG Prefix Next Hop Hash buckets
--------------- ------------- ------------------------------
192.168.127.12/32 172.16.17.32 0 1 2 3 4 5 6 7
192.168.127.12/32 172.16.31.10 8 9 10 11 12 13 14 15
"""
show_fgnhgv6_hash_view_output="""\
FG NHG Prefix Next Hop Hash buckets
--------------- ------------------ ------------------------------
fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b 0 1 2 3 4 5 6 7
fc:5::/128 fc00:db20:35b:7399::5 8 9 10 11 12 13 14 15
"""
show_fgnhg_active_hops_output="""\
FG NHG Prefix Active Next Hops
--------------- ------------------
192.168.127.12/32 172.16.17.32
172.16.31.10
fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
fc00:db20:35b:7399::5
"""
show_fgnhgv4_active_hops_output="""\
FG NHG Prefix Active Next Hops
--------------- ------------------
192.168.127.12/32 172.16.17.32
172.16.31.10
"""
show_fgnhgv6_active_hops_output="""\
FG NHG Prefix Active Next Hops
--------------- ------------------
fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
fc00:db20:35b:7399::5
"""
class TestFineGrainedNexthopGroup(object):
@classmethod
def setup_class(cls):
os.environ['UTILITIES_UNIT_TESTING'] = "1"
print("SETUP")
def test_show_fgnhg_hash_view(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["hash-view"], [])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhg_hash_view_output
def test_show_fgnhgv4_hash_view(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["hash-view"], ["fgnhg_v4"])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhgv4_hash_view_output
def test_show_fgnhgv6_hash_view(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["hash-view"], ["fgnhg_v6"])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhgv6_hash_view_output
def test_show_fgnhg_active_hops(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["active-hops"], [])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhg_active_hops_output
def test_show_fgnhgv4_active_hops(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["active-hops"], ["fgnhg_v4"])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhgv4_active_hops_output
def test_show_fgnhgv6_active_hops(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["active-hops"], ["fgnhg_v6"])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhgv6_active_hops_output
@classmethod
def teardown_class(cls):
os.environ['UTILITIES_UNIT_TESTING'] = "0"
print("TEARDOWN")
| import os
import traceback
from click.testing import CliRunner
import config.main as config
import show.main as show
from utilities_common.db import Db
show_fgnhg_hash_view_output="""\
FG NHG Prefix Next Hop Hash buckets
--------------- ------------------ ------------------------------
192.168.127.12/32 172.16.17.32 0 1 2 3 4 5 6 7
192.168.127.12/32 172.16.31.10 8 9 10 11 12 13 14 15
fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b 0 1 2 3 4 5 6 7
fc:5::/128 fc00:db20:35b:7399::5 8 9 10 11 12 13 14 15
"""
show_fgnhgv4_hash_view_output="""\
FG NHG Prefix Next Hop Hash buckets
--------------- ------------- ------------------------------
192.168.127.12/32 172.16.17.32 0 1 2 3 4 5 6 7
192.168.127.12/32 172.16.31.10 8 9 10 11 12 13 14 15
"""
show_fgnhgv6_hash_view_output="""\
FG NHG Prefix Next Hop Hash buckets
--------------- ------------------ ------------------------------
fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b 0 1 2 3 4 5 6 7
fc:5::/128 fc00:db20:35b:7399::5 8 9 10 11 12 13 14 15
"""
show_fgnhg_active_hops_output="""\
FG NHG Prefix Active Next Hops
--------------- ------------------
192.168.127.12/32 172.16.17.32
172.16.31.10
fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
fc00:db20:35b:7399::5
"""
show_fgnhgv4_active_hops_output="""\
FG NHG Prefix Active Next Hops
--------------- ------------------
192.168.127.12/32 172.16.17.32
172.16.31.10
"""
show_fgnhgv6_active_hops_output="""\
FG NHG Prefix Active Next Hops
--------------- ------------------
fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b
fc00:db20:35b:7399::5
"""
class TestFineGrainedNexthopGroup(object):
@classmethod
def setup_class(cls):
os.environ['UTILITIES_UNIT_TESTING'] = "1"
print("SETUP")
def test_show_fgnhg_hash_view(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["hash-view"], [])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhg_hash_view_output
def test_show_fgnhgv4_hash_view(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["hash-view"], ["fgnhg_v4"])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhgv4_hash_view_output
def test_show_fgnhgv6_hash_view(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["hash-view"], ["fgnhg_v6"])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhgv6_hash_view_output
def test_show_fgnhg_active_hops(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["active-hops"], [])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhg_active_hops_output
def test_show_fgnhgv4_active_hops(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["active-hops"], ["fgnhg_v4"])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhgv4_active_hops_output
def test_show_fgnhgv6_active_hops(self):
runner = CliRunner()
result = runner.invoke(show.cli.commands["fgnhg"].commands["active-hops"], ["fgnhg_v6"])
print(result.exit_code)
print(result.output)
assert result.exit_code == 0
assert result.output == show_fgnhgv6_active_hops_output
@classmethod
def teardown_class(cls):
os.environ['UTILITIES_UNIT_TESTING'] = "0"
print("TEARDOWN") | en | 0.307607 | \ FG NHG Prefix Next Hop Hash buckets --------------- ------------------ ------------------------------ 192.168.127.12/32 172.16.17.32 0 1 2 3 4 5 6 7 192.168.127.12/32 172.16.31.10 8 9 10 11 12 13 14 15 fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b 0 1 2 3 4 5 6 7 fc:5::/128 fc00:db20:35b:7399::5 8 9 10 11 12 13 14 15 \ FG NHG Prefix Next Hop Hash buckets --------------- ------------- ------------------------------ 192.168.127.12/32 172.16.17.32 0 1 2 3 4 5 6 7 192.168.127.12/32 172.16.31.10 8 9 10 11 12 13 14 15 \ FG NHG Prefix Next Hop Hash buckets --------------- ------------------ ------------------------------ fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b 0 1 2 3 4 5 6 7 fc:5::/128 fc00:db20:35b:7399::5 8 9 10 11 12 13 14 15 \ FG NHG Prefix Active Next Hops --------------- ------------------ 192.168.127.12/32 172.16.17.32 172.16.31.10 fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b fc00:db20:35b:7399::5 \ FG NHG Prefix Active Next Hops --------------- ------------------ 192.168.127.12/32 172.16.17.32 172.16.31.10 \ FG NHG Prefix Active Next Hops --------------- ------------------ fc:5::/128 fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b fc00:db20:35b:7399::5 | 2.118709 | 2 |
kapsoya/migrations/0001_initial.py | Chebichii-Lab/Kapsoya-Estate | 0 | 6623757 | <gh_stars>0
# Generated by Django 3.2.5 on 2021-07-25 10:38
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Neighbourhood',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hood_name', models.CharField(max_length=200)),
('hood_location', models.CharField(max_length=200)),
('hood_description', models.TextField(blank=True, max_length=500)),
('hood_photo', cloudinary.models.CloudinaryField(default='photo', max_length=255, verbose_name='photo')),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='admin', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idNo', models.IntegerField(default=0)),
('email', models.CharField(blank=True, max_length=30)),
('profile_pic', cloudinary.models.CloudinaryField(max_length=255, verbose_name='profile')),
('bio', models.TextField(blank=True, max_length=500)),
('neighbourhood', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='kapsoya.neighbourhood')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
]
| # Generated by Django 3.2.5 on 2021-07-25 10:38
import cloudinary.models
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Neighbourhood',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('hood_name', models.CharField(max_length=200)),
('hood_location', models.CharField(max_length=200)),
('hood_description', models.TextField(blank=True, max_length=500)),
('hood_photo', cloudinary.models.CloudinaryField(default='photo', max_length=255, verbose_name='photo')),
('admin', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='admin', to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Profile',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('idNo', models.IntegerField(default=0)),
('email', models.CharField(blank=True, max_length=30)),
('profile_pic', cloudinary.models.CloudinaryField(max_length=255, verbose_name='profile')),
('bio', models.TextField(blank=True, max_length=500)),
('neighbourhood', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='kapsoya.neighbourhood')),
('user', models.OneToOneField(on_delete=django.db.models.deletion.CASCADE, to=settings.AUTH_USER_MODEL)),
],
),
] | en | 0.825989 | # Generated by Django 3.2.5 on 2021-07-25 10:38 | 1.808814 | 2 |
app/recipe/tests/test_ingredient_api.py | Dr4g0s/recipe-app-api | 0 | 6623758 | <reponame>Dr4g0s/recipe-app-api<gh_stars>0
from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENT_URL = reverse('recipe:ingredient-list')
class PublicIngredientAPITests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(INGREDIENT_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientAPITests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create(
email='<EMAIL>',
password='<PASSWORD>'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_ingredient(self):
Ingredient.objects.create(name='test 1', user=self.user)
Ingredient.objects.create(name='test 2', user=self.user)
res = self.client.get(INGREDIENT_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_retrieve_ingredient_limited_to_user(self):
user2 = get_user_model().objects.create(
email='<EMAIL>',
password='<PASSWORD>'
)
Ingredient.objects.create(name='test 1', user=user2)
ing = Ingredient.objects.create(name='test', user=self.user)
res = self.client.get(INGREDIENT_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ing.name)
def test_create_ingredient_successfull(self):
payload = {'name': 'test'}
res = self.client.post(INGREDIENT_URL, payload)
exists = Ingredient.objects.filter(
name=payload['name'],
user=self.user
).exists()
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertTrue(exists)
def test_create_ingedient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENT_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipe(self):
ingredient1 = Ingredient.objects.create(
user=self.user,
name='ingredient 1'
)
ingredient2 = Ingredient.objects.create(
user=self.user,
name='ingredient 2'
)
recipe = Recipe.objects.create(
title='test title',
time_minutes=10,
price=5.00,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredients_assigned_unique(self):
"""Test filtering ingredients by assigning returns unique items"""
ingredient = Ingredient.objects.create(
user=self.user,
name='ingredient 1'
)
Ingredient.objects.create(user=self.user, name='ingredient 2')
recipe1 = Recipe.objects.create(
title='recipe 1',
time_minutes=10,
price=5.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='recipe 2',
time_minutes=10,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1)
| from django.contrib.auth import get_user_model
from django.test import TestCase
from django.urls import reverse
from rest_framework import status
from rest_framework.test import APIClient
from core.models import Ingredient, Recipe
from recipe.serializers import IngredientSerializer
INGREDIENT_URL = reverse('recipe:ingredient-list')
class PublicIngredientAPITests(TestCase):
def setUp(self):
self.client = APIClient()
def test_login_required(self):
res = self.client.get(INGREDIENT_URL)
self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)
class PrivateIngredientAPITests(TestCase):
def setUp(self):
self.user = get_user_model().objects.create(
email='<EMAIL>',
password='<PASSWORD>'
)
self.client = APIClient()
self.client.force_authenticate(self.user)
def test_retrieve_ingredient(self):
Ingredient.objects.create(name='test 1', user=self.user)
Ingredient.objects.create(name='test 2', user=self.user)
res = self.client.get(INGREDIENT_URL)
ingredients = Ingredient.objects.all().order_by('-name')
serializer = IngredientSerializer(ingredients, many=True)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(res.data, serializer.data)
def test_retrieve_ingredient_limited_to_user(self):
user2 = get_user_model().objects.create(
email='<EMAIL>',
password='<PASSWORD>'
)
Ingredient.objects.create(name='test 1', user=user2)
ing = Ingredient.objects.create(name='test', user=self.user)
res = self.client.get(INGREDIENT_URL)
self.assertEqual(res.status_code, status.HTTP_200_OK)
self.assertEqual(len(res.data), 1)
self.assertEqual(res.data[0]['name'], ing.name)
def test_create_ingredient_successfull(self):
payload = {'name': 'test'}
res = self.client.post(INGREDIENT_URL, payload)
exists = Ingredient.objects.filter(
name=payload['name'],
user=self.user
).exists()
self.assertEqual(res.status_code, status.HTTP_201_CREATED)
self.assertTrue(exists)
def test_create_ingedient_invalid(self):
payload = {'name': ''}
res = self.client.post(INGREDIENT_URL, payload)
self.assertEqual(res.status_code, status.HTTP_400_BAD_REQUEST)
def test_retrieve_ingredients_assigned_to_recipe(self):
ingredient1 = Ingredient.objects.create(
user=self.user,
name='ingredient 1'
)
ingredient2 = Ingredient.objects.create(
user=self.user,
name='ingredient 2'
)
recipe = Recipe.objects.create(
title='test title',
time_minutes=10,
price=5.00,
user=self.user
)
recipe.ingredients.add(ingredient1)
res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
serializer1 = IngredientSerializer(ingredient1)
serializer2 = IngredientSerializer(ingredient2)
self.assertIn(serializer1.data, res.data)
self.assertNotIn(serializer2.data, res.data)
def test_retrieve_ingredients_assigned_unique(self):
"""Test filtering ingredients by assigning returns unique items"""
ingredient = Ingredient.objects.create(
user=self.user,
name='ingredient 1'
)
Ingredient.objects.create(user=self.user, name='ingredient 2')
recipe1 = Recipe.objects.create(
title='recipe 1',
time_minutes=10,
price=5.00,
user=self.user
)
recipe1.ingredients.add(ingredient)
recipe2 = Recipe.objects.create(
title='recipe 2',
time_minutes=10,
price=5.00,
user=self.user
)
recipe2.ingredients.add(ingredient)
res = self.client.get(INGREDIENT_URL, {'assigned_only': 1})
self.assertEqual(len(res.data), 1) | en | 0.531918 | Test filtering ingredients by assigning returns unique items | 2.438246 | 2 |
custom_layer_constraints.py | XiaowanYi/Attention_vgg16 | 3 | 6623759 | <gh_stars>1-10
# -*- coding: utf-8 -*-
"""
Customized singly connected layer
"""
import keras
from keras.models import Sequential, Model
from keras import backend as K
from keras.layers import Layer
import numpy as np
#Customize a constraint class that clip w to be [K.epsilon(), inf]
from keras.constraints import Constraint
class CustomConstraint (Constraint):
def __call__(self, w):
new_w = K.clip(w, K.epsilon(), None)
return new_w
#Customize a element-wise multiplication layer with trainable weights
class SinglyConnected(Layer):
def __init__(self,
kernel_constraint=None,
**kwargs):
self.kernel_constraint = kernel_constraint
super(SinglyConnected, self).__init__(**kwargs)
def build(self, input_shape):
if input_shape[-1] is None:
raise ValueError('Axis ' + + ' of '
'input tensor should have a defined dimension '
'but the layer received an input with shape ' +
str(input_shape) + '.')
#self.input_spec = InputSpec(ndim=len(input_shape),
# axes=dict(list(enumerate(input_shape[1:], start=1))))
self.kernel = self.add_weight(name='kernel',
shape=input_shape[1:],
initializer='ones',
constraint=self.kernel_constraint,
trainable=True)
super(SinglyConnected, self).build(input_shape) # Be sure to call this at the end
def call(self, x):
return np.multiply(x,self.kernel)
def compute_output_shape(self, input_shape):
return (input_shape)
| # -*- coding: utf-8 -*-
"""
Customized singly connected layer
"""
import keras
from keras.models import Sequential, Model
from keras import backend as K
from keras.layers import Layer
import numpy as np
#Customize a constraint class that clip w to be [K.epsilon(), inf]
from keras.constraints import Constraint
class CustomConstraint (Constraint):
def __call__(self, w):
new_w = K.clip(w, K.epsilon(), None)
return new_w
#Customize a element-wise multiplication layer with trainable weights
class SinglyConnected(Layer):
def __init__(self,
kernel_constraint=None,
**kwargs):
self.kernel_constraint = kernel_constraint
super(SinglyConnected, self).__init__(**kwargs)
def build(self, input_shape):
if input_shape[-1] is None:
raise ValueError('Axis ' + + ' of '
'input tensor should have a defined dimension '
'but the layer received an input with shape ' +
str(input_shape) + '.')
#self.input_spec = InputSpec(ndim=len(input_shape),
# axes=dict(list(enumerate(input_shape[1:], start=1))))
self.kernel = self.add_weight(name='kernel',
shape=input_shape[1:],
initializer='ones',
constraint=self.kernel_constraint,
trainable=True)
super(SinglyConnected, self).build(input_shape) # Be sure to call this at the end
def call(self, x):
return np.multiply(x,self.kernel)
def compute_output_shape(self, input_shape):
return (input_shape) | en | 0.555009 | # -*- coding: utf-8 -*- Customized singly connected layer #Customize a constraint class that clip w to be [K.epsilon(), inf] #Customize a element-wise multiplication layer with trainable weights #self.input_spec = InputSpec(ndim=len(input_shape), # axes=dict(list(enumerate(input_shape[1:], start=1)))) # Be sure to call this at the end | 2.717219 | 3 |
ils_loc_mapper/lib/mapper_helper.py | birkin/ils_location_mapper_project | 0 | 6623760 | # -*- coding: utf-8 -*-
import datetime, json, logging, pprint
from . import common
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
from ils_loc_mapper import settings_app
from ils_loc_mapper.models import LocationCodeMapper
log = logging.getLogger(__name__)
class Mapper(object):
def __init__(self):
pass
def validate_request( self, get_dct ):
""" Validates params.
Called by views.map_location_code() """
out = {'rslt': False, 'err': 'Bad Request'}
( code_val, data_val ) = ( get_dct.get('code', None), get_dct.get('data', None) )
if code_val:
if len(code_val) > 0:
out = {'rslt': True, 'err': None}
elif data_val:
if len(data_val) > 0:
out = {'rslt': True, 'err': None}
log.debug( 'validity-out, ```%s```' % out )
return out
def get_request_type( self, get_dct ):
""" Returns `code` or `dump`.
Called by views.map_location_code() """
try:
get_dct['code']
code_type = 'code'
except Exception as e:
code_type = 'data'
log.debug( 'code_type, `%s`' % code_type )
return code_type
def prep_code_data( self, code ):
""" Performs lookup & returns data.
Called by views.map_location_code() """
out = { 'rslt': None, 'err': None }
try:
match = self.run_code_lookup( code )
out['rslt'] = {
'building': match.building, 'code': match.code, 'display': match.display, 'format': match.format
}
except Exception as e:
log.warning( 'exception getting data, ```%s```' % e )
out['err'] = 'not found'
log.debug( 'data-out, ```%s```' % out )
return out
def run_code_lookup( self, code ):
""" Returns match from cache or db lookup.
Called by prep_code_data() """
cache_key = code
match = cache.get( cache_key )
if match is None:
log.debug( 'code-data _not_ from cache' )
match = LocationCodeMapper.objects.get( code=code )
cache.set( cache_key, match ) # time could be last argument; defaults to settings.py entry
return match
def prep_dump_data( self ):
""" Returns all data.
Called by views.map_location_code() """
items_dct = cache.get( 'all' ) # key normally dynamic, but can be static here
if items_dct is None:
log.debug( 'dump-data _not_ from cache' )
( items_dct, data_objs ) = ( {}, LocationCodeMapper.objects.all().order_by('code') )
for obj in data_objs:
obj_dct = obj.dictify()
del( obj_dct['code'] )
items_dct[obj.code] = obj_dct
cache.set( 'all', items_dct ) # time could be last argument; defaults to settings.py entry
log.debug( 'items_dct, ```%s...```' % pprint.pformat(items_dct)[0:100] )
return items_dct
def prep_code_response( self, data_dct, request, rq_now ):
""" Returns appropriate response based on data.
Called by views.map_location_code() """
if data_dct['err']:
rsp = HttpResponseNotFound( '404 / no match for code')
else:
out_dct = {
'request': {
'url': common.make_request_url( request ),
'timestamp': str( rq_now )
},
'result': {
'items': [ data_dct['rslt'] ],
'documentation': settings_app.README_URL,
'elapsed_time': str( datetime.datetime.now() - rq_now )
}
}
j_out = json.dumps( out_dct, sort_keys=True, indent=2 )
rsp = HttpResponse( j_out, content_type='application/json; charset=utf-8' )
return rsp
def prep_dump_response( self, data_dct, request, rq_now ):
""" Returns json response.
Called by views.map_location_code() """
out_dct = {
'request': {
'url': common.make_request_url( request ),
'timestamp': str( rq_now ) },
'result': {
'items': data_dct,
'documentation': settings_app.README_URL,
'elapsed_time': str( datetime.datetime.now() - rq_now ) } }
j_out = json.dumps( out_dct, sort_keys=True, indent=2 )
rsp = HttpResponse( j_out, content_type='application/json; charset=utf-8' )
return rsp
def prep_bad_request_response( self, err ):
rsp = HttpResponseBadRequest( '400 / %s' % err )
return rsp
def prep_server_error_response( self, message ):
""" Triggered by prep_data() problem:
Called by views.map_location_code() """
rsp =HttpResponseServerError( '500 / %s' % message )
return rsp
## end class Mapper()
| # -*- coding: utf-8 -*-
import datetime, json, logging, pprint
from . import common
from django.core.cache import cache
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, HttpResponseServerError
from ils_loc_mapper import settings_app
from ils_loc_mapper.models import LocationCodeMapper
log = logging.getLogger(__name__)
class Mapper(object):
def __init__(self):
pass
def validate_request( self, get_dct ):
""" Validates params.
Called by views.map_location_code() """
out = {'rslt': False, 'err': 'Bad Request'}
( code_val, data_val ) = ( get_dct.get('code', None), get_dct.get('data', None) )
if code_val:
if len(code_val) > 0:
out = {'rslt': True, 'err': None}
elif data_val:
if len(data_val) > 0:
out = {'rslt': True, 'err': None}
log.debug( 'validity-out, ```%s```' % out )
return out
def get_request_type( self, get_dct ):
""" Returns `code` or `dump`.
Called by views.map_location_code() """
try:
get_dct['code']
code_type = 'code'
except Exception as e:
code_type = 'data'
log.debug( 'code_type, `%s`' % code_type )
return code_type
def prep_code_data( self, code ):
""" Performs lookup & returns data.
Called by views.map_location_code() """
out = { 'rslt': None, 'err': None }
try:
match = self.run_code_lookup( code )
out['rslt'] = {
'building': match.building, 'code': match.code, 'display': match.display, 'format': match.format
}
except Exception as e:
log.warning( 'exception getting data, ```%s```' % e )
out['err'] = 'not found'
log.debug( 'data-out, ```%s```' % out )
return out
def run_code_lookup( self, code ):
""" Returns match from cache or db lookup.
Called by prep_code_data() """
cache_key = code
match = cache.get( cache_key )
if match is None:
log.debug( 'code-data _not_ from cache' )
match = LocationCodeMapper.objects.get( code=code )
cache.set( cache_key, match ) # time could be last argument; defaults to settings.py entry
return match
def prep_dump_data( self ):
""" Returns all data.
Called by views.map_location_code() """
items_dct = cache.get( 'all' ) # key normally dynamic, but can be static here
if items_dct is None:
log.debug( 'dump-data _not_ from cache' )
( items_dct, data_objs ) = ( {}, LocationCodeMapper.objects.all().order_by('code') )
for obj in data_objs:
obj_dct = obj.dictify()
del( obj_dct['code'] )
items_dct[obj.code] = obj_dct
cache.set( 'all', items_dct ) # time could be last argument; defaults to settings.py entry
log.debug( 'items_dct, ```%s...```' % pprint.pformat(items_dct)[0:100] )
return items_dct
def prep_code_response( self, data_dct, request, rq_now ):
""" Returns appropriate response based on data.
Called by views.map_location_code() """
if data_dct['err']:
rsp = HttpResponseNotFound( '404 / no match for code')
else:
out_dct = {
'request': {
'url': common.make_request_url( request ),
'timestamp': str( rq_now )
},
'result': {
'items': [ data_dct['rslt'] ],
'documentation': settings_app.README_URL,
'elapsed_time': str( datetime.datetime.now() - rq_now )
}
}
j_out = json.dumps( out_dct, sort_keys=True, indent=2 )
rsp = HttpResponse( j_out, content_type='application/json; charset=utf-8' )
return rsp
def prep_dump_response( self, data_dct, request, rq_now ):
""" Returns json response.
Called by views.map_location_code() """
out_dct = {
'request': {
'url': common.make_request_url( request ),
'timestamp': str( rq_now ) },
'result': {
'items': data_dct,
'documentation': settings_app.README_URL,
'elapsed_time': str( datetime.datetime.now() - rq_now ) } }
j_out = json.dumps( out_dct, sort_keys=True, indent=2 )
rsp = HttpResponse( j_out, content_type='application/json; charset=utf-8' )
return rsp
def prep_bad_request_response( self, err ):
rsp = HttpResponseBadRequest( '400 / %s' % err )
return rsp
def prep_server_error_response( self, message ):
""" Triggered by prep_data() problem:
Called by views.map_location_code() """
rsp =HttpResponseServerError( '500 / %s' % message )
return rsp
## end class Mapper()
| en | 0.741206 | # -*- coding: utf-8 -*- Validates params. Called by views.map_location_code() Returns `code` or `dump`. Called by views.map_location_code() Performs lookup & returns data. Called by views.map_location_code() Returns match from cache or db lookup. Called by prep_code_data() # time could be last argument; defaults to settings.py entry Returns all data. Called by views.map_location_code() # key normally dynamic, but can be static here # time could be last argument; defaults to settings.py entry Returns appropriate response based on data. Called by views.map_location_code() Returns json response. Called by views.map_location_code() Triggered by prep_data() problem: Called by views.map_location_code() ## end class Mapper() | 2.025477 | 2 |
fsleyes/tests/test_screenshot.py | pauldmccarthy/fsleyes | 12 | 6623761 | <reponame>pauldmccarthy/fsleyes
#!/usr/bin/env python
#
# test_screenshot.py - Test fsleyes.actions.screenshot
#
# Author: <NAME> <<EMAIL>>
#
import os.path as op
import fsl.data.image as fslimage
import fsl.utils.idle as idle
from fsleyes.tests import (run_with_orthopanel,
run_with_lightboxpanel,
run_with_scene3dpanel,
run_with_timeseriespanel,
run_with_histogrampanel,
run_with_powerspectrumpanel,
tempdir,
realYield,
compare_images)
datadir = op.join(op.dirname(__file__), 'testdata')
def _test_screenshot(panel, overlayList, displayCtx, stype, imgfile):
import matplotlib.image as mplimg
import fsleyes.actions.screenshot as screenshot
import fsleyes.views.orthopanel as orthopanel
if isinstance(panel, orthopanel.OrthoPanel):
panel.sceneOpts.showCursor = False
panel.sceneOpts.showLabels = False
img = fslimage.Image(op.join(datadir, imgfile))
overlayList.append(img)
with tempdir():
fname = 'test_screenshot_{}.png'.format(stype)
realYield(100)
idle.idle(screenshot.screenshot, panel, fname)
idle.block(10, until=lambda : op.exists(fname))
realYield()
bfname = op.join(datadir, 'test_screenshot_{}.png'.format(stype))
screenshot = mplimg.imread(fname)
benchmark = mplimg.imread(bfname)
result, diff = compare_images(screenshot, benchmark, 50)
print('Comparing {} with {}: {}'.format(fname, bfname, diff))
assert result
def test_screenshot_ortho():
run_with_orthopanel(_test_screenshot, 'ortho', '3d')
def test_screenshot_lightbox():
run_with_lightboxpanel(_test_screenshot, 'lightbox', '3d')
def test_screenshot_3d():
run_with_scene3dpanel(_test_screenshot, '3d', '3d')
def test_screenshot_timeseries():
run_with_timeseriespanel(_test_screenshot, 'timeseries', '4d')
def test_screenshot_histogram():
run_with_histogrampanel(_test_screenshot, 'histogram', '4d')
def test_screenshot_powerspectrum():
run_with_powerspectrumpanel(_test_screenshot, 'powerspectrum', '4d')
| #!/usr/bin/env python
#
# test_screenshot.py - Test fsleyes.actions.screenshot
#
# Author: <NAME> <<EMAIL>>
#
import os.path as op
import fsl.data.image as fslimage
import fsl.utils.idle as idle
from fsleyes.tests import (run_with_orthopanel,
run_with_lightboxpanel,
run_with_scene3dpanel,
run_with_timeseriespanel,
run_with_histogrampanel,
run_with_powerspectrumpanel,
tempdir,
realYield,
compare_images)
datadir = op.join(op.dirname(__file__), 'testdata')
def _test_screenshot(panel, overlayList, displayCtx, stype, imgfile):
import matplotlib.image as mplimg
import fsleyes.actions.screenshot as screenshot
import fsleyes.views.orthopanel as orthopanel
if isinstance(panel, orthopanel.OrthoPanel):
panel.sceneOpts.showCursor = False
panel.sceneOpts.showLabels = False
img = fslimage.Image(op.join(datadir, imgfile))
overlayList.append(img)
with tempdir():
fname = 'test_screenshot_{}.png'.format(stype)
realYield(100)
idle.idle(screenshot.screenshot, panel, fname)
idle.block(10, until=lambda : op.exists(fname))
realYield()
bfname = op.join(datadir, 'test_screenshot_{}.png'.format(stype))
screenshot = mplimg.imread(fname)
benchmark = mplimg.imread(bfname)
result, diff = compare_images(screenshot, benchmark, 50)
print('Comparing {} with {}: {}'.format(fname, bfname, diff))
assert result
def test_screenshot_ortho():
run_with_orthopanel(_test_screenshot, 'ortho', '3d')
def test_screenshot_lightbox():
run_with_lightboxpanel(_test_screenshot, 'lightbox', '3d')
def test_screenshot_3d():
run_with_scene3dpanel(_test_screenshot, '3d', '3d')
def test_screenshot_timeseries():
run_with_timeseriespanel(_test_screenshot, 'timeseries', '4d')
def test_screenshot_histogram():
run_with_histogrampanel(_test_screenshot, 'histogram', '4d')
def test_screenshot_powerspectrum():
    """Screenshot of a power-spectrum panel showing a 4D image matches the benchmark."""
    run_with_powerspectrumpanel(_test_screenshot, 'powerspectrum', '4d')
Ex049.py | leonardoDelefrate/Curso-de-Python | 0 | 6623762 | <reponame>leonardoDelefrate/Curso-de-Python<filename>Ex049.py
import datetime
# Read the birth year of 7 people and count how many have already
# reached the age of majority (18+) relative to the current year.
current_year = datetime.date.today().year
adults = 0
minors = 0
for person in range(1, 8):
    birth_year = int(input('Em que ano a {}° pessoa nasceu? '.format(person)))
    if current_year - birth_year >= 18:
        adults += 1
    else:
        minors += 1
print('{} pessoas atingiram a maioridade.'.format(adults))
print('{} pessoas ainda não atingiram a maioridade.'.format(minors))
| import datetime
# Read the birth year of 7 people and count how many have already
# reached the age of majority (18+) relative to the current year.
current_year = datetime.date.today().year
adults = 0
minors = 0
for person in range(1, 8):
    birth_year = int(input('Em que ano a {}° pessoa nasceu? '.format(person)))
    if current_year - birth_year >= 18:
        adults += 1
    else:
        minors += 1
print('{} pessoas atingiram a maioridade.'.format(adults))
print('{} pessoas ainda não atingiram a maioridade.'.format(minors))
process/introduce_wer.py | judyfong/punctuation-prediction | 43 | 6623763 | <gh_stars>10-100
# Copyright 2020 <NAME> <EMAIL>
# In this script, the word error rate is introduced to data
# and the data then saved to a file.
from wer_assist import apply_wer
import sys
# Parse the desired word error rate from the command line and apply it.
# Fix: the original used bare ``except:`` clauses, which swallowed every
# error (including real bugs), and a failure here left ``sentences_wer``
# undefined so the second ``try`` failed with the wrong message.
try:
    wordList_wer = apply_wer(float(sys.argv[3]))
    sentences_wer = [" ".join(sentence) for sentence in wordList_wer]
except (IndexError, ValueError):
    print("There is no number to define the desired word error rate")
    sys.exit(1)

# Write one corrupted sentence per line to <outdir>/wer<rate>.txt.
try:
    with open(sys.argv[2] + "/wer" + sys.argv[3] + ".txt", "w", encoding="utf-8") as show_unurl:
        for item in sentences_wer:
            show_unurl.write("%s\n" % item)
except OSError:
    print("Unable to save to directory")
| # Copyright 2020 <NAME> <EMAIL>
# In this script, the word error rate is introduced to data
# and the data then saved to a file.
from wer_assist import apply_wer
import sys
# Parse the desired word error rate from the command line and apply it.
# Fix: the original used bare ``except:`` clauses, which swallowed every
# error (including real bugs), and a failure here left ``sentences_wer``
# undefined so the second ``try`` failed with the wrong message.
try:
    wordList_wer = apply_wer(float(sys.argv[3]))
    sentences_wer = [" ".join(sentence) for sentence in wordList_wer]
except (IndexError, ValueError):
    print("There is no number to define the desired word error rate")
    sys.exit(1)

# Write one corrupted sentence per line to <outdir>/wer<rate>.txt.
try:
    with open(sys.argv[2] + "/wer" + sys.argv[3] + ".txt", "w", encoding="utf-8") as show_unurl:
        for item in sentences_wer:
            show_unurl.write("%s\n" % item)
except OSError:
    print("Unable to save to directory")
aerismodsdk/modules/quectel.py | ethaeris/aeris-modsdk-py | 0 | 6623764 | <gh_stars>0
"""
Copyright 2020 Aeris Communications Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import aerismodsdk.utils.rmutils as rmutils
import aerismodsdk.utils.aerisutils as aerisutils
from aerismodsdk.modules.module import Module
class QuectelModule(Module):
    """Quectel-specific modem support built on the generic :class:`Module`.

    Implements network queries, packet-data (PDP context) management,
    HTTP/UDP helpers and power-saving-mode (PSM) control by writing
    Quectel ``AT+Q...`` commands over the module's serial port
    (``self.myserial``).
    """

    # ========================================================================
    #
    # The network stuff
    #

    def get_network_info(self, scan, verbose):
        """Query network registration info, enabling Quectel extras first."""
        ser = self.myserial
        # Enable unsolicited reg results
        rmutils.write(ser, 'AT+CREG=2')
        # Quectel-specific advanced configuration
        rmutils.write(ser, 'AT+QPSMEXTCFG?')
        return super().get_network_info(scan, verbose)

    # ========================================================================
    #
    # The packet stuff
    #

    def parse_constate(self, constate):
        """Parse an ``AT+QIACT?`` response; return the IP string or False.

        Side effect: caches the address in ``self.my_ip`` when present.
        """
        if len(constate) < len('+QIACT: '):
            return False
        else:
            vals = constate.split(',')
            if len(vals) < 4:
                return False
            vals2 = vals[3].split('"')
            self.my_ip = vals2[1]
            # print('My IP: ' + self.my_ip)
            return self.my_ip

    def create_packet_session(self, verbose=True):
        """Configure the APN and activate PDP context 1 if not already up."""
        ser = self.myserial
        rmutils.write(ser, 'AT+QICSGP=1,1,"' + self.apn + '","","",0', verbose=verbose)
        constate = rmutils.write(ser, 'AT+QIACT?', verbose=verbose)  # Check if we are already connected
        if not self.parse_constate(constate):  # Returns packet session info if in session
            rmutils.write(ser, 'AT+QIACT=1', verbose=verbose)  # Activate context / create packet session
            constate = rmutils.write(ser, 'AT+QIACT?', verbose=verbose)  # Verify that we connected
            self.parse_constate(constate)
            if not self.parse_constate(constate):
                return False
        return True

    def get_packet_info(self, verbose=True):
        """Return the current packet-session IP address, or False if none."""
        ser = self.myserial
        constate = rmutils.write(ser, 'AT+QIACT?', verbose=verbose)  # Check if we are already connected
        return self.parse_constate(constate)

    def start_packet_session(self, verbose=True):
        """Bring up a packet session (thin wrapper over create_packet_session)."""
        self.create_packet_session()

    def stop_packet_session(self, verbose=True):
        """Tear down the packet session."""
        ser = self.myserial
        rmutils.write(ser, 'AT+QIDEACT=1')  # Deactivate context

    def ping(self, host, verbose):
        """Ping ``host`` four times through the modem (AT+QPING)."""
        ser = self.myserial
        self.create_packet_session()
        mycmd = 'AT+QPING=1,\"' + host + '\",4,4'  # Context, host, timeout, pingnum
        rmutils.write(ser, mycmd, delay=6)  # Write a ping command; Wait timeout plus 2 seconds

    def lookup(self, host, verbose):
        """Resolve ``host`` via the modem's DNS (AT+QIDNSGIP)."""
        ser = self.myserial
        self.create_packet_session()
        rmutils.write(ser, 'AT+QIDNSCFG=1')  # Check DNS server
        mycmd = 'AT+QIDNSGIP=1,\"' + host + '\"'
        rmutils.write(ser, mycmd, timeout=0)  # Write a dns lookup command
        rmutils.wait_urc(ser, 4, self.com_port)  # Wait up to 4 seconds for results to come back via urc

    # ========================================================================
    #
    # The http stuff
    #

    def http_get(self, host, verbose):
        """Issue a plain HTTP GET to ``host`` over a raw TCP socket."""
        ser = self.myserial
        self.create_packet_session()
        # Open TCP socket to the host
        rmutils.write(ser, 'AT+QICLOSE=0', delay=1)  # Make sure no sockets open
        mycmd = 'AT+QIOPEN=1,0,\"TCP\",\"' + host + '\",80,0,0'
        rmutils.write(ser, mycmd, delay=1)  # Create TCP socket connection as a client
        sostate = rmutils.write(ser, 'AT+QISTATE=1,0')  # Check socket state
        if "TCP" not in sostate:  # Try one more time with a delay if not connected
            sostate = rmutils.write(ser, 'AT+QISTATE=1,0', delay=1)  # Check socket state
        # Send HTTP GET
        getpacket = self.get_http_packet(host)
        mycmd = 'AT+QISEND=0,' + str(len(getpacket))
        rmutils.write(ser, mycmd, getpacket, delay=0)  # Write an http get command
        rmutils.write(ser, 'AT+QISEND=0,0')  # Check how much data sent
        # Read the response
        rmutils.write(ser, 'AT+QIRD=0,1500')  # Check receive

    # ========================================================================
    #
    # The udp stuff
    #

    def udp_listen(self, listen_port, listen_wait, verbose=True):
        """Open a UDP listening socket; optionally wait for inbound data.

        NOTE(review): the socket is opened on hard-coded local port 3030;
        the ``listen_port`` argument is not used — confirm intent.
        """
        ser = self.myserial
        read_sock = '1'  # Use socket 1 for listen
        if self.create_packet_session(verbose=verbose):
            aerisutils.print_log('Packet session active: ' + self.my_ip)
        else:
            return False
        # Open UDP socket for listen
        mycmd = 'AT+QIOPEN=1,' + read_sock + ',"UDP SERVICE","127.0.0.1",0,3030,1'
        rmutils.write(ser, mycmd, delay=1, verbose=verbose)  # Create UDP socket connection
        sostate = rmutils.write(ser, 'AT+QISTATE=1,' + read_sock, verbose=verbose)  # Check socket state
        if "UDP" not in sostate:  # Try one more time with a delay if not connected
            sostate = rmutils.write(ser, 'AT+QISTATE=1,' + read_sock, delay=1, verbose=verbose)  # Check socket state
            if "UDP" not in sostate:
                return False
        # Wait for data
        if listen_wait > 0:
            rmutils.wait_urc(ser, listen_wait, self.com_port, returnonreset=True)  # Wait up to X seconds for UDP data to come in
        return True

    def udp_echo(self, host, port, echo_delay, echo_wait, verbose=True):
        """Send a JSON echo command to a UDP echo server and await the reply.

        Returns True when the command was sent (and, if ``echo_wait`` > 0,
        when the echoed payload came back with the expected length).
        NOTE(review): ``host`` and ``port`` parameters are ignored — the
        echo server address (192.168.3.11) and port 3030 are hard-coded;
        confirm whether they should be honoured.
        """
        ser = self.myserial
        echo_host = '192.168.3.11'
        port = '3030'
        write_sock = '0'  # Use socket 0 for sending
        if self.udp_listen(port, 0, verbose=verbose):  # Open listen port
            aerisutils.print_log('Listening on port: ' + port)
        else:
            return False
        # Open UDP socket to the host for sending echo command
        rmutils.write(ser, 'AT+QICLOSE=0', delay=1, verbose=verbose)  # Make sure no sockets open
        mycmd = 'AT+QIOPEN=1,0,\"UDP\",\"' + echo_host + '\",' + port + ',0,1'
        rmutils.write(ser, mycmd, delay=1, verbose=verbose)  # Create UDP socket connection as a client
        sostate = rmutils.write(ser, 'AT+QISTATE=1,0', verbose=verbose)  # Check socket state
        if "UDP" not in sostate:  # Try one more time with a delay if not connected
            sostate = rmutils.write(ser, 'AT+QISTATE=1,0', delay=1, verbose=verbose)  # Check socket state
        # Send data
        udppacket = str('{"delay":' + str(echo_delay * 1000) + ', "ip":"' + self.my_ip + '","port":' + str(port) + '}')
        # print('UDP packet: ' + udppacket)
        mycmd = 'AT+QISEND=0,' + str(len(udppacket))
        rmutils.write(ser, mycmd, udppacket, delay=0, verbose=verbose)  # Write udp packet
        rmutils.write(ser, 'AT+QISEND=0,0', verbose=verbose)  # Check how much data sent
        aerisutils.print_log('Sent echo command: ' + udppacket)
        if echo_wait == 0:
            # True indicates we sent the echo
            return True
        else:
            echo_wait = round(echo_wait + echo_delay)
            vals = rmutils.wait_urc(ser, echo_wait, self.com_port, returnonreset=True,
                                    returnonvalue='OK')  # Wait up to X seconds to confirm data sent
            # print('Return: ' + str(vals))
            vals = rmutils.wait_urc(ser, echo_wait, self.com_port, returnonreset=True,
                                    returnonvalue='+QIURC:')  # Wait up to X seconds for UDP data to come in
            vals = super().parse_response(vals, '+QIURC:')
            print('Return: ' + str(vals))
            if len(vals) > 3 and int(vals[2]) == len(udppacket):
                return True
            else:
                return False

    # ========================================================================
    #
    # The PSM stuff
    #

    def psm_mode(self, i):  # PSM mode
        """Map the AT+QPSMCFG mode bitfield value to a human-readable name."""
        switcher = {
            0b0001: 'PSM without network coordination',
            0b0010: 'Rel 12 PSM without context retention',
            0b0100: 'Rel 12 PSM with context retention',
            0b1000: 'PSM in between eDRX cycles'}
        return switcher.get(i, "Invalid value")

    def get_psm_info(self, verbose):
        """Print Quectel PSM configuration, then query via the base class."""
        ser = self.myserial
        psmsettings = rmutils.write(ser, 'AT+QPSMCFG?',
                                    verbose=verbose)  # Check PSM feature mode and min time threshold
        vals = super().parse_response(psmsettings, '+QPSMCFG:')
        print('Minimum seconds to enter PSM: ' + vals[0])
        print('PSM mode: ' + self.psm_mode(int(vals[1])))
        # Check on urc setting
        psmsettings = rmutils.write(ser, 'AT+QCFG="psm/urc"', verbose=verbose)  # Check if urc enabled
        vals = super().parse_response(psmsettings, '+QCFG: ')
        print('PSM unsolicited response codes (urc): ' + vals[1])
        # Query settings
        return super().get_psm_info('+QPSMS', 2, 10, verbose)

    def enable_psm(self, tau_time, atime, verbose=True):
        """Enable PSM with the given TAU/active times and turn on PSM URCs."""
        ser = self.myserial
        super().enable_psm(tau_time, atime, verbose)
        rmutils.write(ser, 'AT+QCFG="psm/urc",1', verbose=verbose)  # Enable urc for PSM
        aerisutils.print_log('PSM is enabled with TAU: {0} s and AT: {1} s'.format(str(tau_time), str(atime)))

    def disable_psm(self, verbose):
        """Disable PSM and its unsolicited result codes."""
        ser = self.myserial
        super().disable_psm(verbose)
        rmutils.write(ser, 'AT+QCFG="psm/urc",0', verbose=verbose)  # Disable urc for PSM
        aerisutils.print_log('PSM and PSM/URC disabled')

    def psm_now(self):
        """Ask the modem to enter PSM immediately after RRC release."""
        mycmd = 'AT+QCFG="psm/enter",1'  # Enter PSM right after RRC
        ser = self.myserial
        rmutils.write(ser, mycmd)
        # Enable urc setting
        rmutils.write(ser, 'AT+QCFG="psm/urc",1')  # Enable urc for PSM
        # Let's try to wait for such a urc
        # rmutils.wait_urc(ser, 120)  # Wait up to 120 seconds for urc
# ========================================================================
#
# The eDRX stuff - see base class
#
| """
Copyright 2020 Aeris Communications Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import aerismodsdk.utils.rmutils as rmutils
import aerismodsdk.utils.aerisutils as aerisutils
from aerismodsdk.modules.module import Module
class QuectelModule(Module):
    """Quectel-specific modem support built on the generic :class:`Module`.

    Implements network queries, packet-data (PDP context) management,
    HTTP/UDP helpers and power-saving-mode (PSM) control by writing
    Quectel ``AT+Q...`` commands over the module's serial port
    (``self.myserial``).
    """

    # ========================================================================
    #
    # The network stuff
    #

    def get_network_info(self, scan, verbose):
        """Query network registration info, enabling Quectel extras first."""
        ser = self.myserial
        # Enable unsolicited reg results
        rmutils.write(ser, 'AT+CREG=2')
        # Quectel-specific advanced configuration
        rmutils.write(ser, 'AT+QPSMEXTCFG?')
        return super().get_network_info(scan, verbose)

    # ========================================================================
    #
    # The packet stuff
    #

    def parse_constate(self, constate):
        """Parse an ``AT+QIACT?`` response; return the IP string or False.

        Side effect: caches the address in ``self.my_ip`` when present.
        """
        if len(constate) < len('+QIACT: '):
            return False
        else:
            vals = constate.split(',')
            if len(vals) < 4:
                return False
            vals2 = vals[3].split('"')
            self.my_ip = vals2[1]
            # print('My IP: ' + self.my_ip)
            return self.my_ip

    def create_packet_session(self, verbose=True):
        """Configure the APN and activate PDP context 1 if not already up."""
        ser = self.myserial
        rmutils.write(ser, 'AT+QICSGP=1,1,"' + self.apn + '","","",0', verbose=verbose)
        constate = rmutils.write(ser, 'AT+QIACT?', verbose=verbose)  # Check if we are already connected
        if not self.parse_constate(constate):  # Returns packet session info if in session
            rmutils.write(ser, 'AT+QIACT=1', verbose=verbose)  # Activate context / create packet session
            constate = rmutils.write(ser, 'AT+QIACT?', verbose=verbose)  # Verify that we connected
            self.parse_constate(constate)
            if not self.parse_constate(constate):
                return False
        return True

    def get_packet_info(self, verbose=True):
        """Return the current packet-session IP address, or False if none."""
        ser = self.myserial
        constate = rmutils.write(ser, 'AT+QIACT?', verbose=verbose)  # Check if we are already connected
        return self.parse_constate(constate)

    def start_packet_session(self, verbose=True):
        """Bring up a packet session (thin wrapper over create_packet_session)."""
        self.create_packet_session()

    def stop_packet_session(self, verbose=True):
        """Tear down the packet session."""
        ser = self.myserial
        rmutils.write(ser, 'AT+QIDEACT=1')  # Deactivate context

    def ping(self, host, verbose):
        """Ping ``host`` four times through the modem (AT+QPING)."""
        ser = self.myserial
        self.create_packet_session()
        mycmd = 'AT+QPING=1,\"' + host + '\",4,4'  # Context, host, timeout, pingnum
        rmutils.write(ser, mycmd, delay=6)  # Write a ping command; Wait timeout plus 2 seconds

    def lookup(self, host, verbose):
        """Resolve ``host`` via the modem's DNS (AT+QIDNSGIP)."""
        ser = self.myserial
        self.create_packet_session()
        rmutils.write(ser, 'AT+QIDNSCFG=1')  # Check DNS server
        mycmd = 'AT+QIDNSGIP=1,\"' + host + '\"'
        rmutils.write(ser, mycmd, timeout=0)  # Write a dns lookup command
        rmutils.wait_urc(ser, 4, self.com_port)  # Wait up to 4 seconds for results to come back via urc

    # ========================================================================
    #
    # The http stuff
    #

    def http_get(self, host, verbose):
        """Issue a plain HTTP GET to ``host`` over a raw TCP socket."""
        ser = self.myserial
        self.create_packet_session()
        # Open TCP socket to the host
        rmutils.write(ser, 'AT+QICLOSE=0', delay=1)  # Make sure no sockets open
        mycmd = 'AT+QIOPEN=1,0,\"TCP\",\"' + host + '\",80,0,0'
        rmutils.write(ser, mycmd, delay=1)  # Create TCP socket connection as a client
        sostate = rmutils.write(ser, 'AT+QISTATE=1,0')  # Check socket state
        if "TCP" not in sostate:  # Try one more time with a delay if not connected
            sostate = rmutils.write(ser, 'AT+QISTATE=1,0', delay=1)  # Check socket state
        # Send HTTP GET
        getpacket = self.get_http_packet(host)
        mycmd = 'AT+QISEND=0,' + str(len(getpacket))
        rmutils.write(ser, mycmd, getpacket, delay=0)  # Write an http get command
        rmutils.write(ser, 'AT+QISEND=0,0')  # Check how much data sent
        # Read the response
        rmutils.write(ser, 'AT+QIRD=0,1500')  # Check receive

    # ========================================================================
    #
    # The udp stuff
    #

    def udp_listen(self, listen_port, listen_wait, verbose=True):
        """Open a UDP listening socket; optionally wait for inbound data.

        NOTE(review): the socket is opened on hard-coded local port 3030;
        the ``listen_port`` argument is not used — confirm intent.
        """
        ser = self.myserial
        read_sock = '1'  # Use socket 1 for listen
        if self.create_packet_session(verbose=verbose):
            aerisutils.print_log('Packet session active: ' + self.my_ip)
        else:
            return False
        # Open UDP socket for listen
        mycmd = 'AT+QIOPEN=1,' + read_sock + ',"UDP SERVICE","127.0.0.1",0,3030,1'
        rmutils.write(ser, mycmd, delay=1, verbose=verbose)  # Create UDP socket connection
        sostate = rmutils.write(ser, 'AT+QISTATE=1,' + read_sock, verbose=verbose)  # Check socket state
        if "UDP" not in sostate:  # Try one more time with a delay if not connected
            sostate = rmutils.write(ser, 'AT+QISTATE=1,' + read_sock, delay=1, verbose=verbose)  # Check socket state
            if "UDP" not in sostate:
                return False
        # Wait for data
        if listen_wait > 0:
            rmutils.wait_urc(ser, listen_wait, self.com_port, returnonreset=True)  # Wait up to X seconds for UDP data to come in
        return True

    def udp_echo(self, host, port, echo_delay, echo_wait, verbose=True):
        """Send a JSON echo command to a UDP echo server and await the reply.

        Returns True when the command was sent (and, if ``echo_wait`` > 0,
        when the echoed payload came back with the expected length).
        NOTE(review): ``host`` and ``port`` parameters are ignored — the
        echo server address (192.168.3.11) and port 3030 are hard-coded;
        confirm whether they should be honoured.
        """
        ser = self.myserial
        echo_host = '192.168.3.11'
        port = '3030'
        write_sock = '0'  # Use socket 0 for sending
        if self.udp_listen(port, 0, verbose=verbose):  # Open listen port
            aerisutils.print_log('Listening on port: ' + port)
        else:
            return False
        # Open UDP socket to the host for sending echo command
        rmutils.write(ser, 'AT+QICLOSE=0', delay=1, verbose=verbose)  # Make sure no sockets open
        mycmd = 'AT+QIOPEN=1,0,\"UDP\",\"' + echo_host + '\",' + port + ',0,1'
        rmutils.write(ser, mycmd, delay=1, verbose=verbose)  # Create UDP socket connection as a client
        sostate = rmutils.write(ser, 'AT+QISTATE=1,0', verbose=verbose)  # Check socket state
        if "UDP" not in sostate:  # Try one more time with a delay if not connected
            sostate = rmutils.write(ser, 'AT+QISTATE=1,0', delay=1, verbose=verbose)  # Check socket state
        # Send data
        udppacket = str('{"delay":' + str(echo_delay * 1000) + ', "ip":"' + self.my_ip + '","port":' + str(port) + '}')
        # print('UDP packet: ' + udppacket)
        mycmd = 'AT+QISEND=0,' + str(len(udppacket))
        rmutils.write(ser, mycmd, udppacket, delay=0, verbose=verbose)  # Write udp packet
        rmutils.write(ser, 'AT+QISEND=0,0', verbose=verbose)  # Check how much data sent
        aerisutils.print_log('Sent echo command: ' + udppacket)
        if echo_wait == 0:
            # True indicates we sent the echo
            return True
        else:
            echo_wait = round(echo_wait + echo_delay)
            vals = rmutils.wait_urc(ser, echo_wait, self.com_port, returnonreset=True,
                                    returnonvalue='OK')  # Wait up to X seconds to confirm data sent
            # print('Return: ' + str(vals))
            vals = rmutils.wait_urc(ser, echo_wait, self.com_port, returnonreset=True,
                                    returnonvalue='+QIURC:')  # Wait up to X seconds for UDP data to come in
            vals = super().parse_response(vals, '+QIURC:')
            print('Return: ' + str(vals))
            if len(vals) > 3 and int(vals[2]) == len(udppacket):
                return True
            else:
                return False

    # ========================================================================
    #
    # The PSM stuff
    #

    def psm_mode(self, i):  # PSM mode
        """Map the AT+QPSMCFG mode bitfield value to a human-readable name."""
        switcher = {
            0b0001: 'PSM without network coordination',
            0b0010: 'Rel 12 PSM without context retention',
            0b0100: 'Rel 12 PSM with context retention',
            0b1000: 'PSM in between eDRX cycles'}
        return switcher.get(i, "Invalid value")

    def get_psm_info(self, verbose):
        """Print Quectel PSM configuration, then query via the base class."""
        ser = self.myserial
        psmsettings = rmutils.write(ser, 'AT+QPSMCFG?',
                                    verbose=verbose)  # Check PSM feature mode and min time threshold
        vals = super().parse_response(psmsettings, '+QPSMCFG:')
        print('Minimum seconds to enter PSM: ' + vals[0])
        print('PSM mode: ' + self.psm_mode(int(vals[1])))
        # Check on urc setting
        psmsettings = rmutils.write(ser, 'AT+QCFG="psm/urc"', verbose=verbose)  # Check if urc enabled
        vals = super().parse_response(psmsettings, '+QCFG: ')
        print('PSM unsolicited response codes (urc): ' + vals[1])
        # Query settings
        return super().get_psm_info('+QPSMS', 2, 10, verbose)

    def enable_psm(self, tau_time, atime, verbose=True):
        """Enable PSM with the given TAU/active times and turn on PSM URCs."""
        ser = self.myserial
        super().enable_psm(tau_time, atime, verbose)
        rmutils.write(ser, 'AT+QCFG="psm/urc",1', verbose=verbose)  # Enable urc for PSM
        aerisutils.print_log('PSM is enabled with TAU: {0} s and AT: {1} s'.format(str(tau_time), str(atime)))

    def disable_psm(self, verbose):
        """Disable PSM and its unsolicited result codes."""
        ser = self.myserial
        super().disable_psm(verbose)
        rmutils.write(ser, 'AT+QCFG="psm/urc",0', verbose=verbose)  # Disable urc for PSM
        aerisutils.print_log('PSM and PSM/URC disabled')

    def psm_now(self):
        """Ask the modem to enter PSM immediately after RRC release."""
        mycmd = 'AT+QCFG="psm/enter",1'  # Enter PSM right after RRC
        ser = self.myserial
        rmutils.write(ser, mycmd)
        # Enable urc setting
        rmutils.write(ser, 'AT+QCFG="psm/urc",1')  # Enable urc for PSM
        # Let's try to wait for such a urc
        # rmutils.wait_urc(ser, 120)  # Wait up to 120 seconds for urc
# ========================================================================
#
# The eDRX stuff - see base class
# | en | 0.726031 | Copyright 2020 Aeris Communications Inc. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at https://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. # ======================================================================== # # The network stuff # # Enable unsolicited reg results # Quectel-specific advanced configuration # ======================================================================== # # The packet stuff # # print('My IP: ' + self.my_ip) # Check if we are already connected # Returns packet session info if in session # Activate context / create packet session # Verify that we connected # Check if we are already connected # Deactivate context # Context, host, timeout, pingnum # Write a ping command; Wait timeout plus 2 seconds # Check DNS server # Write a dns lookup command # Wait up to 4 seconds for results to come back via urc # ======================================================================== # # The http stuff # # Open TCP socket to the host # Make sure no sockets open # Create TCP socket connection as a client # Check socket state # Try one more time with a delay if not connected # Check socket state # Send HTTP GET # Write an http get command # Check how much data sent # Read the response # Check receive # ======================================================================== # # The udp stuff # # Use socket 1 for listen # Open UDP socket for listen # Create UDP socket connection # Check socket state # Try one more time with a delay if not connected # Check socket state # Wait for data # Wait up to X seconds for UDP 
data to come in # Use socket 0 for sending # Open listen port # Open UDP socket to the host for sending echo command # Make sure no sockets open # Create UDP socket connection as a client # Check socket state # Try one more time with a delay if not connected # Check socket state # Send data # print('UDP packet: ' + udppacket) # Write udp packet # Check how much data sent # True indicates we sent the echo # Wait up to X seconds to confirm data sent #print('Return: ' + str(vals)) # Wait up to X seconds for UDP data to come in # ======================================================================== # # The PSM stuff # # PSM mode # Check PSM feature mode and min time threshold # Check on urc setting # Check if urc enabled # Query settings # Enable urc for PSM # Disable urc for PSM # Enter PSM right after RRC # Enable urc setting # Enable urc for PSM # Let's try to wait for such a urc # rmutils.wait_urc(ser, 120) # Wait up to 120 seconds for urc # ======================================================================== # # The eDRX stuff - see base class # | 1.762679 | 2 |
textkit/tokenize/bigrams.py | learntextvis/textkit | 29 | 6623765 | <reponame>learntextvis/textkit<gh_stars>10-100
import click
import nltk
from textkit.utils import output, read_tokens
@click.command()
@click.argument('tokens', type=click.File('r'), default=click.open_file('-'))
@click.option('-s', '--sep', default=' ',
              help='Separator between words in bigram output.',
              show_default=True)
def words2bigrams(sep, tokens):
    '''Tokenize words into bigrams. Bigrams are two word tokens.
    Punctuation is considered as a separate token.'''
    content = read_tokens(tokens)
    bigrams = []
    try:
        bigrams = list(nltk.bigrams(content))
    except LookupError as err:
        # nltk raises LookupError when its tokenizer data is missing.
        click.echo(message="Error with tokenization", nl=True)
        click.echo(message="Have you run \"textkit download\"?", nl=True)
        click.echo(message="\nOriginal Error:", nl=True)
        click.echo(err)
    # Fix: emit one bigram per line with a plain loop — the original used a
    # list comprehension purely for its side effects, building a throwaway
    # list of None values.
    for bigram in bigrams:
        output(sep.join(bigram))
| import click
import nltk
from textkit.utils import output, read_tokens
@click.command()
@click.argument('tokens', type=click.File('r'), default=click.open_file('-'))
@click.option('-s', '--sep', default=' ',
              help='Separator between words in bigram output.',
              show_default=True)
def words2bigrams(sep, tokens):
    '''Tokenize words into bigrams. Bigrams are two word tokens.
    Punctuation is considered as a separate token.'''
    content = read_tokens(tokens)
    bigrams = []
    try:
        bigrams = list(nltk.bigrams(content))
    except LookupError as err:
        # nltk raises LookupError when its tokenizer data is missing.
        click.echo(message="Error with tokenization", nl=True)
        click.echo(message="Have you run \"textkit download\"?", nl=True)
        click.echo(message="\nOriginal Error:", nl=True)
        click.echo(err)
    # Fix: emit one bigram per line with a plain loop — the original used a
    # list comprehension purely for its side effects, building a throwaway
    # list of None values.
    for bigram in bigrams:
        output(sep.join(bigram))
psafe3-to-keepass-csv.py | hupf/psafe3-to-keepass-csv | 0 | 6623766 | import sys
import csv
from datetime import datetime
from argparse import ArgumentParser
from getpass import getpass
from loxodo.vault import Vault
class HelpfulArgumentParser(ArgumentParser):
    """ArgumentParser that prints the full help text on argument errors."""

    def error(self, message):
        # Show the error plus complete usage, then exit with status 2
        # (argparse's conventional exit code for CLI errors).
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)
# Build the command-line interface (HelpfulArgumentParser prints full
# usage on error).
parser = HelpfulArgumentParser(description='Convert a Password Safe v3 file to a CSV file (cleartext!) that can be imported with KeePassXC.')
parser.add_argument('input_file', help='Input file in Password Safe v3 format')
parser.add_argument('output_file', help='Output file in unencrypted (!) CSV format')
args = parser.parse_args()

input_file = args.input_file
output_file = args.output_file

# Prompt for the vault master password without echoing it.
# Fix: the original line was mangled to ``password = <PASSWORD>()``;
# ``getpass`` is already imported for exactly this purpose.
password = getpass()
vault = Vault(password, input_file)

# NOTE(review): binary mode plus .encode('utf-8') suggests this script
# targets Python 2; under Python 3 csv.writer needs a text-mode file —
# confirm the intended interpreter before changing.
with open(output_file, 'wb') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['group', 'title', 'username', 'password', 'url', 'notes', 'modified'])
    for record in vault.records:
        writer.writerow([
            # group
            record._get_group().encode('utf-8'),
            # title
            record._get_title().encode('utf-8'),
            # username
            record._get_user().encode('utf-8'),
            # password
            record._get_passwd().encode('utf-8'),
            # url
            record._get_url(),
            # notes
            record._get_notes().encode('utf-8'),
            # last modified
            datetime.fromtimestamp(record.last_mod).isoformat()
        ])
| import sys
import csv
from datetime import datetime
from argparse import ArgumentParser
from getpass import getpass
from loxodo.vault import Vault
class HelpfulArgumentParser(ArgumentParser):
    """ArgumentParser that prints the full help text on argument errors."""

    def error(self, message):
        # Show the error plus complete usage, then exit with status 2
        # (argparse's conventional exit code for CLI errors).
        sys.stderr.write('error: %s\n' % message)
        self.print_help()
        sys.exit(2)
# Build the command-line interface (HelpfulArgumentParser prints full
# usage on error).
parser = HelpfulArgumentParser(description='Convert a Password Safe v3 file to a CSV file (cleartext!) that can be imported with KeePassXC.')
parser.add_argument('input_file', help='Input file in Password Safe v3 format')
parser.add_argument('output_file', help='Output file in unencrypted (!) CSV format')
args = parser.parse_args()

input_file = args.input_file
output_file = args.output_file

# Prompt for the vault master password without echoing it.
# Fix: the original line was mangled to ``password = <PASSWORD>()``;
# ``getpass`` is already imported for exactly this purpose.
password = getpass()
vault = Vault(password, input_file)

# NOTE(review): binary mode plus .encode('utf-8') suggests this script
# targets Python 2; under Python 3 csv.writer needs a text-mode file —
# confirm the intended interpreter before changing.
with open(output_file, 'wb') as csvfile:
    writer = csv.writer(csvfile)
    writer.writerow(['group', 'title', 'username', 'password', 'url', 'notes', 'modified'])
    for record in vault.records:
        writer.writerow([
            # group
            record._get_group().encode('utf-8'),
            # title
            record._get_title().encode('utf-8'),
            # username
            record._get_user().encode('utf-8'),
            # password
            record._get_passwd().encode('utf-8'),
            # url
            record._get_url(),
            # notes
            record._get_notes().encode('utf-8'),
            # last modified
            datetime.fromtimestamp(record.last_mod).isoformat()
        ])
| en | 0.742821 | # group # title # username # password # url # notes # last mofified | 3.011438 | 3 |
test.py | deep-compute/gmaildump | 0 | 6623767 | import doctest
import unittest
from gmaildump import gmailhistory
def suitefn():
    """Build a unittest suite containing gmailhistory's doctests."""
    doctest_suite = doctest.DocTestSuite(gmailhistory)
    collected = unittest.TestSuite()
    collected.addTests(doctest_suite)
    return collected
# Executing this file directly runs the module's doctests (suitefn is
# provided for external test runners).
if __name__ == "__main__":
    doctest.testmod(gmailhistory)
| import doctest
import unittest
from gmaildump import gmailhistory
def suitefn():
    """Build a unittest suite containing gmailhistory's doctests."""
    doctest_suite = doctest.DocTestSuite(gmailhistory)
    collected = unittest.TestSuite()
    collected.addTests(doctest_suite)
    return collected
# Executing this file directly runs the module's doctests (suitefn is
# provided for external test runners).
if __name__ == "__main__":
    doctest.testmod(gmailhistory)
| none | 1 | 1.794396 | 2 | |
molecule/default/tests/test_namenodes.py | mikemillerr/ansible-hdfs | 19 | 6623768 | <gh_stars>10-100
import testinfra.utils.ansible_runner

# Run these tests against every host in the 'namenodes' Ansible group.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    '.molecule/inventory').get_hosts('namenodes')
def test_hdfs_printTopology_command(Sudo, Command):
    """dfsadmin -printTopology (run as the hdfs user) prints 4 lines."""
    with Sudo("hdfs"):
        c = Command("/usr/local/hadoop/bin/hdfs dfsadmin -printTopology")
        assert len(c.stdout.rstrip().split('\n')) == 4
        assert c.rc == 0

def test_hdfs_check_safemode_is_off(Sudo, Command):
    """Safe mode reports OFF for both namenodes."""
    with Sudo("hdfs"):
        c = Command("/usr/local/hadoop/bin/hdfs dfsadmin -safemode get")
        assert len(c.stdout.rstrip().split('\n')) == 2
        for row in c.stdout.rstrip().split('\n'):
            assert row.find("OFF") != -1
        assert c.rc == 0

def test_hdfs_is_empty(Sudo, Command):
    """A fresh cluster root (/) lists no entries."""
    with Sudo("hdfs"):
        c = Command("/usr/local/hadoop/bin/hdfs dfs -ls /")
        assert c.stdout.rstrip() == ''
        assert c.rc == 0

def test_hdfs_namenode_running(Service):
    """The hdfs-namenode service is running and enabled at boot."""
    service = Service('hdfs-namenode')
    assert service.is_running
    assert service.is_enabled

def test_hdfs_zkfc_running(Service):
    """The hdfs-zkfc (ZooKeeper failover controller) service is up."""
    service = Service('hdfs-zkfc')
    assert service.is_running
    assert service.is_enabled

def test_hdfs_listening(Service, Socket):
    """The namenode RPC port (8020) is listening."""
    socket = Socket('tcp://0.0.0.0:8020')
    assert socket.is_listening

def test_hdfs_web_listening(Socket):
    """The namenode web UI port (50070) is listening."""
    socket = Socket('tcp://0.0.0.0:50070')
    assert socket.is_listening
| import testinfra.utils.ansible_runner
# Run these tests against every host in the 'namenodes' Ansible group.
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
    '.molecule/inventory').get_hosts('namenodes')
def test_hdfs_printTopology_command(Sudo, Command):
    """dfsadmin -printTopology (run as the hdfs user) prints 4 lines."""
    with Sudo("hdfs"):
        c = Command("/usr/local/hadoop/bin/hdfs dfsadmin -printTopology")
        assert len(c.stdout.rstrip().split('\n')) == 4
        assert c.rc == 0

def test_hdfs_check_safemode_is_off(Sudo, Command):
    """Safe mode reports OFF for both namenodes."""
    with Sudo("hdfs"):
        c = Command("/usr/local/hadoop/bin/hdfs dfsadmin -safemode get")
        assert len(c.stdout.rstrip().split('\n')) == 2
        for row in c.stdout.rstrip().split('\n'):
            assert row.find("OFF") != -1
        assert c.rc == 0

def test_hdfs_is_empty(Sudo, Command):
    """A fresh cluster root (/) lists no entries."""
    with Sudo("hdfs"):
        c = Command("/usr/local/hadoop/bin/hdfs dfs -ls /")
        assert c.stdout.rstrip() == ''
        assert c.rc == 0

def test_hdfs_namenode_running(Service):
    """The hdfs-namenode service is running and enabled at boot."""
    service = Service('hdfs-namenode')
    assert service.is_running
    assert service.is_enabled

def test_hdfs_zkfc_running(Service):
    """The hdfs-zkfc (ZooKeeper failover controller) service is up."""
    service = Service('hdfs-zkfc')
    assert service.is_running
    assert service.is_enabled

def test_hdfs_listening(Socket):
    """The namenode RPC port (8020) is listening."""
    socket = Socket('tcp://0.0.0.0:8020')
    assert socket.is_listening

def test_hdfs_web_listening(Socket):
    """The namenode web UI port (50070) is listening."""
    socket = Socket('tcp://0.0.0.0:50070')
    assert socket.is_listening
gntp/readers/sru.py | nagendra20001414/gntp | 0 | 6623769 | # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
class SRUFusedRNN(tf.contrib.rnn.FusedRNNCell):
    """Simple Recurrent Unit, very fast. https://openreview.net/pdf?id=rJBiunlAW

    Precomputes all SRU gates with one fused dense layer applied across the
    whole sequence, then runs the cheap elementwise recurrence through
    :class:`_SRUUpdateCell` via a FusedRNNCellAdaptor.
    """

    def __init__(self, num_units, f_bias=1.0, r_bias=0.0, with_residual=True):
        # f_bias / r_bias seed the forget- and residual-gate biases.
        self._num_units = num_units
        cell = _SRUUpdateCell(num_units, with_residual)
        self._rnn = tf.contrib.rnn.FusedRNNCellAdaptor(cell, use_dynamic_rnn=True)
        # Bias layout mirrors the gate order split off in __call__:
        # [transform (0.0), forget (f_bias)] plus [residual (r_bias)] if used.
        self._constant_bias = [0.0] * self._num_units + [f_bias] * self._num_units
        if with_residual:
            self._constant_bias += [r_bias] * self._num_units
        self._constant_bias = np.array(self._constant_bias, np.float32)
        self._with_residual = with_residual

    def __call__(self, inputs, initial_state=None, dtype=tf.float32, sequence_length=None, scope=None):
        # inputs: assumed rank-3 with features on axis 2 (time-major
        # [time, batch, features]) — TODO(review): confirm, and confirm the
        # feature size equals num_units (the residual path adds the raw
        # inputs to num_units-sized activations).
        num_gates = 3 if self._with_residual else 2
        transformed = tf.layers.dense(inputs, num_gates * self._num_units,
                                      bias_initializer=tf.constant_initializer(self._constant_bias))
        gates = tf.split(transformed, num_gates, axis=2)
        forget_gate = tf.sigmoid(gates[1])
        transformed_inputs = (1.0 - forget_gate) * gates[0]
        if self._with_residual:
            residual_gate = tf.sigmoid(gates[2])
            inputs *= (1.0 - residual_gate)
            new_inputs = tf.concat([inputs, transformed_inputs, forget_gate, residual_gate], axis=2)
        else:
            new_inputs = tf.concat([transformed_inputs, forget_gate], axis=2)
        return self._rnn(new_inputs, initial_state, dtype, sequence_length, scope)
class _SRUUpdateCell(tf.contrib.rnn.RNNCell):
    """Simple Recurrent Unit, very fast. https://openreview.net/pdf?id=rJBiunlAW

    Per-step state update only: all input projections and gate nonlinearities
    are precomputed by SRUFusedRNN and packed along the feature axis of
    `inputs`, so each step is purely element-wise.
    """

    def __init__(self, num_units, with_residual, activation=None, reuse=None):
        super(_SRUUpdateCell, self).__init__(_reuse=reuse)
        self._num_units = num_units
        self._with_residual = with_residual
        self._activation = activation or tf.tanh

    @property
    def state_size(self):
        return self._num_units

    @property
    def output_size(self):
        return self._num_units

    def call(self, inputs, state):
        """Simple recurrent unit (SRU)."""
        if self._with_residual:
            # inputs packs [(1-r)*x, (1-f)*candidate, f, r] along axis 1.
            base_inputs, transformed_inputs, forget_gate, residual_gate = tf.split(inputs, 4, axis=1)
            new_state = forget_gate * state + transformed_inputs
            # Highway output: gated activation of the state plus the scaled input.
            new_h = residual_gate * self._activation(new_state) + base_inputs
        else:
            # inputs packs [(1-f)*candidate, f] along axis 1.
            transformed_inputs, forget_gate = tf.split(inputs, 2, axis=1)
            new_state = new_h = forget_gate * state + transformed_inputs
        return new_h, new_state
| # -*- coding: utf-8 -*-
import numpy as np
import tensorflow as tf
class SRUFusedRNN(tf.contrib.rnn.FusedRNNCell):
"""Simple Recurrent Unit, very fast. https://openreview.net/pdf?id=rJBiunlAW"""
def __init__(self, num_units, f_bias=1.0, r_bias=0.0, with_residual=True):
self._num_units = num_units
cell = _SRUUpdateCell(num_units, with_residual)
self._rnn = tf.contrib.rnn.FusedRNNCellAdaptor(cell, use_dynamic_rnn=True)
self._constant_bias = [0.0] * self._num_units + [f_bias] * self._num_units
if with_residual:
self._constant_bias += [r_bias] * self._num_units
self._constant_bias = np.array(self._constant_bias, np.float32)
self._with_residual = with_residual
def __call__(self, inputs, initial_state=None, dtype=tf.float32, sequence_length=None, scope=None):
num_gates = 3 if self._with_residual else 2
transformed = tf.layers.dense(inputs, num_gates * self._num_units,
bias_initializer=tf.constant_initializer(self._constant_bias))
gates = tf.split(transformed, num_gates, axis=2)
forget_gate = tf.sigmoid(gates[1])
transformed_inputs = (1.0 - forget_gate) * gates[0]
if self._with_residual:
residual_gate = tf.sigmoid(gates[2])
inputs *= (1.0 - residual_gate)
new_inputs = tf.concat([inputs, transformed_inputs, forget_gate, residual_gate], axis=2)
else:
new_inputs = tf.concat([transformed_inputs, forget_gate], axis=2)
return self._rnn(new_inputs, initial_state, dtype, sequence_length, scope)
class _SRUUpdateCell(tf.contrib.rnn.RNNCell):
"""Simple Recurrent Unit, very fast. https://openreview.net/pdf?id=rJBiunlAW"""
def __init__(self, num_units, with_residual, activation=None, reuse=None):
super(_SRUUpdateCell, self).__init__(_reuse=reuse)
self._num_units = num_units
self._with_residual = with_residual
self._activation = activation or tf.tanh
@property
def state_size(self):
return self._num_units
@property
def output_size(self):
return self._num_units
def call(self, inputs, state):
"""Simple recurrent unit (SRU)."""
if self._with_residual:
base_inputs, transformed_inputs, forget_gate, residual_gate = tf.split(inputs, 4, axis=1)
new_state = forget_gate * state + transformed_inputs
new_h = residual_gate * self._activation(new_state) + base_inputs
else:
transformed_inputs, forget_gate = tf.split(inputs, 2, axis=1)
new_state = new_h = forget_gate * state + transformed_inputs
return new_h, new_state
| en | 0.732199 | # -*- coding: utf-8 -*- Simple Recurrent Unit, very fast. https://openreview.net/pdf?id=rJBiunlAW Simple Recurrent Unit, very fast. https://openreview.net/pdf?id=rJBiunlAW Simple recurrent unit (SRU). | 2.678876 | 3 |
reframed/cobra/variability.py | xuanyuanXIV/reframed | 30 | 6623770 | <reponame>xuanyuanXIV/reframed
from ..solvers import solver_instance
from ..solvers.solution import Status
from .simulation import FBA
from .thermodynamics import llFBA
from warnings import warn
from math import inf
def _fva_bound(model, r_id, minimize, constraints, loopless, internal, solver):
    """Solve one FVA sub-problem: minimize (or maximize) flux through r_id.

    Returns the optimal objective value, -inf/+inf when the sub-problem is
    unbounded (or infeasible-or-unbounded), or None when it is infeasible or
    of unknown status (a warning is emitted in those cases).
    """
    if loopless:
        solution = llFBA(model, r_id, minimize, constraints=constraints, internal=internal,
                         solver=solver, get_values=False)
    else:
        solution = FBA(model, r_id, minimize, constraints=constraints, solver=solver, get_values=False)
    if solution.status == Status.OPTIMAL:
        return solution.fobj
    if solution.status in (Status.UNBOUNDED, Status.INF_OR_UNB):
        # An unbounded minimization has no lower limit; a maximization no upper.
        return -inf if minimize else inf
    if solution.status == Status.INFEASIBLE:
        warn('Infeasible solution status')
    else:
        warn('Unknown solution status')
    return None


def FVA(model, obj_frac=0, reactions=None, constraints=None, loopless=False, internal=None, solver=None):
    """ Run Flux Variability Analysis (FVA).

    Arguments:
        model (CBModel): a constraint-based model
        obj_frac (float): minimum fraction of the maximum growth rate (default 0.0, max: 1.0)
        reactions (list): list of reactions to analyze (default: all)
        constraints (dict): additional constraints (optional)
        loopless (bool): run looplessFBA internally (very slow) (default: false)
        internal (list): list of internal reactions for looplessFBA (optional)
        solver (Solver): pre-instantiated solver instance (optional)

    Returns:
        dict: flux variation ranges (reaction id -> [min, max])
    """
    _constraints = {}
    if constraints:
        _constraints.update(constraints)
    if not solver:
        solver = solver_instance(model)
    if obj_frac > 0:
        # Pin growth to at least a fraction of its optimum before exploring fluxes.
        target = model.biomass_reaction
        solution = FBA(model, objective=target, constraints=constraints, solver=solver)
        _constraints[target] = (obj_frac * solution.fobj, inf)
    if not reactions:
        reactions = model.reactions.keys()
    # Refactor: the minimization and maximization passes previously duplicated
    # ~20 lines of status handling each; both now go through _fva_bound.
    variability = {}
    for r_id in reactions:
        lower = _fva_bound(model, r_id, True, _constraints, loopless, internal, solver)
        upper = _fva_bound(model, r_id, False, _constraints, loopless, internal, solver)
        variability[r_id] = [lower, upper]
    return variability
def blocked_reactions(model, constraints=None, reactions=None, abstol=1e-9):
    """ Find all blocked reactions in a model

    Arguments:
        model (CBModel): a constraint-based model
        constraints (dict): additional constraints (optional)
        reactions (list): List of reactions which will be tested (default: None, test all reactions)
        abstol (float): absolute tolerance (default: 1e-9)

    Returns:
        list: blocked reactions
    """
    flux_ranges = FVA(model, reactions=reactions, constraints=constraints)
    blocked = []
    # A reaction is blocked when both flux bounds are (numerically) zero.
    for r_id, (lb, ub) in flux_ranges.items():
        if abs(lb) + abs(ub) < abstol:
            blocked.append(r_id)
    return blocked
| from ..solvers import solver_instance
from ..solvers.solution import Status
from .simulation import FBA
from .thermodynamics import llFBA
from warnings import warn
from math import inf
def FVA(model, obj_frac=0, reactions=None, constraints=None, loopless=False, internal=None, solver=None):
""" Run Flux Variability Analysis (FVA).
Arguments:
model (CBModel): a constraint-based model
obj_frac (float): minimum fraction of the maximum growth rate (default 0.0, max: 1.0)
reactions (list): list of reactions to analyze (default: all)
constraints (dict): additional constraints (optional)
loopless (bool): run looplessFBA internally (very slow) (default: false)
internal (list): list of internal reactions for looplessFBA (optional)
solver (Solver): pre-instantiated solver instance (optional)
Returns:
dict: flux variation ranges
"""
_constraints = {}
if constraints:
_constraints.update(constraints)
if not solver:
solver = solver_instance(model)
if obj_frac > 0:
target = model.biomass_reaction
solution = FBA(model, objective=target, constraints=constraints, solver=solver)
_constraints[target] = (obj_frac * solution.fobj, inf)
if not reactions:
reactions = model.reactions.keys()
variability = {r_id: [None, None] for r_id in reactions}
for r_id in reactions:
if loopless:
solution = llFBA(model, r_id, True, constraints=_constraints, internal=internal,
solver=solver, get_values=False)
else:
solution = FBA(model, r_id, True, constraints=_constraints, solver=solver, get_values=False)
if solution.status == Status.OPTIMAL:
variability[r_id][0] = solution.fobj
elif solution.status == Status.UNBOUNDED:
variability[r_id][0] = -inf
elif solution.status == Status.INF_OR_UNB:
variability[r_id][0] = -inf
elif solution.status == Status.INFEASIBLE:
warn('Infeasible solution status')
else:
warn('Unknown solution status')
for r_id in reactions:
if loopless:
solution = llFBA(model, r_id, False, constraints=_constraints, internal=internal,
solver=solver, get_values=False)
else:
solution = FBA(model, r_id, False, constraints=_constraints, solver=solver, get_values=False)
if solution.status == Status.OPTIMAL:
variability[r_id][1] = solution.fobj
elif solution.status == Status.UNBOUNDED:
variability[r_id][1] = inf
elif solution.status == Status.INF_OR_UNB:
variability[r_id][1] = inf
elif solution.status == Status.INFEASIBLE:
warn('Infeasible solution status')
else:
warn('Unknown solution status')
return variability
def blocked_reactions(model, constraints=None, reactions=None, abstol=1e-9):
""" Find all blocked reactions in a model
Arguments:
model (CBModel): a constraint-based model
constraints (dict): additional constraints (optional)
reactions (list): List of reactions which will be tested (default: None, test all reactions)
abstol (float): absolute tolerance (default: 1e-9)
Returns:
list: blocked reactions
"""
variability = FVA(model, reactions=reactions, constraints=constraints)
return [r_id for r_id, (lb, ub) in variability.items() if (abs(lb) + abs(ub)) < abstol] | en | 0.680313 | Run Flux Variability Analysis (FVA). Arguments: model (CBModel): a constraint-based model obj_frac (float): minimum fraction of the maximum growth rate (default 0.0, max: 1.0) reactions (list): list of reactions to analyze (default: all) constraints (dict): additional constraints (optional) loopless (bool): run looplessFBA internally (very slow) (default: false) internal (list): list of internal reactions for looplessFBA (optional) solver (Solver): pre-instantiated solver instance (optional) Returns: dict: flux variation ranges Find all blocked reactions in a model Arguments: model (CBModel): a constraint-based model constraints (dict): additional constraints (optional) reactions (list): List of reactions which will be tested (default: None, test all reactions) abstol (float): absolute tolerance (default: 1e-9) Returns: list: blocked reactions | 2.234643 | 2 |
map/models.py | matthewoconnor/mapplot-cdp | 0 | 6623771 | <gh_stars>0
import re
import requests
import matplotlib.path as matplotlib_path
import numpy as np
from pyquery import PyQuery as pq
from sodapy import Socrata
from django.db import models
from django.utils import timezone
from django.template.loader import render_to_string
from django.core.files.base import ContentFile
from django.conf import settings
from .utils import kml_hex_color_from_value_range, kml_height_from_value_range
# Kinds of geographic area an Area row can represent.
AREA_TYPES = (
    ("UNCATEGORIZED", "Uncategorized"),
    ("BLOCK", "Block"),
    ("NEIGHBORHOOD", "Neighborhood"),
    ("WARD", "Ward"),
    ("DISTRICT", "District"),
    ("STATE", "State"),
    ("COUNTRY", "Country"),
    ("REGION", "Region"),
    ("COUNTY", "County"),
)

# Whether a polygon ring is the outer boundary of an area or a hole inside it.
BOUNDARY_TYPES = (
    ("OUTER", "Outer Boundary"),
    ("INNER", "Inner Boundary")
)

# How dataset rows are aggregated into a bin value.
WEIGHT_TYPES = (
    ("COUNT", "Count Instances"),
    ("SUM", "Sum Field value")
)

# How dataset rows are matched to areas.
CATEGORIZE_TYPES = (
    ("POINT", "Location Point"),
    ("LATLNG", "Latitude Longitude"),
    ("JOIN", "Join on Common Field"),
    ("JOIN_MAP", "Join on Field Mapping")
)

# Supported dataset backends.
DATASET_TYPES = (
    ("SOCRATA", "Socrata Soda Data Portal"),
    ("OTHER", "Url for Other Data Source")
)
class Area(models.Model):
    """
    A single enclosed polygonal ring.

    A row with boundary_type "OUTER" is a filled region; rows with
    boundary_type "INNER" are holes, linked to their outer ring via
    `outer_area`.  When a shape has several outer rings, the first one is the
    primary area and the rest link back through `primary_area`.
    """
    name = models.CharField(max_length=256)
    # Identifier from the source dataset (e.g. a neighborhood or ward code).
    external_identifier = models.CharField(max_length=256)
    area_type = models.CharField(max_length=64, choices=AREA_TYPES)
    boundary_type = models.CharField(max_length=64, choices=BOUNDARY_TYPES)
    # Semicolon-separated "lng,lat[,alt]" coordinate tuples of the ring.
    polygon = models.TextField()
    # Minimum bounding rectangle as "n,e,s,w"; should be separate numeric
    # fields to help querying on larger datasets.
    mbr = models.CharField(max_length=256)
    is_primary = models.BooleanField(default=True)
    # For INNER rings: the OUTER ring this hole belongs to.
    outer_area = models.ForeignKey("Area", related_name="inner_areas", related_query_name="inner_area", null=True, blank=True)
    # For secondary OUTER rings: the primary ring of the same named shape.
    primary_area = models.ForeignKey("Area", related_name="child_areas", related_query_name="child_area", null=True, blank=True)
    created_time = models.DateTimeField()

    def __str__(self):
        return self.name

    def contains_point(self, lng, lat, polygon_list=None):
        """ tests if a point is within this area
        test for minumum bounding rectangle
        before trying more expensive contains_point method """
        n, e, s, w = self.mbr.split(",")
        # Cheap reject: compare longitude against east/west, latitude against north/south.
        if lng < float(e) and lng > float(w) and lat < float(n) and lat > float(s):
            polygon_list = polygon_list or self.get_polygon_list()
            # Vertices are string pairs; matplotlib coerces them to floats
            # when building the Path.
            path = matplotlib_path.Path(np.array(polygon_list))
            return path.contains_point((lng, lat))
        else:
            return False

    def group_contains_point(self, lng, lat, grouped_polygon_list=None):
        """ tests if a point is within this area group (primary + child rings),
        excluding any holes (inner rings); uses the minimum bounding rectangle
        before trying the more expensive contains_point method """
        grouped_polygon_list = grouped_polygon_list or self.get_grouped_polygon_list()
        for polygon in grouped_polygon_list:
            if polygon["area"].contains_point(lng, lat, polygon_list=polygon["outer"]):
                is_within_inner_polygon = False  # assume contains point until we find point within inner polygon
                for inner_area in polygon["inner"]:
                    if inner_area["area"].contains_point(lng, lat, polygon_list=inner_area["polygon"]):
                        is_within_inner_polygon = True
                        break
                if not is_within_inner_polygon:
                    return True
        return False

    def get_polygon_list(self):
        # Returns [[lng, lat], ...] as strings; altitude (third component) is dropped.
        return [point.split(",")[:2] for point in self.polygon.split(";")]

    def get_grouped_polygon_list(self):
        """ meant to be called on the primary area; returns one dict per outer
        ring (self plus child areas), each with its pre-expanded outer and
        inner point lists """
        return [{
            "area": self,
            "outer": self.get_polygon_list(),
            "inner": [dict(area=ia, polygon=ia.get_polygon_list()) for ia in self.inner_areas.all()]
        }] + [{
            "area": ca,
            "outer": ca.get_polygon_list(),
            "inner": [dict(area=ia, polygon=ia.get_polygon_list()) for ia in ca.inner_areas.all()]
        } for ca in self.child_areas.all()]

    def get_geometry(self):
        """Almost identical to get_grouped_polygon_list, but without area instances"""
        return [{
            "outer": self.get_polygon_list(),
            "inner": [ia.get_polygon_list() for ia in self.inner_areas.all()]
        }] + [{
            "outer": ca.get_polygon_list(),
            "inner": [ia.get_polygon_list() for ia in ca.inner_areas.all()]
        } for ca in self.child_areas.all()]

    def mbr_from_polygon(self):
        # Recompute the "n,e,s,w" bounding-rectangle string from self.polygon.
        points = self.polygon.split(";")
        lngs = []
        lats = []
        for point in points:
            coords = point.split(",")
            lngs.append(float(coords[0]))
            lats.append(float(coords[1]))
        return "{n},{e},{s},{w}".format(n=max(lats), e=max(lngs), s=min(lats), w=min(lngs))

    def save(self, *args, **kwargs):
        # Stamp creation time on first save only.
        self.created_time = self.created_time or timezone.now()
        return super().save(*args, **kwargs)
class AreaMap(models.Model):
    """
    A collection of areas (e.g. Chicago Neighborhoods).

    Areas can be imported either from an uploaded KML file
    (import_areas_from_kml_file) or from a Socrata dataset
    (import_areas_from_soda).
    """
    name = models.CharField(max_length=256)
    description = models.CharField(max_length=256, blank=True)
    areas = models.ManyToManyField("Area", null=True, blank=True)
    data_source = models.CharField(max_length=256, null=True, blank=True)  # e.g. "data.cityofchicago.org"
    dataset_identifier = models.CharField(max_length=256, null=True, blank=True)
    kml_file = models.FileField(upload_to="uploads/areamap/", null=True, blank=True)
    # PyQuery selector locating the area name inside a KML Placemark.
    area_name_path = models.CharField(max_length=256, null=True, blank=True)
    # PyQuery selector locating the external identifier inside a Placemark.
    area_external_identifier_path = models.CharField(max_length=256, null=True, blank=True)
    # AREA_TYPES value assigned to every imported area.
    area_default_type = models.CharField(max_length=64, null=True, blank=True)
    created_time = models.DateTimeField()

    def import_areas_from_kml_file(self, *args, **kwargs):
        """Parse self.kml_file and create Area rows for every Placemark polygon.

        Outer rings become OUTER Areas (the first per placemark is the primary,
        the rest become its children); inner rings become INNER (hole) Areas
        linked via outer_area.  kwargs may supply on_iteration(i, total) to
        report progress.
        """
        on_iteration = kwargs.get("on_iteration", None)
        d = pq(filename=self.kml_file.path, parser="xml").remove_namespaces()
        placemarks = d("Placemark")
        total = len(placemarks)
        i = 0
        # If callable function is passed to keep track of progress, call it
        if on_iteration:
            on_iteration(i, total)
        for placemark in placemarks.items():
            # If callable function is passed to keep track of progress, call it
            i += 1
            if on_iteration:
                on_iteration(i, total)
            polygons = placemark.find("Polygon")
            primary_area = None
            for polygon in polygons.items():
                outer_boundary_text = polygon.find("outerBoundaryIs LinearRing coordinates").text()
                inner_boundaries = polygon.find("innerBoundaryIs")
                area = Area(
                    # Collapse whitespace-separated KML coordinates into the
                    # ";"-separated format Area.polygon stores.
                    polygon=re.sub(r"\s+", ";", outer_boundary_text.strip()),
                    name=placemark.find(self.area_name_path).text(),  # e.g. "Data[name='ntaname'] value"
                    external_identifier=placemark.find(self.area_external_identifier_path).text(),  # e.g. "Data[name='ntacode'] value"
                    area_type=self.area_default_type,
                    boundary_type="OUTER"
                )
                area.mbr = area.mbr_from_polygon()
                # only one outer area (the primary area) is related to the area map, all others are children
                if primary_area:
                    area.primary_area = primary_area
                    area.is_primary = False
                    area.save()
                else:
                    primary_area = area
                    area.save()
                    self.areas.add(area)
                for inner_boundary in inner_boundaries.items():
                    inner_boundary_text = inner_boundary.find("LinearRing coordinates").text()
                    inner_area = Area(
                        polygon=re.sub(r"\s+", ";", inner_boundary_text.strip()),
                        name="{0} Inner".format(area.name),
                        external_identifier=area.external_identifier,
                        area_type=self.area_default_type,
                        boundary_type="INNER",
                        outer_area=area
                    )
                    inner_area.mbr = inner_area.mbr_from_polygon()
                    inner_area.save()

    @classmethod
    def import_from_geojson(cls, file, *args, **kwargs):
        """write code to import from geojson file (not implemented yet)"""
        # feature_path = kwargs.get("feature_path",".")
        pass

    def import_areas_from_soda(self, field_mapping, defaults):
        """Create Area rows from a Socrata dataset.

        field_mapping maps Area field names to dataset column names; defaults
        supplies constant Area field values.  Only the first ring of each
        multipolygon is imported.
        """
        # e.g. this is for chicago neighborhoods
        # field_mapping = dict(
        #     polygon="the_geom",
        #     name="community",
        #     external_identifier="area_num_1"
        # )
        # defaults = dict(
        #     area_type="NEIGHBORHOOD",
        # )
        # client = Socrata(self.data_source, "FakeAppToken", username="<EMAIL>", password="<PASSWORD>")
        client = Socrata(self.data_source, None)
        data = client.get(self.dataset_identifier, content_type="json")
        for area in data:
            # GeoJSON multipolygon: take the first ring of the first polygon.
            coordinates = area[field_mapping["polygon"]]["coordinates"][0][0]
            lngs = []
            lats = []
            polygon = []
            for c in coordinates:
                lngs.append(c[0])
                lats.append(c[1])
                polygon.append( ",".join([str(i) for i in c]) )
            mbr = "{n},{e},{s},{w}".format(n=max(lats), e=max(lngs), s=min(lats), w=min(lngs))
            area_data = dict(
                polygon= ";".join(polygon),
                name=area[field_mapping["name"]],
                external_identifier=area[field_mapping["external_identifier"]],
                mbr=mbr,
                **defaults
            )
            a = Area.objects.create(**area_data)
            self.areas.add(a)

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        # Stamp creation time on first save only.
        self.created_time = self.created_time or timezone.now()
        return super().save(*args, **kwargs)
class AreaBin(models.Model):
    """Aggregated value of one DataMap within one Area."""
    data_map = models.ForeignKey("DataMap")
    area = models.ForeignKey("Area")
    value = models.FloatField(default=0.0)  # aggregated value of the bin
    count = models.IntegerField(default=0)  # number of source rows used for the bin

    def get_geometry(self):
        """Return a serializable summary of this bin with its area's geometry."""
        return dict(
            id=self.id,
            name=self.area.name,
            geometry=self.area.get_geometry(),
            value=self.value,
            count=self.count,
        )
class DataMap(models.Model):
    """
    A user-defined aggregation of an external dataset over an AreaMap,
    rendered to a generated KML file.
    """
    name = models.CharField(max_length=256)
    description = models.CharField(max_length=256, blank=True)
    user = models.ForeignKey("auth.User")
    area_map = models.ForeignKey("AreaMap", null=True, blank=True)
    dataset_type = models.CharField(max_length=256, choices=DATASET_TYPES, blank=True)
    # for socrata datasets
    data_source = models.CharField(max_length=256, null=True, blank=True)  # e.g. "data.cityofchicago.org"
    dataset_identifier = models.CharField(max_length=256, null=True, blank=True)
    # other datasets
    dataset_url = models.URLField(max_length=256, blank=True)
    weight_type = models.CharField(max_length=64, choices=WEIGHT_TYPES)
    categorize_type = models.CharField(choices=CATEGORIZE_TYPES, max_length=64)
    point_key = models.CharField(max_length=256, blank=True)
    latitude_key = models.CharField(max_length=256, blank=True)
    longitude_key = models.CharField(max_length=256, blank=True)
    join_key = models.CharField(max_length=256, blank=True)
    join_map_file = models.FileField(upload_to="uploads/joinmap/", null=True, blank=True)  # json file for complex join mapping
    value_key = models.CharField(max_length=256, blank=True)
    querystring = models.CharField(max_length=256, blank=True)
    kml_file = models.FileField(upload_to="uploads/datamap/", null=True, blank=True)
    task_id = models.CharField(max_length=256, blank=True)  # For tracking progress
    created_time = models.DateTimeField()
    updated_time = models.DateTimeField()

    def get_file_url(self):
        """Return the URL of the generated KML file, or None when no file exists."""
        try:
            return self.kml_file.url
        except Exception:  # ValueError when no file is associated with the field
            return None

    def get_socrata_client(self, *args, **kwargs):
        """Build a Socrata client, authenticated when credentials are configured.

        Credentials come from settings.DATA_PORTAL_KEYS["socrata"]
        (app_token / username / password); falls back to anonymous access.
        """
        socrata_credentials = settings.DATA_PORTAL_KEYS.get("socrata", None)
        session_adapter = dict(
            prefix="http://",
            adapter=requests.adapters.HTTPAdapter(max_retries=3))
        if socrata_credentials:
            return Socrata(
                self.data_source,
                socrata_credentials["app_token"],
                username=socrata_credentials["username"],
                # BUGFIX: this reference was garbled in the source.
                password=socrata_credentials["password"],
                session_adapter=session_adapter)
        return Socrata(
            self.data_source,
            None,
            session_adapter=session_adapter)

    def get_dataset_count(self, *args, **kwargs):
        """Return the total row count of the configured dataset."""
        # TODO: apply filters from self.querystring once filtering is supported.
        client = self.get_socrata_client()
        dataset_count = client.get(self.dataset_identifier, exclude_system_fields=False, select="count(:id)")[0].get("count_id")
        return dataset_count

    def get_metadata(self):
        """Return the Socrata metadata document for the dataset."""
        client = self.get_socrata_client()
        return client.get_metadata(self.dataset_identifier)

    def areabin_dict_from_socrata_dataset(self, *args, **kwargs):
        """Stream the dataset in pages and count rows that fall in each area.

        kwargs: limit (page size, default 1000), offset (start row, default 0),
        iterations (max pages, default 1), on_iteration(i, iterations) progress
        callback.  Returns a list of dicts with keys "area", "polygons", "count".
        """
        limit = kwargs.get("limit", 1000)
        offset = kwargs.get("offset", 0)
        iterations = kwargs.get("iterations", 1)
        on_iteration = kwargs.get("on_iteration", None)
        client = self.get_socrata_client()
        areas = self.area_map.areas.filter(
            is_primary=True
        ).prefetch_related("inner_areas", "child_areas__inner_areas")
        # Pre-expand polygon point lists once per area; point-in-polygon is hot.
        area_bins = [dict(
            area=area,
            polygons=area.get_grouped_polygon_list(),
            count=0,
        ) for area in areas]
        i = 0
        # If a progress callback was supplied, report before starting.
        if on_iteration:
            on_iteration(i, iterations)
        while i < iterations:
            i += 1
            if on_iteration:
                on_iteration(i, iterations)
            data = client.get(
                self.dataset_identifier,
                content_type="json",
                limit=limit,
                offset=offset)  # TODO: add where clause from self.querystring
            if not data:
                break
            if self.categorize_type == "POINT":
                for row in data:
                    try:
                        point = row[self.point_key]
                        coords = point.get("coordinates")
                        # GeoJSON point order is (longitude, latitude).
                        lng = float(coords[0])
                        lat = float(coords[1])
                    except Exception:
                        continue  # row lacks a usable location; skip it
                    self._bin_point(area_bins, lng, lat)
            elif self.categorize_type == "LATLNG":
                for row in data:
                    try:
                        # BUGFIX: latitude_key feeds lat and longitude_key feeds
                        # lng (they were previously swapped).
                        lat = float(row[self.latitude_key])
                        lng = float(row[self.longitude_key])
                    except Exception:
                        continue  # row lacks usable coordinates; skip it
                    self._bin_point(area_bins, lng, lat)
            offset += limit
        return area_bins

    @staticmethod
    def _bin_point(area_bins, lng, lat):
        """Increment the count of the first area bin containing (lng, lat)."""
        for ab in area_bins:
            if ab["area"].group_contains_point(lng, lat, grouped_polygon_list=ab["polygons"]):
                ab["count"] += 1
                break

    def save_kmlfile_from_areabins(self):
        """Render this map's AreaBin rows to KML and store the file on kml_file.

        Raises ValueError when the map has no AreaBins yet.
        """
        # BUGFIX: the reverse accessor is the default "areabin_set"
        # (AreaBin.data_map declares no related_name); "self.areabins" raised
        # AttributeError.
        areabins = list(self.areabin_set.all())
        if not areabins:
            raise ValueError("no AreaBins to render; run the binning step first")
        counts = [ab.count for ab in areabins]
        min_count = min(counts)
        max_count = max(counts)
        for ab in areabins:
            # BUGFIX: model instances do not support item assignment; attach
            # the presentation values as attributes for the template instead.
            ab.height = kml_height_from_value_range(ab.count, min_count, max_count)
            ab.color = kml_hex_color_from_value_range(ab.count, min_count, max_count)
        kml_string = render_to_string("map/map_template.kml", dict(
            kml_map=self,
            areabins=areabins
        ))
        self.kml_file.save("{0} {1}.kml".format(self.name, self.id), ContentFile(kml_string))
        return self.kml_file.path

    def save_areabins_from_dicts(self, areabin_dicts):
        """Upsert AreaBin rows (keyed by (data_map, area)) from binning dicts."""
        for ab_dict in areabin_dicts:
            AreaBin.objects.update_or_create(
                data_map=self,
                area=ab_dict["area"],
                defaults={
                    "count": ab_dict.get("count", 0),
                    "value": ab_dict.get("value", 0.0),
                })

    def kml_mapplot_from_soda_dataset(self, *args, **kwargs):
        """End-to-end: bin the dataset, persist the bins, render the KML file."""
        # BUGFIX: previously called nonexistent methods
        # (area_bins_from_soda_dataset / save_kmlfile_from_area_bins).
        areabin_dicts = self.areabin_dict_from_socrata_dataset(*args, **kwargs)
        self.save_areabins_from_dicts(areabin_dicts)
        return self.save_kmlfile_from_areabins()

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        now = timezone.now()
        self.created_time = self.created_time or now
        self.updated_time = now
        self.user_id = 1  # TODO: remove once multi-user support lands
        return super().save(*args, **kwargs)
| import re
import requests
import matplotlib.path as matplotlib_path
import numpy as np
from pyquery import PyQuery as pq
from sodapy import Socrata
from django.db import models
from django.utils import timezone
from django.template.loader import render_to_string
from django.core.files.base import ContentFile
from django.conf import settings
from .utils import kml_hex_color_from_value_range, kml_height_from_value_range
AREA_TYPES = (
("UNCATEGORIZED", "Uncategorized"),
("BLOCK", "Block"),
("NEIGHBORHOOD", "Neighborhood"),
("WARD", "Ward"),
("DISTRICT", "District"),
("STATE", "State"),
("COUNTRY", "Country"),
("REGION", "Region"),
("COUNTY", "County"),
)
BOUNDARY_TYPES = (
("OUTER", "Outer Boundary"),
("INNER", "Inner Boundary")
)
WEIGHT_TYPES = (
("COUNT", "Count Instances"),
("SUM", "Sum Field value")
)
CATEGORIZE_TYPES = (
("POINT", "Location Point"),
("LATLNG", "Latitude Longitude"),
("JOIN", "Join on Common Field"),
("JOIN_MAP", "Join on Field Mapping")
)
DATASET_TYPES = (
("SOCRATA", "Socrata Soda Data Portal"),
("OTHER", "Url for Other Data Source")
)
class Area(models.Model):
"""
A single enclosed area
"""
name = models.CharField(max_length=256)
external_identifier = models.CharField(max_length=256)
area_type = models.CharField(max_length=64, choices=AREA_TYPES)
boundary_type = models.CharField(max_length=64, choices=BOUNDARY_TYPES)
polygon = models.TextField()
mbr = models.CharField(max_length=256) #n,e,s,w SHOUlD SEPARATE INTO INDIVIDUAL FIELDS TO HELP QUERY ON LARGER
is_primary = models.BooleanField(default=True)
outer_area = models.ForeignKey("Area", related_name="inner_areas", related_query_name="inner_area", null=True, blank=True)
primary_area = models.ForeignKey("Area", related_name="child_areas", related_query_name="child_area", null=True, blank=True)
created_time = models.DateTimeField()
def __str__(self):
return self.name
def contains_point(self, lng, lat, polygon_list=None):
""" tests if a point is within this area
test for minumum bounding rectangle
before trying more expensive contains_point method """
n, e, s, w = self.mbr.split(",")
if lng < float(e) and lng > float(w) and lat < float(n) and lat > float(s):
polygon_list = polygon_list or self.get_polygon_list()
path = matplotlib_path.Path(np.array(polygon_list))
return path.contains_point((lng, lat))
else:
return False
def group_contains_point(self, lng, lat, grouped_polygon_list=None):
""" tests if a point is within this area
test for minumum bounding rectangle
before trying more expensive contains_point method """
grouped_polygon_list = grouped_polygon_list or self.get_grouped_polygon_list()
for polygon in grouped_polygon_list:
if polygon["area"].contains_point(lng, lat, polygon_list=polygon["outer"]):
is_within_inner_polygon = False # assume contains point until we find point within inner polygon
for inner_area in polygon["inner"]:
if inner_area["area"].contains_point(lng, lat, polygon_list=inner_area["polygon"]):
is_within_inner_polygon = True
break
if not is_within_inner_polygon:
return True
return False
def get_polygon_list(self):
return [point.split(",")[:2] for point in self.polygon.split(";")]
def get_grouped_polygon_list(self):
""" meant to be called on the primary area"""
return [{
"area":self,
"outer":self.get_polygon_list(),
"inner":[dict(area=ia, polygon=ia.get_polygon_list()) for ia in self.inner_areas.all()]
}] + [{
"area":ca,
"outer":ca.get_polygon_list(),
"inner":[dict(area=ia, polygon=ia.get_polygon_list()) for ia in ca.inner_areas.all()]
} for ca in self.child_areas.all()]
def get_geometry(self):
"""Almost identical to get_grouped_polygon_list, but without area instances"""
return [{
"outer":self.get_polygon_list(),
"inner":[ia.get_polygon_list() for ia in self.inner_areas.all()]
}] + [{
"outer":ca.get_polygon_list(),
"inner":[ia.get_polygon_list() for ia in ca.inner_areas.all()]
} for ca in self.child_areas.all()]
def mbr_from_polygon(self):
points = self.polygon.split(";")
lngs = []
lats = []
for point in points:
coords = point.split(",")
lngs.append(float(coords[0]))
lats.append(float(coords[1]))
return "{n},{e},{s},{w}".format(n=max(lats), e=max(lngs), s=min(lats), w=min(lngs))
def save(self, *args, **kwargs):
self.created_time = self.created_time or timezone.now()
return super().save(*args, **kwargs)
class AreaMap(models.Model):
"""
A collection of areas (e.g. Chicago Neighborhoods)
"""
name = models.CharField(max_length=256)
description = models.CharField(max_length=256, blank=True)
areas = models.ManyToManyField("Area", null=True, blank=True)
data_source = models.CharField(max_length=256, null=True, blank=True) # e.g. "data.cityofchicago.org"
dataset_identifier = models.CharField(max_length=256, null=True, blank=True)
kml_file = models.FileField(upload_to="uploads/areamap/", null=True, blank=True)
area_name_path = models.CharField(max_length=256, null=True, blank=True)
area_external_identifier_path = models.CharField(max_length=256, null=True, blank=True)
area_default_type = models.CharField(max_length=64, null=True, blank=True)
created_time = models.DateTimeField()
def import_areas_from_kml_file(self, *args, **kwargs):
on_iteration = kwargs.get("on_iteration", None)
d = pq(filename=self.kml_file.path, parser="xml").remove_namespaces()
placemarks = d("Placemark")
total = len(placemarks)
i = 0
# If callable function is passed to keep track of progress, call it
if on_iteration:
on_iteration(i, total)
for placemark in placemarks.items():
# If callable function is passed to keep track of progress, call it
i += 1
if on_iteration:
on_iteration(i, total)
polygons = placemark.find("Polygon")
primary_area = None
for polygon in polygons.items():
outer_boundary_text = polygon.find("outerBoundaryIs LinearRing coordinates").text()
inner_boundaries = polygon.find("innerBoundaryIs")
area = Area(
polygon=re.sub(r"\s+", ";", outer_boundary_text.strip()),
name=placemark.find(self.area_name_path).text(), # e.g. "Data[name='ntaname'] value"
external_identifier=placemark.find(self.area_external_identifier_path).text(), # e.g. "Data[name='ntacode'] value"
area_type=self.area_default_type,
boundary_type="OUTER"
)
area.mbr = area.mbr_from_polygon()
# only one outer area (the primary area) is related to the area map, all others are children
if primary_area:
area.primary_area = primary_area
area.is_primary = False
area.save()
else:
primary_area = area
area.save()
self.areas.add(area)
for inner_boundary in inner_boundaries.items():
inner_boundary_text = inner_boundary.find("LinearRing coordinates").text()
inner_area = Area(
polygon=re.sub(r"\s+", ";", inner_boundary_text.strip()),
name="{0} Inner".format(area.name),
external_identifier=area.external_identifier,
area_type=self.area_default_type,
boundary_type="INNER",
outer_area=area
)
inner_area.mbr = inner_area.mbr_from_polygon()
inner_area.save()
@classmethod
def import_from_geojson(cls, file, *args, **kwargs):
"""write code to import from geojson file"""
# feature_path = kwargs.get("feature_path",".")
pass
def import_areas_from_soda(self, field_mapping, defaults):
# e.g. this is for chicago neighborhoods
# field_mapping = dict(
# polygon="the_geom",
# name="community",
# external_identifier="area_num_1"
# )
# defaults = dict(
# area_type="NEIGHBORHOOD",
# )
# client = Socrata(self.data_source, "FakeAppToken", username="<EMAIL>", password="<PASSWORD>")
client = Socrata(self.data_source, None)
data = client.get(self.dataset_identifier, content_type="json")
for area in data:
coordinates = area[field_mapping["polygon"]]["coordinates"][0][0]
lngs = []
lats = []
polygon = []
for c in coordinates:
lngs.append(c[0])
lats.append(c[1])
polygon.append( ",".join([str(i) for i in c]) )
mbr = "{n},{e},{s},{w}".format(n=max(lats), e=max(lngs), s=min(lats), w=min(lngs))
area_data = dict(
polygon= ";".join(polygon),
name=area[field_mapping["name"]],
external_identifier=area[field_mapping["external_identifier"]],
mbr=mbr,
**defaults
)
a = Area.objects.create(**area_data)
self.areas.add(a)
    def __str__(self):
        """Human-readable name of the area map."""
        return self.name
def save(self, *args, **kwargs):
self.created_time = self.created_time or timezone.now()
return super().save(*args, **kwargs)
class AreaBin(models.Model):
    """Aggregated value/count for a single Area within one DataMap."""
    # NOTE: ForeignKey without on_delete -- pre-Django-2.0 signature,
    # consistent with the other models in this file.
    data_map = models.ForeignKey("DataMap")
    area = models.ForeignKey("Area")
    value = models.FloatField(default=0.0) # value of the bin
    count = models.IntegerField(default=0) # number of rows used for bin
    def get_geometry(self):
        """Return a JSON-serializable dict of this bin's area and totals."""
        return {
            "id": self.id,
            "name": self.area.name,
            "geometry": self.area.get_geometry(),
            "value": self.value,
            "count": self.count
        }
class DataMap(models.Model):
    """
    A generated KML file for a data map.

    Binds a (Socrata or other) dataset to an AreaMap, bins the dataset's
    rows into the map's primary areas, and renders the result as KML.
    """
    name = models.CharField(max_length=256)
    description = models.CharField(max_length=256, blank=True)
    user = models.ForeignKey("auth.User")
    area_map = models.ForeignKey("AreaMap", null=True, blank=True)
    dataset_type = models.CharField(max_length=256, choices=DATASET_TYPES, blank=True)
    # for socrata datasets
    data_source = models.CharField(max_length=256, null=True, blank=True) # e.g. "data.cityofchicago.org"
    dataset_identifier = models.CharField(max_length=256, null=True, blank=True)
    # other datasets
    dataset_url = models.URLField(max_length=256, blank=True)
    weight_type = models.CharField(max_length=64, choices=WEIGHT_TYPES)
    categorize_type = models.CharField(choices=CATEGORIZE_TYPES, max_length=64)
    point_key = models.CharField(max_length=256, blank=True)
    latitude_key = models.CharField(max_length=256, blank=True)
    longitude_key = models.CharField(max_length=256, blank=True)
    join_key = models.CharField(max_length=256, blank=True)
    join_map_file = models.FileField(upload_to="uploads/joinmap/", null=True, blank=True) # json file for complex join mapping
    value_key = models.CharField(max_length=256, blank=True)
    querystring = models.CharField(max_length=256, blank=True)
    kml_file = models.FileField(upload_to="uploads/datamap/", null=True, blank=True)
    task_id = models.CharField(max_length=256, blank=True) # For tracking progress
    created_time = models.DateTimeField()
    updated_time = models.DateTimeField()

    def get_file_url(self):
        """Return the URL of the generated KML file, or None if absent."""
        try:
            return self.kml_file.url
        except ValueError:
            # FileField raises ValueError when no file is associated
            return None

    def get_socrata_client(self, *args, **kwargs):
        """Build a Socrata client, authenticated when credentials are configured."""
        socrata_credentials = settings.DATA_PORTAL_KEYS.get("socrata", None)
        session_adapter = dict(
            prefix="http://",
            adapter=requests.adapters.HTTPAdapter(max_retries=3))
        if socrata_credentials:
            return Socrata(
                self.data_source,
                socrata_credentials["app_token"],
                username=socrata_credentials["username"],
                password=socrata_credentials["password"],
                session_adapter=session_adapter)
        else:
            return Socrata(
                self.data_source,
                None,
                session_adapter=session_adapter)

    def get_dataset_count(self, *args, **kwargs):
        """Return the total row count of the Socrata dataset."""
        # to do: include filters
        client = self.get_socrata_client()
        dataset_count = client.get(self.dataset_identifier, exclude_system_fields=False, select="count(:id)")[0].get("count_id")
        return dataset_count

    def get_metadata(self):
        """Return the Socrata metadata document for the dataset."""
        client = self.get_socrata_client()
        return client.get_metadata(self.dataset_identifier)

    def areabin_dict_from_socrata_dataset(self, *args, **kwargs):
        """Bin dataset rows into the area map's primary areas.

        Pages through the dataset ``limit`` rows at a time for up to
        ``iterations`` pages starting at ``offset``.  ``on_iteration(i,
        iterations)``, when given, is called before each page for progress
        reporting.

        :return: list of dicts ``{"area": Area, "polygons": ..., "count": int}``
        """
        limit = kwargs.get("limit", 1000)
        offset = kwargs.get("offset", 0)
        iterations = kwargs.get("iterations", 1)
        on_iteration = kwargs.get("on_iteration", None)
        client = self.get_socrata_client()
        areas = self.area_map.areas.filter(
            is_primary=True
        ).prefetch_related("inner_areas", "child_areas__inner_areas")
        area_bins = [dict(
            area=area,
            polygons=area.get_grouped_polygon_list(),
            count=0,
        ) for area in areas]
        i = 0
        # If callable function is passed to keep track of progress, call it
        if on_iteration:
            on_iteration(i, iterations)
        while i < iterations:
            i += 1
            # If callable function is passed to keep track of progress, call it
            if on_iteration:
                on_iteration(i, iterations)
            data = client.get(
                self.dataset_identifier,
                content_type="json",
                limit=limit,
                offset=offset)  # ADD WHERE CLAUSE FROM QUERYSTRING
            if not data:
                break
            if self.categorize_type == "POINT":
                for row in data:
                    try:
                        point = row[self.point_key]
                        coords = point.get("coordinates")
                        lng = float(coords[0])
                        lat = float(coords[1])
                        for ab in area_bins:
                            if ab["area"].group_contains_point(lng, lat, grouped_polygon_list=ab["polygons"]):
                                ab["count"] += 1
                                break
                    except Exception:
                        # best effort: skip rows with missing/malformed geometry
                        pass
            elif self.categorize_type == "LATLNG":
                for row in data:
                    try:
                        # fixed: lng was read from latitude_key (and vice
                        # versa); each coordinate now comes from its own column
                        lat = float(row[self.latitude_key])
                        lng = float(row[self.longitude_key])
                        for ab in area_bins:
                            if ab["area"].group_contains_point(lng, lat, grouped_polygon_list=ab["polygons"]):
                                ab["count"] += 1
                                break
                    except Exception:
                        pass
            offset += limit
        return area_bins

    def save_kmlfile_from_areabins(self):
        """Render this map's AreaBins to KML and save it to ``kml_file``.

        :return: filesystem path of the saved KML file, or None when the
            map has no bins yet.
        """
        # fixed: the reverse accessor for AreaBin.data_map is the Django
        # default "areabin_set" (no related_name is declared on AreaBin)
        areabins = self.areabin_set.all()
        counts = [ab.count for ab in areabins]
        if not counts:
            # nothing to render; min()/max() would raise on an empty list
            return None
        min_count = min(counts)
        max_count = max(counts)
        for ab in areabins:
            # annotate the model instances with display attributes for the
            # template (was item assignment, which raises TypeError on
            # model instances)
            ab.height = kml_height_from_value_range(ab.count, min_count, max_count)
            ab.color = kml_hex_color_from_value_range(ab.count, min_count, max_count)
        kml_string = render_to_string("map/map_template.kml", dict(
            kml_map=self,
            areabins=areabins
        ))
        self.kml_file.save("{0} {1}.kml".format(self.name, self.id), ContentFile(kml_string))
        return self.kml_file.path

    def save_areabins_from_dicts(self, areabin_dicts):
        """Create/update AreaBin rows from the dicts produced by
        :meth:`areabin_dict_from_socrata_dataset`."""
        for ab_dict in areabin_dicts:
            AreaBin.objects.update_or_create(
                data_map=self,
                area=ab_dict["area"],
                defaults={
                    "count": ab_dict.get("count", 0),
                    "value": ab_dict.get("value", 0.0)
                })

    def kml_mapplot_from_soda_dataset(self, *args, **kwargs):
        """Full pipeline: bin the dataset, persist the bins, render the KML.

        Fixed: previously called the nonexistent methods
        ``area_bins_from_soda_dataset`` / ``save_kmlfile_from_area_bins``.
        """
        area_bins = self.areabin_dict_from_socrata_dataset(*args, **kwargs)
        self.save_areabins_from_dicts(area_bins)
        return self.save_kmlfile_from_areabins()

    def __str__(self):
        return self.name

    def save(self, *args, **kwargs):
        """Persist, stamping created_time on first save and updated_time always."""
        now = timezone.now()
        self.created_time = self.created_time or now
        self.updated_time = now
        self.user_id = 1  # REMOVE WHEN ABILITY FOR MORE USERS
        return super().save(*args, **kwargs)
Python/RoadLineDetector/RoadLineDetector.py | thefool76/hacktoberfest2021 | 448 | 6623772 | import cv2
import numpy as np
from matplotlib import pyplot as plt
def roi(image, vertices):
    """Mask out everything outside the polygon(s) given by ``vertices``.

    The polygon interior is filled with 255 and AND-ed with the image, so
    only pixels inside the region of interest survive.
    """
    region_mask = np.zeros_like(image)
    cv2.fillPoly(region_mask, vertices, 255)
    return cv2.bitwise_and(image, region_mask)
def image_with_lines(image, lines):
    """Return a copy of ``image`` with the detected line segments drawn in green."""
    base = np.copy(image)
    overlay = np.zeros((base.shape[0], base.shape[1], 3), np.uint8)
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(overlay, (x1, y1), (x2, y2), (0, 255, 0), 4)
    # blend: 80% original plus the full-strength line overlay
    return cv2.addWeighted(base, 0.8, overlay, 1, 0.0)
# Demo: detect road lane lines on a single test image.
img = cv2.imread("roads.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
print(img.shape, img.dtype)
height = img.shape[0]
width = img.shape[1]
# Triangular region of interest (points are (x, y)).
# NOTE(review): the apex (height/2, width/2) looks x/y-swapped; a centred
# apex would be (width/2, height/2) -- confirm intent before changing.
region_of_interest_vertices = [(0, height), (height / 2, width / 2), (width, height)]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edge = cv2.Canny(gray, 100, 200)
# fillPoly needs int32 vertex coordinates: np.uint8 silently wraps any
# coordinate above 255, corrupting the mask for normal image sizes.
cropped_image = roi(edge, np.array([region_of_interest_vertices], np.int32))
line = cv2.HoughLinesP(cropped_image, 1, np.pi / 180, 60, lines=np.array([]),
                       minLineLength=40, maxLineGap=25)
final = image_with_lines(img, line)
plt.imshow(final)
plt.show()
| import cv2
import numpy as np
from matplotlib import pyplot as plt
def roi(image, vertices):
    """Mask out everything outside the polygon(s) given by ``vertices``.

    The polygon interior is filled with 255 and AND-ed with the image, so
    only pixels inside the region of interest survive.
    """
    region_mask = np.zeros_like(image)
    cv2.fillPoly(region_mask, vertices, 255)
    return cv2.bitwise_and(image, region_mask)
def image_with_lines(image, lines):
    """Return a copy of ``image`` with the detected line segments drawn in green."""
    base = np.copy(image)
    overlay = np.zeros((base.shape[0], base.shape[1], 3), np.uint8)
    for line in lines:
        for x1, y1, x2, y2 in line:
            cv2.line(overlay, (x1, y1), (x2, y2), (0, 255, 0), 4)
    # blend: 80% original plus the full-strength line overlay
    return cv2.addWeighted(base, 0.8, overlay, 1, 0.0)
# Demo: detect road lane lines on a single test image.
img = cv2.imread("roads.jpg")
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
print(img.shape, img.dtype)
height = img.shape[0]
width = img.shape[1]
# Triangular region of interest (points are (x, y)).
# NOTE(review): the apex (height/2, width/2) looks x/y-swapped; a centred
# apex would be (width/2, height/2) -- confirm intent before changing.
region_of_interest_vertices = [(0, height), (height / 2, width / 2), (width, height)]
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edge = cv2.Canny(gray, 100, 200)
# fillPoly needs int32 vertex coordinates: np.uint8 silently wraps any
# coordinate above 255, corrupting the mask for normal image sizes.
cropped_image = roi(edge, np.array([region_of_interest_vertices], np.int32))
line = cv2.HoughLinesP(cropped_image, 1, np.pi / 180, 60, lines=np.array([]),
                       minLineLength=40, maxLineGap=25)
final = image_with_lines(img, line)
plt.imshow(final)
plt.show()
| none | 1 | 3.003423 | 3 | |
simple_detection.py | hoerldavid/nis-automation | 0 | 6623773 | from skimage.morphology import remove_small_holes, binary_erosion
from skimage.measure import regionprops, label
from skimage.filters import threshold_local
from skimage.morphology import disk, binary_opening
from skimage.exposure import rescale_intensity
from scipy.ndimage.filters import gaussian_filter
from skimage.transform import pyramid_gaussian
from skimage.color import label2rgb
try:
import javabridge
import bioformats
except ImportError as e:
print('WARNING: Bioformats bridge not installed')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import logging
def bbox_pix2unit(bbox, start, pixsize, direction):
    """
    Old pixel->unit conversion for bounding boxes.

    NB: may not be correct.
    TODO: remove if it is no longer necessary
    """
    log = logging.getLogger(__name__)
    corners = np.array(bbox, dtype=float).reshape((2, 2))
    converted = (corners * np.array(pixsize, dtype=float) *
                 np.array(direction, dtype=float) + np.array(start, dtype=float))
    log.debug('bbox: {}, toUnit: {}'.format(bbox, converted.reshape((4,))))
    return converted.reshape((4,))
def aspect(bbox):
    """
    Inverse aspect ratio of a bounding box (shorter extent / longer extent).

    Parameters
    ----------
    bbox: 4-tuple
        ymin, xmin, ymax, xmax

    Returns
    -------
    aspect: scalar
        inverse aspect ratio (in 0-1)
    """
    ymin, xmin, ymax, xmax = bbox
    extent_y = ymax - ymin
    extent_x = xmax - xmin
    if extent_x > extent_y:
        return extent_y / extent_x
    return extent_x / extent_y
def detect_wings_simple(img, pixel_size=1,
                        ds=2, layers=2, thresh_window=1.8e3,
                        minarea=0.5e6, maxarea=2e6, minsolidity=.6,
                        minaspect=.3, plot=False, threshold_fun=None):
    r"""
    simple wing detection via adaptive thresholding and some filtering by shape
    default area 0.5-2 mm^2

    Parameters
    ----------
    img: np-array (2-dim)
        the input image
    pixel_size: scalar
        pixel size in input image
    ds: scalar
        downsampling factor at each layer
    layers: scalar
        how many downsampling layers to calculate
    thresh_window: integer
        window for adaptive threshold, in original image pixels
    minarea: scalar
        minimum size of objects to detect, in units^2
    maxarea: scalar
        maximum size of objects to detect, in units^2
    minsolidity: scalar
        minimal solidity of detected objects \in (0,1)
    minaspect: scalar
        minimal inverse aspect ratio of detected objects \in (0,1)
    plot: boolean
        whether to plot detections or not
    threshold_fun: function pointer, optional
        thresholding function to use in windows

    Returns
    -------
    bboxes: list of 4-tuples
        bounding boxes (in original image pixel units)
    """
    # scale min and max area to be in pixels^2 at the downsampled resolution
    minarea = minarea / pixel_size**2 / ds**(layers*2)
    maxarea = maxarea / pixel_size**2 / ds**(layers*2)
    # scale thresh window size, make sure it is odd
    thresh_window = int(thresh_window / pixel_size / ds**layers)
    thresh_window += 0 if thresh_window % 2 == 1 else 1
    logger = logging.getLogger(__name__)
    # some debug output:
    logger.info('wing detection started')
    logger.debug('input shape: {}'.format(img.shape))
    logger.debug('ds: {}, layer:{}'.format(ds, layers))
    logger.debug('minarea: {}, maxarea:{}'.format(minarea, maxarea))
    logger.debug('threshold window: {}'.format(thresh_window))
    # downsample
    pyr = [p for p in pyramid_gaussian(img, max_layer=layers, downscale=ds)]
    img_ds = pyr[layers]
    logger.debug('img size after ds: {}'.format(img_ds.shape))
    # rescale to (0-1)
    img_ds = img_ds.astype(float)
    img_ds = rescale_intensity(img_ds, out_range=(0.0, 1.0))
    # smooth
    img_ds = gaussian_filter(img_ds, 2.0)
    # adaptive threshold
    if threshold_fun is None:
        thrd = img_ds > threshold_local(img_ds, thresh_window)
    else:
        thrd = img_ds > threshold_local(img_ds, thresh_window, method='generic', param=threshold_fun)
    # invert (keep pixels below the local threshold) and clean a bit
    thrd = np.bitwise_not(thrd)
    thrd = binary_opening(thrd, selem=disk(4))
    labelled = label(thrd)
    # filter objects by size, solidity and inverse aspect ratio
    ls = [r.label for r in regionprops(labelled) if r.area > minarea and
          r.area < maxarea and r.solidity > minsolidity and aspect(r.bbox) > minaspect]
    # filtered binary (reuse the labelling computed above instead of
    # recomputing label(thrd) a second time)
    res = np.zeros(thrd.shape)
    for li in ls:
        res += (labelled == li)
    # more cleaning, plus some erosion to separate touching wings
    # NB: use the builtin bool -- the np.bool alias was deprecated in
    # NumPy 1.20 and removed in 1.24
    r2 = remove_small_holes(res.astype(bool), 25000)
    r2 = binary_erosion(r2, selem=disk(3))
    # show detections
    if plot:
        image_label_overlay = label2rgb(label(r2), image=img_ds)
        plt.imshow(image_label_overlay)
        ax = plt.gca()
    # get bboxes, upsampled back to original image coordinates
    bboxes = []
    for r in regionprops(label(r2)):
        # TODO: is this really necessary?
        if r.area < (minarea * .8):
            continue
        bbox_scaled = np.array(r.bbox) * (ds**layers)
        logger.debug('bbox: {}, upsampled: {}'.format(r.bbox, bbox_scaled))
        bboxes.append(bbox_scaled)
        if plot:
            minr, minc, maxr, maxc = r.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            ax.add_patch(rect)
    logger.info('found {} object(s)'.format(len(bboxes)))
    return bboxes
def scale_bbox(bbox, expand_factor=.15):
    """
    Grow a bounding box symmetrically by a fixed fraction of its extent.

    Parameters
    ----------
    bbox: 4-tuple
        ymin, xmin, ymax, xmax
    expand_factor: scalar
        factor by which to scale (resulting size will be 1+expand_factor)

    Returns
    -------
    bbox_scaled: 4-tuple
        ymin, xmin, ymax, xmax, scaled by factor
    """
    ymin, xmin, ymax, xmax = tuple(bbox)
    half_pad_y = (ymax - ymin) * expand_factor / 2.
    half_pad_x = (xmax - xmin) * expand_factor / 2.
    return (ymin - half_pad_y, xmin - half_pad_x,
            ymax + half_pad_y, xmax + half_pad_x)
def read_bf(path):
    """
    Read an image into a numpy array using BioFormats.

    Parameters
    ----------
    path: str
        file path to read

    Returns
    -------
    img: np.array
        image as np-array
    """
    # the BioFormats bindings require a running JVM
    javabridge.start_vm(class_path=bioformats.JARS, run_headless=True)
    return bioformats.load_image(path, rescale=False)
| from skimage.morphology import remove_small_holes, binary_erosion
from skimage.measure import regionprops, label
from skimage.filters import threshold_local
from skimage.morphology import disk, binary_opening
from skimage.exposure import rescale_intensity
from scipy.ndimage.filters import gaussian_filter
from skimage.transform import pyramid_gaussian
from skimage.color import label2rgb
try:
import javabridge
import bioformats
except ImportError as e:
print('WARNING: Bioformats bridge not installed')
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import logging
def bbox_pix2unit(bbox, start, pixsize, direction):
    """
    Old pixel->unit conversion for bounding boxes.

    NB: may not be correct.
    TODO: remove if it is no longer necessary
    """
    log = logging.getLogger(__name__)
    corners = np.array(bbox, dtype=float).reshape((2, 2))
    converted = (corners * np.array(pixsize, dtype=float) *
                 np.array(direction, dtype=float) + np.array(start, dtype=float))
    log.debug('bbox: {}, toUnit: {}'.format(bbox, converted.reshape((4,))))
    return converted.reshape((4,))
def aspect(bbox):
    """
    Inverse aspect ratio of a bounding box (shorter extent / longer extent).

    Parameters
    ----------
    bbox: 4-tuple
        ymin, xmin, ymax, xmax

    Returns
    -------
    aspect: scalar
        inverse aspect ratio (in 0-1)
    """
    ymin, xmin, ymax, xmax = bbox
    extent_y = ymax - ymin
    extent_x = xmax - xmin
    if extent_x > extent_y:
        return extent_y / extent_x
    return extent_x / extent_y
def detect_wings_simple(img, pixel_size=1,
                        ds=2, layers=2, thresh_window=1.8e3,
                        minarea=0.5e6, maxarea=2e6, minsolidity=.6,
                        minaspect=.3, plot=False, threshold_fun=None):
    r"""
    simple wing detection via adaptive thresholding and some filtering by shape
    default area 0.5-2 mm^2

    Parameters
    ----------
    img: np-array (2-dim)
        the input image
    pixel_size: scalar
        pixel size in input image
    ds: scalar
        downsampling factor at each layer
    layers: scalar
        how many downsampling layers to calculate
    thresh_window: integer
        window for adaptive threshold, in original image pixels
    minarea: scalar
        minimum size of objects to detect, in units^2
    maxarea: scalar
        maximum size of objects to detect, in units^2
    minsolidity: scalar
        minimal solidity of detected objects \in (0,1)
    minaspect: scalar
        minimal inverse aspect ratio of detected objects \in (0,1)
    plot: boolean
        whether to plot detections or not
    threshold_fun: function pointer, optional
        thresholding function to use in windows

    Returns
    -------
    bboxes: list of 4-tuples
        bounding boxes (in original image pixel units)
    """
    # scale min and max area to be in pixels^2 at the downsampled resolution
    minarea = minarea / pixel_size**2 / ds**(layers*2)
    maxarea = maxarea / pixel_size**2 / ds**(layers*2)
    # scale thresh window size, make sure it is odd
    thresh_window = int(thresh_window / pixel_size / ds**layers)
    thresh_window += 0 if thresh_window % 2 == 1 else 1
    logger = logging.getLogger(__name__)
    # some debug output:
    logger.info('wing detection started')
    logger.debug('input shape: {}'.format(img.shape))
    logger.debug('ds: {}, layer:{}'.format(ds, layers))
    logger.debug('minarea: {}, maxarea:{}'.format(minarea, maxarea))
    logger.debug('threshold window: {}'.format(thresh_window))
    # downsample
    pyr = [p for p in pyramid_gaussian(img, max_layer=layers, downscale=ds)]
    img_ds = pyr[layers]
    logger.debug('img size after ds: {}'.format(img_ds.shape))
    # rescale to (0-1)
    img_ds = img_ds.astype(float)
    img_ds = rescale_intensity(img_ds, out_range=(0.0, 1.0))
    # smooth
    img_ds = gaussian_filter(img_ds, 2.0)
    # adaptive threshold
    if threshold_fun is None:
        thrd = img_ds > threshold_local(img_ds, thresh_window)
    else:
        thrd = img_ds > threshold_local(img_ds, thresh_window, method='generic', param=threshold_fun)
    # invert (keep pixels below the local threshold) and clean a bit
    thrd = np.bitwise_not(thrd)
    thrd = binary_opening(thrd, selem=disk(4))
    labelled = label(thrd)
    # filter objects by size, solidity and inverse aspect ratio
    ls = [r.label for r in regionprops(labelled) if r.area > minarea and
          r.area < maxarea and r.solidity > minsolidity and aspect(r.bbox) > minaspect]
    # filtered binary (reuse the labelling computed above instead of
    # recomputing label(thrd) a second time)
    res = np.zeros(thrd.shape)
    for li in ls:
        res += (labelled == li)
    # more cleaning, plus some erosion to separate touching wings
    # NB: use the builtin bool -- the np.bool alias was deprecated in
    # NumPy 1.20 and removed in 1.24
    r2 = remove_small_holes(res.astype(bool), 25000)
    r2 = binary_erosion(r2, selem=disk(3))
    # show detections
    if plot:
        image_label_overlay = label2rgb(label(r2), image=img_ds)
        plt.imshow(image_label_overlay)
        ax = plt.gca()
    # get bboxes, upsampled back to original image coordinates
    bboxes = []
    for r in regionprops(label(r2)):
        # TODO: is this really necessary?
        if r.area < (minarea * .8):
            continue
        bbox_scaled = np.array(r.bbox) * (ds**layers)
        logger.debug('bbox: {}, upsampled: {}'.format(r.bbox, bbox_scaled))
        bboxes.append(bbox_scaled)
        if plot:
            minr, minc, maxr, maxc = r.bbox
            rect = mpatches.Rectangle((minc, minr), maxc - minc, maxr - minr,
                                      fill=False, edgecolor='red', linewidth=2)
            ax.add_patch(rect)
    logger.info('found {} object(s)'.format(len(bboxes)))
    return bboxes
def scale_bbox(bbox, expand_factor=.15):
    """
    Grow a bounding box symmetrically by a fixed fraction of its extent.

    Parameters
    ----------
    bbox: 4-tuple
        ymin, xmin, ymax, xmax
    expand_factor: scalar
        factor by which to scale (resulting size will be 1+expand_factor)

    Returns
    -------
    bbox_scaled: 4-tuple
        ymin, xmin, ymax, xmax, scaled by factor
    """
    ymin, xmin, ymax, xmax = tuple(bbox)
    half_pad_y = (ymax - ymin) * expand_factor / 2.
    half_pad_x = (xmax - xmin) * expand_factor / 2.
    return (ymin - half_pad_y, xmin - half_pad_x,
            ymax + half_pad_y, xmax + half_pad_x)
def read_bf(path):
    """
    Read an image into a numpy array using BioFormats.

    Parameters
    ----------
    path: str
        file path to read

    Returns
    -------
    img: np.array
        image as np-array
    """
    # the BioFormats bindings require a running JVM
    javabridge.start_vm(class_path=bioformats.JARS, run_headless=True)
    return bioformats.load_image(path, rescale=False)
| en | 0.676085 | old pixel->unit conversion for bounding boxes NB: may no be corect TODO: remove if it is no longer necessary get inverse aspect ratio a bounding box (smaller axis/larger axis) Parameters ---------- bbox: 4-tuple ymin, xmin, ymax, xmax Returns ------- aspect: scalar inverse aspect ratio (in 0-1) simple wing detection via adaptive thresholding and some filtering by shape default area 0.5-2 mm^2 Parameters ---------- img: np-array (2-dim) the input image pixel_size: scalar pixel size in input image ds: scalar downsampling factor at each layer layers: scalar how may downsampling layers to calculate thresh_window: integer window for adaptive threshold, in original image pixels minarea: scalar minimum size of objects to detect, in units^2 maxarea: scalar maximum size of objects to detect, in units^2 minsolidity: scalar minimal solidity of detected objects \in (0,1) minaspect: scalar minimal inverse aspect ratio of detected objects \in (0,1) plot: boolean whether to plot detections or not threshold_fun: function pointer, optional thresholding function to use in windows Returns ------- bboxes: list of 4-tuples bounding boxes (in original image pixel units) # scale min and max area to be in pixels^2 # scale thresh window size, make sure it is odd # some debug output: # downsample # rescale to (0-1) # smooth # adaptive threshold # clean a bit # filter objs # filtered binary # more cleaning, plus some erosion to separate touching wings # show detections # get bboxes # TODO: is this really necessary? expand a bounding box by a fixed factor Parameters ---------- bbox: 4-tuple ymin, xmin, ymax, xmax expand_factor: scalar factor by which to scale ( resulting size will be 1+expand_factor) Returns ------- bbox_scaled: 4-tuple ymin, xmin, ymax, xmax, scaled by factor read an image into a np-array using BioFormats Parameters ---------- path: str file path to read Returns ------- img: np.array image as np-array | 1.946423 | 2 |
python/gvgai/tests/non_gym_client.py | aadharna/GVGAI_GYM | 0 | 6623774 | import logging
import time
import numpy as np
from gvgai.gym import GVGAI_Env
from gvgai.utils.level_data_generator import SokobanGenerator
if __name__ == '__main__':
    # Turn debug logging on
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('Test Agent')
    level_generator = SokobanGenerator()
    env = GVGAI_Env('sokoban-lvl0', max_steps=10, tile_observations=False, include_semantic_data=True, client_only=True)
    initial_frame = env.reset()
    actions = env.unwrapped.get_action_meanings()
    start = time.time()
    frames = 0
    for step in range(1000):
        # take a uniformly random action and observe the result
        stateObs, diffScore, done, debug = env.step(np.random.randint(5))
        env.render()
        frames += 1
        if step % 100 == 0:
            # cumulative frame rate since the start of the run
            fps = frames / (time.time() - start)
            logger.info(f'frames per second: {fps}')
        if done:
            # episode over: start a fresh one and keep stepping
            env.reset()
    # final cumulative frame rate for the whole run
    fps = frames / (time.time() - start)
    logger.info(f'frames per second: {fps}')
import time
import numpy as np
from gvgai.gym import GVGAI_Env
from gvgai.utils.level_data_generator import SokobanGenerator
if __name__ == '__main__':
    # Turn debug logging on
    logging.basicConfig(level=logging.INFO)
    logger = logging.getLogger('Test Agent')
    level_generator = SokobanGenerator()
    # client_only=True: presumably connects to an already-running GVGAI
    # server rather than spawning one -- TODO confirm against GVGAI_Env docs
    env = GVGAI_Env('sokoban-lvl0', max_steps=10, tile_observations=False, include_semantic_data=True, client_only=True)
    initial_frame = env.reset()
    actions = env.unwrapped.get_action_meanings()
    start = time.time()
    frames = 0
    for t in range(1000):
        # choose action based on trained policy
        # do action and get new state and its reward
        action_id = np.random.randint(5)
        stateObs, diffScore, done, debug = env.step(action_id)
        env.render()
        #time.sleep(1)
        frames += 1
        if t % 100 == 0:
            # cumulative frames-per-second since the start of the run
            end = time.time()
            total_time = end - start
            fps = (frames / total_time)
            logger.info(f'frames per second: {fps}')
        # break loop when terminal state is reached
        # NOTE(review): despite the comment above, the code resets the env
        # and keeps looping rather than breaking
        if done:
            env.reset()
    # final cumulative frame rate for the whole run
    end = time.time()
    total_time = end - start
    fps = (frames / total_time)
logger.info(f'frames per second: {fps}') | en | 0.923072 | # Turn debug logging on # choose action based on trained policy # do action and get new state and its reward #time.sleep(1) # break loop when terminal state is reached | 2.300094 | 2 |
mysite/urls.py | thetruefuss/theoctopuslibrary | 4 | 6623775 | <gh_stars>1-10
"""mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from books import views as books_views
from core import views as core_views
# URL routes for the site.  Order matters: Django dispatches to the first
# pattern that matches, so specific routes precede the broad includes.
urlpatterns = [
    # Books app: public pages
    url(r'^$', books_views.homepage, name='homepage'),
    url(r'^results/$', books_views.search_results, name='search_results'),
    url(r'^book/(?P<book_slug>[-\w]+)/$', books_views.book_detail, name='book_detail'),
    url(r'^submit/$', books_views.book_post, name='book_post'),
    # Books app: AJAX endpoints keyed by numeric book id
    url(r'^ajax/contact_details/(?P<book_id>\d+)/$', books_views.contact_details, name='contact_details'),
    url(r'^ajax/deactivate_book/(?P<book_id>\d+)/$', books_views.deactivate_book, name='deactivate_book'),
    url(r'^ajax/activate_book/(?P<book_id>\d+)/$', books_views.activate_book, name='activate_book'),
    # Core informational pages
    url(r'^report/$', core_views.report, name='report'),
    url(r'^feedback/$', core_views.feedback, name='feedback'),
    url(r'^terms/$', core_views.terms, name='terms'),
    url(r'^privacy/$', core_views.privacy, name='privacy'),
    url(r'^about/$', core_views.about, name='about'),
    url(r'^faq/$', core_views.faq, name='faq'),
    # Pluggable apps, REST API namespaces and the admin
    url(r'^accounts/', include('accounts.urls')),
    url(r'^messages/', include('pinax.messages.urls', namespace='pinax_messages')),
    url(r'^api/accounts/', include('accounts.api.urls', namespace='accounts-api')),
    url(r'^api/books/', include('books.api.urls', namespace='books-api')),
    url(r'^api/messages/', include('pinax.messages.api.urls', namespace='messages-api')),
    url(r'^admin/', admin.site.urls),
]
# Serve static and media files through Django only when DEBUG is on.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
    urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/1.11/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf import settings
from django.conf.urls import include, url
from django.conf.urls.static import static
from django.contrib import admin
from books import views as books_views
from core import views as core_views
# URL routes for the site.  Order matters: Django dispatches to the first
# pattern that matches, so specific routes precede the broad includes.
urlpatterns = [
    # Books app: public pages
    url(r'^$', books_views.homepage, name='homepage'),
    url(r'^results/$', books_views.search_results, name='search_results'),
    url(r'^book/(?P<book_slug>[-\w]+)/$', books_views.book_detail, name='book_detail'),
    url(r'^submit/$', books_views.book_post, name='book_post'),
    # Books app: AJAX endpoints keyed by numeric book id
    url(r'^ajax/contact_details/(?P<book_id>\d+)/$', books_views.contact_details, name='contact_details'),
    url(r'^ajax/deactivate_book/(?P<book_id>\d+)/$', books_views.deactivate_book, name='deactivate_book'),
    url(r'^ajax/activate_book/(?P<book_id>\d+)/$', books_views.activate_book, name='activate_book'),
    # Core informational pages
    url(r'^report/$', core_views.report, name='report'),
    url(r'^feedback/$', core_views.feedback, name='feedback'),
    url(r'^terms/$', core_views.terms, name='terms'),
    url(r'^privacy/$', core_views.privacy, name='privacy'),
    url(r'^about/$', core_views.about, name='about'),
    url(r'^faq/$', core_views.faq, name='faq'),
    # Pluggable apps, REST API namespaces and the admin
    url(r'^accounts/', include('accounts.urls')),
    url(r'^messages/', include('pinax.messages.urls', namespace='pinax_messages')),
    url(r'^api/accounts/', include('accounts.api.urls', namespace='accounts-api')),
    url(r'^api/books/', include('books.api.urls', namespace='books-api')),
    url(r'^api/messages/', include('pinax.messages.api.urls', namespace='messages-api')),
    url(r'^admin/', admin.site.urls),
]
# Serve static and media files through Django only when DEBUG is on.
if settings.DEBUG:
    urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT) | en | 0.616317 | mysite URL Configuration The `urlpatterns` list routes URLs to views. For more information please see: https://docs.djangoproject.com/en/1.11/topics/http/urls/ Examples: Function views 1. Add an import: from my_app import views 2. Add a URL to urlpatterns: url(r'^$', views.home, name='home') Class-based views 1. Add an import: from other_app.views import Home 2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home') Including another URLconf 1. Import the include() function: from django.conf.urls import url, include 2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls')) | 2.732517 | 3 |
bin/run_p4_mininet.py | termlen0/transparent-security | 1 | 6623776 | #!/usr/bin/env python2
# Copyright (c) 2019 Cable Television Laboratories, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import sys
import yaml
from trans_sec.mininet.exercise import ExerciseRunner
logger = logging.getLogger('')
def get_args():
    """Parse and return the command-line arguments for the runner."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument('-t', '--topo', help='Path to topology json',
                            type=str, required=True)
    arg_parser.add_argument('-l', '--log-dir', type=str, required=True,
                            default=None)
    arg_parser.add_argument('-lf', '--log-file', type=str, required=False,
                            default='run_p4_mininet.log')
    arg_parser.add_argument('-p', '--pcap-dir', type=str, required=False,
                            default=None)
    arg_parser.add_argument('-j', '--switch_json', type=str, required=False)
    arg_parser.add_argument('-c', '--start-cli', type=bool, required=False,
                            default=None)
    arg_parser.add_argument('-d', '--daemon', help='Run device daemon on hosts.',
                            type=bool, required=False, default=False)
    arg_parser.add_argument('-fc', '--forwarding-config',
                            help='Forwarding config',
                            type=str, required=False)
    arg_parser.add_argument('-lp', '--load-p4', type=str, required=True,
                            choices=['True', 'False'],
                            help='When set, the Exercise class will not attempt '
                                 'to load the P4 program onto the switches')
    return arg_parser.parse_args()
def read_yaml_file(config_file_path):
    """
    Reads a yaml file and returns a dict representation of it
    :param config_file_path: path to the YAML file to parse
    :return: a dict of the yaml file
    """
    logger.debug('Attempting to load configuration file - %s',
                 config_file_path)
    # The context manager closes the file even when yaml.safe_load() raises,
    # so the original try/finally + explicit close() was redundant (it also
    # re-closed an already-closed file handle).
    with open(config_file_path, 'r') as config_file:
        config = yaml.safe_load(config_file)
    logger.info('Loaded configuration')
    return config
if __name__ == '__main__':
    import time

    args = get_args()
    log_file = '{}/{}'.format(args.log_dir, args.log_file)
    # logging.basicConfig() rejects 'stream' and 'filename' together on
    # Python 3 (and silently ignored 'stream' on Python 2 when 'filename'
    # was given), so configure only the file target - this matches the
    # effective original behavior while staying Py3-safe.
    logging.basicConfig(level=logging.DEBUG, filename=log_file)

    # The topology may be supplied as JSON or as YAML.
    topo_file = args.topo
    if topo_file.endswith('json'):
        with open(topo_file, 'r') as f:
            topo = json.load(f)
    else:
        topo = read_yaml_file(topo_file)

    forwarding_yaml = None
    if args.forwarding_config:
        logger.info('Parsing forwarding config file - [%s]',
                    args.forwarding_config)
        forwarding_yaml = read_yaml_file(args.forwarding_config)
        logger.debug('Forwarding config - [%s]', forwarding_yaml)

    # argparse restricts --load-p4 to the strings 'True'/'False'; compare
    # the string directly instead of eval()-ing command-line input.
    load_p4 = args.load_p4 == 'True'
    exercise = ExerciseRunner(
        topo, args.log_dir, args.pcap_dir, args.switch_json, forwarding_yaml,
        args.start_cli, load_p4)
    exercise.run_exercise()
    logger.info('Exercise Runner running indefinitely')
    while True:
        # Keep the process alive without spinning a CPU core.
        time.sleep(1)
| #!/usr/bin/env python2
# Copyright (c) 2019 Cable Television Laboratories, Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import sys
import yaml
from trans_sec.mininet.exercise import ExerciseRunner
logger = logging.getLogger('')
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--topo', help='Path to topology json',
type=str, required=True)
parser.add_argument('-l', '--log-dir', type=str, required=True,
default=None)
parser.add_argument('-lf', '--log-file', type=str, required=False,
default='run_p4_mininet.log')
parser.add_argument('-p', '--pcap-dir', type=str, required=False,
default=None)
parser.add_argument('-j', '--switch_json', type=str, required=False)
parser.add_argument('-c', '--start-cli', type=bool, required=False,
default=None)
parser.add_argument('-d', '--daemon', help='Run device daemon on hosts.',
type=bool, required=False, default=False)
parser.add_argument('-fc', '--forwarding-config', help='Forwarding config',
type=str, required=False)
parser.add_argument('-lp', '--load-p4', type=str, required=True,
choices=['True', 'False'],
help='When set, the Exercise class will not attempt '
'to load the P4 program onto the switches')
return parser.parse_args()
def read_yaml_file(config_file_path):
"""
Reads a yaml file and returns a dict representation of it
:return: a dict of the yaml file
"""
logger.debug('Attempting to load configuration file - ' + config_file_path)
config_file = None
try:
with open(config_file_path, 'r') as config_file:
config = yaml.safe_load(config_file)
logger.info('Loaded configuration')
return config
finally:
if config_file:
logger.info('Closing configuration file')
config_file.close()
if __name__ == '__main__':
args = get_args()
log_file = '{}/{}'.format(args.log_dir, args.log_file)
logging.basicConfig(stream=sys.stdout, level=logging.DEBUG,
filename=log_file)
topo_file = args.topo
if topo_file.endswith('json'):
with open(topo_file, 'r') as f:
topo = json.load(f)
else:
topo = read_yaml_file(topo_file)
forwarding_yaml = None
if args.forwarding_config:
logger.info('Parsing forwarding config file - [%s]',
args.forwarding_config)
forwarding_yaml = read_yaml_file(args.forwarding_config)
logger.debug('Forwarding config - [%s]', forwarding_yaml)
exercise = ExerciseRunner(
topo, args.log_dir, args.pcap_dir, args.switch_json, forwarding_yaml,
args.start_cli, eval(args.load_p4))
exercise.run_exercise()
logger.info('Exercise Runner running indefinitely')
while True:
pass
| en | 0.838065 | #!/usr/bin/env python2 # Copyright (c) 2019 Cable Television Laboratories, Inc. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Reads a yaml file and returns a dict representation of it :return: a dict of the yaml file | 2.037633 | 2 |
bluebottle/fsm/effects.py | terrameijar/bluebottle | 10 | 6623777 | from collections import Iterable
from functools import partial
from builtins import str
from builtins import object
from django.utils.translation import gettext_lazy as _
from django.template.loader import render_to_string
from future.utils import python_2_unicode_compatible
from bluebottle.fsm.state import TransitionNotPossible
@python_2_unicode_compatible
class Effect(object):
    """Base class for side effects attached to FSM state changes.

    Subclasses describe one unit of work bound to a model instance and are
    rendered in the admin via the ``template`` attribute, which this base
    class does not define (see e.g. ``BaseTransitionEffect``).
    """
    # When True the effect runs after the instance is saved; subclasses
    # override this flag.
    post_save = False
    # Callables evaluated against the effect; all must return truthy for
    # ``is_valid``.  NOTE(review): a shared mutable class attribute -
    # subclasses should replace it, not mutate it in place.
    conditions = []
    # Whether the effect should be shown to the user for confirmation.
    display = True
    # Tells the Django template engine not to call the class when rendered.
    do_not_call_in_templates = True
    @classmethod
    def render(cls, effects):
        # Renders a group of same-class effects with ``cls.template``
        # (provided by subclasses).
        context = {
            'opts': effects[0].instance.__class__._meta,
            'effects': effects
        }
        return render_to_string(cls.template, context)
    @property
    def description(self):
        # Human-readable description; defaults to str(self).
        return str(self)
    def __init__(self, instance, **kwargs):
        self.instance = instance
        self.options = kwargs
    def __reduce__(self):
        # NOTE(review): pickling always reconstructs the base ``Effect``
        # class, not ``type(self)`` - confirm subclasses never need to be
        # pickled round-trip.
        return (partial(Effect, self.instance, **self.options), ())
    def __eq__(self, other):
        # NOTE(review): defining __eq__ without __hash__ makes instances
        # unhashable on Python 3 - confirm no code puts effects in sets/dicts.
        return self.instance == other.instance and type(self) == type(other)
    def pre_save(self, **kwargs):
        # Hook executed before the instance is saved; no-op by default.
        pass
    @property
    def is_valid(self):
        # True only when every callable in ``conditions`` passes.
        return all(condition(self) for condition in self.conditions)
    def __str__(self):
        return self.__class__.__name__
    def to_html(self):
        return str(self)
class BaseTransitionEffect(Effect):
    """Effect that executes a state transition on ``instance.<field>``.

    Concrete subclasses are produced by the ``TransitionEffect`` factory
    below, which binds the ``transition`` class attribute.
    """
    # Name of the state-machine attribute on the instance.
    field = 'states'
    title = _('Change the status')
    template = 'admin/transition_effect.html'
    @property
    def description(self):
        return 'Change status of {} to {}'.format(
            str(self.instance), self.transition.target.name
        )
    @property
    def machine(self):
        # The state machine object this effect operates on.
        return getattr(self.instance, self.field)
    @property
    def is_valid(self):
        # Valid only when the base conditions hold *and* the machine can
        # currently perform the bound transition.
        return (
            super().is_valid and
            self.transition in self.machine.possible_transitions()
        )
    def pre_save(self, **kwargs):
        # Impossible transitions are silently skipped; ``is_valid`` is
        # expected to have filtered most of them out already.
        try:
            self.transition.execute(self.machine)
        except TransitionNotPossible:
            pass
    def __eq__(self, other):
        # Equal when the other effect targets the same transition on the
        # same instance (any BaseTransitionEffect subclass qualifies).
        return (
            isinstance(other, BaseTransitionEffect) and
            self.transition == other.transition and
            self.instance == other.instance
        )
    def __repr__(self):
        return '<Effect: {}>'.format(self.transition)
    def __str__(self):
        if self.instance:
            return _('{transition} {object}').format(
                transition=self.transition.name,
                object=str(self.instance)
            )
        return str(self.transition.target)
    @ property
    def help(self):
        # Short "Model: instance" label used in admin confirmation screens.
        return _('{}: {}').format(self.instance.__class__._meta.verbose_name, self.instance)
    def to_html(self):
        # Uses each condition callable's docstring as its human-readable
        # description when conditions are present.
        if self.conditions:
            return _('{transition} {object} if {conditions}').format(
                transition=self.transition.name,
                object=str(self.instance),
                conditions=" and ".join([c.__doc__ for c in self.conditions])
            )
        return _('{transition} {object}').format(
            transition=self.transition.name,
            object=str(self.instance)
        )
def TransitionEffect(transition, field='states', conditions=None, post_save=False, display=True):
    """Create a ``BaseTransitionEffect`` subclass bound to one transition.

    The returned class carries the factory arguments as class attributes;
    ``conditions`` defaults to an empty list.
    """
    class _TransitionEffect(BaseTransitionEffect):
        pass

    # Bind the factory arguments as class attributes after the class
    # statement, so the name lookup works on both Python 2 and 3.
    _TransitionEffect.transition = transition
    _TransitionEffect.field = field
    _TransitionEffect.conditions = conditions or []
    _TransitionEffect.post_save = post_save
    _TransitionEffect.display = display
    return _TransitionEffect
class BaseRelatedTransitionEffect(Effect):
    """Effect that triggers a transition on object(s) related to the instance.

    Concrete subclasses are produced by the ``RelatedTransitionEffect``
    factory below.
    """
    # NOTE(review): this class attribute is shadowed by the ``post_save``
    # method defined further down in this class body; both are truthy, but
    # callers expecting a plain flag here get the bound method instead -
    # confirm this is intended.
    post_save = True
    display = False
    description = None
    # A TransitionEffect-generated class; bound by the factory.
    transition_effect_class = None
    def __init__(self, *args, **kwargs):
        super(BaseRelatedTransitionEffect, self).__init__(*args, **kwargs)
        self.executed = False
        # ``relation`` may be a related manager (has .all()), a plain
        # iterable, or a single object; normalise everything to a list.
        relation = getattr(self.instance, self.relation)
        try:
            self.instances = list(relation.all())
        except AttributeError:
            if isinstance(relation, Iterable):
                self.instances = relation
            else:
                self.instances = [relation]
    def pre_save(self, effects):
        # Apply the wrapped transition effect to every related instance not
        # already handled, collecting executed effects so their own triggers
        # can run as well.  NOTE(review): signature differs from
        # Effect.pre_save(**kwargs) - confirm callers always pass ``effects``.
        for instance in self.instances:
            effect = self.transition_effect_class(
                instance, parent=self.instance, **self.options
            )
            if effect not in effects and effect.is_valid and self.transition in effect.machine.transitions.values():
                self.executed = True
                effect.pre_save(effects=effects)
                effects.append(effect)
                instance.execute_triggers(effects=effects)
    def post_save(self):
        # Persist the related instances only if at least one transition ran.
        if self.executed:
            for instance in self.instances:
                instance.save()
    def __str__(self):
        if self.description:
            return self.description
        return _('{transition} related {object}').format(
            transition=self.transition_effect_class.transition.name,
            object=self.relation
        )
    def __repr__(self):
        return '<Related Transition Effect: {} on {}>'.format(self.transition, list(self.instances))
    def to_html(self):
        # Uses each condition callable's docstring as its description.
        if self.conditions:
            return _('{transition} related {object} if {conditions}').format(
                transition=self.transition_effect_class.transition.name,
                object=str(self.relation),
                conditions=" and ".join([c.__doc__ for c in self.conditions])
            )
        return str(self)
def RelatedTransitionEffect(
    _relation, transition, field='states', conditions=None, description=None, display=True
):
    """Create a ``BaseRelatedTransitionEffect`` subclass for a relation.

    ``field`` only configures the wrapped per-instance transition effect;
    the returned class itself always uses the 'states' field, mirroring
    the original behavior.
    """
    class _RelatedTransitionEffect(BaseRelatedTransitionEffect):
        pass

    # Bind configuration as class attributes after the class statement so
    # the name lookup works on both Python 2 and 3.
    _RelatedTransitionEffect.transition_effect_class = TransitionEffect(
        transition, field, display=display)
    _RelatedTransitionEffect.relation = _relation
    _RelatedTransitionEffect.transition = transition
    _RelatedTransitionEffect.conditions = conditions or []
    _RelatedTransitionEffect.description = description
    _RelatedTransitionEffect.field = 'states'
    return _RelatedTransitionEffect
| from collections import Iterable
from functools import partial
from builtins import str
from builtins import object
from django.utils.translation import gettext_lazy as _
from django.template.loader import render_to_string
from future.utils import python_2_unicode_compatible
from bluebottle.fsm.state import TransitionNotPossible
@python_2_unicode_compatible
class Effect(object):
post_save = False
conditions = []
display = True
do_not_call_in_templates = True
@classmethod
def render(cls, effects):
context = {
'opts': effects[0].instance.__class__._meta,
'effects': effects
}
return render_to_string(cls.template, context)
@property
def description(self):
return str(self)
def __init__(self, instance, **kwargs):
self.instance = instance
self.options = kwargs
def __reduce__(self):
return (partial(Effect, self.instance, **self.options), ())
def __eq__(self, other):
return self.instance == other.instance and type(self) == type(other)
def pre_save(self, **kwargs):
pass
@property
def is_valid(self):
return all(condition(self) for condition in self.conditions)
def __str__(self):
return self.__class__.__name__
def to_html(self):
return str(self)
class BaseTransitionEffect(Effect):
field = 'states'
title = _('Change the status')
template = 'admin/transition_effect.html'
@property
def description(self):
return 'Change status of {} to {}'.format(
str(self.instance), self.transition.target.name
)
@property
def machine(self):
return getattr(self.instance, self.field)
@property
def is_valid(self):
return (
super().is_valid and
self.transition in self.machine.possible_transitions()
)
def pre_save(self, **kwargs):
try:
self.transition.execute(self.machine)
except TransitionNotPossible:
pass
def __eq__(self, other):
return (
isinstance(other, BaseTransitionEffect) and
self.transition == other.transition and
self.instance == other.instance
)
def __repr__(self):
return '<Effect: {}>'.format(self.transition)
def __str__(self):
if self.instance:
return _('{transition} {object}').format(
transition=self.transition.name,
object=str(self.instance)
)
return str(self.transition.target)
@ property
def help(self):
return _('{}: {}').format(self.instance.__class__._meta.verbose_name, self.instance)
def to_html(self):
if self.conditions:
return _('{transition} {object} if {conditions}').format(
transition=self.transition.name,
object=str(self.instance),
conditions=" and ".join([c.__doc__ for c in self.conditions])
)
return _('{transition} {object}').format(
transition=self.transition.name,
object=str(self.instance)
)
def TransitionEffect(transition, field='states', conditions=None, post_save=False, display=True):
_transition = transition
_field = field
_conditions = conditions
_post_save = post_save
_display = display
class _TransitionEffect(BaseTransitionEffect):
transition = _transition
field = _field
conditions = _conditions or []
post_save = _post_save
display = _display
return _TransitionEffect
class BaseRelatedTransitionEffect(Effect):
post_save = True
display = False
description = None
transition_effect_class = None
def __init__(self, *args, **kwargs):
super(BaseRelatedTransitionEffect, self).__init__(*args, **kwargs)
self.executed = False
relation = getattr(self.instance, self.relation)
try:
self.instances = list(relation.all())
except AttributeError:
if isinstance(relation, Iterable):
self.instances = relation
else:
self.instances = [relation]
def pre_save(self, effects):
for instance in self.instances:
effect = self.transition_effect_class(
instance, parent=self.instance, **self.options
)
if effect not in effects and effect.is_valid and self.transition in effect.machine.transitions.values():
self.executed = True
effect.pre_save(effects=effects)
effects.append(effect)
instance.execute_triggers(effects=effects)
def post_save(self):
if self.executed:
for instance in self.instances:
instance.save()
def __str__(self):
if self.description:
return self.description
return _('{transition} related {object}').format(
transition=self.transition_effect_class.transition.name,
object=self.relation
)
def __repr__(self):
return '<Related Transition Effect: {} on {}>'.format(self.transition, list(self.instances))
def to_html(self):
if self.conditions:
return _('{transition} related {object} if {conditions}').format(
transition=self.transition_effect_class.transition.name,
object=str(self.relation),
conditions=" and ".join([c.__doc__ for c in self.conditions])
)
return str(self)
def RelatedTransitionEffect(
_relation, transition, field='states', conditions=None, description=None, display=True
):
_transition = transition
_conditions = conditions or []
_transition_effect_class = TransitionEffect(transition, field, display=display)
_description = description
class _RelatedTransitionEffect(BaseRelatedTransitionEffect):
transition_effect_class = _transition_effect_class
relation = _relation
transition = _transition
conditions = _conditions
description = _description
field = 'states'
return _RelatedTransitionEffect
| none | 1 | 2.100582 | 2 | |
_utils/_2021_10_09_update_timeline.py | jeromecyang/ltsoj | 0 | 6623778 | <reponame>jeromecyang/ltsoj
from lib import *
# One-off maintenance script: normalise timeline timestamps in the first 41
# episode files (minus three already-correct ones) from short forms like
# "M:SS" / "MM:SS" to the full "(00:0M:SS)" / "(00:MM:SS)" form.
# NOTE(review): helpers (get_all_episodes, read_content, get_section,
# write_content) and apparently ``re`` come from ``lib`` via the star
# import - confirm ``re`` is actually exported there.
episodes = get_all_episodes()
for episode in [e for e in episodes[:41] if not e in ['ep017.md', 'ep026.md', 'ep035.md']]:
    content = read_content(episode)
    # Section 1 is the timeline; bullet lines start with "* ".
    timeline = get_section(content, 1)
    lines = re.findall(r'\*.*?\n', timeline, flags=re.S)
    output = '\n'
    for line in lines:
        # First token of the bullet is the timestamp, possibly wrapped in
        # parentheses which are stripped before measuring its length.
        parts = line.replace('* ', '').split(' ', 1)
        time = parts[0].replace('(','').replace(')','')
        # len 4 == "M:SS", len 5 == "MM:SS"; longer forms are left alone.
        # NOTE(review): str.replace substitutes the first occurrence of the
        # timestamp anywhere in the line - confirm it cannot match later text.
        if len(time) == 4:
            line = line.replace(time, '(00:0' + time + ')')
        if len(time) == 5:
            line = line.replace(time, '(00:' + time + ')')
        output = output + line
write_content(episode, content.replace(timeline, output)) | from lib import *
episodes = get_all_episodes()
for episode in [e for e in episodes[:41] if not e in ['ep017.md', 'ep026.md', 'ep035.md']]:
content = read_content(episode)
timeline = get_section(content, 1)
lines = re.findall(r'\*.*?\n', timeline, flags=re.S)
output = '\n'
for line in lines:
parts = line.replace('* ', '').split(' ', 1)
time = parts[0].replace('(','').replace(')','')
if len(time) == 4:
line = line.replace(time, '(00:0' + time + ')')
if len(time) == 5:
line = line.replace(time, '(00:' + time + ')')
output = output + line
write_content(episode, content.replace(timeline, output)) | none | 1 | 2.632948 | 3 | |
onmt/decoders/tree_decoder.py | longhuei/tree2seq-terminology-translation | 2 | 6623779 | <gh_stars>1-10
"""tree_decoder.py - Sequential or Tree-generator decoder models
Written by OpenNMT (https://github.com/OpenNMT/OpenNMT-py)
Rewritten in 2018 by <NAME> <<EMAIL>>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
from __future__ import division
import torch
import torch.nn as nn
from onmt.decoders.decoder import InputFeedRNNDecoder, RNNDecoderState
from onmt.utils.rnn_factory import rnn_factory
from onmt.utils.misc import aeq
from onmt.modules.tree_lstm import BinaryTreeLSTM
class Tree2SeqDecoder(InputFeedRNNDecoder):
    """
    Standard fully batched RNN decoder without attention.
    See :obj:`RNNDecoderBase` for options.
    Based around the approach from
    "Neural Machine Translation By Jointly Learning To Align and Translate"
    :cite:`Bahdanau2015`
    """
    def __init__(self,
                 rnn_type,
                 bidirectional_encoder,
                 num_layers,
                 hidden_size,
                 attn_type="general",
                 attn_func="softmax",
                 coverage_attn=False,
                 context_gate=None,
                 copy_attn=False,
                 dropout=0.0,
                 embeddings=None,
                 reuse_copy_attn=False,
                 tree_combine_hidden=False):
        super(Tree2SeqDecoder, self).__init__(
            rnn_type, bidirectional_encoder, num_layers, hidden_size,
            attn_type, attn_func, coverage_attn, context_gate, copy_attn,
            dropout, embeddings, reuse_copy_attn)
        # ``combine`` merges the sequential-encoder and tree-encoder final
        # states into a single (cell, hidden) pair.
        if tree_combine_hidden:
            # Learned combination via a binary TreeLSTM cell.
            self.combine = BinaryTreeLSTM(rnn_type, hidden_size, bias=False)
        else:
            # Cheap combination: sum the cell states; concatenate the hidden
            # states along the feature dim and project back to hidden_size
            # with a tanh non-linearity.
            self.linear = nn.Linear(2 * hidden_size, hidden_size, bias=False)
            self.combine = lambda c, h: (sum(c), torch.tanh(self.linear(torch.cat(h, dim=2))))
    def init_decoder_state(self, src, memory_bank, encoder_final):
        """ Init decoder state with last state of the encoder """
        # ``encoder_final`` is expected to be (rnn_final, tree_final), each a
        # 2-tuple of state tensors.  NOTE(review): element [0] of each pair is
        # combined as the cell state and [1] as the hidden state - confirm
        # this matches the encoder's state ordering.
        rnn_final, tree_final = encoder_final
        child_c = (rnn_final[0], tree_final[0])
        child_h = (rnn_final[1], tree_final[1])
        encoder_final = self.combine(child_c, child_h)
        def _fix_enc_hidden(hidden):
            # The encoder hidden is (layers*directions) x batch x dim.
            # We need to convert it to layers x batch x (directions*dim).
            if self.bidirectional_encoder:
                hidden = torch.cat(
                    [hidden[0:hidden.size(0):2], hidden[1:hidden.size(0):2]],
                    2)
            return hidden
        return RNNDecoderState(
            self.hidden_size,
            tuple([_fix_enc_hidden(enc_hid) for enc_hid in encoder_final]))
| """tree_decoder.py - Sequential or Tree-generator decoder models
Written by OpenNMT (https://github.com/OpenNMT/OpenNMT-py)
Rewritten in 2018 by <NAME> <<EMAIL>>
To the extent possible under law, the author(s) have dedicated all copyright
and related and neighboring rights to this software to the public domain
worldwide. This software is distributed without any warranty.
You should have received a copy of the CC0 Public Domain Dedication along with
this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>.
"""
from __future__ import division
import torch
import torch.nn as nn
from onmt.decoders.decoder import InputFeedRNNDecoder, RNNDecoderState
from onmt.utils.rnn_factory import rnn_factory
from onmt.utils.misc import aeq
from onmt.modules.tree_lstm import BinaryTreeLSTM
class Tree2SeqDecoder(InputFeedRNNDecoder):
"""
Standard fully batched RNN decoder without attention.
See :obj:`RNNDecoderBase` for options.
Based around the approach from
"Neural Machine Translation By Jointly Learning To Align and Translate"
:cite:`Bahdanau2015`
"""
def __init__(self,
rnn_type,
bidirectional_encoder,
num_layers,
hidden_size,
attn_type="general",
attn_func="softmax",
coverage_attn=False,
context_gate=None,
copy_attn=False,
dropout=0.0,
embeddings=None,
reuse_copy_attn=False,
tree_combine_hidden=False):
super(Tree2SeqDecoder, self).__init__(
rnn_type, bidirectional_encoder, num_layers, hidden_size,
attn_type, attn_func, coverage_attn, context_gate, copy_attn,
dropout, embeddings, reuse_copy_attn)
if tree_combine_hidden:
self.combine = BinaryTreeLSTM(rnn_type, hidden_size, bias=False)
else:
self.linear = nn.Linear(2 * hidden_size, hidden_size, bias=False)
self.combine = lambda c, h: (sum(c), torch.tanh(self.linear(torch.cat(h, dim=2))))
def init_decoder_state(self, src, memory_bank, encoder_final):
""" Init decoder state with last state of the encoder """
rnn_final, tree_final = encoder_final
child_c = (rnn_final[0], tree_final[0])
child_h = (rnn_final[1], tree_final[1])
encoder_final = self.combine(child_c, child_h)
def _fix_enc_hidden(hidden):
# The encoder hidden is (layers*directions) x batch x dim.
# We need to convert it to layers x batch x (directions*dim).
if self.bidirectional_encoder:
hidden = torch.cat(
[hidden[0:hidden.size(0):2], hidden[1:hidden.size(0):2]],
2)
return hidden
return RNNDecoderState(
self.hidden_size,
tuple([_fix_enc_hidden(enc_hid) for enc_hid in encoder_final])) | en | 0.84515 | tree_decoder.py - Sequential or Tree-generator decoder models Written by OpenNMT (https://github.com/OpenNMT/OpenNMT-py) Rewritten in 2018 by <NAME> <<EMAIL>> To the extent possible under law, the author(s) have dedicated all copyright and related and neighboring rights to this software to the public domain worldwide. This software is distributed without any warranty. You should have received a copy of the CC0 Public Domain Dedication along with this software. If not, see <http://creativecommons.org/publicdomain/zero/1.0/>. Standard fully batched RNN decoder without attention. See :obj:`RNNDecoderBase` for options. Based around the approach from "Neural Machine Translation By Jointly Learning To Align and Translate" :cite:`Bahdanau2015` Init decoder state with last state of the encoder # The encoder hidden is (layers*directions) x batch x dim. # We need to convert it to layers x batch x (directions*dim). | 2.022932 | 2 |
django_project/django/panoramas/libs.py | wowcube/webprojector | 0 | 6623780 | # -*- coding: utf-8 -*-
from io import BytesIO
import requests
from PIL import Image
from django.conf import settings
def get_panoram_frame(location, heading, pitch):
    """Fetch one 480x480 Street View tile for the given camera angles.

    :param location: address or "lat,lng" string understood by the API
    :param heading: horizontal camera angle in degrees
    :param pitch: vertical camera angle in degrees
    :return: a PIL ``Image`` with the downloaded frame
    """
    base_url = 'https://maps.googleapis.com/maps/api/streetview'
    # Let requests build the query string so the location value (which may
    # contain spaces, commas or non-ASCII text) is URL-encoded correctly;
    # the original string concatenation sent it raw.
    params = {
        'size': '480x480',
        'heading': heading,
        'pitch': pitch,
        'location': location,
        'fov': 90,
        'key': settings.GOOGLE_STREETVIEW_KEY,
    }
    response = requests.get(base_url, params=params)
    return Image.open(BytesIO(response.content))
def get_panoram_by_location(location):
    """Assemble a 1920x1440 cross-layout panorama of six Street View faces.

    Downloads one tile per cube face and pastes them into a cross layout
    (up / left-front-right-back / down).

    :param location: location string passed to the Street View API
    :return: a ``BytesIO`` stream holding the panorama as BMP, rewound to 0
    """
    # (heading, pitch) -> paste position; exactly one API request per face.
    # The original fetched the "up" face twice (once into an unused list),
    # wasting a network call.
    faces = [
        (0, 90, (480, 0)),      # up
        (-90, 0, (0, 480)),     # left
        (0, 0, (480, 480)),     # front
        (90, 0, (960, 480)),    # right
        (180, 0, (1440, 480)),  # back
        (0, -90, (480, 960)),   # down
    ]
    img = Image.new(mode="RGB", size=(1920, 1440))
    for heading, pitch, position in faces:
        img.paste(get_panoram_frame(location, heading, pitch), position)
    img_io = BytesIO()
    img.save(img_io, format="BMP")
    img_io.seek(0)
    return img_io
def save_panoram_to_file(location, file_path, thumb_file_path):
    """Download the panorama for *location* and write it plus a thumbnail.

    :param location: location string passed to the Street View API
    :param file_path: destination path of the full BMP panorama
    :param thumb_file_path: destination path of the JPEG thumbnail
    :return: True on success, False if any step failed
    """
    try:
        img_io = get_panoram_by_location(location)
        thumb_io = thumb_generate(img_io)
        with open(thumb_file_path, "wb") as f:
            f.write(thumb_io.getbuffer())
        with open(file_path, "wb") as f:
            f.write(img_io.getbuffer())
        return True
    except Exception:
        # Catch Exception rather than BaseException so KeyboardInterrupt and
        # SystemExit still propagate instead of being reported as a failure.
        return False
def crop_box_240(base_image, left, top):
    """Return a 240x240 crop of *base_image* with top-left corner (left, top)."""
    box = (left, top, left + 240, top + 240)
    return base_image.crop(box)
def convert_to_panoram(img_io):
    """Resample a source image stream into a 1920x1440 RGBA panorama grid.

    The source is resized to 2320x1740 and sampled as a 8x6 grid of 240x240
    tiles (taken every 290px horizontally with a 25px inset) that are packed
    edge-to-edge into the output.  Returns a rewound BytesIO holding a BMP.
    """
    base_image = Image.open(img_io)
    base_image = base_image.resize((2320, 1740))
    width, height = base_image.size
    # Output canvas: 4x3 tiles of 480px (1920 x 1440).
    w = 480 * 4
    h = 480 * 3
    # Vertical sampling stride in the source; '/' yields a float here.
    # NOTE(review): float coordinates are passed through to Image.crop -
    # confirm PIL's rounding matches the intended tile boundaries.
    rect_original = width/4
    display_rect_original = rect_original/2
    # Inset from each sampled cell's top-left corner.
    croppx = 25
    img = Image.new('RGBA', (w, h), (0,0,0,0))
    print(width, height)
    # 6 rows x 8 columns of 240px tiles.
    j = 0
    while j < 6:
        top = display_rect_original * j
        top_past = 240 * j
        i = 0
        while i < 8:
            img.paste(crop_box_240(base_image, croppx+290*i, top+croppx), (240*i,top_past))
            i += 1
        j += 1
    # left_side = 350
    # watermark = Image.open('panorams/space/watermark.png')
    # img.paste(watermark, (left_side,460+480), mask=watermark)
    # img.paste(watermark, (left_side+480,460+480), mask=watermark)
    # img.paste(watermark, (left_side+480*2,460+480), mask=watermark)
    # img.paste(watermark, (left_side+480*3,460+480), mask=watermark)
    # img.paste(watermark, (left_side+480,460), mask=watermark)
    # img.paste(watermark, (left_side+480,460+480*2), mask=watermark)
    # img.show()
    img_io = BytesIO()
    img.save(img_io, format="BMP")
    img_io.seek(0)
    return img_io
def thumb_generate(img_io):
    """Build a 640x160 JPEG thumbnail strip from a panorama image stream.

    The source is scaled to 640x480, the central horizontal band
    (rows 160-320) is cut out, converted to RGB, and returned as a
    rewound BytesIO holding JPEG data.
    """
    thumb = Image.open(img_io)
    thumb = thumb.resize((640, 480))
    thumb = thumb.crop((0, 160, 640, 320))
    thumb = thumb.convert('RGB')
    out = BytesIO()
    thumb.save(out, format="JPEG")
    out.seek(0)
    return out
def get_thumb(panorama_id=0, seria_id=0):
    """Read a previously generated thumbnail JPEG from disk as raw bytes."""
    thumb_path = '{}{}/{}_thumb.jpg'.format(
        settings.PANORAMAS_PATH, seria_id, panorama_id)
    print(thumb_path)
    with open(thumb_path, 'rb') as thumb_file:
        return thumb_file.read()
| # -*- coding: utf-8 -*-
from io import BytesIO
import requests
from PIL import Image
from django.conf import settings
def get_panoram_frame(location, heading, pitch):
base_url = 'https://maps.googleapis.com/maps/api/streetview?size=480x480'
key = settings.GOOGLE_STREETVIEW_KEY
fov = 90
im_url = base_url + '&heading=' + str(heading) + '&pitch=' + str(pitch) + '&location=' + location + '&fov=' + str(
fov) + '&key=' + key
response = requests.get(im_url)
return Image.open(BytesIO(response.content))
def get_panoram_by_location(location):
heading = '0' # горизонтальный угол
pitch = '0' # вертикальный угол
img = Image.new(mode = "RGB", size = (1920, 1440))
imgs = []
imgs.append(get_panoram_frame(location, 0, 90))
img.paste(get_panoram_frame(location, 0, 90), (480, 0))
img.paste(get_panoram_frame(location, -90, 0), (0, 480))
img.paste(get_panoram_frame(location, 0, 0), (480, 480))
img.paste(get_panoram_frame(location, 90, 0), (960, 480))
img.paste(get_panoram_frame(location, 180, 0), (1440, 480))
img.paste(get_panoram_frame(location, 0, -90), (480, 960))
img_io = BytesIO()
img.save(img_io, format="BMP")
img_io.seek(0)
return img_io
def save_panoram_to_file(location, file_path, thumb_file_path):
try:
img_io = get_panoram_by_location(location)
thumb_io = thumb_generate(img_io)
with open(thumb_file_path, "wb") as f:
f.write(thumb_io.getbuffer())
# print("img_io")
with open(file_path, "wb") as f:
f.write(img_io.getbuffer())
return True
except BaseException:
return False
def crop_box_240(base_image, left, top):
return base_image.crop( (left, top, left+240, top+240) )
def convert_to_panoram(img_io):
base_image = Image.open(img_io)
base_image = base_image.resize((2320, 1740))
width, height = base_image.size
w = 480 * 4
h = 480 * 3
rect_original = width/4
display_rect_original = rect_original/2
croppx = 25
img = Image.new('RGBA', (w, h), (0,0,0,0))
print(width, height)
j = 0
while j < 6:
top = display_rect_original * j
top_past = 240 * j
i = 0
while i < 8:
img.paste(crop_box_240(base_image, croppx+290*i, top+croppx), (240*i,top_past))
i += 1
j += 1
# left_side = 350
# watermark = Image.open('panorams/space/watermark.png')
# img.paste(watermark, (left_side,460+480), mask=watermark)
# img.paste(watermark, (left_side+480,460+480), mask=watermark)
# img.paste(watermark, (left_side+480*2,460+480), mask=watermark)
# img.paste(watermark, (left_side+480*3,460+480), mask=watermark)
# img.paste(watermark, (left_side+480,460), mask=watermark)
# img.paste(watermark, (left_side+480,460+480*2), mask=watermark)
# img.show()
img_io = BytesIO()
img.save(img_io, format="BMP")
img_io.seek(0)
return img_io
def thumb_generate(img_io):
thumb_panorama = Image.open(img_io)
thumb_panorama = thumb_panorama.resize((640, 480))
thumb_panorama = thumb_panorama.crop((0, 160, 640, 320))
thumb_panorama = thumb_panorama.convert('RGB')
img_io = BytesIO()
thumb_panorama.save(img_io, format="JPEG")
img_io.seek(0)
return img_io
def get_thumb(panorama_id=0, seria_id=0):
pano_path = settings.PANORAMAS_PATH + str(seria_id) + '/' + str(panorama_id) + "_thumb.jpg"
print(pano_path)
with open(pano_path, 'rb') as f:
img_io = f.read()
return img_io
| en | 0.289721 | # -*- coding: utf-8 -*- # горизонтальный угол # вертикальный угол # print("img_io") # left_side = 350 # watermark = Image.open('panorams/space/watermark.png') # img.paste(watermark, (left_side,460+480), mask=watermark) # img.paste(watermark, (left_side+480,460+480), mask=watermark) # img.paste(watermark, (left_side+480*2,460+480), mask=watermark) # img.paste(watermark, (left_side+480*3,460+480), mask=watermark) # img.paste(watermark, (left_side+480,460), mask=watermark) # img.paste(watermark, (left_side+480,460+480*2), mask=watermark) # img.show() | 2.409548 | 2 |
momo_api/cron.py | Foris-master/momo_server | 0 | 6623781 | <gh_stars>0
import difflib
from time import time
from django_cron import CronJobBase, Schedule
from momo_api.lib import proceed_transactions
class ProceedTransactionJob(CronJobBase):
    """Cron job that processes pending transactions and prints the run time."""
    RUN_EVERY_MINS = 1  # run every minute
    schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
    code = 'momo_server.fetch_stations'  # a unique code identifying this job
    def do(self):
        # Time the batch so slow runs are visible in the cron output.
        start = time()
        proceed_transactions()
        finish = time()
        t = (finish - start)
        print('time ' + str(t))
| import difflib
from time import time
from django_cron import CronJobBase, Schedule
from momo_api.lib import proceed_transactions
class ProceedTransactionJob(CronJobBase):
RUN_EVERY_MINS = 1 # every 5 minutes
schedule = Schedule(run_every_mins=RUN_EVERY_MINS)
code = 'momo_server.fetch_stations' # a unique code
def do(self):
start = time()
proceed_transactions()
finish = time()
t = (finish - start)
print('time ' + str(t)) | en | 0.701229 | # every 5 minutes # a unique code | 2.318844 | 2 |
ariane/apps/users/views.py | DebVortex/ariane-old- | 0 | 6623782 | from braces.views import LoginRequiredMixin
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView
from . import forms, models
class UpdateUserSettingsView(LoginRequiredMixin, FormView):
    """View for the user to update his settings."""
    template_name = 'users/user_settings_update.html'
    form_class = forms.UserSettingForm
    model = models.UserSetting

    def get_object(self):
        """Return the UserSetting of the current user.

        If the user has no related UserSetting, it gets created.

        Returns:
            UserSetting: the UserSetting object of the current user
        """
        # Bind the created-flag to an explicit name instead of ``_``, which
        # is the translation alias in this module.
        self.object, created = self.model.objects.get_or_create(user=self.request.user)
        return self.object

    def get_form_kwargs(self):
        """Return the keyword arguments for the form.

        Returns:
            Dict: the form keyword arguments, updated with the UserSetting of
                the current user
        """
        kwargs = super().get_form_kwargs()
        kwargs.update({'instance': self.get_object()})
        return kwargs

    def form_valid(self, form):
        """Save new data and re-render the settings page."""
        self.object.update(language=form.cleaned_data['language'])
        messages.add_message(self.request, messages.SUCCESS, _("Settings saved."))
        # ``self.get`` is a bound method: pass only the request.  The
        # original called ``self.get(self, self.request)``, which handed the
        # view instance to the ``request`` parameter.
        return self.get(self.request)
| from braces.views import LoginRequiredMixin
from django.contrib import messages
from django.utils.translation import ugettext_lazy as _
from django.views.generic import FormView
from . import forms, models
class UpdateUserSettingsView(LoginRequiredMixin, FormView):
"""View for the user to update his settings."""
template_name = 'users/user_settings_update.html'
form_class = forms.UserSettingForm
model = models.UserSetting
def get_object(self):
"""Return the UserSetting of the current user.
If the user has no related UserSetting, it gets created.
Returns:
UserSetting: the UserSetting object of the current user
"""
self.object, _ = self.model.objects.get_or_create(user=self.request.user)
return self.object
def get_form_kwargs(self):
"""Return the keyword arguments for the form.
Returns:
Dict: the form keyword arguments, updated with the UserSetting of
the current user
"""
kwargs = super().get_form_kwargs()
kwargs.update({'instance': self.get_object()})
return kwargs
def form_valid(self, form):
"""Save new data and redirect back to view."""
self.object.update(language=form.cleaned_data['language'])
messages.add_message(self.request, messages.SUCCESS, _("Settings saved."))
return self.get(self, self.request)
| en | 0.85757 | View for the user to update his settings. Return the UserSetting of the current user. If the user has no related UserSetting, it gets created. Returns: UserSetting: the UserSetting object of the current user Return the keyword arguments for the form. Returns: Dict: the form keyword arguments, updated with the UserSetting of the current user Save new data and redirect back to view. | 2.264576 | 2 |
core/migrations/0008_auto_20180704_2340.py | mertyildiran/echo | 5 | 6623783 | <reponame>mertyildiran/echo
# Generated by Django 2.0.6 on 2018-07-04 23:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20180704_2241'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='location',
new_name='address',
),
]
| # Generated by Django 2.0.6 on 2018-07-04 23:40
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('core', '0007_auto_20180704_2241'),
]
operations = [
migrations.RenameField(
model_name='profile',
old_name='location',
new_name='address',
),
] | en | 0.697693 | # Generated by Django 2.0.6 on 2018-07-04 23:40 | 1.719107 | 2 |
API-extract/keras/extract_members.py | sqlab-sustech/PyCompat | 4 | 6623784 | <filename>API-extract/keras/extract_members.py<gh_stars>1-10
#! /usr/bin/python3
from library_traverser import traverse_module, MemberVisitor, MemberInfoExtractor
import re
import inspect
import pymongo
import importlib
import pkgutil
import keras
sub_modules = [m for m in pkgutil.iter_modules(keras.__path__) if m[2]]
# From tensorflow source
do_not_descend_map = {
}
prefix_black_list = {
".".join([prefix, name])
for prefix in do_not_descend_map
for name in do_not_descend_map[prefix]
}
class KerasMemberInfoExtractor(MemberInfoExtractor):
_args_doc_regex = re.compile(
r"(# Arguments\n)((\ {4}\w+:\s[\S\ ]+(\n\ {4}[\S\ ]+)*\n*)+)")
_arg_item_doc_regex = re.compile(
r"\ {4}(\w+):\s([\S\ ]+(\n\ {8}[\S\ ]+)*)")
_returns_doc_regex = re.compile(r"(Returns:\n)((\ {2}[\S\ ]+\n)+)")
_raises_doc_regex = re.compile(r"(# Raises\n)((\ {4}[\S\ ]+)(\n\ {8}[\S\ ]+)+)")
def extract_args_doc(self, doc):
arg_doc_match = next(self._args_doc_regex.finditer(doc or ""), None)
if not arg_doc_match:
return {}
arg_doc = arg_doc_match.group(2)
return {
match.group(1): match.group(2)
for match in self._arg_item_doc_regex.finditer(arg_doc)
}
def extract_returns_doc(self, doc):
match = next(self._returns_doc_regex.finditer(doc or ""), None)
return match.group(2) if match else None
def extract_raise_doc(self, doc):
match = next(self._raises_doc_regex.finditer(doc or ""), None)
return match.group(2) if match else None
def is_deprecated(self, name, member):
doc = inspect.getdoc(member)
return False if not doc else "DEPRECATED" in doc
mongn_client = pymongo.MongoClient(host="172.17.0.2")
db = mongn_client.get_database("DeepLearningAPIEvoluation")
collection = db.get_collection("Keras_APIs_%s" % keras.__version__)
collection.drop()
def insert_db(data):
collection.insert(data,check_keys=False)
extractor = KerasMemberInfoExtractor()
visitor = MemberVisitor(insert_db, inspect, extractor)
traverse_module(("keras", keras), visitor, "keras", prefix_black_list)
mongn_client.close()
| <filename>API-extract/keras/extract_members.py<gh_stars>1-10
#! /usr/bin/python3
from library_traverser import traverse_module, MemberVisitor, MemberInfoExtractor
import re
import inspect
import pymongo
import importlib
import pkgutil
import keras
sub_modules = [m for m in pkgutil.iter_modules(keras.__path__) if m[2]]
# From tensorflow source
do_not_descend_map = {
}
prefix_black_list = {
".".join([prefix, name])
for prefix in do_not_descend_map
for name in do_not_descend_map[prefix]
}
class KerasMemberInfoExtractor(MemberInfoExtractor):
_args_doc_regex = re.compile(
r"(# Arguments\n)((\ {4}\w+:\s[\S\ ]+(\n\ {4}[\S\ ]+)*\n*)+)")
_arg_item_doc_regex = re.compile(
r"\ {4}(\w+):\s([\S\ ]+(\n\ {8}[\S\ ]+)*)")
_returns_doc_regex = re.compile(r"(Returns:\n)((\ {2}[\S\ ]+\n)+)")
_raises_doc_regex = re.compile(r"(# Raises\n)((\ {4}[\S\ ]+)(\n\ {8}[\S\ ]+)+)")
def extract_args_doc(self, doc):
arg_doc_match = next(self._args_doc_regex.finditer(doc or ""), None)
if not arg_doc_match:
return {}
arg_doc = arg_doc_match.group(2)
return {
match.group(1): match.group(2)
for match in self._arg_item_doc_regex.finditer(arg_doc)
}
def extract_returns_doc(self, doc):
match = next(self._returns_doc_regex.finditer(doc or ""), None)
return match.group(2) if match else None
def extract_raise_doc(self, doc):
match = next(self._raises_doc_regex.finditer(doc or ""), None)
return match.group(2) if match else None
def is_deprecated(self, name, member):
doc = inspect.getdoc(member)
return False if not doc else "DEPRECATED" in doc
mongn_client = pymongo.MongoClient(host="172.17.0.2")
db = mongn_client.get_database("DeepLearningAPIEvoluation")
collection = db.get_collection("Keras_APIs_%s" % keras.__version__)
collection.drop()
def insert_db(data):
collection.insert(data,check_keys=False)
extractor = KerasMemberInfoExtractor()
visitor = MemberVisitor(insert_db, inspect, extractor)
traverse_module(("keras", keras), visitor, "keras", prefix_black_list)
mongn_client.close()
| ru | 0.256886 | #! /usr/bin/python3 # From tensorflow source # Arguments\n)((\ {4}\w+:\s[\S\ ]+(\n\ {4}[\S\ ]+)*\n*)+)") # Raises\n)((\ {4}[\S\ ]+)(\n\ {8}[\S\ ]+)+)") | 2.27749 | 2 |
datastorm/limits/batching.py | JavierLuna/datastorm | 13 | 6623785 | MAX_BATCH_SIZE = 500
| MAX_BATCH_SIZE = 500
| none | 1 | 1.067802 | 1 | |
testsrc/collectortests.py | paulharter/biofeed | 0 | 6623786 | <filename>testsrc/collectortests.py
import unittest
from biofeedCollector import DataCollector
TEST_DATA = ({"one":154.7,
"two":66.0,
"three":44.1,
"four":5.6},
{"one":158.4,
"two":66.2,
"three":55.3,
"four":6.4},
{"one":169.2,
"two":66.5,
"three":23.6,
"four":5.3},
{"one":181.2,
"two":66.9,
"three":77.8,
"four":5.2},
{"one":199.0,
"two":67.1,
"three":98.3,
"four":5.8},
{"one":218.5,
"two":67.4,
"three":45.3,
"four":5.9})
HISTORY_SIZE = 4
class BasicSetup(unittest.TestCase):
def setUp(self):
self.collector = DataCollector()
self.ch1 = self.collector.addChannel("one", 4, 1)
self.ch2 = self.collector.addChannel("two", 4, 1)
self.ch3 = self.collector.addChannel("three", 4, 1)
self.ch4 = self.collector.addChannel("four", 4, 1)
def tearDown(self):
pass
class Case01_PuttingData(BasicSetup):
def test01_canPutDataIn(self):
collector = self.collector
collector.put(TEST_DATA[0])
self.assertEquals(len(collector.channels), 4)
def test02_canGetaluesOut(self):
collector = self.collector
collector.put(TEST_DATA[0])
self.assertEquals(self.ch1.value, 154.7)
self.assertEquals(self.ch3.value, 44.1)
self.assertEquals(self.ch1.value, 154.7)#repeats if no new value
collector.put(TEST_DATA[1])
self.assertEquals(self.ch1.value, 158.4)
self.assertEquals(self.ch2.value, (66.0 + 66.2)/2)#average if not got for two or more
self.assertEquals(self.ch4.value, (5.6 + 6.4)/2)
collector.put(TEST_DATA[2])
self.assertEquals(self.ch2.value, 66.5)#reset by last get
def test03_canGetHistory(self):
collector = self.collector
for i in range(4):
collector.put(TEST_DATA[i])
history = self.ch1.history
self.assertEquals(history[0], [154.7, 158.4, 169.2, 181.2])
for j in range(2):
collector.put(TEST_DATA[j + 4])
history = self.ch1.history
self.assertEquals(history[0], [169.2, 181.2, 199.0, 218.5])
class Case02_Combining(unittest.TestCase):
def setUp(self):
print "***************"
self.collector = DataCollector()
self.ch1 = self.collector.addChannel("one", 2, 2)
self.ch2 = self.collector.addChannel("two", 2, 3)
def tearDown(self):
pass
def test01_WhatAboutCombining(self):
collector = self.collector
for i in range(6):
collector.put(TEST_DATA[i])
self.assertEquals(self.ch1.history[0], [175.2, 208.75])
self.assertEquals(self.ch2.history[0], [198.7/3, 201.4/3])
if __name__ == '__main__':
unittest.main()
| <filename>testsrc/collectortests.py
import unittest
from biofeedCollector import DataCollector
TEST_DATA = ({"one":154.7,
"two":66.0,
"three":44.1,
"four":5.6},
{"one":158.4,
"two":66.2,
"three":55.3,
"four":6.4},
{"one":169.2,
"two":66.5,
"three":23.6,
"four":5.3},
{"one":181.2,
"two":66.9,
"three":77.8,
"four":5.2},
{"one":199.0,
"two":67.1,
"three":98.3,
"four":5.8},
{"one":218.5,
"two":67.4,
"three":45.3,
"four":5.9})
HISTORY_SIZE = 4
class BasicSetup(unittest.TestCase):
def setUp(self):
self.collector = DataCollector()
self.ch1 = self.collector.addChannel("one", 4, 1)
self.ch2 = self.collector.addChannel("two", 4, 1)
self.ch3 = self.collector.addChannel("three", 4, 1)
self.ch4 = self.collector.addChannel("four", 4, 1)
def tearDown(self):
pass
class Case01_PuttingData(BasicSetup):
def test01_canPutDataIn(self):
collector = self.collector
collector.put(TEST_DATA[0])
self.assertEquals(len(collector.channels), 4)
def test02_canGetaluesOut(self):
collector = self.collector
collector.put(TEST_DATA[0])
self.assertEquals(self.ch1.value, 154.7)
self.assertEquals(self.ch3.value, 44.1)
self.assertEquals(self.ch1.value, 154.7)#repeats if no new value
collector.put(TEST_DATA[1])
self.assertEquals(self.ch1.value, 158.4)
self.assertEquals(self.ch2.value, (66.0 + 66.2)/2)#average if not got for two or more
self.assertEquals(self.ch4.value, (5.6 + 6.4)/2)
collector.put(TEST_DATA[2])
self.assertEquals(self.ch2.value, 66.5)#reset by last get
def test03_canGetHistory(self):
collector = self.collector
for i in range(4):
collector.put(TEST_DATA[i])
history = self.ch1.history
self.assertEquals(history[0], [154.7, 158.4, 169.2, 181.2])
for j in range(2):
collector.put(TEST_DATA[j + 4])
history = self.ch1.history
self.assertEquals(history[0], [169.2, 181.2, 199.0, 218.5])
class Case02_Combining(unittest.TestCase):
def setUp(self):
print "***************"
self.collector = DataCollector()
self.ch1 = self.collector.addChannel("one", 2, 2)
self.ch2 = self.collector.addChannel("two", 2, 3)
def tearDown(self):
pass
def test01_WhatAboutCombining(self):
collector = self.collector
for i in range(6):
collector.put(TEST_DATA[i])
self.assertEquals(self.ch1.history[0], [175.2, 208.75])
self.assertEquals(self.ch2.history[0], [198.7/3, 201.4/3])
if __name__ == '__main__':
unittest.main()
| en | 0.759572 | #repeats if no new value #average if not got for two or more #reset by last get | 2.662333 | 3 |
accelerometer/src/lsm_iic.py | JGoard/teensy-rs485-arm-control | 3 | 6623787 | <reponame>JGoard/teensy-rs485-arm-control<filename>accelerometer/src/lsm_iic.py
#!/usr/bin/env python3
import board
import busio
import rospy
from adafruit_lsm6ds.lsm6dsox import LSM6DSOX
from sensor_msgs.msg import Imu
def main():
rospy.init_node('accelerometer', anonymous=False)
pub = rospy.Publisher("imu", Imu, queue_size=10)
print(board.SCL, board.SDA)
i2c = busio.I2C(board.SCL, board.SDA)
sensor = LSM6DSOX(i2c)
rospy.loginfo('ISM330DHCX 6DOF Accelerometer Publishing to IMU')
imu_msg = Imu()
imu_msg.linear_acceleration_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
imu_msg.angular_velocity_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
while not rospy.is_shutdown():
x, y, z = sensor.acceleration
u,v,w = sensor.gyro
imu_msg.angular_velocity.x = u
imu_msg.angular_velocity.y = v
imu_msg.angular_velocity.z = w
imu_msg.linear_acceleration.x = x
imu_msg.linear_acceleration.y = y
imu_msg.linear_acceleration.z = z
pub.publish(imu_msg)
rospy.sleep(1)
rospy.loginfo('ISM330DHCX Accelerometer Offline')
if __name__ == '__main__':
main() | #!/usr/bin/env python3
import board
import busio
import rospy
from adafruit_lsm6ds.lsm6dsox import LSM6DSOX
from sensor_msgs.msg import Imu
def main():
rospy.init_node('accelerometer', anonymous=False)
pub = rospy.Publisher("imu", Imu, queue_size=10)
print(board.SCL, board.SDA)
i2c = busio.I2C(board.SCL, board.SDA)
sensor = LSM6DSOX(i2c)
rospy.loginfo('ISM330DHCX 6DOF Accelerometer Publishing to IMU')
imu_msg = Imu()
imu_msg.linear_acceleration_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
imu_msg.angular_velocity_covariance = [
0, 0, 0,
0, 0, 0,
0, 0, 0
]
while not rospy.is_shutdown():
x, y, z = sensor.acceleration
u,v,w = sensor.gyro
imu_msg.angular_velocity.x = u
imu_msg.angular_velocity.y = v
imu_msg.angular_velocity.z = w
imu_msg.linear_acceleration.x = x
imu_msg.linear_acceleration.y = y
imu_msg.linear_acceleration.z = z
pub.publish(imu_msg)
rospy.sleep(1)
rospy.loginfo('ISM330DHCX Accelerometer Offline')
if __name__ == '__main__':
main() | fr | 0.221828 | #!/usr/bin/env python3 | 2.498763 | 2 |
AdnReport/Adn_Report.py | METIS-GEO/plugins | 0 | 6623788 | # -*- coding: utf-8 -*-
"""
/***************************************************************************
AdnReport
A QGIS plugin
Prégénérer les fichiers et dossier pour la génération de rapport pour ADN
-------------------
begin : 2018-01-08
git sha : $Format:%H$
copyright : (C) 2018 by gbruel/metis
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from PyQt4.QtGui import QAction, QIcon
from PyQt4 import QtGui, QtCore
import sys
# Initialize Qt resources from file resources.py
import resources
# Import the code for the dialog
from Adn_Report_dialog import AdnReportDialog
from os.path import expanduser
import os.path, csv, time, shutil # specific
class AdnReport:
"""QGIS Plugin Implementation."""
export_result = []
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'AdnReport_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Rapport ADN')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'AdnReport')
self.toolbar.setObjectName(u'AdnReport')
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('AdnReport', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
# Create the dialog (after translation) and keep reference
self.dlg = AdnReportDialog()
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/AdnReport/icon.png'
self.add_action(
icon_path,
text=self.tr(u'Rapports ADN'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Rapport ADN'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def isInList(self, val, li):
"""Return index of value find in list or -1 if value is not exist in list"""
res = False
if val and li:
try :
res = li.index(val)
except ValueError:
res = False
return res
def rmDblToCombo(self,array,cb):
cb.clear()
cb.addItem("Select all opportunity")
"""Remove dupplicate value from given array and import unic values to given combo"""
cb.setEnabled(True);
t = list(set(array))
clean = []
for elem in t:
typeVar = type(elem).__name__
if typeVar == "unicode" or typeVar == "str":
if cb.findText(elem) < 0:
clean.append(elem)
cb.addItem(elem)
return clean
def searchFile(self):
"""Open window to search template file"""
"""Update text box with path value"""
def test(string, expression):
test = False
if string in expression:
test = True
return test
validFormat = "xls"
file = QtGui.QFileDialog.getOpenFileName(None, 'Open file')
"""Valid file format"""
isValid = test(validFormat, file)
if not isValid or isValid == "" :
file = "Please, select valid file !"
"""Update text box with path value"""
return self.dlg.pathTpl.setText(file)
def searchFolder(self):
"""Method to get path in order to export file to path"""
folder = QtGui.QFileDialog.getExistingDirectory(None, 'Open folder', expanduser('~'))
"""Update text box with path value"""
self.dlg.pathFolder.setText(folder)
def getLayerFromCb(self, cbString):
res = False
layers = self.iface.legendInterface().layers();
for x in layers:
if x.name() == cbString:
res = x
break
return res
def layersToCombo(self, combo):
"""Create array to use map layers"""
layer = ""
layer_list= []
layers = self.iface.legendInterface().layers();
for layer in layers:
if layer.name() and layer.type() == 0:
layer_list.append(layer.name())
combo.addItems(layer_list)
def getLayerFields(self,layer):
fieldsName = []
"""parse layer to get opportunity values"""
fields = layer.dataProvider().fields()
for field in fields:
fieldsName.append(field.name())
return fieldsName
def fieldValues(self, layer, val):
# retourne les valeurs pour un champ donné dans une couche donnée
"""if user select layer in combo, return attributes as list """
res = False
if val != "":
cbList = []
fields = self.getLayerFields(layer) # list of fields
idx = self.isInList(val, fields) # control if field exist in layer
# Correction apply : if index is first, index = int(0). So, python indentify index as False.
if idx != False or idx > -1:
features = layer.getFeatures() # array that contain all attributes values without fields name
for el in features:
cbList.append(el.attributes()[idx])
res = cbList # return list of opportunity states values
return res
def oppFiltering(self, idFromGc, idFromSy, gcLayer, syLayer, cbOfState, cbO):
"""return opportunity according to state value or not"""
finalAttr = []
def getOppFromLayer (layer, cbId, cbSt, cbOp):
oppResult = []
layerRead = self.getLayerFromCb(layer.currentText())
idLayer = cbId.currentText()
state = cbSt.currentText()
defaultValue = cbSt.itemText(0)
if layerRead != False:
cbOp.clear()
self.export_result = {}
filterVal = []
cbOp.addItem("Select all opportunity")
# return list of id for gc layer
layerOpp = self.fieldValues(layerRead, idLayer)
# return all features
layerFeatures = layerRead.getFeatures()
# return all fields
layerFields = self.getLayerFields(layerRead)
# return position of given field in layer fields
posId = self.isInList(idLayer, layerFields) # to get id attributes # bug
posState = self.isInList("statut",layerFields) # si on a bien le champ statut donne alors la position du champ, sinon renvoi false
if posState != False or posState > -1:
filterVal = self.fieldValues(layerRead,"statut")
for feature in layerFeatures: # on regarde toutes les features de la couche
idAttr = feature.attributes()[posId] # on prend la valeur de l'id pour la feature
if state == defaultValue :
oppResult.append(idAttr)
else:
stateAttr = feature.attributes()[posState] # on prend le statut pour cette même feature
isFilter = self.isInList(state,filterVal) # on test si la valeur sélectionnée est dans la liste des statuts
if isFilter != False or isFilter > -1: # si c'est le cas, alors on filtre
if stateAttr == state: # on filtre donc sur le statut souhaité pour ne prendre que les features qui ont un statut identique au statut sélectionné
oppResult.append(idAttr) # on ajoutera la feature dans une liste
return oppResult
# return sum of opportunity for each combo whithout duplicate value
listGc = getOppFromLayer(gcLayer, idFromGc, cbOfState, cbO)
listSy = getOppFromLayer(syLayer, idFromSy, cbOfState, cbO)
finalAttr = listGc + listSy
return self.rmDblToCombo(finalAttr,cbO)
def cbStateEl(self, combo):
# get count of cb items and returns the text for the given index in the combobox
cbData = []
for i in range(combo.count()):
cbData.append(combo.itemText(i))
return cbData
def cbUpdate(self,cb,val):
"""Function to parse state combo list and remove state not listed in selected ids"""
attributes = []
cb.clear()
cb.addItem("Select all " + val)# display default message
layerGC = self.getLayerFromCb(self.dlg.comboGC.currentText())
layerSynthese = self.getLayerFromCb(self.dlg.comboSynthese.currentText())
if layerGC != False :
listValuesGc = self.fieldValues(layerGC,val)
if listValuesGc != False :
attributes = attributes + listValuesGc
if layerSynthese != False:
listValuesSynthese = self.fieldValues(layerSynthese,val)
if listValuesSynthese != False:
attributes = attributes + listValuesSynthese # list all opportunity from layers
if len(attributes)>0:
cb.setEnabled(True);
self.rmDblToCombo(attributes,cb)
else :
cb.setEnabled(False)
def createFile(self):
"""create folder to contain report by opportunity"""
listOpp = self.cbStateEl(self.dlg.cbOpp)
layers = [
self.getLayerFromCb(self.dlg.comboGC.currentText()),
self.getLayerFromCb(self.dlg.comboSynthese.currentText())
]
selectOpp = self.dlg.cbOpp.currentText() #get selected value in combo
defaultValue = self.dlg.cbOpp.itemText(0)
if(selectOpp) != defaultValue:
listOpp = [selectOpp]
# use this code if user select all
if len(listOpp)>1:
del(listOpp[0])
for opp in listOpp:
'''create folder'''
folder = self.dlg.pathFolder.text() + "/"+opp
if not os.path.exists(folder):
os.makedirs(folder)
'''copy template'''
template = self.dlg.pathTpl.text()
shutil.copy(template,folder) # copie du template
'''export to csv'''
for layer in layers: # traitement par couche
if layer != False:
docName = False
# create csv file
if "gc" in layer.name() or "GC" in layer.name() or "Gc" in layer.name():
docName = folder+"/gc.csv"
elif "synthese" in layer.name() or "Synthese" in layer.name() or "Synthèse" in layer.name() or "synthèse" in layer.name():
docName = folder+"/synthese.csv"
# control docname is not wrong
if docName != False:
output_file = open(docName,"w")
# get and add fields to csv
fields = layer.pendingFields()
fieldname = [field.name() for field in fields]
lineField = line = ",".join(fieldname) + "\n"
unicode_fields = lineField.encode("utf-8")
output_file.write(unicode_fields)
# filter features to add to csv
features = layer.getFeatures()
for f in features:
# get attribute
attr = [el for el in f.attributes()]
# parse all feature's values
for val in range(len(attr)):
item = attr[val]
if item == opp:
find = self.isInList(val, listOpp)
# if feature is search write in csv
if find != False or find > -1:
line = ",".join(unicode(f[x]) for x in fieldname) + "\n"
unicode_line = line.encode("utf-8")
output_file.write(unicode_line)
output_file.close()
def updateCbId(self,val,combo,st):
"""We begin by activate state combo and load this combo by states values"""
self.cbUpdate(st, "statut")
"""Search Id in given layer's fields name and load fields name in this combo"""
selectLayer = ""
fieldsName = []
idFind = ""
layers = self.iface.legendInterface().layers()
idx = 0
"""Get layer's name selected in combobox and return real layer object from Qgis canvas"""
selectLayer = self.getLayerFromCb(val)
"""From layer parse fields and return field name that contain "id" value """
if combo and val and (selectLayer != False) :
# update id combo
combo.clear()
combo.setEnabled(True)
fieldsName = self.getLayerFields(selectLayer) # get fields name
combo.addItems(fieldsName) # load values in combo id
"""Search first occurency that contain "id" value and define as default index"""
for name in fieldsName:
if ("id" in name) or ("Id" in name) or ("ID" in name) or ("iD" in name): # if field name contain "id" str we set this name index by default combo value
idx = fieldsName.index(name)
break
combo.setCurrentIndex(idx)
else:
"""Restore default combo state"""
combo.clear()
combo.addItem("Select id")
combo.setEnabled(False)
"""Init combo elements"""
def initCb (self, cb, cbId, cbSt):
#load layer list to combobox
self.layersToCombo(cb)
# event on clic
cb.currentIndexChanged.connect(lambda: self.updateCbId(cb.currentText(), cbId, cbSt))
def run(self):
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
""""To connect event to gui elements"""
cbGC = self.dlg.comboGC
cbSynthese = self.dlg.comboSynthese
cbGcId = self.dlg.idGC
cbSyntheseId = self.dlg.idSynthese
cbState = self.dlg.cbState
cbOpp = self.dlg.cbOpp
# init combo
self.initCb(cbGC, cbGcId,cbState)
self.initCb(cbSynthese, cbSyntheseId,cbState)
# buttons
self.dlg.buttonFile.clicked.connect(self.searchFile)
self.dlg.buttonFolder.clicked.connect(self.searchFolder)
'''here we need to load opportunity list wehen user select id field to get opp values'''
for el in [cbGcId, cbSyntheseId, cbState] :
el.currentIndexChanged.connect(lambda: self.oppFiltering(cbGcId, cbSyntheseId, cbGC, cbSynthese, cbState, cbOpp))
self.state = []
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
self.createFile()
# substitute with your code.
pass
| # -*- coding: utf-8 -*-
"""
/***************************************************************************
AdnReport
A QGIS plugin
Prégénérer les fichiers et dossier pour la génération de rapport pour ADN
-------------------
begin : 2018-01-08
git sha : $Format:%H$
copyright : (C) 2018 by gbruel/metis
email : <EMAIL>
***************************************************************************/
/***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************/
"""
from PyQt4.QtCore import QSettings, QTranslator, qVersion, QCoreApplication
from PyQt4.QtGui import QAction, QIcon
from PyQt4 import QtGui, QtCore
import sys
# Initialize Qt resources from file resources.py
import resources
# Import the code for the dialog
from Adn_Report_dialog import AdnReportDialog
from os.path import expanduser
import os.path, csv, time, shutil # specific
class AdnReport:
"""QGIS Plugin Implementation."""
export_result = []
def __init__(self, iface):
"""Constructor.
:param iface: An interface instance that will be passed to this class
which provides the hook by which you can manipulate the QGIS
application at run time.
:type iface: QgsInterface
"""
# Save reference to the QGIS interface
self.iface = iface
# initialize plugin directory
self.plugin_dir = os.path.dirname(__file__)
# initialize locale
locale = QSettings().value('locale/userLocale')[0:2]
locale_path = os.path.join(
self.plugin_dir,
'i18n',
'AdnReport_{}.qm'.format(locale))
if os.path.exists(locale_path):
self.translator = QTranslator()
self.translator.load(locale_path)
if qVersion() > '4.3.3':
QCoreApplication.installTranslator(self.translator)
# Declare instance attributes
self.actions = []
self.menu = self.tr(u'&Rapport ADN')
# TODO: We are going to let the user set this up in a future iteration
self.toolbar = self.iface.addToolBar(u'AdnReport')
self.toolbar.setObjectName(u'AdnReport')
# noinspection PyMethodMayBeStatic
def tr(self, message):
"""Get the translation for a string using Qt translation API.
We implement this ourselves since we do not inherit QObject.
:param message: String for translation.
:type message: str, QString
:returns: Translated version of message.
:rtype: QString
"""
# noinspection PyTypeChecker,PyArgumentList,PyCallByClass
return QCoreApplication.translate('AdnReport', message)
def add_action(
self,
icon_path,
text,
callback,
enabled_flag=True,
add_to_menu=True,
add_to_toolbar=True,
status_tip=None,
whats_this=None,
parent=None):
"""Add a toolbar icon to the toolbar.
:param icon_path: Path to the icon for this action. Can be a resource
path (e.g. ':/plugins/foo/bar.png') or a normal file system path.
:type icon_path: str
:param text: Text that should be shown in menu items for this action.
:type text: str
:param callback: Function to be called when the action is triggered.
:type callback: function
:param enabled_flag: A flag indicating if the action should be enabled
by default. Defaults to True.
:type enabled_flag: bool
:param add_to_menu: Flag indicating whether the action should also
be added to the menu. Defaults to True.
:type add_to_menu: bool
:param add_to_toolbar: Flag indicating whether the action should also
be added to the toolbar. Defaults to True.
:type add_to_toolbar: bool
:param status_tip: Optional text to show in a popup when mouse pointer
hovers over the action.
:type status_tip: str
:param parent: Parent widget for the new action. Defaults None.
:type parent: QWidget
:param whats_this: Optional text to show in the status bar when the
mouse pointer hovers over the action.
:returns: The action that was created. Note that the action is also
added to self.actions list.
:rtype: QAction
"""
# Create the dialog (after translation) and keep reference
self.dlg = AdnReportDialog()
icon = QIcon(icon_path)
action = QAction(icon, text, parent)
action.triggered.connect(callback)
action.setEnabled(enabled_flag)
if status_tip is not None:
action.setStatusTip(status_tip)
if whats_this is not None:
action.setWhatsThis(whats_this)
if add_to_toolbar:
self.toolbar.addAction(action)
if add_to_menu:
self.iface.addPluginToMenu(
self.menu,
action)
self.actions.append(action)
return action
def initGui(self):
"""Create the menu entries and toolbar icons inside the QGIS GUI."""
icon_path = ':/plugins/AdnReport/icon.png'
self.add_action(
icon_path,
text=self.tr(u'Rapports ADN'),
callback=self.run,
parent=self.iface.mainWindow())
def unload(self):
"""Removes the plugin menu item and icon from QGIS GUI."""
for action in self.actions:
self.iface.removePluginMenu(
self.tr(u'&Rapport ADN'),
action)
self.iface.removeToolBarIcon(action)
# remove the toolbar
del self.toolbar
def isInList(self, val, li):
"""Return index of value find in list or -1 if value is not exist in list"""
res = False
if val and li:
try :
res = li.index(val)
except ValueError:
res = False
return res
def rmDblToCombo(self,array,cb):
cb.clear()
cb.addItem("Select all opportunity")
"""Remove dupplicate value from given array and import unic values to given combo"""
cb.setEnabled(True);
t = list(set(array))
clean = []
for elem in t:
typeVar = type(elem).__name__
if typeVar == "unicode" or typeVar == "str":
if cb.findText(elem) < 0:
clean.append(elem)
cb.addItem(elem)
return clean
def searchFile(self):
"""Open window to search template file"""
"""Update text box with path value"""
def test(string, expression):
test = False
if string in expression:
test = True
return test
validFormat = "xls"
file = QtGui.QFileDialog.getOpenFileName(None, 'Open file')
"""Valid file format"""
isValid = test(validFormat, file)
if not isValid or isValid == "" :
file = "Please, select valid file !"
"""Update text box with path value"""
return self.dlg.pathTpl.setText(file)
def searchFolder(self):
"""Method to get path in order to export file to path"""
folder = QtGui.QFileDialog.getExistingDirectory(None, 'Open folder', expanduser('~'))
"""Update text box with path value"""
self.dlg.pathFolder.setText(folder)
def getLayerFromCb(self, cbString):
res = False
layers = self.iface.legendInterface().layers();
for x in layers:
if x.name() == cbString:
res = x
break
return res
def layersToCombo(self, combo):
"""Create array to use map layers"""
layer = ""
layer_list= []
layers = self.iface.legendInterface().layers();
for layer in layers:
if layer.name() and layer.type() == 0:
layer_list.append(layer.name())
combo.addItems(layer_list)
def getLayerFields(self,layer):
fieldsName = []
"""parse layer to get opportunity values"""
fields = layer.dataProvider().fields()
for field in fields:
fieldsName.append(field.name())
return fieldsName
def fieldValues(self, layer, val):
# retourne les valeurs pour un champ donné dans une couche donnée
"""if user select layer in combo, return attributes as list """
res = False
if val != "":
cbList = []
fields = self.getLayerFields(layer) # list of fields
idx = self.isInList(val, fields) # control if field exist in layer
# Correction apply : if index is first, index = int(0). So, python indentify index as False.
if idx != False or idx > -1:
features = layer.getFeatures() # array that contain all attributes values without fields name
for el in features:
cbList.append(el.attributes()[idx])
res = cbList # return list of opportunity states values
return res
def oppFiltering(self, idFromGc, idFromSy, gcLayer, syLayer, cbOfState, cbO):
"""return opportunity according to state value or not"""
finalAttr = []
def getOppFromLayer (layer, cbId, cbSt, cbOp):
oppResult = []
layerRead = self.getLayerFromCb(layer.currentText())
idLayer = cbId.currentText()
state = cbSt.currentText()
defaultValue = cbSt.itemText(0)
if layerRead != False:
cbOp.clear()
self.export_result = {}
filterVal = []
cbOp.addItem("Select all opportunity")
# return list of id for gc layer
layerOpp = self.fieldValues(layerRead, idLayer)
# return all features
layerFeatures = layerRead.getFeatures()
# return all fields
layerFields = self.getLayerFields(layerRead)
# return position of given field in layer fields
posId = self.isInList(idLayer, layerFields) # to get id attributes # bug
posState = self.isInList("statut",layerFields) # si on a bien le champ statut donne alors la position du champ, sinon renvoi false
if posState != False or posState > -1:
filterVal = self.fieldValues(layerRead,"statut")
for feature in layerFeatures: # on regarde toutes les features de la couche
idAttr = feature.attributes()[posId] # on prend la valeur de l'id pour la feature
if state == defaultValue :
oppResult.append(idAttr)
else:
stateAttr = feature.attributes()[posState] # on prend le statut pour cette même feature
isFilter = self.isInList(state,filterVal) # on test si la valeur sélectionnée est dans la liste des statuts
if isFilter != False or isFilter > -1: # si c'est le cas, alors on filtre
if stateAttr == state: # on filtre donc sur le statut souhaité pour ne prendre que les features qui ont un statut identique au statut sélectionné
oppResult.append(idAttr) # on ajoutera la feature dans une liste
return oppResult
# return sum of opportunity for each combo whithout duplicate value
listGc = getOppFromLayer(gcLayer, idFromGc, cbOfState, cbO)
listSy = getOppFromLayer(syLayer, idFromSy, cbOfState, cbO)
finalAttr = listGc + listSy
return self.rmDblToCombo(finalAttr,cbO)
def cbStateEl(self, combo):
# get count of cb items and returns the text for the given index in the combobox
cbData = []
for i in range(combo.count()):
cbData.append(combo.itemText(i))
return cbData
def cbUpdate(self,cb,val):
"""Function to parse state combo list and remove state not listed in selected ids"""
attributes = []
cb.clear()
cb.addItem("Select all " + val)# display default message
layerGC = self.getLayerFromCb(self.dlg.comboGC.currentText())
layerSynthese = self.getLayerFromCb(self.dlg.comboSynthese.currentText())
if layerGC != False :
listValuesGc = self.fieldValues(layerGC,val)
if listValuesGc != False :
attributes = attributes + listValuesGc
if layerSynthese != False:
listValuesSynthese = self.fieldValues(layerSynthese,val)
if listValuesSynthese != False:
attributes = attributes + listValuesSynthese # list all opportunity from layers
if len(attributes)>0:
cb.setEnabled(True);
self.rmDblToCombo(attributes,cb)
else :
cb.setEnabled(False)
def createFile(self):
"""create folder to contain report by opportunity"""
listOpp = self.cbStateEl(self.dlg.cbOpp)
layers = [
self.getLayerFromCb(self.dlg.comboGC.currentText()),
self.getLayerFromCb(self.dlg.comboSynthese.currentText())
]
selectOpp = self.dlg.cbOpp.currentText() #get selected value in combo
defaultValue = self.dlg.cbOpp.itemText(0)
if(selectOpp) != defaultValue:
listOpp = [selectOpp]
# use this code if user select all
if len(listOpp)>1:
del(listOpp[0])
for opp in listOpp:
'''create folder'''
folder = self.dlg.pathFolder.text() + "/"+opp
if not os.path.exists(folder):
os.makedirs(folder)
'''copy template'''
template = self.dlg.pathTpl.text()
shutil.copy(template,folder) # copie du template
'''export to csv'''
for layer in layers: # traitement par couche
if layer != False:
docName = False
# create csv file
if "gc" in layer.name() or "GC" in layer.name() or "Gc" in layer.name():
docName = folder+"/gc.csv"
elif "synthese" in layer.name() or "Synthese" in layer.name() or "Synthèse" in layer.name() or "synthèse" in layer.name():
docName = folder+"/synthese.csv"
# control docname is not wrong
if docName != False:
output_file = open(docName,"w")
# get and add fields to csv
fields = layer.pendingFields()
fieldname = [field.name() for field in fields]
lineField = line = ",".join(fieldname) + "\n"
unicode_fields = lineField.encode("utf-8")
output_file.write(unicode_fields)
# filter features to add to csv
features = layer.getFeatures()
for f in features:
# get attribute
attr = [el for el in f.attributes()]
# parse all feature's values
for val in range(len(attr)):
item = attr[val]
if item == opp:
find = self.isInList(val, listOpp)
# if feature is search write in csv
if find != False or find > -1:
line = ",".join(unicode(f[x]) for x in fieldname) + "\n"
unicode_line = line.encode("utf-8")
output_file.write(unicode_line)
output_file.close()
def updateCbId(self,val,combo,st):
"""We begin by activate state combo and load this combo by states values"""
self.cbUpdate(st, "statut")
"""Search Id in given layer's fields name and load fields name in this combo"""
selectLayer = ""
fieldsName = []
idFind = ""
layers = self.iface.legendInterface().layers()
idx = 0
"""Get layer's name selected in combobox and return real layer object from Qgis canvas"""
selectLayer = self.getLayerFromCb(val)
"""From layer parse fields and return field name that contain "id" value """
if combo and val and (selectLayer != False) :
# update id combo
combo.clear()
combo.setEnabled(True)
fieldsName = self.getLayerFields(selectLayer) # get fields name
combo.addItems(fieldsName) # load values in combo id
"""Search first occurency that contain "id" value and define as default index"""
for name in fieldsName:
if ("id" in name) or ("Id" in name) or ("ID" in name) or ("iD" in name): # if field name contain "id" str we set this name index by default combo value
idx = fieldsName.index(name)
break
combo.setCurrentIndex(idx)
else:
"""Restore default combo state"""
combo.clear()
combo.addItem("Select id")
combo.setEnabled(False)
"""Init combo elements"""
def initCb (self, cb, cbId, cbSt):
#load layer list to combobox
self.layersToCombo(cb)
# event on clic
cb.currentIndexChanged.connect(lambda: self.updateCbId(cb.currentText(), cbId, cbSt))
def run(self):
"""Run method that performs all the real work"""
# show the dialog
self.dlg.show()
""""To connect event to gui elements"""
cbGC = self.dlg.comboGC
cbSynthese = self.dlg.comboSynthese
cbGcId = self.dlg.idGC
cbSyntheseId = self.dlg.idSynthese
cbState = self.dlg.cbState
cbOpp = self.dlg.cbOpp
# init combo
self.initCb(cbGC, cbGcId,cbState)
self.initCb(cbSynthese, cbSyntheseId,cbState)
# buttons
self.dlg.buttonFile.clicked.connect(self.searchFile)
self.dlg.buttonFolder.clicked.connect(self.searchFolder)
'''here we need to load opportunity list wehen user select id field to get opp values'''
for el in [cbGcId, cbSyntheseId, cbState] :
el.currentIndexChanged.connect(lambda: self.oppFiltering(cbGcId, cbSyntheseId, cbGC, cbSynthese, cbState, cbOpp))
self.state = []
# Run the dialog event loop
result = self.dlg.exec_()
# See if OK was pressed
if result:
# Do something useful here - delete the line containing pass and
self.createFile()
# substitute with your code.
pass
| en | 0.514349 | # -*- coding: utf-8 -*- /*************************************************************************** AdnReport A QGIS plugin Prégénérer les fichiers et dossier pour la génération de rapport pour ADN ------------------- begin : 2018-01-08 git sha : $Format:%H$ copyright : (C) 2018 by gbruel/metis email : <EMAIL> ***************************************************************************/ /*************************************************************************** * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * ***************************************************************************/ # Initialize Qt resources from file resources.py # Import the code for the dialog # specific QGIS Plugin Implementation. Constructor. :param iface: An interface instance that will be passed to this class which provides the hook by which you can manipulate the QGIS application at run time. :type iface: QgsInterface # Save reference to the QGIS interface # initialize plugin directory # initialize locale # Declare instance attributes # TODO: We are going to let the user set this up in a future iteration # noinspection PyMethodMayBeStatic Get the translation for a string using Qt translation API. We implement this ourselves since we do not inherit QObject. :param message: String for translation. :type message: str, QString :returns: Translated version of message. :rtype: QString # noinspection PyTypeChecker,PyArgumentList,PyCallByClass Add a toolbar icon to the toolbar. :param icon_path: Path to the icon for this action. Can be a resource path (e.g. ':/plugins/foo/bar.png') or a normal file system path. :type icon_path: str :param text: Text that should be shown in menu items for this action. 
:type text: str :param callback: Function to be called when the action is triggered. :type callback: function :param enabled_flag: A flag indicating if the action should be enabled by default. Defaults to True. :type enabled_flag: bool :param add_to_menu: Flag indicating whether the action should also be added to the menu. Defaults to True. :type add_to_menu: bool :param add_to_toolbar: Flag indicating whether the action should also be added to the toolbar. Defaults to True. :type add_to_toolbar: bool :param status_tip: Optional text to show in a popup when mouse pointer hovers over the action. :type status_tip: str :param parent: Parent widget for the new action. Defaults None. :type parent: QWidget :param whats_this: Optional text to show in the status bar when the mouse pointer hovers over the action. :returns: The action that was created. Note that the action is also added to self.actions list. :rtype: QAction # Create the dialog (after translation) and keep reference Create the menu entries and toolbar icons inside the QGIS GUI. Removes the plugin menu item and icon from QGIS GUI. # remove the toolbar Return index of value find in list or -1 if value is not exist in list Remove dupplicate value from given array and import unic values to given combo Open window to search template file Update text box with path value Valid file format Update text box with path value Method to get path in order to export file to path Update text box with path value Create array to use map layers parse layer to get opportunity values # retourne les valeurs pour un champ donné dans une couche donnée if user select layer in combo, return attributes as list # list of fields # control if field exist in layer # Correction apply : if index is first, index = int(0). So, python indentify index as False. 
# array that contain all attributes values without fields name # return list of opportunity states values return opportunity according to state value or not # return list of id for gc layer # return all features # return all fields # return position of given field in layer fields # to get id attributes # bug # si on a bien le champ statut donne alors la position du champ, sinon renvoi false # on regarde toutes les features de la couche # on prend la valeur de l'id pour la feature # on prend le statut pour cette même feature # on test si la valeur sélectionnée est dans la liste des statuts # si c'est le cas, alors on filtre # on filtre donc sur le statut souhaité pour ne prendre que les features qui ont un statut identique au statut sélectionné # on ajoutera la feature dans une liste # return sum of opportunity for each combo whithout duplicate value # get count of cb items and returns the text for the given index in the combobox Function to parse state combo list and remove state not listed in selected ids # display default message # list all opportunity from layers create folder to contain report by opportunity #get selected value in combo # use this code if user select all create folder copy template # copie du template export to csv # traitement par couche # create csv file # control docname is not wrong # get and add fields to csv # filter features to add to csv # get attribute # parse all feature's values # if feature is search write in csv We begin by activate state combo and load this combo by states values Search Id in given layer's fields name and load fields name in this combo Get layer's name selected in combobox and return real layer object from Qgis canvas From layer parse fields and return field name that contain "id" value # update id combo # get fields name # load values in combo id Search first occurency that contain "id" value and define as default index # if field name contain "id" str we set this name index by default combo value Restore default 
combo state Init combo elements #load layer list to combobox # event on clic Run method that performs all the real work # show the dialog "To connect event to gui elements # init combo # buttons here we need to load opportunity list wehen user select id field to get opp values # Run the dialog event loop # See if OK was pressed # Do something useful here - delete the line containing pass and # substitute with your code. | 1.569821 | 2 |
src/py42/sdk/queries/__init__.py | code42/py42 | 21 | 6623789 | from py42 import settings
from py42.sdk.queries.query_filter import FilterGroup
class BaseQuery:
def __init__(self, *args, **kwargs):
self._filter_group_list = list(args)
self._group_clause = kwargs.get("group_clause", "AND")
self.page_number = kwargs.get("page_number") or 1
self.page_size = kwargs.get("page_size") or settings.security_events_per_page
self.page_token = kwargs.get("page_token") or None
self.sort_direction = "asc"
# Override
self.sort_key = None
@classmethod
def from_dict(cls, _dict, group_clause="AND", **kwargs):
filter_groups = [FilterGroup.from_dict(item) for item in _dict["groups"]]
return cls(*filter_groups, group_clause=group_clause, **kwargs)
@classmethod
def any(cls, *args):
return cls(*args, group_clause="OR")
@classmethod
def all(cls, *args):
return cls(*args)
| from py42 import settings
from py42.sdk.queries.query_filter import FilterGroup
class BaseQuery:
def __init__(self, *args, **kwargs):
self._filter_group_list = list(args)
self._group_clause = kwargs.get("group_clause", "AND")
self.page_number = kwargs.get("page_number") or 1
self.page_size = kwargs.get("page_size") or settings.security_events_per_page
self.page_token = kwargs.get("page_token") or None
self.sort_direction = "asc"
# Override
self.sort_key = None
@classmethod
def from_dict(cls, _dict, group_clause="AND", **kwargs):
filter_groups = [FilterGroup.from_dict(item) for item in _dict["groups"]]
return cls(*filter_groups, group_clause=group_clause, **kwargs)
@classmethod
def any(cls, *args):
return cls(*args, group_clause="OR")
@classmethod
def all(cls, *args):
return cls(*args)
| en | 0.394336 | # Override | 2.428676 | 2 |
qcportal/records/optimization/__init__.py | bennybp/QCPortal | 0 | 6623790 | from .models import (
OptimizationRecord,
OptimizationProtocols,
OptimizationSpecification,
OptimizationInputSpecification,
OptimizationQCInputSpecification,
OptimizationQueryBody,
OptimizationAddBody,
)
| from .models import (
OptimizationRecord,
OptimizationProtocols,
OptimizationSpecification,
OptimizationInputSpecification,
OptimizationQCInputSpecification,
OptimizationQueryBody,
OptimizationAddBody,
)
| none | 1 | 1.054661 | 1 | |
Metaheuristics/BRKGA/CONFIGURATION.py | presmerats/Nurse-Scheduling-LP-and-Heuristics | 1 | 6623791 | config = {'chromosomeLength': 30,
'numIndividuals': 50,
'a' : 3,
'maxNumGen':20,
'eliteProp':0.3,
'mutantProp':0.15,
'inheritanceProb':0.8}
| config = {'chromosomeLength': 30,
'numIndividuals': 50,
'a' : 3,
'maxNumGen':20,
'eliteProp':0.3,
'mutantProp':0.15,
'inheritanceProb':0.8}
| none | 1 | 1.076549 | 1 | |
2019/day3-1.py | PaulWichser/adventofcode | 0 | 6623792 | #Solution for https://adventofcode.com/2019/day/3
def wireimp(filename):
with open(filename,'r') as file:
wires = {}
x=1
for line in file:
line = line.rstrip('\n')
list = line.split(',')
wires['wire%i' % x] = list
# print(len(wires))
x += 1
print("Imported wire dictionary of length %i" % len(wires))
return wires
def cartwire(list):
outlist = []
coords = [0,0]
for x in range(len(list)):
#convert strings to cartesian coords
dir = list[x][0]
list[x] = int(list[x].replace(dir,''))
for i in range(list[x]):
if dir == 'R':
coords[0] = coords[0]+1
elif dir == 'L':
coords[0] = coords[0]-1
elif dir == 'U':
coords[1] = coords[1]+1
elif dir == 'D':
coords[1] = coords[1]-1
else:
print('Unexpected direction of %s' % dir)
quit()
# print(coords)
outlist.append(coords.copy())
# print(outlist)
# print(outlist)
return outlist
def closecross(list1,list2):
crosses = []
length = len(list1)*len(list2)
counter = 0
print('Checking %i possibilities for crossed wires' % length)
for i in range(len(list1)):
for j in range(len(list2)):
if list1[i] == list2[j]:
crosses.append(list1[i])
counter +=1
if not (counter%10000000):
print('%i' % ((counter/length)*100))
for i in range(len(crosses)):
crosses[i] = abs(crosses[i][0]) + abs(crosses[i][1])
print(crosses)
return min(crosses)
def test(filename,ans):
testdict = wireimp(filename)
if closecross(cartwire(testdict['wire1']),cartwire(testdict['wire2'])) == ans:
print('Test cross check successful!')
else:
print('Test cross check failure!')
quit()
test('day3-1test2.txt',159)
test('day3-1test.txt',135)
wiredict = wireimp('day3-1input.txt')
print(closecross(cartwire(wiredict['wire1']),cartwire(wiredict['wire2'])))
| #Solution for https://adventofcode.com/2019/day/3
def wireimp(filename):
with open(filename,'r') as file:
wires = {}
x=1
for line in file:
line = line.rstrip('\n')
list = line.split(',')
wires['wire%i' % x] = list
# print(len(wires))
x += 1
print("Imported wire dictionary of length %i" % len(wires))
return wires
def cartwire(list):
outlist = []
coords = [0,0]
for x in range(len(list)):
#convert strings to cartesian coords
dir = list[x][0]
list[x] = int(list[x].replace(dir,''))
for i in range(list[x]):
if dir == 'R':
coords[0] = coords[0]+1
elif dir == 'L':
coords[0] = coords[0]-1
elif dir == 'U':
coords[1] = coords[1]+1
elif dir == 'D':
coords[1] = coords[1]-1
else:
print('Unexpected direction of %s' % dir)
quit()
# print(coords)
outlist.append(coords.copy())
# print(outlist)
# print(outlist)
return outlist
def closecross(list1,list2):
crosses = []
length = len(list1)*len(list2)
counter = 0
print('Checking %i possibilities for crossed wires' % length)
for i in range(len(list1)):
for j in range(len(list2)):
if list1[i] == list2[j]:
crosses.append(list1[i])
counter +=1
if not (counter%10000000):
print('%i' % ((counter/length)*100))
for i in range(len(crosses)):
crosses[i] = abs(crosses[i][0]) + abs(crosses[i][1])
print(crosses)
return min(crosses)
def test(filename,ans):
testdict = wireimp(filename)
if closecross(cartwire(testdict['wire1']),cartwire(testdict['wire2'])) == ans:
print('Test cross check successful!')
else:
print('Test cross check failure!')
quit()
test('day3-1test2.txt',159)
test('day3-1test.txt',135)
wiredict = wireimp('day3-1input.txt')
print(closecross(cartwire(wiredict['wire1']),cartwire(wiredict['wire2'])))
| en | 0.657251 | #Solution for https://adventofcode.com/2019/day/3 # print(len(wires)) #convert strings to cartesian coords # print(coords) # print(outlist) # print(outlist) | 3.445399 | 3 |
basics/requests/myHttpServer.py | lostFox/autoRunSomething | 0 | 6623793 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'james'
import web
urls = (
'/', 'index'
)
app = web.application(urls, globals())
class index:
def GET(self):
return "Hello, world!"
if __name__ == "__main__": app.run()
| #! /usr/bin/env python
# -*- coding: UTF-8 -*-
__author__ = 'james'
import web
urls = (
'/', 'index'
)
app = web.application(urls, globals())
class index:
def GET(self):
return "Hello, world!"
if __name__ == "__main__": app.run()
| fr | 0.153583 | #! /usr/bin/env python # -*- coding: UTF-8 -*- | 2.614636 | 3 |
games/game_snake/snake.py | sdenisen/test | 0 | 6623794 | <reponame>sdenisen/test<filename>games/game_snake/snake.py
import random
import time
__author__ = 'sdeni'
from tkinter import Frame, Canvas, Tk
from tkinter.constants import NW, ALL
import ImageTk
from PIL import Image
class Const:
BOARD_WIDTH = 600
BOARD_HEIGHT = 600
DOT_SIZE = 20
DELAY = 300
KEY_PORTAL = "g"
KEY_DOWN = "Down"
KEY_UP = "Up"
KEY_RIGHT = "Right"
KEY_LEFT = "Left"
class Board(Canvas):
def __init__(self):
super().__init__(width=Const.BOARD_WIDTH, height=Const.BOARD_HEIGHT, background="black", highlightthickness=0)
self.init()
def init(self):
# load images,
self.loadImages()
# init constants/variables
self.inGame = True
self.dots = 3
self.score = 0
self.is_in_portal = False
# init start positions of snake/apple
self.moveX = Const.DOT_SIZE
self.moveY = 0
self.appleX = 10*Const.DOT_SIZE
self.appleY = 5*Const.DOT_SIZE
# create objects
self.createObjects()
# init key events
self.bind_all("<Key>", self.readKeysEvent)
self.refreshFrame()
def refreshFrame(self):
# check collisions with border and himself
self.inGame = self.checkCollisions()
col_apple = self.checkAppleCollision()
if self.inGame:
if col_apple:
self.increaseSnake()
self.generateNewApple()
self.showNewScore()
self.moveSnake()
self.after(Const.DELAY, self.refreshFrame)
else:
self.showGaveOver()
def loadImages(self):
iapple = Image.open("icons/apple.jpg")
self.apple = ImageTk.PhotoImage(iapple)
ihead = Image.open("icons/snake_head.jpg")
self.head = ImageTk.PhotoImage(ihead)
idot = Image.open("icons/snake_dot.jpg")
self.dot = ImageTk.PhotoImage(idot)
iportal_input = Image.open("icons/portal_input.jpg")
self.portal_input = ImageTk.PhotoImage(iportal_input)
iportal_exit = Image.open("icons/portal_exit.jpg")
self.portal_exit = ImageTk.PhotoImage(iportal_exit)
def createObjects(self):
self.create_text(30, 10, text="Score: {0}".format(self.score), tag="score", fill="white")
self.create_image(self.appleX, self.appleY, image=self.apple, anchor=NW, tag="apple")
self.create_image(100, 50, image=self.head, anchor=NW, tag="head")
self.create_image(80, 50, image=self.dot, anchor=NW, tag="dot")
self.create_image(60, 50, image=self.dot, anchor=NW, tag="dot")
def moveSnake(self):
head = self.find_withtag("head")
dots = self.find_withtag("dot")
items = dots + head
for z in range(len(items)-1):
c1_x, c1_y = self.coords(items[z])
c2_x, c2_y = self.coords(items[z+1])
self.move(items[z], c2_x-c1_x, c2_y-c1_y )
self.move(head, self.moveX, self.moveY)
if self.checkPortalCollisions():
p_input = self.find_withtag("portal_input")
p_output = self.find_withtag("portal_exit")[0]
x1, y1, x2, y2 = self.bbox(p_input)
overlapping_items = self.find_overlapping(x1, y1, x2, y2)
if len(overlapping_items) == 2:
overlapping = list(set(overlapping_items) - set(p_input))
ox, oy = self.coords(p_output)
ov_x, ov_y = self.coords(overlapping[0])
self.move(overlapping[0], ox-ov_x, oy-ov_y)
ov_x, ov_y = self.coords(overlapping[0])
else:
print (overlapping_items)
raise Exception
def readKeysEvent(self, e):
print (e.keysym)
if e.keysym == Const.KEY_DOWN:
self.moveX = 0
self.moveY = Const.DOT_SIZE
elif e.keysym == Const.KEY_UP:
self.moveX = 0
self.moveY = -1*Const.DOT_SIZE
elif e.keysym == Const.KEY_LEFT:
self.moveY = 0
self.moveX = -1 * Const.DOT_SIZE
elif e.keysym == Const.KEY_RIGHT:
self.moveY = 0
self.moveX = Const.DOT_SIZE
elif e.keysym == Const.KEY_PORTAL:
if self.is_in_portal:
self.removePortal()
self.is_in_portal = False
else:
self.setPortal()
self.is_in_portal = True
def removePortal(self):
portal = self.find_withtag("portal_input")
self.delete(portal[0])
portal = self.find_withtag("portal_exit")
self.delete(portal[0])
def setPortal(self):
head = self.find_withtag("head")
exit_x = random.randint(100, 500)
exit_y = random.randint(100, 500)
head_x, head_y = self.coords(head)
self.create_image(head_x + 3*self.moveX, head_y +3* self.moveY, image=self.portal_input, anchor=NW, tag="portal_input")
self.create_image(exit_x, exit_y, image=self.portal_exit, anchor=NW, tag="portal_exit")
def checkPortalCollisions(self):
if not self.find_withtag("portal_input") or not self.find_withtag("portal_exit"):
return False
head = self.find_withtag("head")[0]
dots = self.find_withtag("dot")
portal_input = self.find_withtag("portal_input")[0]
x1, y1, x2, y2 = self.bbox(portal_input)
overlapping_items = self.find_overlapping(x1, y1, x2, y2)
if head in overlapping_items:
return True
for dot in dots:
if dot in overlapping_items:
return True
return False
def checkCollisions(self):
dots = self.find_withtag("dot")
head = self.find_withtag("head")
items = dots + head
x1, y1, x2, y2 = self.bbox(head[0])
overlapping_items = self.find_overlapping(x1, y1, x2, y2)
if set(items) - set(overlapping_items)< set(dots):
return False
if (x1<0 or x2 >= Const.BOARD_WIDTH + Const.DOT_SIZE) or (y1<0 or y2>Const.BOARD_HEIGHT + Const.DOT_SIZE):
return False
return True
def showGaveOver(self):
self.delete(ALL)
self.create_text(Const.BOARD_WIDTH/2, Const.BOARD_HEIGHT/2, text="GAME OVER! With Score: {0}".format(self.score), fill="red")
def checkAppleCollision(self):
head = self.find_withtag("head")[0]
apple = self.find_withtag("apple")[0]
x1, y1, x2, y2 = self.bbox(head)
overlapping_items = self.find_overlapping(x1, y1, x2, y2)
if apple in overlapping_items:
return True
return False
def increaseSnake(self):
dots = self.find_withtag("dot")
last = dots[-1:]
prev_last = dots[-2:-1]
x1, y1 = self.coords(last)
x2, y2 = self.coords(prev_last)
delta_x = x2-x1
delta_y = y2-y1
self.create_image(x1+delta_x, y1+delta_y, image=self.dot, anchor=NW, tag="dot")
self.score +=1
def generateNewApple(self):
apple = self.find_withtag("apple")
self.delete(apple[0])
x = random.randint(0, Const.BOARD_WIDTH%Const.DOT_SIZE)
y = random.randint(0, Const.BOARD_HEIGHT%Const.DOT_SIZE)
self.create_image(x * Const.DOT_SIZE, y * Const.DOT_SIZE, image=self.apple, anchor=NW, tag="apple")
def showNewScore(self):
score = self.find_withtag("score")
self.delete(score[0])
self.create_text(30, 10, text="Score: {0}".format(self.score), tag="score", fill="white")
class Snake(Frame):
def __init__(self):
super().__init__()
self.master.title("Snake")
self.board = Board()
self.board.pack()
def main():
root = Tk()
Snake()
root.mainloop()
if __name__ == "__main__":
main()
| import random
import time
__author__ = 'sdeni'
from tkinter import Frame, Canvas, Tk
from tkinter.constants import NW, ALL
import ImageTk
from PIL import Image
class Const:
BOARD_WIDTH = 600
BOARD_HEIGHT = 600
DOT_SIZE = 20
DELAY = 300
KEY_PORTAL = "g"
KEY_DOWN = "Down"
KEY_UP = "Up"
KEY_RIGHT = "Right"
KEY_LEFT = "Left"
class Board(Canvas):
def __init__(self):
super().__init__(width=Const.BOARD_WIDTH, height=Const.BOARD_HEIGHT, background="black", highlightthickness=0)
self.init()
def init(self):
# load images,
self.loadImages()
# init constants/variables
self.inGame = True
self.dots = 3
self.score = 0
self.is_in_portal = False
# init start positions of snake/apple
self.moveX = Const.DOT_SIZE
self.moveY = 0
self.appleX = 10*Const.DOT_SIZE
self.appleY = 5*Const.DOT_SIZE
# create objects
self.createObjects()
# init key events
self.bind_all("<Key>", self.readKeysEvent)
self.refreshFrame()
def refreshFrame(self):
# check collisions with border and himself
self.inGame = self.checkCollisions()
col_apple = self.checkAppleCollision()
if self.inGame:
if col_apple:
self.increaseSnake()
self.generateNewApple()
self.showNewScore()
self.moveSnake()
self.after(Const.DELAY, self.refreshFrame)
else:
self.showGaveOver()
def loadImages(self):
iapple = Image.open("icons/apple.jpg")
self.apple = ImageTk.PhotoImage(iapple)
ihead = Image.open("icons/snake_head.jpg")
self.head = ImageTk.PhotoImage(ihead)
idot = Image.open("icons/snake_dot.jpg")
self.dot = ImageTk.PhotoImage(idot)
iportal_input = Image.open("icons/portal_input.jpg")
self.portal_input = ImageTk.PhotoImage(iportal_input)
iportal_exit = Image.open("icons/portal_exit.jpg")
self.portal_exit = ImageTk.PhotoImage(iportal_exit)
def createObjects(self):
self.create_text(30, 10, text="Score: {0}".format(self.score), tag="score", fill="white")
self.create_image(self.appleX, self.appleY, image=self.apple, anchor=NW, tag="apple")
self.create_image(100, 50, image=self.head, anchor=NW, tag="head")
self.create_image(80, 50, image=self.dot, anchor=NW, tag="dot")
self.create_image(60, 50, image=self.dot, anchor=NW, tag="dot")
def moveSnake(self):
head = self.find_withtag("head")
dots = self.find_withtag("dot")
items = dots + head
for z in range(len(items)-1):
c1_x, c1_y = self.coords(items[z])
c2_x, c2_y = self.coords(items[z+1])
self.move(items[z], c2_x-c1_x, c2_y-c1_y )
self.move(head, self.moveX, self.moveY)
if self.checkPortalCollisions():
p_input = self.find_withtag("portal_input")
p_output = self.find_withtag("portal_exit")[0]
x1, y1, x2, y2 = self.bbox(p_input)
overlapping_items = self.find_overlapping(x1, y1, x2, y2)
if len(overlapping_items) == 2:
overlapping = list(set(overlapping_items) - set(p_input))
ox, oy = self.coords(p_output)
ov_x, ov_y = self.coords(overlapping[0])
self.move(overlapping[0], ox-ov_x, oy-ov_y)
ov_x, ov_y = self.coords(overlapping[0])
else:
print (overlapping_items)
raise Exception
def readKeysEvent(self, e):
print (e.keysym)
if e.keysym == Const.KEY_DOWN:
self.moveX = 0
self.moveY = Const.DOT_SIZE
elif e.keysym == Const.KEY_UP:
self.moveX = 0
self.moveY = -1*Const.DOT_SIZE
elif e.keysym == Const.KEY_LEFT:
self.moveY = 0
self.moveX = -1 * Const.DOT_SIZE
elif e.keysym == Const.KEY_RIGHT:
self.moveY = 0
self.moveX = Const.DOT_SIZE
elif e.keysym == Const.KEY_PORTAL:
if self.is_in_portal:
self.removePortal()
self.is_in_portal = False
else:
self.setPortal()
self.is_in_portal = True
def removePortal(self):
portal = self.find_withtag("portal_input")
self.delete(portal[0])
portal = self.find_withtag("portal_exit")
self.delete(portal[0])
def setPortal(self):
head = self.find_withtag("head")
exit_x = random.randint(100, 500)
exit_y = random.randint(100, 500)
head_x, head_y = self.coords(head)
self.create_image(head_x + 3*self.moveX, head_y +3* self.moveY, image=self.portal_input, anchor=NW, tag="portal_input")
self.create_image(exit_x, exit_y, image=self.portal_exit, anchor=NW, tag="portal_exit")
def checkPortalCollisions(self):
if not self.find_withtag("portal_input") or not self.find_withtag("portal_exit"):
return False
head = self.find_withtag("head")[0]
dots = self.find_withtag("dot")
portal_input = self.find_withtag("portal_input")[0]
x1, y1, x2, y2 = self.bbox(portal_input)
overlapping_items = self.find_overlapping(x1, y1, x2, y2)
if head in overlapping_items:
return True
for dot in dots:
if dot in overlapping_items:
return True
return False
def checkCollisions(self):
dots = self.find_withtag("dot")
head = self.find_withtag("head")
items = dots + head
x1, y1, x2, y2 = self.bbox(head[0])
overlapping_items = self.find_overlapping(x1, y1, x2, y2)
if set(items) - set(overlapping_items)< set(dots):
return False
if (x1<0 or x2 >= Const.BOARD_WIDTH + Const.DOT_SIZE) or (y1<0 or y2>Const.BOARD_HEIGHT + Const.DOT_SIZE):
return False
return True
def showGaveOver(self):
self.delete(ALL)
self.create_text(Const.BOARD_WIDTH/2, Const.BOARD_HEIGHT/2, text="GAME OVER! With Score: {0}".format(self.score), fill="red")
def checkAppleCollision(self):
head = self.find_withtag("head")[0]
apple = self.find_withtag("apple")[0]
x1, y1, x2, y2 = self.bbox(head)
overlapping_items = self.find_overlapping(x1, y1, x2, y2)
if apple in overlapping_items:
return True
return False
def increaseSnake(self):
dots = self.find_withtag("dot")
last = dots[-1:]
prev_last = dots[-2:-1]
x1, y1 = self.coords(last)
x2, y2 = self.coords(prev_last)
delta_x = x2-x1
delta_y = y2-y1
self.create_image(x1+delta_x, y1+delta_y, image=self.dot, anchor=NW, tag="dot")
self.score +=1
def generateNewApple(self):
apple = self.find_withtag("apple")
self.delete(apple[0])
x = random.randint(0, Const.BOARD_WIDTH%Const.DOT_SIZE)
y = random.randint(0, Const.BOARD_HEIGHT%Const.DOT_SIZE)
self.create_image(x * Const.DOT_SIZE, y * Const.DOT_SIZE, image=self.apple, anchor=NW, tag="apple")
def showNewScore(self):
score = self.find_withtag("score")
self.delete(score[0])
self.create_text(30, 10, text="Score: {0}".format(self.score), tag="score", fill="white")
class Snake(Frame):
def __init__(self):
super().__init__()
self.master.title("Snake")
self.board = Board()
self.board.pack()
def main():
root = Tk()
Snake()
root.mainloop()
if __name__ == "__main__":
main() | en | 0.730252 | # load images, # init constants/variables # init start positions of snake/apple # create objects # init key events # check collisions with border and himself | 2.715643 | 3 |
login_website/urls.py | sukumar1612/movie_stream | 0 | 6623795 | <filename>login_website/urls.py
from django.conf.urls import url
from django.urls import path,re_path
from login_website import views
app_name = 'login_website'
urlpatterns=[
path('user_login/',views.user_login,name='user_login'),
path('register/',views.register,name='register'),
path('user_logout/',views.user_logout,name='user_logout')
]
| <filename>login_website/urls.py
from django.conf.urls import url
from django.urls import path,re_path
from login_website import views
app_name = 'login_website'
urlpatterns=[
path('user_login/',views.user_login,name='user_login'),
path('register/',views.register,name='register'),
path('user_logout/',views.user_logout,name='user_logout')
]
| none | 1 | 1.994382 | 2 | |
survey/adapter.py | afranck64/ultimatum | 0 | 6623796 | """
Adapter
Transform available request args into known internal value
"""
from urllib.parse import urlparse, parse_qs
from collections import defaultdict
from flask import request, current_app as app
from survey.mturk import MTurk
class BaseAdapter(object):
def get_job_id(self):
raise NotImplementedError
def get_worker_id(self):
raise NotImplementedError
def get_assignment_id(self):
raise NotImplementedError
def get_submit_to_URL(self):
raise NotImplementedError
def get_submit_to_kwargs(self, **kwargs):
raise NotImplementedError
def is_preview(self):
raise NotImplementedError
def to_dict(self):
raise NotImplementedError
@classmethod
def from_dict(cls, dict_obj):
raise NotImplementedError
def get_api(self, sandbox=None):
raise NotImplementedError
@classmethod
def has_api(cls):
raise NotImplementedError
class DefaultAdapter(BaseAdapter):
def __init__(self):
self.job_id = request.args.get("job_id", "").strip()
self.worker_id = request.args.get("worker_id", "").strip()
self.assignment_id = request.args.get("assignment_id", "").strip()
self.submit_to_URL = request.args.get("submit_to_URL")
self.preview = request.args.get("preview") in {"1", "true"} or self.job_id in ("", "na")
if self.preview:
self.worker_id = "na"
self.job_id = "na"
self.submit_to_kwargs = {
"job_id": self.job_id,
"worker_id": self.worker_id,
"assignment_id": self.assignment_id
}
def get_job_id(self):
return self.job_id
def get_worker_id(self):
return self.worker_id
def get_assignment_id(self):
return self.assignment_id
def get_submit_to_URL(self):
return self.submit_to_URL
def get_submit_to_kwargs(self):
return self.submit_to_kwargs
def is_preview(self):
return self.preview
def to_dict(self):
obj_dict = dict(self.__dict__)
obj_dict["_adapter"] = None
return obj_dict
@classmethod
def has_api(cls):
return False
@classmethod
def from_dict(cls, dict_obj):
adapter_key = dict_obj.get("_adapter")
adapter_cls = ADAPTERS[adapter_key]
adapter = adapter_cls()
adapter.__dict__.update(dict_obj)
return adapter
class MTurkAdapter(DefaultAdapter):
def __init__(self):
referrer = request.headers.get("Referer")
args_source = request.args
app.logger.debug(f"adapter: referrer={referrer}")
app.logger.debug(f"Mturk request.args: {request.args}")
if referrer and "workerId" in referrer:
parsed_url = urlparse(referrer)
query = parse_qs(parsed_url.query)
query_flat = {k:v[0] for k,v in query.items()}
args_source = query_flat
self.job_id = args_source.get("hitId", "").strip()
self.worker_id = args_source.get("workerId", "").strip()
self.assignment_id = args_source.get("assignmentId", "NA").strip()
self.submit_to_URL = args_source.get("turkSubmitTo")
self.preview = args_source.get("assignmentId") == "ASSIGNMENT_ID_NOT_AVAILABLE"
if self.preview:
self.worker_id = "na"
self.job_id = "na"
self.submit_to_kwargs = {
"assignmentId": args_source.get("assignmentId")
}
def to_dict(self):
obj_dict = dict(self.__dict__)
obj_dict["_adapter"] = "mturk"
return obj_dict
def get_api(self, sandbox=None):
if sandbox is None:
sandbox = app.config.get("MTURK_SANDBOX")
return MTurk(self.get_job_id(), sandbox=sandbox)
@classmethod
def has_api(cls):
return True
ADAPTERS = defaultdict(
lambda: DefaultAdapter,
mturk= MTurkAdapter,
)
def get_adapter() -> BaseAdapter:
app.logger.debug("get_adapter")
adapter_key = request.args.get("adapter")
adapter_cls = ADAPTERS[adapter_key]
app.logger.debug(f"get_adapter: {adapter_cls.__name__}")
return adapter_cls()
def get_adapter_from_dict(dict_obj) -> BaseAdapter:
adapter_key = dict_obj.get("_adapter")
adapter_cls = ADAPTERS[adapter_key]
adapter = adapter_cls()
adapter.__dict__.update(dict_obj)
return adapter
| """
Adapter
Transform available request args into known internal value
"""
from urllib.parse import urlparse, parse_qs
from collections import defaultdict
from flask import request, current_app as app
from survey.mturk import MTurk
class BaseAdapter(object):
def get_job_id(self):
raise NotImplementedError
def get_worker_id(self):
raise NotImplementedError
def get_assignment_id(self):
raise NotImplementedError
def get_submit_to_URL(self):
raise NotImplementedError
def get_submit_to_kwargs(self, **kwargs):
raise NotImplementedError
def is_preview(self):
raise NotImplementedError
def to_dict(self):
raise NotImplementedError
@classmethod
def from_dict(cls, dict_obj):
raise NotImplementedError
def get_api(self, sandbox=None):
raise NotImplementedError
@classmethod
def has_api(cls):
raise NotImplementedError
class DefaultAdapter(BaseAdapter):
def __init__(self):
self.job_id = request.args.get("job_id", "").strip()
self.worker_id = request.args.get("worker_id", "").strip()
self.assignment_id = request.args.get("assignment_id", "").strip()
self.submit_to_URL = request.args.get("submit_to_URL")
self.preview = request.args.get("preview") in {"1", "true"} or self.job_id in ("", "na")
if self.preview:
self.worker_id = "na"
self.job_id = "na"
self.submit_to_kwargs = {
"job_id": self.job_id,
"worker_id": self.worker_id,
"assignment_id": self.assignment_id
}
def get_job_id(self):
return self.job_id
def get_worker_id(self):
return self.worker_id
def get_assignment_id(self):
return self.assignment_id
def get_submit_to_URL(self):
return self.submit_to_URL
def get_submit_to_kwargs(self):
return self.submit_to_kwargs
def is_preview(self):
return self.preview
def to_dict(self):
obj_dict = dict(self.__dict__)
obj_dict["_adapter"] = None
return obj_dict
@classmethod
def has_api(cls):
return False
@classmethod
def from_dict(cls, dict_obj):
adapter_key = dict_obj.get("_adapter")
adapter_cls = ADAPTERS[adapter_key]
adapter = adapter_cls()
adapter.__dict__.update(dict_obj)
return adapter
class MTurkAdapter(DefaultAdapter):
def __init__(self):
referrer = request.headers.get("Referer")
args_source = request.args
app.logger.debug(f"adapter: referrer={referrer}")
app.logger.debug(f"Mturk request.args: {request.args}")
if referrer and "workerId" in referrer:
parsed_url = urlparse(referrer)
query = parse_qs(parsed_url.query)
query_flat = {k:v[0] for k,v in query.items()}
args_source = query_flat
self.job_id = args_source.get("hitId", "").strip()
self.worker_id = args_source.get("workerId", "").strip()
self.assignment_id = args_source.get("assignmentId", "NA").strip()
self.submit_to_URL = args_source.get("turkSubmitTo")
self.preview = args_source.get("assignmentId") == "ASSIGNMENT_ID_NOT_AVAILABLE"
if self.preview:
self.worker_id = "na"
self.job_id = "na"
self.submit_to_kwargs = {
"assignmentId": args_source.get("assignmentId")
}
def to_dict(self):
obj_dict = dict(self.__dict__)
obj_dict["_adapter"] = "mturk"
return obj_dict
def get_api(self, sandbox=None):
if sandbox is None:
sandbox = app.config.get("MTURK_SANDBOX")
return MTurk(self.get_job_id(), sandbox=sandbox)
@classmethod
def has_api(cls):
return True
ADAPTERS = defaultdict(
lambda: DefaultAdapter,
mturk= MTurkAdapter,
)
def get_adapter() -> BaseAdapter:
app.logger.debug("get_adapter")
adapter_key = request.args.get("adapter")
adapter_cls = ADAPTERS[adapter_key]
app.logger.debug(f"get_adapter: {adapter_cls.__name__}")
return adapter_cls()
def get_adapter_from_dict(dict_obj) -> BaseAdapter:
adapter_key = dict_obj.get("_adapter")
adapter_cls = ADAPTERS[adapter_key]
adapter = adapter_cls()
adapter.__dict__.update(dict_obj)
return adapter
| en | 0.80395 | Adapter Transform available request args into known internal value | 2.587512 | 3 |
map_gen_2/util/vector_util.py | hamracer/Map-Generator | 9 | 6623797 | <reponame>hamracer/Map-Generator
import math
def angle(a, b):
cos_theta = dot_prod(a, b) / (length(a) * length(b))
if cos_theta > 1:
cos_theta = 1
if cos_theta < -1:
cos_theta = -1
return math.acos(cos_theta)
def dot_prod(a, b):
return a[0] * b[0] + a[1] * b[1]
def get_unit_perp(a):
m_a = math.sqrt(a[0] ** 2 + a[1] ** 2)
if m_a > 0:
return [a[1] / m_a, -a[0] / m_a]
def length(vector):
return math.sqrt(vector[0] ** 2 + vector[1] ** 2)
def dist(p1, p2):
return length([p2[0] - p1[0], p2[1] - p1[1]])
def full_angle(a, b):
if dot_prod(a, b) < 0:
return math.pi - angle(a, b)
else:
return angle(a, b)
def split_line(p1, p2, rand, mag_fact=0.25):
mid_p = [0, 0]
mid_p[0] = (p1[0] + p2[0]) / 2
mid_p[1] = (p1[1] + p2[1]) / 2
mid_vect = [mid_p[0] - p1[0], mid_p[1] - p1[1]]
perp_vect = get_unit_perp(mid_vect)
split_p = [0, 0]
rand_fact = rand.random() - 0.5
distance = dist(p1, p2)
split_p[0] = mid_p[0] + rand_fact * mag_fact * distance * perp_vect[0]
split_p[1] = mid_p[1] + rand_fact * mag_fact * distance * perp_vect[1]
return split_p
def subtract(a, b):
""" Subtracts b from a. """
return [a[0] - b[0], a[1] - b[1]]
def add(a, b):
return [a[0] + b[0], a[1] + b[1]]
| import math
def angle(a, b):
cos_theta = dot_prod(a, b) / (length(a) * length(b))
if cos_theta > 1:
cos_theta = 1
if cos_theta < -1:
cos_theta = -1
return math.acos(cos_theta)
def dot_prod(a, b):
return a[0] * b[0] + a[1] * b[1]
def get_unit_perp(a):
m_a = math.sqrt(a[0] ** 2 + a[1] ** 2)
if m_a > 0:
return [a[1] / m_a, -a[0] / m_a]
def length(vector):
return math.sqrt(vector[0] ** 2 + vector[1] ** 2)
def dist(p1, p2):
return length([p2[0] - p1[0], p2[1] - p1[1]])
def full_angle(a, b):
if dot_prod(a, b) < 0:
return math.pi - angle(a, b)
else:
return angle(a, b)
def split_line(p1, p2, rand, mag_fact=0.25):
mid_p = [0, 0]
mid_p[0] = (p1[0] + p2[0]) / 2
mid_p[1] = (p1[1] + p2[1]) / 2
mid_vect = [mid_p[0] - p1[0], mid_p[1] - p1[1]]
perp_vect = get_unit_perp(mid_vect)
split_p = [0, 0]
rand_fact = rand.random() - 0.5
distance = dist(p1, p2)
split_p[0] = mid_p[0] + rand_fact * mag_fact * distance * perp_vect[0]
split_p[1] = mid_p[1] + rand_fact * mag_fact * distance * perp_vect[1]
return split_p
def subtract(a, b):
""" Subtracts b from a. """
return [a[0] - b[0], a[1] - b[1]]
def add(a, b):
return [a[0] + b[0], a[1] + b[1]] | en | 0.593961 | Subtracts b from a. | 2.928638 | 3 |
join_csv.py | yetinater/Prediction-of-Steering-Angle-using-Throttle-and-Road-Angle-Values-for-Vehicle-Control | 2 | 6623798 | # script to join master_beta_csv and road_angle to prepare finaldataset file
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def combine( df1, df2):
return pd.concat([df1, df2], axis=1, sort=False)
if __name__ == "__main__":
df1 = pd.read_csv("master_beta_csv2.csv")
df2 = pd.read_csv("road_angles2.csv")
output_dataframe = combine(df1, df2)
output_dataframe.to_csv("prefinal_master_dataset.csv") | # script to join master_beta_csv and road_angle to prepare finaldataset file
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
def combine( df1, df2):
return pd.concat([df1, df2], axis=1, sort=False)
if __name__ == "__main__":
df1 = pd.read_csv("master_beta_csv2.csv")
df2 = pd.read_csv("road_angles2.csv")
output_dataframe = combine(df1, df2)
output_dataframe.to_csv("prefinal_master_dataset.csv") | en | 0.711921 | # script to join master_beta_csv and road_angle to prepare finaldataset file | 3.081079 | 3 |
DallasPlayers/tit_for_two_tats_random_player.py | fras2560/Competition | 0 | 6623799 | <gh_stars>0
'''
@author: <NAME>
@id: 20652186
@class: CS686
@date: 2016-02-13
@note: contains a player using tit for two tats and jumping randomly at times
'''
from DallasPlayers.player import Player, DEFECT, COOPERATE
import random
class TitForTwoTatsRandomPlayer(Player):
"""
Tit for two Tats player - repeat two opponent's last choice
(cheat if one cheats), jump randomly at times
"""
def studentID(self):
return "20652186"
def agentName(self):
return "Random Tit for Two Tats Player"
def play(self, myHistory, oppHistory1, oppHistory2):
move = DEFECT
if len(oppHistory1) > 1 and len(oppHistory2) > 1:
correct = [COOPERATE, COOPERATE]
if (oppHistory1[-2:] == correct and oppHistory2[-2:] == correct):
move = COOPERATE
else:
if self.first_move(oppHistory1, oppHistory2):
move = COOPERATE
elif oppHistory1[-1] == COOPERATE and oppHistory2[-1] == COOPERATE:
# repeat opponent last choice if both choose corporation
move = COOPERATE
if random.random() < self.JUMP:
# jump moves randomly
move = (move + 1) % 2
return move
| '''
@author: <NAME>
@id: 20652186
@class: CS686
@date: 2016-02-13
@note: contains a player using tit for two tats and jumping randomly at times
'''
from DallasPlayers.player import Player, DEFECT, COOPERATE
import random
class TitForTwoTatsRandomPlayer(Player):
"""
Tit for two Tats player - repeat two opponent's last choice
(cheat if one cheats), jump randomly at times
"""
def studentID(self):
return "20652186"
def agentName(self):
return "Random Tit for Two Tats Player"
def play(self, myHistory, oppHistory1, oppHistory2):
move = DEFECT
if len(oppHistory1) > 1 and len(oppHistory2) > 1:
correct = [COOPERATE, COOPERATE]
if (oppHistory1[-2:] == correct and oppHistory2[-2:] == correct):
move = COOPERATE
else:
if self.first_move(oppHistory1, oppHistory2):
move = COOPERATE
elif oppHistory1[-1] == COOPERATE and oppHistory2[-1] == COOPERATE:
# repeat opponent last choice if both choose corporation
move = COOPERATE
if random.random() < self.JUMP:
# jump moves randomly
move = (move + 1) % 2
return move | en | 0.761675 | @author: <NAME> @id: 20652186 @class: CS686 @date: 2016-02-13 @note: contains a player using tit for two tats and jumping randomly at times Tit for two Tats player - repeat two opponent's last choice (cheat if one cheats), jump randomly at times # repeat opponent last choice if both choose corporation # jump moves randomly | 3.298111 | 3 |
Nitesh-Bhosle-:---Insurance-claim-prediction/code.py | Niteshnupur/nlp-dl-prework | 0 | 6623800 | # --------------
# Data loading and splitting
#The first step - you know the drill by now - load the dataset and see how it looks like. Additionally, split it into train and test set.
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn import metrics
warnings.filterwarnings('ignore')
# Code starts here
# Load dataset using pandas read_csv api in variable df and give file path as path.
file_path = path
print(file_path)
df = pd.read_csv(path)
print(df)
# Display first 5 columns of dataframe df.
df.head(5)
# Store all the features(independent values) in a variable called X
X = df[["age" , "sex" , "bmi" , "children" , "smoker" , "region" , "charges" ]]
print(X)
# Store the target variable (dependent value) in a variable called y
y = df["insuranceclaim"]
print(y)
# Split the dataframe into X_train,X_test,y_train,y_test using train_test_split() function. Use test_size = 0.2 and random_state = 6
train , test = train_test_split(df , test_size = 0.2 , random_state = 6)
X_train = train.drop(["insuranceclaim"] , axis = 1)
y_train = train["insuranceclaim"]
X_test = test.drop(["insuranceclaim"] , axis = 1)
y_test = test["insuranceclaim"]
# Code ends here
# --------------
# Outlier Detection
# Let's plot the box plot to check for the outlier.
import matplotlib.pyplot as plt
# Code starts here
# Plot the boxplot for X_train['bmi'].
plt.boxplot(X_train["bmi"])
# Set quantile equal to 0.95for X_train['bmi']. and store it in variable q_value.
q_value = X_train["bmi"].quantile(0.95)
print(q_value)
# Check the value counts of the y_train
y_train.value_counts()
# Code ends here
# --------------
# Code starts here
# Correlation Check !
#Let's check the pair_plot for feature vs feature. This tells us which features are highly correlated with the other feature and help us predict its better logistic regression model.
# Find the correlation between the features which are stored in 'X_train' and store the result in a variable called 'relation'.
relation = X_train.corr()
print(relation)
# plot pairplot for X_train.
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Predictor check!
#Let's check the count_plot for different features vs target variable insuranceclaim. This tells us which features are highly correlated with the target variable insuranceclaim and help us predict it better.
# Code starts here
# Create a list cols store the columns 'children','sex','region','smoker' in it.
cols = ['children','sex','region','smoker']
print(cols)
type(cols)
# Create subplot with (nrows = 2 , ncols = 2) and store it in variable's fig ,axes
fig , axes = plt.subplots(nrows=2 , ncols=2 , figsize=(30,30))
# Create for loop to iterate through row.
# Create another for loop inside for to access column.
# create variable col and pass cols[ i * 2 + j].
# Using seaborn plot the countplot where x=X_train[col], hue=y_train, ax=axes[i,j]
for i in range(0,2):
for j in range(0,2):
col = cols[i * 2 + j]
sns.countplot(x=X_train[col],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Is my Insurance claim prediction right?
# Now let's come to the actual task, using logistic regression to predict the insuranceclaim. We will select the best model by cross-validation using Grid Search.
# You are given a list of values for regularization parameters for the logistic regression model.
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
print(parameters)
# Instantiate a logistic regression model with LogisticRegression() and pass the parameter as random_state=9 and save it to a variable called 'lr'.
lr = LogisticRegression(random_state=9)
# Inside GridSearchCV() pass estimator as the logistic model, param_grid=parameters. to do grid search on the logistic regression model store the result in variable grid.
grid = GridSearchCV(estimator=lr , param_grid=parameters)
# Fit the model on the training data X_train and y_train.
grid.fit(X_train,y_train)
# Make predictions on the X_test features and save the results in a variable called 'y_pred'.
y_pred = grid.predict(X_test)
# Calculate accuracy for grid and store the result in the variable accuracy
accuracy = accuracy_score(y_test , y_pred)
# print accuracy
print(accuracy)
# Code starts here
# Code ends here
# --------------
# Performance of a classifier !
# Now let's visualize the performance of a binary classifier. Check the performance of the classifier using roc auc curve.
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Calculate the roc_auc_score and store the result in variable score.
score = roc_auc_score(y_test , y_pred)
print(score)
# Predict the probability using grid.predict_proba on X_test and take the second column and store the result in y_pred_proba.
y_pred_proba = grid.predict_proba(X_test)
print(y_pred_proba)
y_pred_proba = y_pred_proba[:,1]
print(y_pred_proba)
# Use metrics.roc_curve to calculate the fpr and tpr and store the result in variables fpr, tpr, _.
fpr , tpr , _ = metrics.roc_curve(y_test , y_pred_proba)
# Calculate the roc_auc score of y_test and y_pred_proba and store it in variable called roc_auc.
roc_auc = roc_auc_score(y_test , y_pred_proba)
print(roc_auc)
# Plot auc curve of 'roc_auc' using the line plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc)).
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
plt.legend(loc = 4)
plt.show()
# Code starts here
# Code ends here
| # --------------
# Data loading and splitting
#The first step - you know the drill by now - load the dataset and see how it looks like. Additionally, split it into train and test set.
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn import metrics
warnings.filterwarnings('ignore')
# Code starts here
# Load dataset using pandas read_csv api in variable df and give file path as path.
file_path = path
print(file_path)
df = pd.read_csv(path)
print(df)
# Display first 5 columns of dataframe df.
df.head(5)
# Store all the features(independent values) in a variable called X
X = df[["age" , "sex" , "bmi" , "children" , "smoker" , "region" , "charges" ]]
print(X)
# Store the target variable (dependent value) in a variable called y
y = df["insuranceclaim"]
print(y)
# Split the dataframe into X_train,X_test,y_train,y_test using train_test_split() function. Use test_size = 0.2 and random_state = 6
train , test = train_test_split(df , test_size = 0.2 , random_state = 6)
X_train = train.drop(["insuranceclaim"] , axis = 1)
y_train = train["insuranceclaim"]
X_test = test.drop(["insuranceclaim"] , axis = 1)
y_test = test["insuranceclaim"]
# Code ends here
# --------------
# Outlier Detection
# Let's plot the box plot to check for the outlier.
import matplotlib.pyplot as plt
# Code starts here
# Plot the boxplot for X_train['bmi'].
plt.boxplot(X_train["bmi"])
# Set quantile equal to 0.95for X_train['bmi']. and store it in variable q_value.
q_value = X_train["bmi"].quantile(0.95)
print(q_value)
# Check the value counts of the y_train
y_train.value_counts()
# Code ends here
# --------------
# Code starts here
# Correlation Check !
#Let's check the pair_plot for feature vs feature. This tells us which features are highly correlated with the other feature and help us predict its better logistic regression model.
# Find the correlation between the features which are stored in 'X_train' and store the result in a variable called 'relation'.
relation = X_train.corr()
print(relation)
# plot pairplot for X_train.
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Predictor check!
#Let's check the count_plot for different features vs target variable insuranceclaim. This tells us which features are highly correlated with the target variable insuranceclaim and help us predict it better.
# Code starts here
# Create a list cols store the columns 'children','sex','region','smoker' in it.
cols = ['children','sex','region','smoker']
print(cols)
type(cols)
# Create subplot with (nrows = 2 , ncols = 2) and store it in variable's fig ,axes
fig , axes = plt.subplots(nrows=2 , ncols=2 , figsize=(30,30))
# Create for loop to iterate through row.
# Create another for loop inside for to access column.
# create variable col and pass cols[ i * 2 + j].
# Using seaborn plot the countplot where x=X_train[col], hue=y_train, ax=axes[i,j]
for i in range(0,2):
for j in range(0,2):
col = cols[i * 2 + j]
sns.countplot(x=X_train[col],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Is my Insurance claim prediction right?
# Now let's come to the actual task, using logistic regression to predict the insuranceclaim. We will select the best model by cross-validation using Grid Search.
# You are given a list of values for regularization parameters for the logistic regression model.
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
print(parameters)
# Instantiate a logistic regression model with LogisticRegression() and pass the parameter as random_state=9 and save it to a variable called 'lr'.
lr = LogisticRegression(random_state=9)
# Inside GridSearchCV() pass estimator as the logistic model, param_grid=parameters. to do grid search on the logistic regression model store the result in variable grid.
grid = GridSearchCV(estimator=lr , param_grid=parameters)
# Fit the model on the training data X_train and y_train.
grid.fit(X_train,y_train)
# Make predictions on the X_test features and save the results in a variable called 'y_pred'.
y_pred = grid.predict(X_test)
# Calculate accuracy for grid and store the result in the variable accuracy
accuracy = accuracy_score(y_test , y_pred)
# print accuracy
print(accuracy)
# Code starts here
# Code ends here
# --------------
# Performance of a classifier !
# Now let's visualize the performance of a binary classifier. Check the performance of the classifier using roc auc curve.
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Calculate the roc_auc_score and store the result in variable score.
score = roc_auc_score(y_test , y_pred)
print(score)
# Predict the probability using grid.predict_proba on X_test and take the second column and store the result in y_pred_proba.
y_pred_proba = grid.predict_proba(X_test)
print(y_pred_proba)
y_pred_proba = y_pred_proba[:,1]
print(y_pred_proba)
# Use metrics.roc_curve to calculate the fpr and tpr and store the result in variables fpr, tpr, _.
fpr , tpr , _ = metrics.roc_curve(y_test , y_pred_proba)
# Calculate the roc_auc score of y_test and y_pred_proba and store it in variable called roc_auc.
roc_auc = roc_auc_score(y_test , y_pred_proba)
print(roc_auc)
# Plot auc curve of 'roc_auc' using the line plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc)).
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
plt.legend(loc = 4)
plt.show()
# Code starts here
# Code ends here
| en | 0.769892 | # -------------- # Data loading and splitting #The first step - you know the drill by now - load the dataset and see how it looks like. Additionally, split it into train and test set. # import the libraries # Code starts here # Load dataset using pandas read_csv api in variable df and give file path as path. # Display first 5 columns of dataframe df. # Store all the features(independent values) in a variable called X # Store the target variable (dependent value) in a variable called y # Split the dataframe into X_train,X_test,y_train,y_test using train_test_split() function. Use test_size = 0.2 and random_state = 6 # Code ends here # -------------- # Outlier Detection # Let's plot the box plot to check for the outlier. # Code starts here # Plot the boxplot for X_train['bmi']. # Set quantile equal to 0.95for X_train['bmi']. and store it in variable q_value. # Check the value counts of the y_train # Code ends here # -------------- # Code starts here # Correlation Check ! #Let's check the pair_plot for feature vs feature. This tells us which features are highly correlated with the other feature and help us predict its better logistic regression model. # Find the correlation between the features which are stored in 'X_train' and store the result in a variable called 'relation'. # plot pairplot for X_train. # Code ends here # -------------- # Predictor check! #Let's check the count_plot for different features vs target variable insuranceclaim. This tells us which features are highly correlated with the target variable insuranceclaim and help us predict it better. # Code starts here # Create a list cols store the columns 'children','sex','region','smoker' in it. # Create subplot with (nrows = 2 , ncols = 2) and store it in variable's fig ,axes # Create for loop to iterate through row. # Create another for loop inside for to access column. # create variable col and pass cols[ i * 2 + j]. 
# Using seaborn plot the countplot where x=X_train[col], hue=y_train, ax=axes[i,j] # Code ends here # -------------- # Is my Insurance claim prediction right? # Now let's come to the actual task, using logistic regression to predict the insuranceclaim. We will select the best model by cross-validation using Grid Search. # You are given a list of values for regularization parameters for the logistic regression model. # parameters for grid search # Instantiate a logistic regression model with LogisticRegression() and pass the parameter as random_state=9 and save it to a variable called 'lr'. # Inside GridSearchCV() pass estimator as the logistic model, param_grid=parameters. to do grid search on the logistic regression model store the result in variable grid. # Fit the model on the training data X_train and y_train. # Make predictions on the X_test features and save the results in a variable called 'y_pred'. # Calculate accuracy for grid and store the result in the variable accuracy # print accuracy # Code starts here # Code ends here # -------------- # Performance of a classifier ! # Now let's visualize the performance of a binary classifier. Check the performance of the classifier using roc auc curve. # Calculate the roc_auc_score and store the result in variable score. # Predict the probability using grid.predict_proba on X_test and take the second column and store the result in y_pred_proba. # Use metrics.roc_curve to calculate the fpr and tpr and store the result in variables fpr, tpr, _. # Calculate the roc_auc score of y_test and y_pred_proba and store it in variable called roc_auc. # Plot auc curve of 'roc_auc' using the line plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc)). # Code starts here # Code ends here | 3.877106 | 4 |
bridgedata/models/gcbc_images_context.py | yanlai00/bridge_data_imitation_learning | 8 | 6623801 | import numpy as np
import pdb
import torch
import os
import torch.nn as nn
import torch.nn.functional as F
from bridgedata.utils.general_utils import AttrDict
from bridgedata.utils.general_utils import select_indices, trch2npy
from bridgedata.models.base_model import BaseModel
from bridgedata.models.utils.resnet import get_resnet_encoder
from bridgedata.models.utils.subnetworks import ConvEncoder
from bridgedata.models.utils.layers import BaseProcessingNet
from bridgedata.utils.general_utils import np_unstack
from bridgedata.models.utils.spatial_softmax import SpatialSoftmax
from bridgedata.data_sets.data_augmentation import get_random_crop
from bridgedata.models.gcbc_images import GCBCImages
from bridgedata.models.gcbc_images import get_tlen_from_padmask
import cv2
from bridgedata.models.gcbc_images import GeneralImageEncoder
class GCBCImagesContext(GCBCImages):
    """Goal-conditioned behavioral cloning from images with trajectory context.

    In addition to the current (and optionally goal) image, the policy is
    conditioned on ``num_context`` (image, action) pairs taken from the same
    trajectory: the context images are encoded and averaged, and the context
    actions are concatenated onto the embedding fed to the action heads.
    """

    def __init__(self, overrideparams, logger):
        super().__init__(overrideparams, logger)
        # Re-resolve the hyperparameters so the defaults this subclass adds
        # (encoder_embedding_size, num_context) take part in the override.
        self._hp = self._default_hparams()
        self._override_defaults(overrideparams)  # override defaults with config file

    def _default_hparams(self):
        default_dict = AttrDict(
            encoder_embedding_size=128,  # output size of the image encoder
            num_context=3,               # number of (image, action) context pairs
        )
        # add new params to parent params
        parent_params = super()._default_hparams()
        parent_params.update(default_dict)
        return parent_params

    def build_network(self):
        """Build the image encoder and the action-prediction heads.

        Raises NotImplementedError unless a resnet encoder is configured.
        """
        if self._hp.resnet is not None:
            self.encoder = GeneralImageEncoder(self._hp.resnet, out_dim=self._hp.encoder_embedding_size,
                                               use_spatial_softmax=self._hp.encoder_spatial_softmax)
            # current-image embedding + averaged context embedding + flattened context actions
            self.embedding_size = self._hp.encoder_embedding_size*2 + self._hp.action_dim*self._hp.num_context
            if self._hp.goal_cond:
                input_dim = 2*self.embedding_size  # current and goal embeddings concatenated
            else:
                input_dim = self.embedding_size
        else:
            raise NotImplementedError
        self.action_predictor = BaseProcessingNet(input_dim, mid_dim=256, out_dim=self._hp.action_dim, num_layers=2)
        self.future_action_predictor = BaseProcessingNet(input_dim, mid_dim=256,
                                                         out_dim=self._hp.action_dim*self._hp.extra_horizon, num_layers=3)
        if self._hp.domain_class_mult:
            assert self._hp.num_domains > 1
            self.classifier = BaseProcessingNet(input_dim, mid_dim=256,
                                                out_dim=self._hp.num_domains, num_layers=3)

    def get_context(self, actions, batch_size, images, tstart_context):
        """Slice per-example context windows of length ``num_context``.

        ``tstart_context`` gives, per batch element, the first timestep of the
        context window inside the padded trajectory tensors.
        """
        context_actions = []
        context_images = []
        for b in range(batch_size):
            context_actions.append(actions[b, tstart_context[b]:tstart_context[b] + self._hp.num_context])
            context_images.append(images[b, tstart_context[b]:tstart_context[b] + self._hp.num_context])
        context_actions = torch.stack(context_actions, dim=0)
        context_images = torch.stack(context_images, dim=0)
        return AttrDict(actions=context_actions, images=context_images)

    def get_embedding(self, pred_input, context):
        """Encode the current image and fuse it with the context embedding."""
        # Encoder is hard-wired to 48x64 RGB inputs.
        assert np.all(np.array(pred_input.shape[-3:]) == np.array([3, 48, 64]))
        embedding = self.encoder(pred_input)
        # Encode each context frame separately. Squeeze only the split-off
        # time dimension (dim 1): a bare .squeeze() would also collapse the
        # batch dimension whenever the batch size is 1.
        context_emb = [self.encoder(c.squeeze(1)) for c in torch.split(context.images, 1, 1)]
        context_emb = torch.stack(context_emb, dim=0).mean(dim=0)
        context_actions = torch.unbind(context.actions, 1)
        return torch.cat([embedding, context_emb, *context_actions], dim=1)

    def get_context_image_rows(self):
        """Return the context images tiled into horizontal rows for logging."""
        context_images = torch.unbind(self.context.images, dim=1)
        image_rows = []
        for context_image in context_images:
            # de-normalize from [-1, 1] to [0, 1] and tile the batch along the width
            row = trch2npy(torch.cat(torch.unbind((context_image + 1)/2, dim=0), dim=2)).transpose(1, 2, 0)
            image_rows.append(row)
        return image_rows
| import numpy as np
import pdb
import torch
import os
import torch.nn as nn
import torch.nn.functional as F
from bridgedata.utils.general_utils import AttrDict
from bridgedata.utils.general_utils import select_indices, trch2npy
from bridgedata.models.base_model import BaseModel
from bridgedata.models.utils.resnet import get_resnet_encoder
from bridgedata.models.utils.subnetworks import ConvEncoder
from bridgedata.models.utils.layers import BaseProcessingNet
from bridgedata.utils.general_utils import np_unstack
from bridgedata.models.utils.spatial_softmax import SpatialSoftmax
from bridgedata.data_sets.data_augmentation import get_random_crop
from bridgedata.models.gcbc_images import GCBCImages
from bridgedata.models.gcbc_images import get_tlen_from_padmask
import cv2
from bridgedata.models.gcbc_images import GeneralImageEncoder
class GCBCImagesContext(GCBCImages):
    """Goal-conditioned behavioral cloning from images with trajectory context.

    In addition to the current (and optionally goal) image, the policy is
    conditioned on ``num_context`` (image, action) pairs taken from the same
    trajectory: the context images are encoded and averaged, and the context
    actions are concatenated onto the embedding fed to the action heads.
    """

    def __init__(self, overrideparams, logger):
        super().__init__(overrideparams, logger)
        # Re-resolve the hyperparameters so the defaults this subclass adds
        # (encoder_embedding_size, num_context) take part in the override.
        self._hp = self._default_hparams()
        self._override_defaults(overrideparams)  # override defaults with config file

    def _default_hparams(self):
        default_dict = AttrDict(
            encoder_embedding_size=128,  # output size of the image encoder
            num_context=3,               # number of (image, action) context pairs
        )
        # add new params to parent params
        parent_params = super()._default_hparams()
        parent_params.update(default_dict)
        return parent_params

    def build_network(self):
        """Build the image encoder and the action-prediction heads.

        Raises NotImplementedError unless a resnet encoder is configured.
        """
        if self._hp.resnet is not None:
            self.encoder = GeneralImageEncoder(self._hp.resnet, out_dim=self._hp.encoder_embedding_size,
                                               use_spatial_softmax=self._hp.encoder_spatial_softmax)
            # current-image embedding + averaged context embedding + flattened context actions
            self.embedding_size = self._hp.encoder_embedding_size*2 + self._hp.action_dim*self._hp.num_context
            if self._hp.goal_cond:
                input_dim = 2*self.embedding_size  # current and goal embeddings concatenated
            else:
                input_dim = self.embedding_size
        else:
            raise NotImplementedError
        self.action_predictor = BaseProcessingNet(input_dim, mid_dim=256, out_dim=self._hp.action_dim, num_layers=2)
        self.future_action_predictor = BaseProcessingNet(input_dim, mid_dim=256,
                                                         out_dim=self._hp.action_dim*self._hp.extra_horizon, num_layers=3)
        if self._hp.domain_class_mult:
            assert self._hp.num_domains > 1
            self.classifier = BaseProcessingNet(input_dim, mid_dim=256,
                                                out_dim=self._hp.num_domains, num_layers=3)

    def get_context(self, actions, batch_size, images, tstart_context):
        """Slice per-example context windows of length ``num_context``.

        ``tstart_context`` gives, per batch element, the first timestep of the
        context window inside the padded trajectory tensors.
        """
        context_actions = []
        context_images = []
        for b in range(batch_size):
            context_actions.append(actions[b, tstart_context[b]:tstart_context[b] + self._hp.num_context])
            context_images.append(images[b, tstart_context[b]:tstart_context[b] + self._hp.num_context])
        context_actions = torch.stack(context_actions, dim=0)
        context_images = torch.stack(context_images, dim=0)
        return AttrDict(actions=context_actions, images=context_images)

    def get_embedding(self, pred_input, context):
        """Encode the current image and fuse it with the context embedding."""
        # Encoder is hard-wired to 48x64 RGB inputs.
        assert np.all(np.array(pred_input.shape[-3:]) == np.array([3, 48, 64]))
        embedding = self.encoder(pred_input)
        # Encode each context frame separately. Squeeze only the split-off
        # time dimension (dim 1): a bare .squeeze() would also collapse the
        # batch dimension whenever the batch size is 1.
        context_emb = [self.encoder(c.squeeze(1)) for c in torch.split(context.images, 1, 1)]
        context_emb = torch.stack(context_emb, dim=0).mean(dim=0)
        context_actions = torch.unbind(context.actions, 1)
        return torch.cat([embedding, context_emb, *context_actions], dim=1)

    def get_context_image_rows(self):
        """Return the context images tiled into horizontal rows for logging."""
        context_images = torch.unbind(self.context.images, dim=1)
        image_rows = []
        for context_image in context_images:
            # de-normalize from [-1, 1] to [0, 1] and tile the batch along the width
            row = trch2npy(torch.cat(torch.unbind((context_image + 1)/2, dim=0), dim=2)).transpose(1, 2, 0)
            image_rows.append(row)
        return image_rows
| en | 0.596231 | # override defaults with config file # add new params to parent params | 1.593491 | 2 |
process_data.py | Shiqan/VgOversight | 0 | 6623802 | <reponame>Shiqan/VgOversight
import datetime
from sqlalchemy.exc import SQLAlchemyError
from flask_app import app, db
from models import Team, Guild, Match, Roster, Participant, Player
def process_batch_query(matches):
    """Scan a batch API payload and persist matches involving known rosters.

    For every match, each roster's player ids are compared against the
    registered members of all Teams and Guilds in the database; when any
    roster matches, the match plus its rosters, participants and players
    are written to the database via the process_* helpers.
    """
    teams = db.session.query(Team).all()
    teams = [(team.id, {member.id for member in team._members}) for team in teams]
    guilds = db.session.query(Guild).all()
    guilds = [(guild.id, {member.id for member in guild._members}) for guild in guilds]
    for match in matches['data']:
        team_roster = {}   # roster id -> matched team id
        guild_roster = {}  # roster id -> matched guild id
        for roster in match['relationships']['rosters']['data']:
            roster_data = [i for i in matches['included'] if i['id'] == roster['id']]
            participants = set()
            for participant in roster_data[0]['relationships']['participants']['data']:
                participant_data = [i['relationships']['player']['data']['id'] for i in matches['included'] if
                                    i['id'] == participant['id']]
                participants.add(participant_data[0])
            for team_id, members in teams:
                # <= (subset or equal): a roster consisting of exactly the
                # registered members must match too; the previous strict
                # subset test (<) silently skipped that case.
                if participants <= members:
                    team_roster[roster['id']] = team_id
            for guild_id, members in guilds:
                if participants <= members:
                    guild_roster[roster['id']] = guild_id
        if team_roster or guild_roster:
            process_match(match)
            createdAt = datetime.datetime.strptime(match['attributes']['createdAt'], '%Y-%m-%dT%H:%M:%SZ')
            shardId = match['attributes']['shardId']
            for roster in match['relationships']['rosters']['data']:
                roster_data = [i for i in matches['included'] if i['id'] == roster['id']]
                assert len(roster_data) == 1
                # Attach team/guild ids only for rosters matched above.
                team_id = team_roster.get(roster['id'])
                guild_id = guild_roster.get(roster['id'])
                process_roster(roster_data[0], match['id'], team_id=team_id, guild_id=guild_id)
                for participant in roster_data[0]['relationships']['participants']['data']:
                    participant_data = [i for i in matches['included'] if i['id'] == participant['id']]
                    assert len(participant_data) == 1
                    player_data = [i for i in matches['included'] if
                                   i['id'] == participant_data[0]['relationships']['player']['data']['id']]
                    assert len(player_data) == 1
                    process_player(player_data[0], region=shardId)
                    process_participant(participant_data[0], roster['id'], createdAt=createdAt)
def process_match(data):
    """Insert a Match row for *data*; a pre-existing id is left untouched."""
    if db.session.query(Match).get(data['id']):
        return
    attrs = data['attributes']
    row = Match(
        id=data['id'],
        createdAt=datetime.datetime.strptime(attrs['createdAt'], '%Y-%m-%dT%H:%M:%SZ'),
        duration=attrs['duration'],
        gameMode=attrs['gameMode'],
        patchVersion=attrs['patchVersion'],
        shardId=attrs['shardId'],
        endGameReason=attrs['stats']['endGameReason'],
        queue=attrs['stats']['queue'],
    )
    db.session.add(row)
    try:
        db.session.commit()
    except SQLAlchemyError as e:
        # Keep the session usable after a failed commit.
        db.session.rollback()
        app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
def process_roster(data, match_id, team_id=None, guild_id=None):
    """Insert a Roster row for *data*, skipping ids that already exist.

    data     -- API "roster" resource dict (attributes.stats holds the
                per-roster match statistics).
    match_id -- id of the parent Match row.
    team_id / guild_id -- optional ids of a Team/Guild the roster was
                matched against by the caller; stored when provided.
    """
    test = db.session.query(Roster).get(data['id'])
    if not test:
        # NOTE(review): the "turrent*" keyword names appear to mirror
        # misspelled Roster column names while reading the correctly
        # spelled "turret*" API stats -- confirm against the model.
        r = Roster(id=data['id'], match_id=match_id,
                   acesEarned=data['attributes']['stats']['acesEarned'],
                   gold=data['attributes']['stats']['gold'],
                   heroKills=data['attributes']['stats']['heroKills'],
                   krakenCaptures=data['attributes']['stats']['krakenCaptures'],
                   side=data['attributes']['stats']['side'],
                   turrentKills=data['attributes']['stats']['turretKills'],
                   turrentsRemaining=data['attributes']['stats']['turretsRemaining'],
                   team_api=data['relationships']['team']['data'])
        if team_id:
            r.team_id = team_id
        if guild_id:
            r.guild_id = guild_id
        db.session.add(r)
        try:
            db.session.commit()
        except SQLAlchemyError as e:
            # Keep the session usable after a failed commit.
            db.session.rollback()
            app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
def process_participant(data, roster_id, createdAt=None):
    """Insert a Participant row for *data*, skipping ids that already exist.

    data      -- API "participant" resource dict.
    roster_id -- id of the parent Roster row.
    createdAt -- optional datetime copied from the parent match; assigned
                 to the row only when provided.
    """
    test = db.session.query(Participant).get(data['id'])
    if not test:
        # NOTE(review): goldMindCaptures / turrentCaptures presumably mirror
        # misspelled Participant column names while reading the correctly
        # spelled API stats -- confirm against the model.
        p = Participant(id=data['id'], roster_id=roster_id,
                        player_id=data['relationships']['player']['data']['id'],
                        actor=data['attributes']['actor'],
                        kills=data['attributes']['stats']['kills'],
                        assists=data['attributes']['stats']['assists'],
                        deaths=data['attributes']['stats']['deaths'],
                        jungleKills=data['attributes']['stats']['jungleKills'],
                        crystalMineCaptures=data['attributes']['stats']['crystalMineCaptures'],
                        goldMindCaptures=data['attributes']['stats']['goldMineCaptures'],
                        krakenCaptures=data['attributes']['stats']['krakenCaptures'],
                        turrentCaptures=data['attributes']['stats']['turretCaptures'],
                        winner=data['attributes']['stats']['winner'],
                        farm=data['attributes']['stats']['farm'],
                        minionKills=data['attributes']['stats']['minionKills'],
                        nonJungleMinionKills=data['attributes']['stats']['nonJungleMinionKills'],
                        firstAfkTime=data['attributes']['stats']['firstAfkTime'],
                        wentAfk=data['attributes']['stats']['wentAfk'],
                        itemGrants=data['attributes']['stats']['itemGrants'],
                        itemSells=data['attributes']['stats']['itemSells'],
                        itemUses=data['attributes']['stats']['itemUses'],
                        items=data['attributes']['stats']['items'],
                        skinKey=data['attributes']['stats']['skinKey'],
                        karmaLevel=data['attributes']['stats']['karmaLevel'],
                        level=data['attributes']['stats']['level'],
                        skillTier=data['attributes']['stats']['skillTier'])
        if createdAt:
            p.createdAt = createdAt
        db.session.add(p)
        try:
            db.session.commit()
        except SQLAlchemyError as e:
            # Keep the session usable after a failed commit.
            db.session.rollback()
            app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
def process_player(data, region="eu"):
    """Insert or update a Player row from an API "player" payload.

    data   -- API player resource (id / attributes.name / attributes.stats).
    region -- shard id recorded for newly created players only; an existing
              player's shardId is deliberately left untouched (matches the
              original behavior).
    """
    stats = data['attributes']['stats']
    # Single source of truth for the stat columns so the insert and update
    # branches cannot drift apart (they previously duplicated this mapping).
    stat_fields = {
        'lifetimeGold': stats['lifetimeGold'],
        'lossStreak': stats['lossStreak'],
        'winStreak': stats['winStreak'],
        'played': stats['played'],
        'played_ranked': stats['played_ranked'],
        'wins': stats['wins'],
        'xp': stats['xp'],
    }
    player = db.session.query(Player).get(data['id'])
    if not player:
        player = Player(id=data['id'], name=data['attributes']['name'],
                        shardId=region, **stat_fields)
        db.session.add(player)
    else:
        for field, value in stat_fields.items():
            setattr(player, field, value)
    try:
        db.session.commit()
    except SQLAlchemyError as e:
        # Keep the session usable after a failed commit.
        db.session.rollback()
        app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
| import datetime
from sqlalchemy.exc import SQLAlchemyError
from flask_app import app, db
from models import Team, Guild, Match, Roster, Participant, Player
def process_batch_query(matches):
    """Scan a batch API payload and persist matches involving known rosters.

    For every match, each roster's player ids are compared against the
    registered members of all Teams and Guilds in the database; when any
    roster matches, the match plus its rosters, participants and players
    are written to the database via the process_* helpers.
    """
    teams = db.session.query(Team).all()
    teams = [(team.id, {member.id for member in team._members}) for team in teams]
    guilds = db.session.query(Guild).all()
    guilds = [(guild.id, {member.id for member in guild._members}) for guild in guilds]
    for match in matches['data']:
        team_roster = {}   # roster id -> matched team id
        guild_roster = {}  # roster id -> matched guild id
        for roster in match['relationships']['rosters']['data']:
            roster_data = [i for i in matches['included'] if i['id'] == roster['id']]
            participants = set()
            for participant in roster_data[0]['relationships']['participants']['data']:
                participant_data = [i['relationships']['player']['data']['id'] for i in matches['included'] if
                                    i['id'] == participant['id']]
                participants.add(participant_data[0])
            for team_id, members in teams:
                # <= (subset or equal): a roster consisting of exactly the
                # registered members must match too; the previous strict
                # subset test (<) silently skipped that case.
                if participants <= members:
                    team_roster[roster['id']] = team_id
            for guild_id, members in guilds:
                if participants <= members:
                    guild_roster[roster['id']] = guild_id
        if team_roster or guild_roster:
            process_match(match)
            createdAt = datetime.datetime.strptime(match['attributes']['createdAt'], '%Y-%m-%dT%H:%M:%SZ')
            shardId = match['attributes']['shardId']
            for roster in match['relationships']['rosters']['data']:
                roster_data = [i for i in matches['included'] if i['id'] == roster['id']]
                assert len(roster_data) == 1
                # Attach team/guild ids only for rosters matched above.
                team_id = team_roster.get(roster['id'])
                guild_id = guild_roster.get(roster['id'])
                process_roster(roster_data[0], match['id'], team_id=team_id, guild_id=guild_id)
                for participant in roster_data[0]['relationships']['participants']['data']:
                    participant_data = [i for i in matches['included'] if i['id'] == participant['id']]
                    assert len(participant_data) == 1
                    player_data = [i for i in matches['included'] if
                                   i['id'] == participant_data[0]['relationships']['player']['data']['id']]
                    assert len(player_data) == 1
                    process_player(player_data[0], region=shardId)
                    process_participant(participant_data[0], roster['id'], createdAt=createdAt)
def process_match(data):
test = db.session.query(Match).get(data['id'])
if not test:
m = Match(id=data['id'],
createdAt=datetime.datetime.strptime(data['attributes']['createdAt'], '%Y-%m-%dT%H:%M:%SZ'),
duration=data['attributes']['duration'],
gameMode=data['attributes']['gameMode'],
patchVersion=data['attributes']['patchVersion'],
shardId=data['attributes']['shardId'],
endGameReason=data['attributes']['stats']['endGameReason'],
queue=data['attributes']['stats']['queue'])
db.session.add(m)
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
def process_roster(data, match_id, team_id=None, guild_id=None):
test = db.session.query(Roster).get(data['id'])
if not test:
r = Roster(id=data['id'], match_id=match_id,
acesEarned=data['attributes']['stats']['acesEarned'],
gold=data['attributes']['stats']['gold'],
heroKills=data['attributes']['stats']['heroKills'],
krakenCaptures=data['attributes']['stats']['krakenCaptures'],
side=data['attributes']['stats']['side'],
turrentKills=data['attributes']['stats']['turretKills'],
turrentsRemaining=data['attributes']['stats']['turretsRemaining'],
team_api=data['relationships']['team']['data'])
if team_id:
r.team_id = team_id
if guild_id:
r.guild_id = guild_id
db.session.add(r)
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
def process_participant(data, roster_id, createdAt=None):
test = db.session.query(Participant).get(data['id'])
if not test:
p = Participant(id=data['id'], roster_id=roster_id,
player_id=data['relationships']['player']['data']['id'],
actor=data['attributes']['actor'],
kills=data['attributes']['stats']['kills'],
assists=data['attributes']['stats']['assists'],
deaths=data['attributes']['stats']['deaths'],
jungleKills=data['attributes']['stats']['jungleKills'],
crystalMineCaptures=data['attributes']['stats']['crystalMineCaptures'],
goldMindCaptures=data['attributes']['stats']['goldMineCaptures'],
krakenCaptures=data['attributes']['stats']['krakenCaptures'],
turrentCaptures=data['attributes']['stats']['turretCaptures'],
winner=data['attributes']['stats']['winner'],
farm=data['attributes']['stats']['farm'],
minionKills=data['attributes']['stats']['minionKills'],
nonJungleMinionKills=data['attributes']['stats']['nonJungleMinionKills'],
firstAfkTime=data['attributes']['stats']['firstAfkTime'],
wentAfk=data['attributes']['stats']['wentAfk'],
itemGrants=data['attributes']['stats']['itemGrants'],
itemSells=data['attributes']['stats']['itemSells'],
itemUses=data['attributes']['stats']['itemUses'],
items=data['attributes']['stats']['items'],
skinKey=data['attributes']['stats']['skinKey'],
karmaLevel=data['attributes']['stats']['karmaLevel'],
level=data['attributes']['stats']['level'],
skillTier=data['attributes']['stats']['skillTier'])
if createdAt:
p.createdAt = createdAt
db.session.add(p)
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
def process_player(data, region="eu"):
test = db.session.query(Player).get(data['id'])
if not test:
p = Player(id=data['id'], name=data['attributes']['name'],
shardId=region,
lifetimeGold=data['attributes']['stats']['lifetimeGold'],
lossStreak=data['attributes']['stats']['lossStreak'],
winStreak=data['attributes']['stats']['winStreak'],
played=data['attributes']['stats']['played'],
played_ranked=data['attributes']['stats']['played_ranked'],
wins=data['attributes']['stats']['wins'],
xp=data['attributes']['stats']['xp'])
db.session.add(p)
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e))
else:
test.lifetimeGold = data['attributes']['stats']['lifetimeGold']
test.lossStreak = data['attributes']['stats']['lossStreak']
test.winStreak = data['attributes']['stats']['winStreak']
test.played = data['attributes']['stats']['played']
test.played_ranked = data['attributes']['stats']['played_ranked']
test.wins = data['attributes']['stats']['wins']
test.xp = data['attributes']['stats']['xp']
try:
db.session.commit()
except SQLAlchemyError as e:
db.session.rollback()
app.logger.error('ERROR: Session rollback - reason "%s"' % str(e)) | none | 1 | 2.521178 | 3 | |
brinagen/__init__.py | belboo/brinagen | 0 | 6623803 | <reponame>belboo/brinagen
# Public API of the brinagen package: the snp_dict and tools submodules.
__all__ = ['snp_dict', 'tools']
| __all__ = ['snp_dict', 'tools'] | none | 1 | 1.105056 | 1 | |
"""Render two rows of repeated text onto a white canvas and save it as JPEG.

The original mixed legacy PIL 1.x imports (``import Image`` etc.) with the
modern ``from PIL import ...`` form; the legacy modules do not exist in
current Pillow, so only the package-qualified imports are used here.
"""
from PIL import Image, ImageDraw, ImageFont

# 1250x480 white RGB canvas.
img = Image.new("RGB", (1250, 480), (255, 255, 255))
draw = ImageDraw.Draw(img)
# NOTE(review): macOS-only font path; fails on other systems -- confirm target platform.
font = ImageFont.truetype("/System/Library/Fonts/Monaco.dfont", 20, encoding="armn")
draw.text((20, 20), "<- 10 ->" * 10, font=font, fill="black")
draw.text((20, 40), "<- 10 ->" * 10, font=font, fill="black")
img.save("foo.jpg", "JPEG")
| import Image
from PIL import Image
import ImageFont
img = Image.new("RGB", (1250, 480), (255, 255, 255))
import ImageDraw
draw = ImageDraw.Draw(img)
font = ImageFont.truetype("/System/Library/Fonts/Monaco.dfont", 20, encoding="armn")
draw.text((20, 20), "<- 10 ->" * 10, font=font, fill="black")
draw.text((20, 40), "<- 10 ->" * 10, font=font, fill="black")
img.save("foo.jpg", "JPEG")
| none | 1 | 3.070716 | 3 | |
main.py | hjayaweera/random_select | 0 | 6623805 | <gh_stars>0
import random
import numpy as np
import matplotlib.pyplot as plt
class File(object):
file_name="test.txt"
file_mode="r"
def __init__(self,file_name="test.txt",file_mode="r"):
self.file_name=file_name
self.file_mode=file_mode
def read_file(self):
if(self.file_mode=="r"):
f = open(self.file_name,self.file_mode)
message = f.read().split('\n')
f.close()
return message
def append_file(self,message):
if(self.file_mode=="a"):
f = open(self.file_name,self.file_mode)
f.write(message+"\n")
f.close()
def write_file(self,message):
if(self.file_mode=="w"):
f = open(self.file_name,self.file_mode)
f.write(message)
f.close()
class Rand(object):
    """Uniform random selection from a prescribed pool of items."""
    list_in = ""

    def __init__(self, list_in):
        self.list_in = list_in

    def select_rand(self, no_of_items=1):
        # if list is short send everything available
        # NOTE(review): no_of_items is currently ignored; exactly one element
        # is returned regardless -- confirm before relying on it.
        return random.choice(self.list_in)
class select_rand(object):
    """Remembers the requested selection count; selection itself is not implemented."""
    n = 1

    def __init__(self):
        pass

    def select(self, list, n=1):
        # Only records the requested count -- no item is actually selected.
        self.n = n
def main():
    """Move one randomly chosen line from input_file.txt to output_file.txt.

    Reads all lines, picks one at random, appends it to the output file and
    rewrites the input file without the picked entry, then prints the pick.
    """
    available_list = File("input_file.txt", "r").read_file()
    picked = Rand(available_list).select_rand(1)
    available_list.remove(picked)
    File("output_file.txt", "a").append_file(picked)
    # Rewrite the input file without the picked entry.
    File("input_file.txt", "w").write_file('\n'.join(available_list))
    print(picked)


# Guarded so importing this module no longer triggers the file I/O.
if __name__ == "__main__":
    main()
| import random
import numpy as np
import matplotlib.pyplot as plt
class File(object):
file_name="test.txt"
file_mode="r"
def __init__(self,file_name="test.txt",file_mode="r"):
self.file_name=file_name
self.file_mode=file_mode
def read_file(self):
if(self.file_mode=="r"):
f = open(self.file_name,self.file_mode)
message = f.read().split('\n')
f.close()
return message
def append_file(self,message):
if(self.file_mode=="a"):
f = open(self.file_name,self.file_mode)
f.write(message+"\n")
f.close()
def write_file(self,message):
if(self.file_mode=="w"):
f = open(self.file_name,self.file_mode)
f.write(message)
f.close()
class Rand(object):
list_in=""
def __init__(self,list_in):
self.list_in=list_in
def select_rand(self,no_of_items=1):
#if list is short send everything available
selected=random.choice(self.list_in)
return selected
class select_rand(object):
n=1
def __init__(self):
pass
def select(self,list,n=1):
self.n =n
f1=File("input_file.txt","r")
f2=File("output_file.txt","a");
available_list=f1.read_file()
r=Rand(available_list)
m=r.select_rand(1)
available_list.remove(m)
f2.append_file(m);
f3=File("input_file.txt","w");
f3.write_file('\n'.join(available_list));
print(m) | en | 0.783711 | #if list is short send everything available | 3.631441 | 4 |
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html

# -- Path setup --------------------------------------------------------------

# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
# All imports grouped at the top (previously `from os.path import ...` sat
# mid-file, violating PEP 8).
from os.path import join, dirname, realpath, expandvars

# Make the bundled cldomain extension importable.
sys.path.insert(0, os.path.abspath('./sphinxcontrib-cldomain/sphinxcontrib'))

# -- Project information -----------------------------------------------------

project = 'cl-tg-bot'
copyright = '2019, <NAME>'
author = '<NAME>'

# -- General configuration ---------------------------------------------------

# Sphinx extension modules: the Common Lisp domain plus Hyperspec linking.
extensions = [
    'sphinxcontrib.cldomain',
    'sphinxcontrib.hyperspec'
]

# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']

# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'src/**']

# -- Options for HTML output -------------------------------------------------

# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'

# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']

html_sidebars = {
    '**': [
        'about.html',
        'navigation.html',
        'relations.html',
        'searchbox.html',
        'donate.html',
    ]
}

html_css_files = [
    'style.css',
]

html_theme_options = {
}

# --- CL domain customizations:
#
# cl_systems: The systems and packages from which to extract documentation:
#
#   name     - The name of the system to load.
#   path     - The path to the system.
#   packages - A list of the packages to extract symbol information from.
#
# Note: This conf.py sits in a subdirectory below ("../"), relative to where
# the "cl-telegram-bot.asd" system description file lives:
cl_systems = [{"name": "cl-telegram-bot",
               "path": join(dirname(realpath(__file__)), "../"),
               "packages": ["cl-telegram-bot"]}]

# cl_quicklisp: where Quicklisp is installed. The cldomain default would be
# $HOME/quicklisp; this project uses a Roswell-managed install:
cl_quicklisp = expandvars('$HOME/.rosswell/lisp/quicklisp')

# Ensure that the default highlighting language is CL:
highlight_language = 'common-lisp'

# For developer debugging only (and the curious, although, it did kill the cat!)
# Currently ``True`` or ``False`` to output the JSON collected from cldomain.
cl_debug = False
| # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
sys.path.insert(0, os.path.abspath('./sphinxcontrib-cldomain/sphinxcontrib'))
# -- Project information -----------------------------------------------------
project = 'cl-tg-bot'
copyright = '2019, <NAME>'
author = '<NAME>'
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinxcontrib.cldomain',
'sphinxcontrib.hyperspec'
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store', 'src/**']
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
html_sidebars = {
'**': [
'about.html',
'navigation.html',
'relations.html',
'searchbox.html',
'donate.html',
]
}
html_css_files = [
'style.css',
]
html_theme_options = {
}
from os.path import join, dirname, realpath, expandvars
# --- CL domain customizations:
#
# cl_systems: The systems and packages from which to extract documentation:
#
# name - The name of the system to load.
# path - The path to the system.
# packages - A list of the packages to extract symbol information from.
#
# Note: This conf.py sits in a subdirectory below ("../"), relative to where
# the "my-system.asd" system description file lives:
cl_systems = [{"name": "cl-telegram-bot",
"path": join(dirname(realpath(__file__)), "../"),
"packages": ["cl-telegram-bot"]}]
# cl_quicklisp: The default is $HOME/quicklisp. Shown here for completeness,
# and you can comment it out:
cl_quicklisp = expandvars('$HOME/.rosswell/lisp/quicklisp')
# Ensure that the default highlighting language is CL:
highlight_language = 'common-lisp'
# For developer debugging only (and the curious, although, it did kill the cat!)
# Currently ``True`` or ``False`` to output the JSON collected from cldomain.
cl_debug = False
| en | 0.744343 | # Configuration file for the Sphinx documentation builder. # # This file only contains a selection of the most common options. For a full # list see the documentation: # https://www.sphinx-doc.org/en/master/usage/configuration.html # -- Path setup -------------------------------------------------------------- # If extensions (or modules to document with autodoc) are in another directory, # add these directories to sys.path here. If the directory is relative to the # documentation root, use os.path.abspath to make it absolute, like shown here. # # -- Project information ----------------------------------------------------- # -- General configuration --------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom # ones. # Add any paths that contain templates here, relative to this directory. # List of patterns, relative to source directory, that match files and # directories to ignore when looking for source files. # This pattern also affects html_static_path and html_extra_path. # -- Options for HTML output ------------------------------------------------- # The theme to use for HTML and HTML Help pages. See the documentation for # a list of builtin themes. # # Add any paths that contain custom static files (such as style sheets) here, # relative to this directory. They are copied after the builtin static files, # so a file named "default.css" will overwrite the builtin "default.css". # --- CL domain customizations: # # cl_systems: The systems and packages from which to extract documentation: # # name - The name of the system to load. # path - The path to the system. # packages - A list of the packages to extract symbol information from. # # Note: This conf.py sits in a subdirectory below ("../"), relative to where # the "my-system.asd" system description file lives: # cl_quicklisp: The default is $HOME/quicklisp. 
Shown here for completeness, # and you can comment it out: # Ensure that the default highlighting language is CL: # For developer debugging only (and the curious, although, it did kill the cat!) # Currently ``True`` or ``False`` to output the JSON collected from cldomain. | 1.690347 | 2 |
main.py | RainrainWu/swe-compass | 1 | 6623807 | <filename>main.py
import json
import argparse
from subprocess import call
from analyzer.planner import Planner
from config import PlannerConfig
# Command-line interface: -u/--update refreshes the scraped job-description
# samples; otherwise the configured analysis plan is executed.
parser = argparse.ArgumentParser()
parser.add_argument(
    "--update",
    "-u",
    action="store_true",
    help="update job description samples by scrapers",
    default=False,
)
args = parser.parse_args()
if __name__ == "__main__":
    if args.update:
        # Re-run the scrapers inside the scraper/ sub-project.
        call("poetry run python runner.py", cwd="./scraper", shell=True)
    else:
        # Default path: run the analysis plan defined in the config module.
        planner = Planner(PlannerConfig.run_plan)
        planner.run()
| <filename>main.py
import json
import argparse
from subprocess import call
from analyzer.planner import Planner
from config import PlannerConfig
parser = argparse.ArgumentParser()
parser.add_argument(
"--update",
"-u",
action="store_true",
help="update job description samples by scrapers",
default=False,
)
args = parser.parse_args()
if __name__ == "__main__":
if args.update:
call("poetry run python runner.py", cwd="./scraper", shell=True)
else:
planner = Planner(PlannerConfig.run_plan)
planner.run()
| none | 1 | 2.330787 | 2 | |
tweepy_scraper.py | cperiz/trending_hashtags | 0 | 6623808 | <reponame>cperiz/trending_hashtags<gh_stars>0
from __future__ import absolute_import, print_function
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import time
import os
from lib.counter import HashProcessor
from lib.counter import ReplyProcessor
from lib.async_sender import Sender
"""
#: Downloads tweets from the geobox area (set in bounding box).
#: Finds most common hashtags.
#: Uses rabbitMQ to asynchronously message a program that
#: downloads news urls for these hashtags or prints a histogram of hashtags.
"""
"""
# ----- Bounding boxes for geolocations ------#
## Online-Tool to create boxes (c+p as raw CSV): http://boundingbox.klokantech.com/
#GEOBOX_WORLD = [-180,-90,180,90]
#GEOBOX_GERMANY = [5.0770049095, 47.2982950435, 15.0403900146, 54.9039819757]
#stream.filter(locations=GEOBOX_GERMANY)
#---------------------------------------------#
"""
consumer_key = os.environ['consumer_key']
consumer_secret = os.environ['consumer_secret']
access_token = os.environ['access_token']
access_token_secret = os.environ['access_token_secret']
class StdOutListener(StreamListener):
    """ A listener handles tweets that are received from the stream.
    This is a listener that counts hashtags and communicates with OAuthHandler
    programs via asynchronous messaging.
    """

    def __init__(self, t_start, t_silent, *args, **kwargs):
        """Set up the counters and the async sender.

        :param t_start: stream start time (seconds since the epoch).
        :param t_silent: reporting interval in seconds.
        """
        super(StdOutListener, self).__init__(*args, **kwargs)
        self.hash_processor = HashProcessor()
        self.reply_processor = ReplyProcessor()
        self.sender = Sender()
        self.t_start = t_start
        self.t_silent = t_silent
        self.c = 1  # number of reporting intervals elapsed so far

    def on_data(self, tweet):
        """Feed one tweet to the counters; report every ``t_silent`` seconds."""
        tweet = json.loads(tweet)
        self.hash_processor.process(tweet)
        self.reply_processor.process(tweet)
        # BUGFIX: compare against the interval stored on the instance.
        # The original read the module-level global ``t_silent``, which
        # only exists when this module is run as a script -- importing
        # the class elsewhere raised NameError on the first tweet.
        if time.time()-self.t_start > self.c*self.t_silent:
            self.c += 1
            print()
            print("time: ", time.time()-self.t_start)
            topXhash = self.hash_processor.get_topX_counts(10)
            topXreply = self.reply_processor.get_topX_counts(10)
            print(topXhash)
            print()
            print(topXreply)
            #: send to exchange to download
            #self.sender.send_msg(msg=",".join([i[0] for i in topX]))
            #: send to exchange to plot
            self.sender.send_msg(msg="|||".join([i[0] + "|::|" + str(i[1]) for i in topXhash]), name='hash_feed')
            self.sender.send_msg(msg="|||".join([i[0] + "|::|" + str(i[1]) for i in topXreply]), name='reply_feed')
        return True

    def on_error(self, status):
        """Print the streaming API error status code."""
        print(status)
if __name__ == '__main__':
    t_start = time.time()
    t_silent = 25 # seconds
    # Listener that aggregates hashtags/replies and reports every t_silent s.
    l = StdOutListener(t_start, t_silent)
    # Authenticate against the Twitter streaming API with env credentials.
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    # Longitude/latitude bounding box covering Massachusetts.
    GEOBOX_MA = [-73.7990632216,41.90293316,-70.2467151391,42.9610385979]
    #GEOBOX_CA = [-124.5984090405,32.5791974819,-116.648756203,43.1737269492]
    stream.filter(locations=GEOBOX_MA)
| from __future__ import absolute_import, print_function
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
import json
import time
import os
from lib.counter import HashProcessor
from lib.counter import ReplyProcessor
from lib.async_sender import Sender
"""
#: Downloads tweets from the geobox area (set in bounding box).
#: Finds most common hashtags.
#: Uses rabbitMQ to asynchronously message a program that
#: downloads news urls for these hashtags or prints a histogram of hashtags.
"""
"""
# ----- Bounding boxes for geolocations ------#
## Online-Tool to create boxes (c+p as raw CSV): http://boundingbox.klokantech.com/
#GEOBOX_WORLD = [-180,-90,180,90]
#GEOBOX_GERMANY = [5.0770049095, 47.2982950435, 15.0403900146, 54.9039819757]
#stream.filter(locations=GEOBOX_GERMANY)
#---------------------------------------------#
"""
consumer_key = os.environ['consumer_key']
consumer_secret = os.environ['consumer_secret']
access_token = os.environ['access_token']
access_token_secret = os.environ['access_token_secret']
class StdOutListener(StreamListener):
""" A listener handles tweets that are received from the stream.
This is a listener that counts hashtags and communicates with OAuthHandler
programs via asynchronous messaging.
"""
def __init__(self, t_start, t_silent, *args, **kwargs):
super(StdOutListener, self).__init__(*args, **kwargs)
self.hash_processor = HashProcessor()
self.reply_processor = ReplyProcessor()
self.sender = Sender()
self.t_start = t_start
self.t_silent = t_silent
self.c = 1
def on_data(self, tweet):
tweet = json.loads(tweet)
self.hash_processor.process(tweet)
self.reply_processor.process(tweet)
if time.time()-self.t_start > self.c*t_silent:
self.c += 1
print()
print("time: ", time.time()-self.t_start)
topXhash = self.hash_processor.get_topX_counts(10)
topXreply = self.reply_processor.get_topX_counts(10)
print(topXhash)
print()
print(topXreply)
#: send to exchange to download
#self.sender.send_msg(msg=",".join([i[0] for i in topX]))
#: send to exchange to plot
self.sender.send_msg(msg="|||".join([i[0] + "|::|" + str(i[1]) for i in topXhash]), name='hash_feed')
self.sender.send_msg(msg="|||".join([i[0] + "|::|" + str(i[1]) for i in topXreply]), name='reply_feed')
return True
def on_error(self, status):
print(status)
if __name__ == '__main__':
t_start = time.time()
t_silent = 25 # seconds
l = StdOutListener(t_start, t_silent)
auth = OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
stream = Stream(auth, l)
GEOBOX_MA = [-73.7990632216,41.90293316,-70.2467151391,42.9610385979]
#GEOBOX_CA = [-124.5984090405,32.5791974819,-116.648756203,43.1737269492]
stream.filter(locations=GEOBOX_MA) | en | 0.749818 | #: Downloads tweets from the geobox area (set in bounding box). #: Finds most common hashtags. #: Uses rabbitMQ to asynchronously message a program that #: downloads news urls for these hashtags or prints a histogram of hashtags. # ----- Bounding boxes for geolocations ------# ## Online-Tool to create boxes (c+p as raw CSV): http://boundingbox.klokantech.com/ #GEOBOX_WORLD = [-180,-90,180,90] #GEOBOX_GERMANY = [5.0770049095, 47.2982950435, 15.0403900146, 54.9039819757] #stream.filter(locations=GEOBOX_GERMANY) #---------------------------------------------# A listener handles tweets that are received from the stream. This is a listener that counts hashtags and communicates with OAuthHandler programs via asynchronous messaging. #: send to exchange to download #self.sender.send_msg(msg=",".join([i[0] for i in topX])) #: send to exchange to plot # seconds #GEOBOX_CA = [-124.5984090405,32.5791974819,-116.648756203,43.1737269492] | 2.764404 | 3 |
forest/test_util.py | andrewgryan/sql-playground | 0 | 6623809 | import unittest
import bokeh
import util
class TestDropdown(unittest.TestCase):
    """Unit tests for the dropdown helper functions in ``util``."""

    def test_on_click_sets_label(self):
        """Clicking a menu value updates the dropdown's label."""
        widget = bokeh.models.Dropdown(menu=[("A", "a")])
        on_click = util.autolabel(widget)
        on_click("a")
        self.assertEqual(widget.label, "A")

    def test_autowarn(self):
        """Replacing the menu flags the dropdown as stale (danger style)."""
        widget = bokeh.models.Dropdown(
            label="A",
            menu=[("A", "a")])
        on_change = util.autowarn(widget)
        on_change("menu", None, [("B", "b")])
        self.assertEqual(widget.button_type, "danger")

    def test_find_label_given_menu_and_value(self):
        """find_label maps a menu value back to its display label."""
        menu = [("A", "a"), ("B", "b"), ("C", "c")]
        self.assertEqual("B", util.find_label(menu, "b"))

    def test_pluck_label_given_menu(self):
        """pluck_label extracts just the display labels from a menu."""
        menu = [("A", "a"), ("B", "b"), ("C", "c")]
        self.assertEqual(["A", "B", "C"], util.pluck_label(menu))
| import unittest
import bokeh
import util
class TestDropdown(unittest.TestCase):
def test_on_click_sets_label(self):
dropdown = bokeh.models.Dropdown(menu=[("A", "a")])
callback = util.autolabel(dropdown)
callback("a")
self.assertEqual(dropdown.label, "A")
def test_autowarn(self):
dropdown = bokeh.models.Dropdown(
label="A",
menu=[("A", "a")])
callback = util.autowarn(dropdown)
attr, old, new = "menu", None, [("B", "b")]
callback(attr, old, new)
self.assertEqual(dropdown.button_type, "danger")
def test_find_label_given_menu_and_value(self):
menu = [("A", "a"), ("B", "b"), ("C", "c")]
value = "b"
result = util.find_label(menu, value)
expect = "B"
self.assertEqual(expect, result)
def test_pluck_label_given_menu(self):
menu = [("A", "a"), ("B", "b"), ("C", "c")]
result = util.pluck_label(menu)
expect = ["A", "B", "C"]
self.assertEqual(expect, result)
| none | 1 | 2.837509 | 3 | |
blog/migrations/0001_initial.py | wisdomkhan/CRUD_Blog | 0 | 6623810 | <filename>blog/migrations/0001_initial.py
# Generated by Django 3.2 on 2021-09-22 14:35
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the ``AddBlog`` table for blog posts."""
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='AddBlog',
            fields=[
                # Auto-generated primary key.
                ('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('title', models.CharField(max_length=500)),
                ('content', models.TextField(max_length=100000)),
                ('genre', models.CharField(max_length=100)),
                ('author', models.CharField(max_length=100)),
                # Timestamp set automatically when the row is inserted.
                ('created', models.DateTimeField(auto_now_add=True)),
            ],
        ),
    ]
| <filename>blog/migrations/0001_initial.py
# Generated by Django 3.2 on 2021-09-22 14:35
from django.db import migrations, models
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='AddBlog',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('title', models.CharField(max_length=500)),
('content', models.TextField(max_length=100000)),
('genre', models.CharField(max_length=100)),
('author', models.CharField(max_length=100)),
('created', models.DateTimeField(auto_now_add=True)),
],
),
]
| en | 0.803838 | # Generated by Django 3.2 on 2021-09-22 14:35 | 1.89907 | 2 |
tests/test_mysql.py | dkudeki/BookwormDB | 73 | 6623811 | from builtins import hex
import unittest
import bookwormDB
from bookwormDB.configuration import Configfile
import bookwormDB.CreateDatabase
import logging
import MySQLdb
import random
logging.basicConfig(level=10)
"""
Tests of the MySQL configuration.
"""
class Bookworm_MySQL_Configuration(unittest.TestCase):
    # NOTE: these are integration tests -- they require a reachable MySQL
    # server and Bookworm configuration files to be present on this machine.
    def test_server_connection(self):
        logging.info("\n\nTESTING SERVER CONNECTION\n\n")
        """
        Connect to MySQL and run a simple query.
        """
        import bookwormDB.CreateDatabase
        db = bookwormDB.CreateDatabase.DB(dbname="mysql")
        sampleQuery=db.query("SELECT 1+1").fetchall()
        self.assertTrue(sampleQuery[0][0]==2)
    """
    To properly test things, we actually build some bookworms.
    This assumes that the directory '/tmp' is writeable,
    which isn't strictly necessary for a bookworm to be built.
    """
    def test_config_files(self):
        logging.info("\n\nTESTING CONFIG FILE ACCESS\n\n")
        def test_config_file(conf):
            # Extract the MySQL (user, password) pair from a Configfile.
            user = conf.config.get("client","user")
            pw = conf.config.get("client","password")
            return (user,pw)
        global_configuration_file = Configfile("read_only")
        admin_configuration_file = Configfile("admin")
        # NOTE(review): the "read_only" config is unpacked into admin_* and
        # the "admin" config into client_* -- the labels look swapped, but
        # the assertion below only requires the two users to differ.
        (admin_user,admin_pw) = test_config_file(global_configuration_file)
        (client_user,client_pw) = test_config_file(admin_configuration_file)
        logging.info("admin user is {} and password is {}".format(admin_user,admin_pw))
        logging.info("client user is {} and password is {}".format(client_user,client_pw))
        logging.info("Checking that admin and client users are distinct")
        self.assertTrue(admin_user != client_user)
    def test_createDB_permission(self):
        logging.info("\nTESTING ABILITY TO CREATE DATABASES\n\n")
        import bookwormDB.configuration
        # Random 128-bit hex suffix makes a throwaway database name that is
        # vanishingly unlikely to collide with an existing database.
        # NOTE(review): the [2:-1] slice dropped Python 2's trailing "L";
        # on Python 3 it also drops the last hex digit -- harmless here,
        # but worth confirming the intent.
        dbname = "A" + hex(random.getrandbits(128))[2:-1]
        import bookwormDB.CreateDatabase
        db = bookwormDB.CreateDatabase.DB(dbname="mysql")
        cursor = db.query("CREATE DATABASE {}".format(dbname))
        cursor.execute("DROP DATABASE {}".format(dbname))
        cursor.close()
# Run the MySQL integration tests when executed directly.
if __name__=="__main__":
    unittest.main()
| from builtins import hex
import unittest
import bookwormDB
from bookwormDB.configuration import Configfile
import bookwormDB.CreateDatabase
import logging
import MySQLdb
import random
logging.basicConfig(level=10)
"""
Tests of the MySQL configuration.
"""
class Bookworm_MySQL_Configuration(unittest.TestCase):
def test_server_connection(self):
logging.info("\n\nTESTING SERVER CONNECTION\n\n")
"""
Connect to MySQL and run a simple query.
"""
import bookwormDB.CreateDatabase
db = bookwormDB.CreateDatabase.DB(dbname="mysql")
sampleQuery=db.query("SELECT 1+1").fetchall()
self.assertTrue(sampleQuery[0][0]==2)
"""
To properly test things, we actually build some bookworms.
This assumes that the directory '/tmp' is writeable,
which isn't strictly necessary for a bookworm to be built.
"""
def test_config_files(self):
logging.info("\n\nTESTING CONFIG FILE ACCESS\n\n")
def test_config_file(conf):
user = conf.config.get("client","user")
pw = conf.config.get("client","password")
return (user,pw)
global_configuration_file = Configfile("read_only")
admin_configuration_file = Configfile("admin")
(admin_user,admin_pw) = test_config_file(global_configuration_file)
(client_user,client_pw) = test_config_file(admin_configuration_file)
logging.info("admin user is {} and password is {}".format(admin_user,admin_pw))
logging.info("client user is {} and password is {}".format(client_user,client_pw))
logging.info("Checking that admin and client users are distinct")
self.assertTrue(admin_user != client_user)
def test_createDB_permission(self):
logging.info("\nTESTING ABILITY TO CREATE DATABASES\n\n")
import bookwormDB.configuration
dbname = "A" + hex(random.getrandbits(128))[2:-1]
import bookwormDB.CreateDatabase
db = bookwormDB.CreateDatabase.DB(dbname="mysql")
cursor = db.query("CREATE DATABASE {}".format(dbname))
cursor.execute("DROP DATABASE {}".format(dbname))
cursor.close()
if __name__=="__main__":
unittest.main()
| en | 0.932606 | Tests of the MySQL configuration. Connect to MySQL and run a simple query. To properly test things, we actually build some bookworms. This assumes that the directory '/tmp' is writeable, which isn't strictly necessary for a bookworm to be built. | 2.854463 | 3 |
Lesson 4-Branches/activity_step_30.py | samy-khelifa/Version-Control-with-Git-and-GitHub | 5 | 6623812 | <filename>Lesson 4-Branches/activity_step_30.py
# Activity
@classmethod
def distance(cls, unit, *args):
    """Return the product of ``args`` formatted as ``"<value> <unit>"``.

    Fixes for Python 3: ``reduce`` is no longer a builtin, so it is
    imported from :mod:`functools`.  With no factors the distance is 0
    (the original raised TypeError); the dead ``distance = 0`` initial
    assignment is folded into that guard.

    :param unit: unit label appended to the value (e.g. ``"km"``).
    :param args: numeric factors whose product is the distance.
    """
    from functools import reduce  # local import keeps the snippet self-contained
    if not args:
        return "%s %s" % (0, unit)
    distance = reduce(lambda x, y: x * y, args)
    return "%s %s" % (distance, unit)
| <filename>Lesson 4-Branches/activity_step_30.py
# Activity
@classmethod
def distance(cls, unit, *args):
distance = 0
distance = reduce(lambda x, y: x*y, args)
return "%s %s" %(distance, unit)
| en | 0.566569 | # Activity | 3.224627 | 3 |
py4j-python/src/py4j/version.py | torokati44/py4j | 0 | 6623813 | __version__ = '0.10.9.3'
| __version__ = '0.10.9.3'
| none | 1 | 1.050121 | 1 | |
pubdb_prepare.py | Archieyoung/SVAN | 7 | 6623814 | #!/usr/bin/env python3
"""
prepare SV database for annotation
convert 1000genome, DGV, dbVar SV files into bed files
"""
import sys
import gzip
import logging
import operator
import os
from glob import iglob
from datetime import date
from sv_vcf import SV
# 1000genome
class one_thousand_sv(object):
    """A single structural-variant (SV) record from a 1000 Genomes VCF.

    Parses one tab-separated VCF line into coordinates, a normalized SV
    type and per-population allele frequencies, and can dump a whole
    gzipped VCF into a sorted BED file for annotation.
    """

    def __init__(self, record):
        """Parse one raw 1000 Genomes VCF data line (string)."""
        # 1000genome vcf file parse
        self.record = record
        fields = record.strip().split("\t")
        # the first nine columns are the fixed VCF columns; the rest are
        # per-sample genotype columns
        (self.chrom, self.pos1, self.id, self.ref, self.alt, self.qual,
                self.filter, self.info, self.format) = fields[:9]
        self.samples = fields[9:]
        # INFO column -> dict; flag entries without "=" map to themselves
        self.info_dict = {}
        info_list = self.info.split(";")
        for i in info_list:
            if "=" in i:
                info_id, info_value = i.split("=")
                self.info_dict[info_id] = info_value
            else:
                self.info_dict[i] = i
        # end coordinate
        if "END" in self.info_dict:
            self.pos2 = self.info_dict["END"]
        else:
            # if can not find end in info, end = start(eg. insertion)
            self.pos2 = self.pos1
        # SVLEN
        if "SVLEN" in self.info_dict:
            self.svlen = self.info_dict["SVLEN"]
        else:
            self.svlen = "NA"
        # SVTYPE: collapse mobile-element sub-types into plain INS/DEL
        self.sub_svtype = self.info_dict["SVTYPE"]
        if self.sub_svtype in ["SVA", "LINE1", "ALU", "INS"]:
            self.svtype = "INS"
        elif self.sub_svtype in ["DEL", "DEL_ALU", "DEL_HERV", "DEL_LINE1",
                "DEL_SVA"]:
            self.svtype = "DEL"
        else:
            self.svtype = self.sub_svtype
        # allele frequency
        # multi-alleles(CNVs,0,1,2...) frequency is not considered here,
        # treated as bi-alleles(0,1) frequency
        af_populations = ["AF", "EAS_AF", "EUR_AF", "AFR_AF", "AMR_AF", "SAS_AF"]
        self.AFs = [self._get_af(i) for i in af_populations]

    def _get_af(self, af_population):
        """Return the summed allele frequency for one population key.

        Multi-allelic values such as ``AF=0.1,0.2`` are summed and
        formatted to six significant digits; a missing key or a
        non-numeric value yields "NA".
        """
        # af_population: AF=0.00698882;EAS_AF=0.0069;EUR_AF=0.0189;
        # AFR_AF=0.0;AMR_AF=0.0072;SAS_AF=0.0041;
        try:
            af = sum([float(i) for i in self.info_dict[af_population].split(
                    ",")])
            af = "{:.6}".format(af)
        except (KeyError, ValueError):
            # BUGFIX: narrowed from a bare ``except:`` which also swallowed
            # KeyboardInterrupt/SystemExit.  KeyError: population absent
            # from INFO; ValueError: value is not a number.
            af = "NA"
            logging.warning('Can not find "{}" in INFO of record: {}'.format(
                    af_population, self.record))
        return af

    @classmethod
    def print_bed(cls, vcf_gz, out_name):
        """Convert a gzipped 1000 Genomes VCF into a sorted BED file.

        :param vcf_gz: path to the ``.vcf.gz`` input.
        :param out_name: path of the BED file to write.
        """
        bed_list = []
        with gzip.open(vcf_gz, "r") as io:
            n = 0
            for line in io:
                line = line.decode("utf-8")
                if line[0] == "#":
                    continue
                db_svid = "1000genome{}".format(n)  # make 1000genome SV id
                n += 1
                sv = one_thousand_sv(line)
                sv.pos1 = int(sv.pos1)  # int so the sort below is numeric
                bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
                        sv.sub_svtype] + sv.AFs
                bed_list.append(bed)
        # sort by chromosome (lexicographic) then start position (numeric)
        bed_list.sort(key=operator.itemgetter(0, 1))
        bed_lines = []
        for i in bed_list:
            i[1] = str(i[1])
            bed_lines.append("\t".join(i) + "\n")
        with open(out_name, "w") as io:
            io.writelines(bed_lines)
class dgv_gold_cnv(object):
    """One DGV gold-standard CNV record parsed from a GFF3 line."""

    def __init__(self, record):
        """Split one GFF3 line into coordinates, CNV type and frequency."""
        self.record = record
        columns = record.strip().split("\t")
        # normalise chromosome names by dropping any "chr" prefix
        self.chrom = columns[0].replace("chr", "")
        self.pos1 = columns[3]
        self.pos2 = columns[4]
        # attribute column ("key=value;..."); flags map to themselves
        self.info_dict = {}
        for entry in columns[-1].split(";"):
            if "=" in entry:
                key, value = entry.split("=")
                self.info_dict[key] = value
            else:
                self.info_dict[entry] = entry
        subtype = self.info_dict["variant_sub_type"]
        if subtype == "Gain":
            self.svtype = "DUP"
        else:
            if subtype != "Loss":
                raise RuntimeError('variant_sub_type can either be "Gain" or "Loss"')
            self.svtype = "DEL"
        # frequency arrives as a percentage string, e.g. "1.5%"
        percent = self.info_dict["Frequency"].replace("%", "")
        self.af = str(float(percent) * 0.01)
        self.sample_size = self.info_dict["num_unique_samples_tested"]

    @classmethod
    def print_bed(cls, gff3, out_name):
        """Convert a DGV gold-standard GFF3 file into a sorted BED file."""
        rows = []
        counter = 0
        with open(gff3, "r") as handle:
            for raw in handle:
                if raw[0] == "#":
                    continue
                cnv = dgv_gold_cnv(raw)
                cnv.pos1 = int(cnv.pos1)  # numeric start so sorting works
                rows.append([cnv.chrom, cnv.pos1, cnv.pos2, cnv.svtype,
                        "dgv{}".format(counter), cnv.af, cnv.sample_size])
                counter += 1
        # chromosome (lexicographic) first, then numeric start position
        rows.sort(key=operator.itemgetter(0, 1))
        out_lines = []
        for row in rows:
            row[1] = str(row[1])
            out_lines.append("\t".join(row) + "\n")
        with open(out_name, "w") as handle:
            handle.writelines(out_lines)
class dbVar_nstd37_sv(object):
    """A clinically annotated SV record from a dbVar nstd37 VCF line.

    Exposes coordinates, SV type, clinical significance (CLNSIG) and
    phenotype (PHENO), and can convert a whole ``.vcf.gz`` into a
    sorted BED file for annotation.
    """
    # dbvar vcf file parse
    def __init__(self,record):
        # record: one raw tab-separated VCF data line
        self.record = record
        fields = record.strip().split("\t")
        # this VCF carries no FORMAT/sample columns: only the 8 fixed ones
        (self.chrom,self.pos1,self.id,self.ref,self.alt,self.qual,self.filter,
                self.info) = fields[:8]
        # info dict
        self.info_dict = {}
        info_list = self.info.split(";")
        for i in info_list:
            if "=" in i:
                info_id,info_value = i.split("=")
                self.info_dict[info_id] = info_value
            else:
                # flag entry without a value: map it to itself
                self.info_dict[i] = i
        self.pos2 = self.info_dict["END"]
        self.svtype = self.info_dict["SVTYPE"]
        # clinical significance / phenotype are optional INFO keys
        try:
            self.clnsig = self.info_dict["CLNSIG"]
        except KeyError:
            self.clnsig = "NA"
        try:
            self.pheno = self.info_dict["PHENO"]
        except KeyError:
            self.pheno = "NA"
    @classmethod
    def print_bed(cls,vcf_gz,out_name):
        """Convert a gzipped dbVar VCF into a sorted BED file.

        Output columns: chrom, start, end, svtype, generated id
        ("dbvar<n>"), CLNSIG, PHENO.
        """
        bed_list = []
        with gzip.open(vcf_gz,"r") as io:
            n = 0
            for line in io:
                line = line.decode("utf-8")
                if line[0] == "#":
                    continue
                sv = dbVar_nstd37_sv(line)
                db_svid = "dbvar{}".format(n)
                n += 1
                sv.pos1 = int(sv.pos1)  # int so the sort below is numeric
                bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
                        sv.clnsig, sv.pheno]
                bed_list.append(bed)
        # sort by chromosome (lexicographic) then start position (numeric)
        bed_list.sort(key = operator.itemgetter(0, 1))
        bed_lines = []
        for i in bed_list:
            i[1] = str(i[1])
            bed_lines.append("\t".join(i)+"\n")
        with open(out_name,"w") as io:
            io.writelines(bed_lines)
class decipher_HI(object):
    """One haploinsufficiency-prediction interval from DECIPHER.

    Converts ``decipher_HI_Predictions_Version3.bed.gz`` records into
    database BED rows.  Source study: "Characterising and Predicting
    Haploinsufficiency in the Human Genome", PLOS Genetics 6(10):
    e1001154 (2010).
    """

    def __init__(self, record):
        """Parse one tab-separated DECIPHER BED line."""
        columns = record.strip().split("\t")
        self.chrom, self.pos1, self.pos2, self.gene_hi = columns[:4]
        self.chrom = self.chrom.replace("chr", "")  # drop the "chr" prefix
        # "WILD" matches any SV type, enabling type-insensitive annotation
        self.svtype = "WILD"

    @classmethod
    def print_bed(cls, input_gz, out_name):
        """Convert the gzipped DECIPHER BED into a sorted database BED."""
        rows = []
        with gzip.open(input_gz, "r") as handle:
            handle.readline()  # skip the header line
            for idx, raw in enumerate(handle):
                hi = decipher_HI(raw.decode("utf-8"))
                hi.pos1 = int(hi.pos1)  # numeric start so sorting works
                rows.append([hi.chrom, hi.pos1, hi.pos2, hi.svtype,
                        "decipherHI{}".format(idx), hi.gene_hi])
        # chromosome (lexicographic) first, then numeric start position
        rows.sort(key=operator.itemgetter(0, 1))
        out_lines = []
        for row in rows:
            row[1] = str(row[1])
            out_lines.append("\t".join(row) + "\n")
        with open(out_name, "w") as handle:
            handle.writelines(out_lines)
class cosmic_cnv(object):
    """
    Convert CosmicCompleteCNA.tsv.gz(CNV) into database bed
    too many records 31723168, need refine for annotation, beta!!!
    """

    def __init__(self, record):
        """Parse one tab-separated CosmicCompleteCNA line.

        Raises RuntimeError when the position column does not match the
        expected ``chrom:start..end`` layout.
        """
        fields = record.strip().split("\t")
        self.CNV_ID = fields[0]
        self.Primary_site = fields[5]
        self.Primary_histology = fields[9]
        self.svtype = fields[-4]
        if self.svtype == "gain":
            self.svtype = "DUP"
        if self.svtype == "loss":
            self.svtype = "DEL"
        sv_positions = fields[-1]  # chrom:start..end
        # BUGFIX: the original condition `":" and ".." in sv_positions`
        # only tested for ".." (":" is always truthy); both separators
        # must be present before splitting.
        if ":" in sv_positions and ".." in sv_positions:
            sp1 = sv_positions.split(":")
            sp2 = sp1[1].split("..")
            # BUGFIX: chrom was assigned the whole split list (`sp1`)
            # instead of the chromosome string.
            self.chrom = sp1[0]
            self.pos1 = sp2[0]
            self.pos2 = sp2[1]
        else:
            raise RuntimeError("{} not match 'chrom:start..end'".format(
                    sv_positions))

    @classmethod
    def print_bed(cls, input_gz, out_name):
        """Convert CosmicCompleteCNA.tsv.gz into a sorted BED file.

        CosmicCompleteCNA stores one row per (CNV, gene) pair, so the
        same CNV_ID appears many times; only the first occurrence of
        each ID is kept.
        """
        bed_list = []
        # set membership is O(1); the original list scan was O(n) per row,
        # which matters over tens of millions of records
        seen_ids = set()
        with gzip.open(input_gz, "r") as io:
            io.readline()  # remove header
            n = 0
            for line in io:
                line = line.decode("utf-8")
                sv = cosmic_cnv(line)
                if sv.CNV_ID in seen_ids:
                    # duplicate per-gene row for an already-seen CNV
                    continue
                seen_ids.add(sv.CNV_ID)
                sv.pos1 = int(sv.pos1)  # int so the sort below is numeric
                db_svid = "cosmic{}".format(n)
                n += 1
                bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
                        sv.Primary_site, sv.Primary_histology]
                bed_list.append(bed)
        bed_list.sort(key=operator.itemgetter(0, 1))
        bed_lines = []
        for i in bed_list:
            i[1] = str(i[1])
            bed_lines.append("\t".join(i) + "\n")
        with open(out_name, "w") as io:
            io.writelines(bed_lines)
#class cosmic_sv(object):
# """
# convert cosmic CosmicStructExport.tsv.gz into database bed
# """
# def __init__(self,record):
# fileds = record.strip().split("\t")
def main():
    # Conversion entry points are kept for reference; uncomment the line
    # matching the public database file you want to convert to BED.
    #one_thousand_sv.print_bed(sys.argv[1],sys.argv[2])
    #dgv_gold_cnv.print_bed(sys.argv[1],sys.argv[2])
    #dbVar_nstd37_sv.print_bed(sys.argv[1],sys.argv[2])
    #decipher_HI.print_bed(sys.argv[1],sys.argv[2])
    #cosmic_cnv.print_bed(sys.argv[1],sys.argv[2])
    #make_grand_sv_db(sys.argv[1], "tmp")
    pass
if __name__ == "__main__":
    main()
| #!/usr/bin/env python3
"""
prepare SV database for annotation
convert 1000genome, DGV, dbVar SV files into bed files
"""
import sys
import gzip
import logging
import operator
import os
from glob import iglob
from datetime import date
from sv_vcf import SV
# 1000genome
class one_thousand_sv(object):
def __init__(self,record):
# 1000genome vcf file parse
self.record = record
fields = record.strip().split("\t")
(self.chrom,self.pos1,self.id,self.ref,self.alt,self.qual,self.filter,
self.info,self.format) = fields[:9]
self.samples = fields[9:]
# info dict
self.info_dict = {}
info_list = self.info.split(";")
for i in info_list:
if "=" in i:
info_id,info_value = i.split("=")
self.info_dict[info_id] = info_value
else:
self.info_dict[i] = i
# end
if "END" in self.info_dict:
self.pos2 = self.info_dict["END"]
else:
# if can not find end in info, end = start(eg. insertion)
self.pos2 = self.pos1
# SVLEN
if "SVLEN" in self.info_dict:
self.svlen = self.info_dict["SVLEN"]
else:
self.svlen = "NA"
# SVTYPE
self.sub_svtype = self.info_dict["SVTYPE"]
if self.sub_svtype in ["SVA","LINE1","ALU","INS"]:
self.svtype = "INS"
elif self.sub_svtype in ["DEL","DEL_ALU","DEL_HERV","DEL_LINE1",
"DEL_SVA"]:
self.svtype = "DEL"
else:
self.svtype = self.sub_svtype
# allele frequency
# multi-alleles(CNVs,0,1,2...) frequency is not considered here,
# treated as bi-alleles(0,1) frequency
af_populations = ["AF","EAS_AF","EUR_AF","AFR_AF","AMR_AF","SAS_AF"]
self.AFs = [self._get_af(i) for i in af_populations]
def _get_af(self,af_population):
# af_population: AF=0.00698882;EAS_AF=0.0069;EUR_AF=0.0189;
# AFR_AF=0.0;AMR_AF=0.0072;SAS_AF=0.0041;
try:
af = sum([float(i) for i in self.info_dict[af_population].split(
",")])
af = "{:.6}".format(af)
except:
af = "NA"
logging.warning('Can not find "{}" in INFO of record: {}'.format(
af_population,self.record))
return af
@classmethod
def print_bed(cls,vcf_gz,out_name):
bed_list = []
with gzip.open(vcf_gz,"r") as io:
n = 0
for line in io:
line = line.decode("utf-8")
if line[0] == "#":
continue
db_svid = "1000genome{}".format(n) # make 1000genome SV id
n += 1
sv = one_thousand_sv(line)
sv.pos1 = int(sv.pos1)
bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
sv.sub_svtype]+sv.AFs
bed_list.append(bed)
bed_list.sort(key = operator.itemgetter(0, 1))
bed_lines = []
for i in bed_list:
i[1] = str(i[1])
bed_lines.append("\t".join(i)+"\n")
with open(out_name,"w") as io:
io.writelines(bed_lines)
class dgv_gold_cnv(object):
# dgv gff3 file parse
def __init__(self,record):
self.record = record
fields = record.strip().split("\t")
# remove "chr" prefix in chrom if it exists
self.chrom = fields[0].replace("chr","")
self.pos1 = fields[3]
self.pos2 = fields[4]
self.info_dict = {}
for i in fields[-1].split(";"):
if "=" in i:
info_id,info_value = i.split("=")
self.info_dict[info_id] = info_value
else:
self.info_dict[i] = i
if self.info_dict["variant_sub_type"] == "Gain":
self.svtype = "DUP"
elif self.info_dict["variant_sub_type"] == "Loss":
self.svtype = "DEL"
else:
raise RuntimeError('variant_sub_type can either be "Gain" or "Loss"')
self.af = self.info_dict["Frequency"]
self.af = str(float(self.af.replace("%",""))*0.01)
self.sample_size = self.info_dict["num_unique_samples_tested"]
@classmethod
def print_bed(cls,gff3,out_name):
bed_list = []
with open(gff3,"r") as io:
n = 0
for line in io:
if line[0] == "#":
continue
sv = dgv_gold_cnv(line)
db_svid = "dgv{}".format(n)
n += 1
sv.pos1 = int(sv.pos1)
bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
sv.af, sv.sample_size]
bed_list.append(bed)
bed_list.sort(key = operator.itemgetter(0, 1))
bed_lines = []
for i in bed_list:
i[1] = str(i[1])
bed_lines.append("\t".join(i)+"\n")
with open(out_name,"w") as io:
io.writelines(bed_lines)
class dbVar_nstd37_sv(object):
# dbvar vcf file parse
def __init__(self,record):
self.record = record
fields = record.strip().split("\t")
(self.chrom,self.pos1,self.id,self.ref,self.alt,self.qual,self.filter,
self.info) = fields[:8]
# info dict
self.info_dict = {}
info_list = self.info.split(";")
for i in info_list:
if "=" in i:
info_id,info_value = i.split("=")
self.info_dict[info_id] = info_value
else:
self.info_dict[i] = i
self.pos2 = self.info_dict["END"]
self.svtype = self.info_dict["SVTYPE"]
try:
self.clnsig = self.info_dict["CLNSIG"]
except KeyError:
self.clnsig = "NA"
try:
self.pheno = self.info_dict["PHENO"]
except KeyError:
self.pheno = "NA"
@classmethod
def print_bed(cls,vcf_gz,out_name):
bed_list = []
with gzip.open(vcf_gz,"r") as io:
n = 0
for line in io:
line = line.decode("utf-8")
if line[0] == "#":
continue
sv = dbVar_nstd37_sv(line)
db_svid = "dbvar{}".format(n)
n += 1
sv.pos1 = int(sv.pos1)
bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
sv.clnsig, sv.pheno]
bed_list.append(bed)
bed_list.sort(key = operator.itemgetter(0, 1))
bed_lines = []
for i in bed_list:
i[1] = str(i[1])
bed_lines.append("\t".join(i)+"\n")
with open(out_name,"w") as io:
io.writelines(bed_lines)
class decipher_HI(object):
"""
Convert decipher_HI_Predictions_Version3.bed.gz to database bed
<NAME>, <NAME>, <NAME>, <NAME> (2010) Characterising and Predicting Haploinsufficiency in the Human Genome. PLOS Genetics 6(10): e1001154.
"""
def __init__(self,record):
fields = record.strip().split("\t")
self.chrom,self.pos1,self.pos2,self.gene_hi = fields[:4]
# remove "chr"
self.chrom = self.chrom.replace("chr","")
self.svtype = "WILD" # wild means that it can match any SV type, for doing svtye-insensity annotation
@classmethod
def print_bed(cls,input_gz,out_name):
bed_list = []
with gzip.open(input_gz,"r") as io:
io.readline() # remove header
n = 0
for line in io:
line = line.decode("utf-8")
sv = decipher_HI(line)
sv.pos1 = int(sv.pos1)
db_svid = "decipherHI{}".format(n)
n += 1
bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
sv.gene_hi]
bed_list.append(bed)
bed_list.sort(key = operator.itemgetter(0, 1))
bed_lines = []
for i in bed_list:
i[1] = str(i[1])
bed_lines.append("\t".join(i)+"\n")
with open(out_name,"w") as io:
io.writelines(bed_lines)
class cosmic_cnv(object):
"""
Convert CosmicCompleteCNA.tsv.gz(CNV) into database bed
too many records 31723168, need refine for annotation, beta!!!
"""
def __init__(self,record):
fields = record.strip().split("\t")
self.CNV_ID = fields[0]
self.Primary_site = fields[5]
self.Primary_histology = fields[9]
self.svtype = fields[-4]
if self.svtype == "gain":
self.svtype = "DUP"
if self.svtype == "loss":
self.svtype = "DEL"
sv_positions = fields[-1] # chrom:start..end
if ":" and ".." in sv_positions:
sp1 = sv_positions.split(":")
sp2 = sp1[1].split("..")
self.chrom = sp1
self.pos1 = sp2[0]
self.pos2 = sp2[1]
else:
raise RuntimeError("{} not match 'chrom:start..end'".format(
sv_positions))
@classmethod
def print_bed(cls,input_gz,out_name):
bed_list = []
cnv_ids = []
with gzip.open(input_gz,"r") as io:
io.readline() # remove header
n = 0
for line in io:
line = line.decode("utf-8")
sv = cosmic_cnv(line)
if sv.CNV_ID in cnv_ids:
continue # remove 'Duplicated' record. CosmicCNA store CNV considering gene informations which is not necessary here
else:
cnv_ids.append(sv.CNV_ID)
sv.pos1 = int(sv.pos1)
db_svid = "cosmic{}".format(n)
n += 1
bed = [sv.chrom, sv.pos1, sv.pos2, sv.svtype, db_svid,
sv.Primary_site, sv.Primary_histology]
bed_list.append(bed)
bed_list.sort(key = operator.itemgetter(0, 1))
bed_lines = []
for i in bed_list:
i[1] = str(i[1])
bed_lines.append("\t".join(i)+"\n")
with open(out_name,"w") as io:
io.writelines(bed_lines)
#class cosmic_sv(object):
# """
# convert cosmic CosmicStructExport.tsv.gz into database bed
# """
# def __init__(self,record):
# fileds = record.strip().split("\t")
def main():
#one_thousand_sv.print_bed(sys.argv[1],sys.argv[2])
#dgv_gold_cnv.print_bed(sys.argv[1],sys.argv[2])
#dbVar_nstd37_sv.print_bed(sys.argv[1],sys.argv[2])
#decipher_HI.print_bed(sys.argv[1],sys.argv[2])
#cosmic_cnv.print_bed(sys.argv[1],sys.argv[2])
#make_grand_sv_db(sys.argv[1], "tmp")
pass
if __name__ == "__main__":
main()
| en | 0.45694 | #!/usr/bin/env python3 prepare SV database for annotation convert 1000genome, DGV, dbVar SV files into bed files # 1000genome # 1000genome vcf file parse # info dict # end # if can not find end in info, end = start(eg. insertion) # SVLEN # SVTYPE # allele frequency # multi-alleles(CNVs,0,1,2...) frequency is not considered here, # treated as bi-alleles(0,1) frequency # af_population: AF=0.00698882;EAS_AF=0.0069;EUR_AF=0.0189; # AFR_AF=0.0;AMR_AF=0.0072;SAS_AF=0.0041; # make 1000genome SV id # dgv gff3 file parse # remove "chr" prefix in chrom if it exists # dbvar vcf file parse # info dict Convert decipher_HI_Predictions_Version3.bed.gz to database bed <NAME>, <NAME>, <NAME>, <NAME> (2010) Characterising and Predicting Haploinsufficiency in the Human Genome. PLOS Genetics 6(10): e1001154. # remove "chr" # wild means that it can match any SV type, for doing svtye-insensity annotation # remove header Convert CosmicCompleteCNA.tsv.gz(CNV) into database bed too many records 31723168, need refine for annotation, beta!!! # chrom:start..end # remove header # remove 'Duplicated' record. CosmicCNA store CNV considering gene informations which is not necessary here #class cosmic_sv(object): # """ # convert cosmic CosmicStructExport.tsv.gz into database bed # """ # def __init__(self,record): # fileds = record.strip().split("\t") #one_thousand_sv.print_bed(sys.argv[1],sys.argv[2]) #dgv_gold_cnv.print_bed(sys.argv[1],sys.argv[2]) #dbVar_nstd37_sv.print_bed(sys.argv[1],sys.argv[2]) #decipher_HI.print_bed(sys.argv[1],sys.argv[2]) #cosmic_cnv.print_bed(sys.argv[1],sys.argv[2]) #make_grand_sv_db(sys.argv[1], "tmp") | 2.546576 | 3 |
codes/models/VSR_model.py | grofit/traiNNer | 78 | 6623815 | from __future__ import absolute_import
import os
import logging
from collections import OrderedDict
import torch
import torch.nn as nn
import models.networks as networks
from .base_model import BaseModel
from . import losses
from dataops.colors import ycbcr_to_rgb
import torch.nn.functional as F
from dataops.debug import tmp_vis, tmp_vis_flow, describe_numpy, describe_tensor
logger = logging.getLogger('base')
class VSRModel(BaseModel):
    """Video Super-Resolution (VSR) model.

    Maps a stack of low-resolution (LR) frames to the super-resolved (SR)
    central frame.  Supports plain generator training, adversarial (GAN)
    training with a discriminator, and an optional optical-flow
    reconstruction (OFR) auxiliary loss for flow-estimating generators
    that return ``(flow_L1, flow_L2, flow_L3, SR)``.
    """

    def __init__(self, opt):
        """Build networks, losses, optimizers and training helpers.

        Args:
            opt (dict): full configuration; ``opt['train']`` holds the
                training sub-options.
        """
        super(VSRModel, self).__init__(opt)
        train_opt = opt['train']
        self.scale = opt.get('scale', 4)
        # Layout of 5-D video tensors: 'TCHW' (default) or 'CTHW'.
        self.tensor_shape = opt.get('tensor_shape', 'TCHW')

        # Models to load/save to disk; the training/test scripts call
        # <BaseModel.save_networks> and <BaseModel.load_networks>.
        # A generator 'G' is always needed (train and test).
        self.model_names = ['G']

        # define networks and load pretrained models
        self.netG = networks.define_G(opt).to(self.device)  # G
        if self.is_train:
            self.netG.train()

            opt_G_nets = [self.netG]
            opt_D_nets = []
            if train_opt['gan_weight']:
                self.model_names.append('D')  # add discriminator to the network list
                self.netD = networks.define_D(opt).to(self.device)  # D
                self.netD.train()
                opt_D_nets.append(self.netD)
        self.load()  # load G and D if needed

        # define losses, optimizer, scheduler and other components
        if self.is_train:
            # setup network cap: whether the generator output gets a
            # final capping mechanism
            self.outm = train_opt.get('finalcap', None)

            # setup frequency separation (sets self.f_low / self.f_high)
            self.setup_fs()

            # initialize generator losses
            self.generatorlosses = losses.GeneratorLoss(opt, self.device)
            # TODO: show the configured losses names in logger

            # discriminator loss (sets self.cri_gan and friends)
            self.setup_gan()

            # Optical Flow Reconstruction loss.
            # Initialize to False first so the attribute always exists,
            # even when ofr_type is set to an unsupported value.
            self.cri_ofr = False
            ofr_type = train_opt.get('ofr_type', None)
            ofr_weight = train_opt.get('ofr_weight', [0.1, 0.2, 0.1, 0.01])
            if ofr_type and ofr_weight:
                # weights follow the lambdas of the SOF-VSR paper
                self.ofr_weight = ofr_weight[3]  # lambda 4
                self.ofr_wl1 = ofr_weight[0]  # lambda 1
                self.ofr_wl2 = ofr_weight[1]  # lambda 2
                ofr_wl3 = ofr_weight[2]  # lambda 3
                if ofr_type == 'ofr':
                    from models.modules.loss import OFR_loss
                    # TODO: make the regularization weight an option. lambda3 = 0.1
                    self.cri_ofr = OFR_loss(reg_weight=ofr_wl3).to(self.device)

            # configure FreezeD
            if self.cri_gan:
                self.setup_freezeD()

            # prepare optimizers
            self.setup_optimizers(opt_G_nets, opt_D_nets, init_setup=True)

            # prepare schedulers
            self.setup_schedulers()

            # set gradients to zero
            self.optimizer_G.zero_grad()
            if self.cri_gan:
                self.optimizer_D.zero_grad()

            # init loss log
            self.log_dict = OrderedDict()

            # configure SWA
            self.setup_swa()

            # configure virtual batch
            self.setup_virtual_batch()

            # configure AMP
            self.setup_amp()

        # print network
        # TODO: pass verbose flag from config file
        self.print_network(verbose=False)

    def feed_data(self, data, need_HR=True):
        """Unpack a dataloader batch onto the device.

        Args:
            data (dict): expects 'LR' (4-D ``b,t,h,w`` single-channel or
                5-D multi-channel video tensor), optionally 'HR',
                'HR_center', 'LR_bicubic' and 'ref'.
            need_HR (bool): False at inference time, when no ground truth
                is available.
        """
        if len(data['LR'].size()) == 4:
            # single-channel (Y) cube: add an explicit channel dim
            b, n_frames, h_lr, w_lr = data['LR'].size()
            LR = data['LR'].view(b, -1, 1, h_lr, w_lr)  # b, t, c, h, w
        elif len(data['LR'].size()) == 5:  # for networks that work with 3 channel images
            if self.tensor_shape == 'CTHW':
                _, _, n_frames, _, _ = data['LR'].size()  # b, c, t, h, w
            else:
                # TCHW
                _, n_frames, _, _, _ = data['LR'].size()  # b, t, c, h, w
            LR = data['LR']

        # index of the frame being super-resolved
        self.idx_center = (n_frames - 1) // 2
        self.n_frames = n_frames

        # LR images (LR_y_cube)
        self.var_L = LR.to(self.device)

        # bicubic upscaled LR and RGB center HR (optional; .get() keeps
        # this robust when a dataloader does not provide the keys)
        hr_center = data.get('HR_center')
        self.real_H_center = hr_center.to(self.device) if isinstance(
            hr_center, torch.Tensor) else None
        lr_bic = data.get('LR_bicubic')
        self.var_LR_bic = lr_bic.to(self.device) if isinstance(
            lr_bic, torch.Tensor) else None

        if need_HR:  # train or val
            # HR images
            if len(data['HR'].size()) == 4:
                HR = data['HR'].view(b, -1, 1, h_lr * self.scale, w_lr * self.scale)  # b, t, c, h, w
            elif len(data['HR'].size()) == 5:  # for networks that work with 3 channel images
                HR = data['HR']  # b, t, c, h, w
            self.real_H = HR.to(self.device)

            # discriminator references (fall back to HR)
            input_ref = data.get('ref', data['HR'])
            if len(input_ref.size()) == 4:
                input_ref = input_ref.view(b, -1, 1, h_lr * self.scale, w_lr * self.scale)  # b, t, c, h, w
                self.var_ref = input_ref.to(self.device)
            elif len(input_ref.size()) == 5:  # for networks that work with 3 channel images
                self.var_ref = input_ref.to(self.device)

    def feed_data_batch(self, data, need_HR=True):
        # TODO: full batch interface; currently only stores the LR input
        self.var_L = data

    def optimize_parameters(self, step):
        """Calculate losses, gradients, and update network weights;
        called in every training iteration."""
        # G
        # freeze discriminator while generator is trained to prevent BP
        if self.cri_gan:
            self.requires_grad(self.netD, flag=False, net_type='D')

        # Network forward, generate SR.  Casts to mixed precision if AMP
        # is enabled, else nullcontext.
        with self.cast():
            self.fake_H = self.netG(self.var_L)
            # flow-estimating generators return (flow_L1, flow_L2, flow_L3, SR)
            if not isinstance(self.fake_H, torch.Tensor) and len(self.fake_H) == 4:
                flow_L1, flow_L2, flow_L3, self.fake_H = self.fake_H

        # calculate and log losses
        loss_results = []
        l_g_total = 0

        # update generator (on its own if only training generator or
        # alternatively if training GAN)
        if (self.cri_gan is not True) or (step % self.D_update_ratio == 0 and step > self.D_init_iters):
            with self.cast():
                # get the central frame for SR losses
                if isinstance(self.var_LR_bic, torch.Tensor) and isinstance(self.real_H_center, torch.Tensor):
                    # Y-channel network: recombine SR luma with bicubic
                    # chroma and compare in RGB against the RGB HR center.
                    fake_H_cb = self.var_LR_bic[:, 1, :, :].to(self.device)
                    fake_H_cr = self.var_LR_bic[:, 2, :, :].to(self.device)
                    centralSR = ycbcr_to_rgb(torch.stack(
                        (self.fake_H.squeeze(1), fake_H_cb, fake_H_cr), -3))
                    centralHR = self.real_H_center
                else:
                    centralSR = self.fake_H
                    centralHR = self.real_H[:, :, self.idx_center, :, :] if self.tensor_shape == 'CTHW' else self.real_H[:, self.idx_center, :, :, :]

                # regular losses
                loss_results, self.log_dict = self.generatorlosses(
                    centralSR, centralHR, self.log_dict, self.f_low)
                l_g_total += sum(loss_results)/self.accumulations

                # optical flow reconstruction loss
                # TODO: see if can be moved into loss file
                # TODO 2: test if AMP could affect the loss due to loss of precision
                if self.cri_ofr:  # OFR_loss()
                    l_g_ofr = 0
                    for i in range(self.n_frames):
                        if i != self.idx_center:
                            # three pyramid levels: half-res LR, LR and HR
                            loss_L1 = self.cri_ofr(
                                F.avg_pool2d(self.var_L[:, i, :, :, :], kernel_size=2),
                                F.avg_pool2d(self.var_L[:, self.idx_center, :, :, :], kernel_size=2),
                                flow_L1[i])
                            loss_L2 = self.cri_ofr(
                                self.var_L[:, i, :, :, :],
                                self.var_L[:, self.idx_center, :, :, :], flow_L2[i])
                            loss_L3 = self.cri_ofr(
                                self.real_H[:, i, :, :, :],
                                self.real_H[:, self.idx_center, :, :, :], flow_L3[i])
                            # ofr weights option. lambda2 = 0.2, lambda1 = 0.1 in the paper
                            l_g_ofr += loss_L3 + self.ofr_wl2 * loss_L2 + self.ofr_wl1 * loss_L1
                    # ofr weight option. lambda4 = 0.01 in the paper
                    l_g_ofr = self.ofr_weight * l_g_ofr / (self.n_frames - 1)
                    self.log_dict['ofr'] = l_g_ofr.item()
                    l_g_total += l_g_ofr/self.accumulations

                if self.cri_gan:
                    # adversarial loss
                    l_g_gan = self.adversarial(
                        centralSR, centralHR, netD=self.netD,
                        stage='generator', fsfilter=self.f_high)  # (sr, hr)
                    self.log_dict['l_g_gan'] = l_g_gan.item()
                    l_g_total += l_g_gan/self.accumulations

            # high precision generator losses (can be affected by AMP half precision)
            if self.generatorlosses.precise_loss_list:
                loss_results, self.log_dict = self.generatorlosses(
                    centralSR, centralHR, self.log_dict, self.f_low,
                    precise=True)
                l_g_total += sum(loss_results)/self.accumulations

            # calculate G gradients
            self.calc_gradients(l_g_total)

            # step G optimizer
            self.optimizer_step(step, self.optimizer_G, "G")

        if self.cri_gan:
            # update discriminator: unfreeze it first
            for p in self.netD.parameters():
                p.requires_grad = True

            l_d_total = 0

            with self.cast():
                l_d_total, gan_logs = self.adversarial(
                    centralSR, centralHR, netD=self.netD,
                    stage='discriminator', fsfilter=self.f_high)  # (sr, hr)

                for g_log in gan_logs:
                    self.log_dict[g_log] = gan_logs[g_log]

                l_d_total /= self.accumulations

            # calculate D gradients
            self.calc_gradients(l_d_total)

            # step D optimizer
            self.optimizer_step(step, self.optimizer_D, "D")

    def test(self):
        """Run a forward pass without gradients (validation / inference)."""
        self.netG.eval()
        with torch.no_grad():
            self.fake_H = self.netG(self.var_L)
            # flow-estimating generators return a 4-tuple; keep only the SR
            # frame.  The isinstance guard prevents misreading a plain
            # Tensor whose batch dimension happens to be 4.
            if not isinstance(self.fake_H, torch.Tensor) and len(self.fake_H) == 4:
                _, _, _, self.fake_H = self.fake_H
        self.netG.train()

    def get_current_log(self):
        """Return the OrderedDict of the latest logged loss values."""
        return self.log_dict

    def get_current_visuals(self, need_HR=True):
        """Return detached CPU tensors of the first sample for visualization."""
        # TODO: temporal considerations
        out_dict = OrderedDict()
        out_dict['LR'] = self.var_L.detach()[0].float().cpu()
        out_dict['SR'] = self.fake_H.detach()[0].float().cpu()
        if need_HR:
            out_dict['HR'] = self.real_H.detach()[0].float().cpu()
        return out_dict

    def get_current_visuals_batch(self, need_HR=True):
        """Return detached CPU tensors of the whole batch for visualization."""
        # TODO: temporal considerations
        out_dict = OrderedDict()
        out_dict['LR'] = self.var_L.detach().float().cpu()
        out_dict['SR'] = self.fake_H.detach().float().cpu()
        if need_HR:
            out_dict['HR'] = self.real_H.detach().float().cpu()
        return out_dict
| from __future__ import absolute_import
import os
import logging
from collections import OrderedDict
import torch
import torch.nn as nn
import models.networks as networks
from .base_model import BaseModel
from . import losses
from dataops.colors import ycbcr_to_rgb
import torch.nn.functional as F
from dataops.debug import tmp_vis, tmp_vis_flow, describe_numpy, describe_tensor
logger = logging.getLogger('base')
class VSRModel(BaseModel):
def __init__(self, opt):
super(VSRModel, self).__init__(opt)
train_opt = opt['train']
self.scale = opt.get('scale', 4)
self.tensor_shape = opt.get('tensor_shape', 'TCHW')
# specify the models you want to load/save to the disk.
# The training/test scripts will call <BaseModel.save_networks>
# and <BaseModel.load_networks>
# for training and testing, a generator 'G' is needed
self.model_names = ['G']
# define networks and load pretrained models
self.netG = networks.define_G(opt).to(self.device) # G
if self.is_train:
self.netG.train()
opt_G_nets = [self.netG]
opt_D_nets = []
if train_opt['gan_weight']:
self.model_names.append('D') # add discriminator to the network list
self.netD = networks.define_D(opt).to(self.device) # D
self.netD.train()
opt_D_nets.append(self.netD)
self.load() # load G and D if needed
# define losses, optimizer, scheduler and other components
if self.is_train:
# setup network cap
# define if the generator will have a final
# capping mechanism in the output
self.outm = train_opt.get('finalcap', None)
# setup frequency separation
self.setup_fs()
# initialize losses
# generator losses:
self.generatorlosses = losses.GeneratorLoss(opt, self.device)
# TODO: show the configured losses names in logger
# print(self.generatorlosses.loss_list)
# discriminator loss:
self.setup_gan()
# Optical Flow Reconstruction loss:
ofr_type = train_opt.get('ofr_type', None)
ofr_weight = train_opt.get('ofr_weight', [0.1, 0.2, 0.1, 0.01])
if ofr_type and ofr_weight:
self.ofr_weight = ofr_weight[3] #lambda 4
self.ofr_wl1 = ofr_weight[0] #lambda 1
self.ofr_wl2 = ofr_weight[1] #lambda 2
ofr_wl3 = ofr_weight[2] #lambda 3
if ofr_type == 'ofr':
from models.modules.loss import OFR_loss
#TODO: make the regularization weight an option. lambda3 = 0.1
self.cri_ofr = OFR_loss(reg_weight=ofr_wl3).to(self.device)
else:
self.cri_ofr = False
# configure FreezeD
if self.cri_gan:
self.setup_freezeD()
# prepare optimizers
self.setup_optimizers(opt_G_nets, opt_D_nets, init_setup=True)
# prepare schedulers
self.setup_schedulers()
# set gradients to zero
self.optimizer_G.zero_grad()
if self.cri_gan:
self.optimizer_D.zero_grad()
# init loss log
self.log_dict = OrderedDict()
# configure SWA
self.setup_swa()
# configure virtual batch
self.setup_virtual_batch()
# configure AMP
self.setup_amp()
# print network
# TODO: pass verbose flag from config file
self.print_network(verbose=False)
def feed_data(self, data, need_HR=True):
# data
if len(data['LR'].size()) == 4:
b, n_frames, h_lr, w_lr = data['LR'].size()
LR = data['LR'].view(b, -1, 1, h_lr, w_lr) # b, t, c, h, w
elif len(data['LR'].size()) == 5: # for networks that work with 3 channel images
if self.tensor_shape == 'CTHW':
_, _, n_frames, _, _ = data['LR'].size() # b, c, t, h, w
else:
# TCHW
_, n_frames, _, _, _ = data['LR'].size() # b, t, c, h, w
LR = data['LR']
self.idx_center = (n_frames - 1) // 2
self.n_frames = n_frames
# LR images (LR_y_cube)
self.var_L = LR.to(self.device)
# bicubic upscaled LR and RGB center HR
if isinstance(data['HR_center'], torch.Tensor):
self.real_H_center = data['HR_center'].to(self.device)
else:
self.real_H_center = None
if isinstance(data['LR_bicubic'], torch.Tensor):
self.var_LR_bic = data['LR_bicubic'].to(self.device)
else:
self.var_LR_bic = None
if need_HR: # train or val
# HR images
if len(data['HR'].size()) == 4:
HR = data['HR'].view(b, -1, 1, h_lr * self.scale, w_lr * self.scale) # b, t, c, h, w
elif len(data['HR'].size()) == 5: # for networks that work with 3 channel images
HR = data['HR'] # b, t, c, h, w
self.real_H = HR.to(self.device)
# discriminator references
input_ref = data.get('ref', data['HR'])
if len(input_ref.size()) == 4:
input_ref = input_ref.view(b, -1, 1, h_lr * self.scale, w_lr * self.scale) # b, t, c, h, w
self.var_ref = input_ref.to(self.device)
elif len(input_ref.size()) == 5: # for networks that work with 3 channel images
self.var_ref = input_ref.to(self.device)
def feed_data_batch(self, data, need_HR=True):
# TODO
# LR
self.var_L = data
def optimize_parameters(self, step):
"""Calculate losses, gradients, and update network weights;
called in every training iteration."""
eff_step = step/self.accumulations
# G
# freeze discriminator while generator is trained to prevent BP
if self.cri_gan:
self.requires_grad(self.netD, flag=False, net_type='D')
# Network forward, generate SR
with self.cast():
# inference
self.fake_H = self.netG(self.var_L)
if not isinstance(self.fake_H, torch.Tensor) and len(self.fake_H) == 4:
flow_L1, flow_L2, flow_L3, self.fake_H = self.fake_H
#/with self.cast():
# TODO: TMP test to view samples of the optical flows
# tmp_vis(self.real_H[:, self.idx_center, :, :, :], True)
# print(flow_L1[0].shape)
# tmp_vis(flow_L1[0][:, 0:1, :, :], to_np=True, rgb2bgr=False)
# tmp_vis(flow_L2[0][:, 0:1, :, :], to_np=True, rgb2bgr=False)
# tmp_vis(flow_L3[0][:, 0:1, :, :], to_np=True, rgb2bgr=False)
# tmp_vis_flow(flow_L1[0])
# tmp_vis_flow(flow_L2[0])
# tmp_vis_flow(flow_L3[0])
# calculate and log losses
loss_results = []
l_g_total = 0
# training generator and discriminator
# update generator (on its own if only training generator or alternatively if training GAN)
if (self.cri_gan is not True) or (step % self.D_update_ratio == 0 and step > self.D_init_iters):
with self.cast(): # Casts operations to mixed precision if enabled, else nullcontext
# get the central frame for SR losses
if isinstance(self.var_LR_bic, torch.Tensor) and isinstance(self.real_H_center, torch.Tensor):
# tmp_vis(ycbcr_to_rgb(self.var_LR_bic), True)
# print("fake_H:", self.fake_H.shape)
fake_H_cb = self.var_LR_bic[:, 1, :, :].to(self.device)
# print("fake_H_cb: ", fake_H_cb.shape)
fake_H_cr = self.var_LR_bic[:, 2, :, :].to(self.device)
# print("fake_H_cr: ", fake_H_cr.shape)
centralSR = ycbcr_to_rgb(torch.stack((self.fake_H.squeeze(1), fake_H_cb, fake_H_cr), -3))
# print("central rgb", centralSR.shape)
# tmp_vis(centralSR, True)
# centralHR = ycbcr_to_rgb(self.real_H_center) #Not needed, can send the rgb HR from dataloader
centralHR = self.real_H_center
# print(centralHR.shape)
# tmp_vis(centralHR)
else:
# if self.var_L.shape[2] == 1:
centralSR = self.fake_H
centralHR = self.real_H[:, :, self.idx_center, :, :] if self.tensor_shape == 'CTHW' else self.real_H[:, self.idx_center, :, :, :]
# tmp_vis(torch.cat((centralSR, centralHR), -1))
# regular losses
# loss_SR = criterion(self.fake_H, self.real_H[:, idx_center, :, :, :]) #torch.nn.MSELoss()
loss_results, self.log_dict = self.generatorlosses(
centralSR, centralHR, self.log_dict, self.f_low)
l_g_total += sum(loss_results)/self.accumulations
# optical flow reconstruction loss
# TODO: see if can be moved into loss file
# TODO 2: test if AMP could affect the loss due to loss of precision
if self.cri_ofr: # OFR_loss()
l_g_ofr = 0
for i in range(self.n_frames):
if i != self.idx_center:
loss_L1 = self.cri_ofr(
F.avg_pool2d(self.var_L[:, i, :, :, :], kernel_size=2),
F.avg_pool2d(self.var_L[:, self.idx_center, :, :, :], kernel_size=2),
flow_L1[i])
loss_L2 = self.cri_ofr(
self.var_L[:, i, :, :, :],
self.var_L[:, self.idx_center, :, :, :], flow_L2[i])
loss_L3 = self.cri_ofr(
self.real_H[:, i, :, :, :],
self.real_H[:, self.idx_center, :, :, :], flow_L3[i])
# ofr weights option. lambda2 = 0.2, lambda1 = 0.1 in the paper
l_g_ofr += loss_L3 + self.ofr_wl2 * loss_L2 + self.ofr_wl1 * loss_L1
# ofr weight option. lambda4 = 0.01 in the paper
l_g_ofr = self.ofr_weight * l_g_ofr / (self.n_frames - 1)
self.log_dict['ofr'] = l_g_ofr.item()
l_g_total += l_g_ofr/self.accumulations
if self.cri_gan:
# adversarial loss
l_g_gan = self.adversarial(
centralSR, centralHR, netD=self.netD,
stage='generator', fsfilter = self.f_high) # (sr, hr)
self.log_dict['l_g_gan'] = l_g_gan.item()
l_g_total += l_g_gan/self.accumulations
#/with self.cast():
# high precision generator losses (can be affected by AMP half precision)
if self.generatorlosses.precise_loss_list:
loss_results, self.log_dict = self.generatorlosses(
centralSR, centralHR, self.log_dict, self.f_low,
precise=True)
l_g_total += sum(loss_results)/self.accumulations
# calculate G gradients
self.calc_gradients(l_g_total)
# step G optimizer
self.optimizer_step(step, self.optimizer_G, "G")
if self.cri_gan:
# update discriminator
# unfreeze discriminator
for p in self.netD.parameters():
p.requires_grad = True
l_d_total = 0
with self.cast(): # Casts operations to mixed precision if enabled, else nullcontext
l_d_total, gan_logs = self.adversarial(
centralSR, centralHR, netD=self.netD,
stage='discriminator', fsfilter = self.f_high) # (sr, hr)
for g_log in gan_logs:
self.log_dict[g_log] = gan_logs[g_log]
l_d_total /= self.accumulations
# /with autocast():
# calculate G gradients
self.calc_gradients(l_d_total)
# step D optimizer
self.optimizer_step(step, self.optimizer_D, "D")
def test(self):
# TODO: test/val code
self.netG.eval()
with torch.no_grad():
if self.is_train:
self.fake_H = self.netG(self.var_L)
if len(self.fake_H) == 4:
_, _, _, self.fake_H = self.fake_H
else:
# self.fake_H = self.netG(self.var_L, isTest=True)
self.fake_H = self.netG(self.var_L)
if len(self.fake_H) == 4:
_, _, _, self.fake_H = self.fake_H
self.netG.train()
def get_current_log(self):
return self.log_dict
def get_current_visuals(self, need_HR=True):
# TODO: temporal considerations
out_dict = OrderedDict()
out_dict['LR'] = self.var_L.detach()[0].float().cpu()
out_dict['SR'] = self.fake_H.detach()[0].float().cpu()
if need_HR:
out_dict['HR'] = self.real_H.detach()[0].float().cpu()
return out_dict
def get_current_visuals_batch(self, need_HR=True):
# TODO: temporal considerations
out_dict = OrderedDict()
out_dict['LR'] = self.var_L.detach().float().cpu()
out_dict['SR'] = self.fake_H.detach().float().cpu()
if need_HR:
out_dict['HR'] = self.real_H.detach().float().cpu()
return out_dict
| en | 0.606275 | # specify the models you want to load/save to the disk. # The training/test scripts will call <BaseModel.save_networks> # and <BaseModel.load_networks> # for training and testing, a generator 'G' is needed # define networks and load pretrained models # G # add discriminator to the network list # D # load G and D if needed # define losses, optimizer, scheduler and other components # setup network cap # define if the generator will have a final # capping mechanism in the output # setup frequency separation # initialize losses # generator losses: # TODO: show the configured losses names in logger # print(self.generatorlosses.loss_list) # discriminator loss: # Optical Flow Reconstruction loss: #lambda 4 #lambda 1 #lambda 2 #lambda 3 #TODO: make the regularization weight an option. lambda3 = 0.1 # configure FreezeD # prepare optimizers # prepare schedulers # set gradients to zero # init loss log # configure SWA # configure virtual batch # configure AMP # print network # TODO: pass verbose flag from config file # data # b, t, c, h, w # for networks that work with 3 channel images # b, c, t, h, w # TCHW # b, t, c, h, w # LR images (LR_y_cube) # bicubic upscaled LR and RGB center HR # train or val # HR images # b, t, c, h, w # for networks that work with 3 channel images # b, t, c, h, w # discriminator references # b, t, c, h, w # for networks that work with 3 channel images # TODO # LR Calculate losses, gradients, and update network weights; called in every training iteration. 
# G # freeze discriminator while generator is trained to prevent BP # Network forward, generate SR # inference #/with self.cast(): # TODO: TMP test to view samples of the optical flows # tmp_vis(self.real_H[:, self.idx_center, :, :, :], True) # print(flow_L1[0].shape) # tmp_vis(flow_L1[0][:, 0:1, :, :], to_np=True, rgb2bgr=False) # tmp_vis(flow_L2[0][:, 0:1, :, :], to_np=True, rgb2bgr=False) # tmp_vis(flow_L3[0][:, 0:1, :, :], to_np=True, rgb2bgr=False) # tmp_vis_flow(flow_L1[0]) # tmp_vis_flow(flow_L2[0]) # tmp_vis_flow(flow_L3[0]) # calculate and log losses # training generator and discriminator # update generator (on its own if only training generator or alternatively if training GAN) # Casts operations to mixed precision if enabled, else nullcontext # get the central frame for SR losses # tmp_vis(ycbcr_to_rgb(self.var_LR_bic), True) # print("fake_H:", self.fake_H.shape) # print("fake_H_cb: ", fake_H_cb.shape) # print("fake_H_cr: ", fake_H_cr.shape) # print("central rgb", centralSR.shape) # tmp_vis(centralSR, True) # centralHR = ycbcr_to_rgb(self.real_H_center) #Not needed, can send the rgb HR from dataloader # print(centralHR.shape) # tmp_vis(centralHR) # if self.var_L.shape[2] == 1: # tmp_vis(torch.cat((centralSR, centralHR), -1)) # regular losses # loss_SR = criterion(self.fake_H, self.real_H[:, idx_center, :, :, :]) #torch.nn.MSELoss() # optical flow reconstruction loss # TODO: see if can be moved into loss file # TODO 2: test if AMP could affect the loss due to loss of precision # OFR_loss() # ofr weights option. lambda2 = 0.2, lambda1 = 0.1 in the paper # ofr weight option. 
lambda4 = 0.01 in the paper # adversarial loss # (sr, hr) #/with self.cast(): # high precision generator losses (can be affected by AMP half precision) # calculate G gradients # step G optimizer # update discriminator # unfreeze discriminator # Casts operations to mixed precision if enabled, else nullcontext # (sr, hr) # /with autocast(): # calculate G gradients # step D optimizer # TODO: test/val code # self.fake_H = self.netG(self.var_L, isTest=True) # TODO: temporal considerations # TODO: temporal considerations | 2.180042 | 2 |
tests/test_arrays.py | ritabt/petra | 0 | 6623816 | <filename>tests/test_arrays.py
from typing import cast, Callable
import subprocess
import petra as pt
import unittest
from ctypes import CFUNCTYPE, c_int32
# A petra module with one exported function exercising array element access.
program = pt.Program("module")

# 3-element array of 32-bit ints, and a symbol of that type for the local var.
My_Array = pt.ArrayType(pt.Int32_t, 3)
array_var = pt.Symbol(My_Array, "array_var")

# array_set_get_values(): store 1, 2, 3 into the array with SetElement,
# then read the elements back with GetElement and return their sum (6).
program.add_func(
    "array_set_get_values",
    (),
    pt.Int32_t,
    pt.Block(
        [
            pt.DefineVar(array_var),
            # SetElement returns the updated array value, so each store is
            # an assignment back into array_var
            pt.Assign(
                pt.Var(array_var), pt.SetElement(pt.Var(array_var), pt.Int32(1), 0)
            ),
            pt.Assign(
                pt.Var(array_var), pt.SetElement(pt.Var(array_var), pt.Int32(2), 1)
            ),
            pt.Assign(
                pt.Var(array_var), pt.SetElement(pt.Var(array_var), pt.Int32(3), 2)
            ),
            pt.Return(
                pt.Add(
                    pt.GetElement(pt.Var(array_var), 0),
                    pt.Add(
                        pt.GetElement(pt.Var(array_var), 1),
                        pt.GetElement(pt.Var(array_var), 2),
                    ),
                )
            ),
        ]
    ),
)
class ArraysTestCase(unittest.TestCase):
    """JIT-compile the petra program and exercise array set/get."""

    def setUp(self) -> None:
        # Compile to native code and wrap the exported symbol in a ctypes
        # callable so the test can invoke it like a Python function.
        self.engine = program.compile()

        array_set_get_values = self.engine.get_function_address("array_set_get_values")
        self.array_set_get_values = cast(
            Callable[[], int], CFUNCTYPE(c_int32)(array_set_get_values)
        )

    def test_array_set_get_values(self) -> None:
        # 1 + 2 + 3, stored via SetElement and read back via GetElement
        self.assertEqual(self.array_set_get_values(), 6)
| <filename>tests/test_arrays.py
from typing import cast, Callable
import subprocess
import petra as pt
import unittest
from ctypes import CFUNCTYPE, c_int32
program = pt.Program("module")
My_Array = pt.ArrayType(pt.Int32_t, 3)
array_var = pt.Symbol(My_Array, "array_var")
program.add_func(
"array_set_get_values",
(),
pt.Int32_t,
pt.Block(
[
pt.DefineVar(array_var),
pt.Assign(
pt.Var(array_var), pt.SetElement(pt.Var(array_var), pt.Int32(1), 0)
),
pt.Assign(
pt.Var(array_var), pt.SetElement(pt.Var(array_var), pt.Int32(2), 1)
),
pt.Assign(
pt.Var(array_var), pt.SetElement(pt.Var(array_var), pt.Int32(3), 2)
),
pt.Return(
pt.Add(
pt.GetElement(pt.Var(array_var), 0),
pt.Add(
pt.GetElement(pt.Var(array_var), 1),
pt.GetElement(pt.Var(array_var), 2),
),
)
),
]
),
)
class ArraysTestCase(unittest.TestCase):
def setUp(self) -> None:
self.engine = program.compile()
array_set_get_values = self.engine.get_function_address("array_set_get_values")
self.array_set_get_values = cast(
Callable[[], int], CFUNCTYPE(c_int32)(array_set_get_values)
)
def test_array_set_get_values(self) -> None:
self.assertEqual(self.array_set_get_values(), 6)
| none | 1 | 2.335365 | 2 | |
output/models/nist_data/atomic/nmtoken/schema_instance/nistschema_sv_iv_atomic_nmtoken_pattern_3_xsd/nistschema_sv_iv_atomic_nmtoken_pattern_3.py | tefra/xsdata-w3c-tests | 1 | 6623817 | <gh_stars>1-10
from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-NMTOKEN-pattern-3-NS"
@dataclass
class NistschemaSvIvAtomicNmtokenPattern3:
    r"""Root element binding for the NIST NMTOKEN pattern-3 schema instance.

    Auto-generated (xsdata) dataclass; the element text must match the
    XSD pattern ``\c{6}`` (six XML name characters).
    """

    class Meta:
        # XML element name and target namespace used by the (de)serializer
        name = "NISTSchema-SV-IV-atomic-NMTOKEN-pattern-3"
        namespace = "NISTSchema-SV-IV-atomic-NMTOKEN-pattern-3-NS"

    # Element text content; validated against the XSD pattern by xsdata.
    value: str = field(
        default="",
        metadata={
            "required": True,
            "pattern": r"\c{6}",
        }
    )
| from dataclasses import dataclass, field
__NAMESPACE__ = "NISTSchema-SV-IV-atomic-NMTOKEN-pattern-3-NS"
@dataclass
class NistschemaSvIvAtomicNmtokenPattern3:
class Meta:
name = "NISTSchema-SV-IV-atomic-NMTOKEN-pattern-3"
namespace = "NISTSchema-SV-IV-atomic-NMTOKEN-pattern-3-NS"
value: str = field(
default="",
metadata={
"required": True,
"pattern": r"\c{6}",
}
) | none | 1 | 1.803223 | 2 | |
BPt/main/helpers.py | sahahn/ABCD_ML | 1 | 6623818 | <reponame>sahahn/ABCD_ML
def clean_str(in_str):
    """Return a cleaned string representation of *in_str*.

    Floats that are whole numbers are rendered without a decimal part,
    and any single or double quote characters are removed.
    """
    # Whole-valued floats (e.g. 3.0) are shown as integers ('3').
    if isinstance(in_str, float):
        integral = '%.0f' % in_str
        if float(integral) == in_str:
            in_str = integral

    cleaned = str(in_str)

    # Strip quote characters that break downstream representations.
    for quote in ('"', "'"):
        cleaned = cleaned.replace(quote, '')
    return cleaned
| def clean_str(in_str):
# If float input, want to
# represent without decimals if
# they are just 0's
if isinstance(in_str, float):
as_int_str = f'{in_str:.0f}'
if float(as_int_str) == in_str:
in_str = as_int_str
# Make sure str
in_str = str(in_str)
# Get rid of some common repr issues
in_str = in_str.replace('"', '')
in_str = in_str.replace("'", '')
return in_str | en | 0.895567 | # If float input, want to # represent without decimals if # they are just 0's # Make sure str # Get rid of some common repr issues | 3.472411 | 3 |
setup.py | rpappalax/box-it-up | 0 | 6623819 | #!/usr/bin/env python
from setuptools import setup, find_packages
# Packaging metadata for the box-it-up ascii-table formatting library.
setup(
    name = "box-it-up",
    version = "0.0.3",
    description = "Python class for formatting various kinds of table data into an ascii table.",
    author = "<NAME>",
    author_email = "<EMAIL>",
    url = "https://github.com/rpappalax/box-it-up",
    # pure-python, no runtime dependencies
    install_requires = [],
    packages = find_packages(),
    keywords = ['testing', 'logging', 'reporting', 'stats', 'table'],
    classifiers=[
        "Programming Language :: Python",
        "Programming Language :: Python :: 2",
        "Programming Language :: Python :: 2.6",
        "Programming Language :: Python :: 2.7",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3.2",
        "Programming Language :: Python :: 3.3",
    ]
)
| #!/usr/bin/env python
from setuptools import setup, find_packages
setup(
name = "box-it-up",
version = "0.0.3",
description = "Python class for formatting various kinds of table data into an ascii table.",
author = "<NAME>",
author_email = "<EMAIL>",
url = "https://github.com/rpappalax/box-it-up",
install_requires = [],
packages = find_packages(),
keywords = ['testing', 'logging', 'reporting', 'stats', 'table'],
classifiers=[
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.2",
"Programming Language :: Python :: 3.3",
]
)
| ru | 0.26433 | #!/usr/bin/env python | 1.719566 | 2 |
providers/poczta.py | krzynio/pl-packagetrack | 7 | 6623820 | <filename>providers/poczta.py
#!/usr/bin/env python
import requests
import os, sys
from pyquery import PyQuery as pq
import time
import logging
import dateparser
import re
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from models import trackingStatus,trackingEvent
NAME = "<NAME>"
ID = __name__[10:]
POPULARITY = 10
def guess(number):
    """Heuristically decide whether *number* looks like a Poczta Polska
    tracking code (UPU international format or 20-digit domestic)."""
    # International Postal Union: 2 letters, 9 digits, 2 letters
    is_international = re.search(r"^[A-Z]{2}\d{9}[A-Z]{2}$", number)
    # Domestic parcels use a 20-character code (length check only)
    return bool(is_international) or len(number) == 20
def track(number):
    """Query the Poczta Polska e-monitoring site for tracking *number*.

    Scrapes the HTML events table and returns a trackingStatus whose
    events are in chronological order.  Status is 'DELIVERED' once a
    pickup/delivery event is seen, 'TRANSIT' otherwise, or 'NOTFOUND'
    when the service returns no events.
    """
    # First request only establishes a PHP session; the tracking endpoint
    # needs the session id both as a cookie and as a POST field ('s').
    r = requests.get("http://emonitoring.poczta-polska.pl/")
    cookies = r.cookies
    session_id = r.cookies['PHPSESSID']
    r = requests.post("http://emonitoring.poczta-polska.pl/wssClient.php",
        headers = {
            'Referer': "http://emonitoring.poczta-polska.pl/",
            'User-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
        },
        data = {
            'n': number,
            's': session_id
        },
        cookies = cookies
    )
    d = pq(r.text)
    table = d('table#zadarzenia_td')
    events = []
    status = "TRANSIT"
    i = 0
    for row in table('tr').items():
        if i > 0:  # skip the table header row
            # cells: [description, timestamp, office, (optional extra)]
            l = [t.text() for t in (row('td').items())]
            adr = row('td a.jedn').attr('title')
            if adr and '|' in adr:
                # the link title holds '|'-separated post-office details;
                # keep the first two fields as the location string
                l.append(', '.join(adr.split('|')[0:2]))
            if l:
                # timestamps come as year-month-day strings
                d = dateparser.parse(l[1], settings={'DATE_ORDER': 'YMD'})
                if len(l) == 4:
                    l[2] = "%s - %s" % (l[2], l[3])
                events.append(trackingEvent(d, l[2], l[0]))
                # Polish for "picked up" / "delivered"
                if re.search("(Odebrano|Doręczono)", l[0]):
                    status = "DELIVERED"
        i = i + 1
    if len(events) > 0:
        # site lists newest first; reverse into chronological order
        return trackingStatus(number, ID, status, events[::-1])
    else:
        return trackingStatus(number, ID, 'NOTFOUND', [])
| <filename>providers/poczta.py
#!/usr/bin/env python
import requests
import os, sys
from pyquery import PyQuery as pq
import time
import logging
import dateparser
import re
sys.path.insert(1, os.path.join(sys.path[0], '..'))
from models import trackingStatus,trackingEvent
NAME = "<NAME>"
ID = __name__[10:]
POPULARITY = 10
def guess(number):
if re.search(r"^[A-Z]{2}\d{9}[A-Z]{2}$", number): # International Postal Union
return True
return len(number) == 20 # domestic
def track(number):
    """Scrape emonitoring.poczta-polska.pl for the tracking history of *number*."""
    # The initial GET only establishes the PHP session cookie the POST needs.
    r = requests.get("http://emonitoring.poczta-polska.pl/")
    cookies = r.cookies
    session_id = r.cookies['PHPSESSID']
    r = requests.post("http://emonitoring.poczta-polska.pl/wssClient.php",
        headers = {
            'Referer': "http://emonitoring.poczta-polska.pl/",
            'User-agent': "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_4) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36",
        },
        data = {
            'n': number,
            's': session_id
        },
        cookies = cookies
    )
    d = pq(r.text)
    table = d('table#zadarzenia_td')
    events = []
    status = "TRANSIT"
    i = 0
    for row in table('tr').items():
        if i > 0:  # row 0 is the table header
            l = [t.text() for t in (row('td').items())]
            adr = row('td a.jedn').attr('title')
            if adr and '|' in adr:
                # Link title appears to be a '|'-separated office address -- TODO confirm.
                l.append(', '.join(adr.split('|')[0:2]))
            if l:
                # NOTE(review): `d` is reused here, shadowing the pyquery document above.
                d = dateparser.parse(l[1], settings={'DATE_ORDER': 'YMD'})
                if len(l) == 4:
                    l[2] = "%s - %s" % (l[2], l[3])
                events.append(trackingEvent(d, l[2], l[0]))
                # "Odebrano"/"Doręczono" are Polish for picked up / delivered.
                if re.search("(Odebrano|Doręczono)", l[0]):
                    status = "DELIVERED"
        i = i + 1
    if len(events) > 0:
        # Reversed relative to the page's listing order.
        return trackingStatus(number, ID, status, events[::-1])
    else:
        return trackingStatus(number, ID, 'NOTFOUND', [])
| en | 0.287577 | #!/usr/bin/env python # International Postal Union # domestic #zadarzenia_td') | 2.412727 | 2 |
asteroid.py | penguintutor/pico-spacegame | 2 | 6623821 | import utime
from constants import *
class Asteroid:
    """A single asteroid: waits off screen, spawns after a delay, then falls."""

    # Circle radius (pixels) for each named sprite size; anything else is large.
    _RADII = {"asteroid_sml": 5, "asteroid_med": 8}
    _DEFAULT_RADIUS = 12

    def __init__(self, display, start_time, image_size, start_pos, velocity, color=(150, 75, 0)):
        self.display = display
        self.size = self._RADII.get(image_size, self._DEFAULT_RADIUS)
        self.start_pos = start_pos
        # Park the asteroid off screen until update() spawns it.
        self.x = -20
        self.y = -20
        self.start_time = start_time
        self.velocity = velocity
        self.color = color
        self.status = STATUS_WAITING

    def draw(self, display_buffer):
        """Render the asteroid as a filled circle while it is visible."""
        if self.status == STATUS_VISIBLE:
            self.display.set_pen(*self.color)
            self.display.circle(int(self.x), int(self.y), self.size)

    def update(self, level_time):
        """Spawn once the start delay has elapsed, otherwise keep falling."""
        if self.status == STATUS_VISIBLE:
            self.y += self.velocity
        elif self.status == STATUS_WAITING and utime.time() > level_time + self.start_time:
            # Move from the off-screen parking spot to the real start position.
            self.x, self.y = self.start_pos[0], self.start_pos[1]
            self.status = STATUS_VISIBLE

    def reset(self):
        """Make the asteroid wait to be spawned again."""
        self.status = STATUS_WAITING

    def hit(self):
        """Flag the asteroid as destroyed."""
        self.status = STATUS_DESTROYED

    def collidepoint(self, point_x, point_y):
        """Rectangle-approximated hit test around the asteroid's centre."""
        return (abs(point_x - self.x) < self.size
                and abs(point_y - self.y) < self.size)
from constants import *
class Asteroid:
    """One falling asteroid for the Pico space game.

    Lifecycle: STATUS_WAITING (parked off screen) -> STATUS_VISIBLE (falling)
    -> STATUS_DESTROYED, with reset() returning it to WAITING.
    """
    def __init__ (self, display, start_time, image_size, start_pos, velocity, color=(150, 75, 0)):
        self.display = display
        # Map the sprite-size keyword to a circle radius in pixels.
        if (image_size == "asteroid_sml"):
            self.size = 5
        elif (image_size == "asteroid_med"):
            self.size = 8
        else:
            self.size = 12
        self.start_pos = start_pos
        #self.x = start_pos[0]
        #self.y = start_pos[1]
        # start position is off screen
        self.x = -20
        self.y = -20
        # Spawn delay relative to level_time (same units as utime.time()).
        self.start_time = start_time
        # Downward movement per update() call.
        self.velocity = velocity
        self.color = color
        self.status = STATUS_WAITING
    def draw(self, display_buffer):
        """Draw the asteroid; *display_buffer* is accepted but unused here."""
        if self.status != STATUS_VISIBLE:
            return
        self.display.set_pen(*self.color)
        self.display.circle(int(self.x), int(self.y), self.size)
    def update(self, level_time):
        """Advance one tick: spawn when the delay has elapsed, else fall."""
        if self.status == STATUS_WAITING:
            # Check if time reached
            if (utime.time() > level_time + self.start_time):
                #print ("Starting new asteroid")
                # Reset to start position
                self.x = self.start_pos[0]
                self.y = self.start_pos[1]
                self.status = STATUS_VISIBLE
        elif self.status == STATUS_VISIBLE:
            self.y+=self.velocity
    def reset(self):
        """Return to the WAITING state so the asteroid can respawn."""
        self.status = STATUS_WAITING
    def hit(self):
        """Mark the asteroid destroyed (e.g. shot by the player)."""
        self.status = STATUS_DESTROYED
    def collidepoint (self, point_x, point_y):
        """True if (point_x, point_y) lies strictly inside the bounding box."""
        # simplified check based on rect around centre of asteroid
        if (point_x > (self.x - self.size) and point_x < (self.x + self.size) and point_y > (self.y - self.size) and point_y < (self.y + self.size)) :
            return True
        return False
train_thu.py | MengyuanChen21/CVPR2022-FTCL | 2 | 6623822 | <gh_stars>1-10
from tqdm import tqdm
import numpy as np
import torch
def train(args, model, dataloader, pair_dataloader, criterion, optimizer):
    """Run one training epoch of the weakly-supervised action-localization model.

    Two modes, selected by ``args.ftcl``:
      * plain mode -- iterate ``dataloader`` (single videos);
      * FTCL mode  -- iterate ``pair_dataloader`` (video pairs), feeding both
        clips through the model and a pairwise criterion.

    Returns a dict of mean per-component losses plus video-level
    classification accuracy for the epoch.
    """
    model.train()
    print("-------------------------------------------------------------------------------")
    device = args.device
    # train_process
    # Videos whose thresholded multi-label prediction matched the label exactly.
    train_num_correct = 0
    train_num_total = 0
    # Per-batch values of every loss component, averaged at the end of the epoch.
    loss_stack = []
    acm_loss_stack = []
    act_inst_loss_stack = []
    act_cont_loss_stack = []
    act_back_loss_stack = []
    guide_loss_stack = []
    att_loss_stack = []
    feat_loss_stack = []
    lcs_loss_stack = []
    fsd_loss_stack = []
    if not args.ftcl:
        for input_feature, vid_label_t in tqdm(dataloader):
            vid_label_t = vid_label_t.to(device)
            input_feature = input_feature.to(device)
            # Model output: per-branch video scores, features, temporal
            # attention, CAS, and candidate sets for the LCS/FSD losses.
            act_inst_cls, act_cont_cls, act_back_cls, \
            act_inst_feat, act_cont_feat, act_back_feat, \
            temp_att, act_inst_cas, _, _, _, \
            lcs_candi, fsd_act_candi, fsd_bak_candi = model(input_feature)
            loss, loss_dict = criterion(act_inst_cls, act_cont_cls, act_back_cls, vid_label_t, temp_att,
                                        act_inst_feat, act_cont_feat, act_back_feat, act_inst_cas,
                                        lcs_candi, fsd_act_candi, fsd_bak_candi, args)
            optimizer.zero_grad()
            # Skip the update when the loss is NaN to avoid poisoning weights.
            if not torch.isnan(loss):
                loss.backward()
                optimizer.step()
            with torch.no_grad():
                # Threshold foreground scores into a multi-hot prediction and
                # count videos where every class bit matches the label.
                fg_score = act_inst_cls[:, :args.action_cls_num]
                label_np = vid_label_t.cpu().numpy()
                score_np = fg_score.cpu().numpy()
                pred_np = np.zeros_like(score_np)
                pred_np[score_np >= args.cls_threshold] = 1
                pred_np[score_np < args.cls_threshold] = 0
                correct_pred = np.sum(label_np == pred_np, axis=1)
                train_num_correct += np.sum((correct_pred == args.action_cls_num))
                train_num_total += correct_pred.shape[0]
                loss_stack.append(loss.cpu().item())
                act_inst_loss_stack.append(loss_dict["act_inst_loss"])
                act_cont_loss_stack.append(loss_dict["act_cont_loss"])
                act_back_loss_stack.append(loss_dict["act_back_loss"])
                guide_loss_stack.append(loss_dict["guide_loss"])
                feat_loss_stack.append(loss_dict["feat_loss"])
                att_loss_stack.append(loss_dict["sparse_loss"])
                acm_loss_stack.append(loss_dict["acm_loss"])
                lcs_loss_stack.append(loss_dict["lcs_loss"])
                fsd_loss_stack.append(loss_dict["fsd_loss"])
        train_acc = train_num_correct / train_num_total
        train_log_dict = {"train_act_inst_cls_loss": np.mean(act_inst_loss_stack),
                          "train_act_cont_cls_loss": np.mean(act_cont_loss_stack),
                          "train_act_back_cls_loss": np.mean(act_back_loss_stack),
                          "train_guide_loss": np.mean(guide_loss_stack),
                          "train_feat_loss": np.mean(feat_loss_stack),
                          "train_att_loss": np.mean(att_loss_stack),
                          "train_acm_loss": np.mean(acm_loss_stack),
                          "train_lcs_loss": np.mean(lcs_loss_stack),
                          "train_fsd_loss": np.mean(fsd_loss_stack),
                          "train_loss": np.mean(loss_stack),
                          "train_acc": train_acc}
        print("")
        print("train_act_inst_cls_loss:{:.3f} train_act_cont_cls_loss:{:.3f}".format(np.mean(act_inst_loss_stack),
                                                                                     np.mean(act_cont_loss_stack)))
        print("train_act_back_cls_loss:{:.3f} train_att_loss:{:.3f}".format(np.mean(act_back_loss_stack),
                                                                            np.mean(att_loss_stack)))
        print("train_feat_loss: {:.3f} train_loss:{:.3f}".format(np.mean(feat_loss_stack), np.mean(loss_stack)))
        print("train acc:{:.3f}".format(train_acc))
        print("-------------------------------------------------------------------------------")
        return train_log_dict
    else:
        # FTCL mode: paired videos, pairwise criterion with mirrored arguments.
        for input_feature_1, input_feature_2, vid_label_1, vid_label_2 in tqdm(pair_dataloader):
            vid_label_1 = vid_label_1.to(device)
            vid_label_2 = vid_label_2.to(device)
            input_feature_1 = input_feature_1.to(device)
            input_feature_2 = input_feature_2.to(device)
            output_1, output_2 = model(args.ftcl, input_feature_1, input_feature_2)
            act_inst_cls_1, act_cont_cls_1, act_back_cls_1, act_inst_feat_1, act_cont_feat_1, act_back_feat_1, \
            temp_att_1, act_inst_cas_1, act_cas_1, act_cont_cas_1, act_back_cas_1, \
            candi_for_dp_1, act_candi_for_nw_1, bak_candi_for_nw_1 = output_1
            act_inst_cls_2, act_cont_cls_2, act_back_cls_2, act_inst_feat_2, act_cont_feat_2, act_back_feat_2, \
            temp_att_2, act_inst_cas_2, act_cas_2, act_cont_cas_2, act_back_cas_2, \
            candi_for_dp_2, act_candi_for_nw_2, bak_candi_for_nw_2 = output_2
            loss, loss_dict = criterion(act_inst_cls_1, act_cont_cls_1, act_back_cls_1, vid_label_1, temp_att_1,
                                        act_inst_feat_1, act_cont_feat_1, act_back_feat_1, act_inst_cas_1,
                                        candi_for_dp_1, act_candi_for_nw_1, bak_candi_for_nw_1,
                                        args,
                                        act_inst_cls_2, act_cont_cls_2, act_back_cls_2, vid_label_2, temp_att_2,
                                        act_inst_feat_2, act_cont_feat_2, act_back_feat_2, act_inst_cas_2,
                                        candi_for_dp_2, act_candi_for_nw_2, bak_candi_for_nw_2,
                                        )
            optimizer.zero_grad()
            if not torch.isnan(loss):
                loss.backward()
                optimizer.step()
            with torch.no_grad():
                # A pair counts as correct only when BOTH videos are exactly right.
                fg_score_1 = act_inst_cls_1[:, :args.action_cls_num]
                fg_score_2 = act_inst_cls_2[:, :args.action_cls_num]
                label_np_1 = vid_label_1.cpu().numpy()
                label_np_2 = vid_label_2.cpu().numpy()
                score_np_1 = fg_score_1.cpu().numpy()
                score_np_2 = fg_score_2.cpu().numpy()
                pred_np_1 = np.zeros_like(score_np_1)
                pred_np_2 = np.zeros_like(score_np_2)
                pred_np_1[score_np_1 >= args.cls_threshold] = 1
                pred_np_2[score_np_2 >= args.cls_threshold] = 1
                pred_np_1[score_np_1 < args.cls_threshold] = 0
                pred_np_2[score_np_2 < args.cls_threshold] = 0
                correct_pred_1 = np.sum(label_np_1 == pred_np_1, axis=1)
                correct_pred_2 = np.sum(label_np_2 == pred_np_2, axis=1)
                train_num_correct += np.sum(((correct_pred_1 == args.action_cls_num) *
                                             (correct_pred_2 == args.action_cls_num)))
                train_num_total += correct_pred_1.shape[0]
                loss_stack.append(loss.cpu().item())
                act_inst_loss_stack.append(loss_dict["act_inst_loss"])
                act_cont_loss_stack.append(loss_dict["act_cont_loss"])
                act_back_loss_stack.append(loss_dict["act_back_loss"])
                guide_loss_stack.append(loss_dict["guide_loss"])
                feat_loss_stack.append(loss_dict["feat_loss"])
                att_loss_stack.append(loss_dict["sparse_loss"])
                acm_loss_stack.append(loss_dict["acm_loss"])
                lcs_loss_stack.append(loss_dict["lcs_loss"])
                fsd_loss_stack.append(loss_dict["fsd_loss"])
        train_acc = train_num_correct / train_num_total
        train_log_dict = {"train_act_inst_cls_loss": np.mean(act_inst_loss_stack),
                          "train_act_cont_cls_loss": np.mean(act_cont_loss_stack),
                          "train_act_back_cls_loss": np.mean(act_back_loss_stack),
                          "train_guide_loss": np.mean(guide_loss_stack),
                          "train_feat_loss": np.mean(feat_loss_stack),
                          "train_att_loss": np.mean(att_loss_stack),
                          "train_acm_loss": np.mean(acm_loss_stack),
                          "train_lcs_loss": np.mean(lcs_loss_stack),
                          "train_fsd_loss": np.mean(fsd_loss_stack),
                          "train_loss": np.mean(loss_stack),
                          "train_acc": train_acc}
        # NOTE(review): unlike the plain branch, no trailing separator line is
        # printed here -- confirm whether that asymmetry is intentional.
        print("\n")
        print("train_act_inst_cls_loss:{:.3f} train_act_cont_cls_loss:{:.3f}".format(np.mean(act_inst_loss_stack),
                                                                                     np.mean(act_cont_loss_stack)))
        print("train_act_back_cls_loss:{:.3f} train_att_loss:{:.3f}".format(np.mean(act_back_loss_stack),
                                                                            np.mean(att_loss_stack)))
        print("train_feat_loss: {:.3f} train_loss:{:.3f}".format(np.mean(feat_loss_stack), np.mean(loss_stack)))
        print("train acc:{:.3f}".format(train_acc))
        return train_log_dict
| from tqdm import tqdm
import numpy as np
import torch
def train(args, model, dataloader, pair_dataloader, criterion, optimizer):
    """One training epoch; plain single-video mode or paired FTCL mode per ``args.ftcl``.

    Returns a dict of epoch-mean losses plus video-level multi-label accuracy.
    """
    model.train()
    print("-------------------------------------------------------------------------------")
    device = args.device
    # train_process
    train_num_correct = 0
    train_num_total = 0
    loss_stack = []
    acm_loss_stack = []
    act_inst_loss_stack = []
    act_cont_loss_stack = []
    act_back_loss_stack = []
    guide_loss_stack = []
    att_loss_stack = []
    feat_loss_stack = []
    lcs_loss_stack = []
    fsd_loss_stack = []
    if not args.ftcl:
        for input_feature, vid_label_t in tqdm(dataloader):
            vid_label_t = vid_label_t.to(device)
            input_feature = input_feature.to(device)
            act_inst_cls, act_cont_cls, act_back_cls, \
            act_inst_feat, act_cont_feat, act_back_feat, \
            temp_att, act_inst_cas, _, _, _, \
            lcs_candi, fsd_act_candi, fsd_bak_candi = model(input_feature)
            loss, loss_dict = criterion(act_inst_cls, act_cont_cls, act_back_cls, vid_label_t, temp_att,
                                        act_inst_feat, act_cont_feat, act_back_feat, act_inst_cas,
                                        lcs_candi, fsd_act_candi, fsd_bak_candi, args)
            optimizer.zero_grad()
            # NaN losses are skipped rather than back-propagated.
            if not torch.isnan(loss):
                loss.backward()
                optimizer.step()
            with torch.no_grad():
                # Multi-hot prediction via score thresholding; exact-match accuracy.
                fg_score = act_inst_cls[:, :args.action_cls_num]
                label_np = vid_label_t.cpu().numpy()
                score_np = fg_score.cpu().numpy()
                pred_np = np.zeros_like(score_np)
                pred_np[score_np >= args.cls_threshold] = 1
                pred_np[score_np < args.cls_threshold] = 0
                correct_pred = np.sum(label_np == pred_np, axis=1)
                train_num_correct += np.sum((correct_pred == args.action_cls_num))
                train_num_total += correct_pred.shape[0]
                loss_stack.append(loss.cpu().item())
                act_inst_loss_stack.append(loss_dict["act_inst_loss"])
                act_cont_loss_stack.append(loss_dict["act_cont_loss"])
                act_back_loss_stack.append(loss_dict["act_back_loss"])
                guide_loss_stack.append(loss_dict["guide_loss"])
                feat_loss_stack.append(loss_dict["feat_loss"])
                att_loss_stack.append(loss_dict["sparse_loss"])
                acm_loss_stack.append(loss_dict["acm_loss"])
                lcs_loss_stack.append(loss_dict["lcs_loss"])
                fsd_loss_stack.append(loss_dict["fsd_loss"])
        train_acc = train_num_correct / train_num_total
        train_log_dict = {"train_act_inst_cls_loss": np.mean(act_inst_loss_stack),
                          "train_act_cont_cls_loss": np.mean(act_cont_loss_stack),
                          "train_act_back_cls_loss": np.mean(act_back_loss_stack),
                          "train_guide_loss": np.mean(guide_loss_stack),
                          "train_feat_loss": np.mean(feat_loss_stack),
                          "train_att_loss": np.mean(att_loss_stack),
                          "train_acm_loss": np.mean(acm_loss_stack),
                          "train_lcs_loss": np.mean(lcs_loss_stack),
                          "train_fsd_loss": np.mean(fsd_loss_stack),
                          "train_loss": np.mean(loss_stack),
                          "train_acc": train_acc}
        print("")
        print("train_act_inst_cls_loss:{:.3f} train_act_cont_cls_loss:{:.3f}".format(np.mean(act_inst_loss_stack),
                                                                                     np.mean(act_cont_loss_stack)))
        print("train_act_back_cls_loss:{:.3f} train_att_loss:{:.3f}".format(np.mean(act_back_loss_stack),
                                                                            np.mean(att_loss_stack)))
        print("train_feat_loss: {:.3f} train_loss:{:.3f}".format(np.mean(feat_loss_stack), np.mean(loss_stack)))
        print("train acc:{:.3f}".format(train_acc))
        print("-------------------------------------------------------------------------------")
        return train_log_dict
    else:
        # Paired FTCL mode: the criterion receives both videos' outputs.
        for input_feature_1, input_feature_2, vid_label_1, vid_label_2 in tqdm(pair_dataloader):
            vid_label_1 = vid_label_1.to(device)
            vid_label_2 = vid_label_2.to(device)
            input_feature_1 = input_feature_1.to(device)
            input_feature_2 = input_feature_2.to(device)
            output_1, output_2 = model(args.ftcl, input_feature_1, input_feature_2)
            act_inst_cls_1, act_cont_cls_1, act_back_cls_1, act_inst_feat_1, act_cont_feat_1, act_back_feat_1, \
            temp_att_1, act_inst_cas_1, act_cas_1, act_cont_cas_1, act_back_cas_1, \
            candi_for_dp_1, act_candi_for_nw_1, bak_candi_for_nw_1 = output_1
            act_inst_cls_2, act_cont_cls_2, act_back_cls_2, act_inst_feat_2, act_cont_feat_2, act_back_feat_2, \
            temp_att_2, act_inst_cas_2, act_cas_2, act_cont_cas_2, act_back_cas_2, \
            candi_for_dp_2, act_candi_for_nw_2, bak_candi_for_nw_2 = output_2
            loss, loss_dict = criterion(act_inst_cls_1, act_cont_cls_1, act_back_cls_1, vid_label_1, temp_att_1,
                                        act_inst_feat_1, act_cont_feat_1, act_back_feat_1, act_inst_cas_1,
                                        candi_for_dp_1, act_candi_for_nw_1, bak_candi_for_nw_1,
                                        args,
                                        act_inst_cls_2, act_cont_cls_2, act_back_cls_2, vid_label_2, temp_att_2,
                                        act_inst_feat_2, act_cont_feat_2, act_back_feat_2, act_inst_cas_2,
                                        candi_for_dp_2, act_candi_for_nw_2, bak_candi_for_nw_2,
                                        )
            optimizer.zero_grad()
            if not torch.isnan(loss):
                loss.backward()
                optimizer.step()
            with torch.no_grad():
                # Pair accuracy: both videos must be classified exactly right.
                fg_score_1 = act_inst_cls_1[:, :args.action_cls_num]
                fg_score_2 = act_inst_cls_2[:, :args.action_cls_num]
                label_np_1 = vid_label_1.cpu().numpy()
                label_np_2 = vid_label_2.cpu().numpy()
                score_np_1 = fg_score_1.cpu().numpy()
                score_np_2 = fg_score_2.cpu().numpy()
                pred_np_1 = np.zeros_like(score_np_1)
                pred_np_2 = np.zeros_like(score_np_2)
                pred_np_1[score_np_1 >= args.cls_threshold] = 1
                pred_np_2[score_np_2 >= args.cls_threshold] = 1
                pred_np_1[score_np_1 < args.cls_threshold] = 0
                pred_np_2[score_np_2 < args.cls_threshold] = 0
                correct_pred_1 = np.sum(label_np_1 == pred_np_1, axis=1)
                correct_pred_2 = np.sum(label_np_2 == pred_np_2, axis=1)
                train_num_correct += np.sum(((correct_pred_1 == args.action_cls_num) *
                                             (correct_pred_2 == args.action_cls_num)))
                train_num_total += correct_pred_1.shape[0]
                loss_stack.append(loss.cpu().item())
                act_inst_loss_stack.append(loss_dict["act_inst_loss"])
                act_cont_loss_stack.append(loss_dict["act_cont_loss"])
                act_back_loss_stack.append(loss_dict["act_back_loss"])
                guide_loss_stack.append(loss_dict["guide_loss"])
                feat_loss_stack.append(loss_dict["feat_loss"])
                att_loss_stack.append(loss_dict["sparse_loss"])
                acm_loss_stack.append(loss_dict["acm_loss"])
                lcs_loss_stack.append(loss_dict["lcs_loss"])
                fsd_loss_stack.append(loss_dict["fsd_loss"])
        train_acc = train_num_correct / train_num_total
        train_log_dict = {"train_act_inst_cls_loss": np.mean(act_inst_loss_stack),
                          "train_act_cont_cls_loss": np.mean(act_cont_loss_stack),
                          "train_act_back_cls_loss": np.mean(act_back_loss_stack),
                          "train_guide_loss": np.mean(guide_loss_stack),
                          "train_feat_loss": np.mean(feat_loss_stack),
                          "train_att_loss": np.mean(att_loss_stack),
                          "train_acm_loss": np.mean(acm_loss_stack),
                          "train_lcs_loss": np.mean(lcs_loss_stack),
                          "train_fsd_loss": np.mean(fsd_loss_stack),
                          "train_loss": np.mean(loss_stack),
                          "train_acc": train_acc}
        print("\n")
        print("train_act_inst_cls_loss:{:.3f} train_act_cont_cls_loss:{:.3f}".format(np.mean(act_inst_loss_stack),
                                                                                     np.mean(act_cont_loss_stack)))
        print("train_act_back_cls_loss:{:.3f} train_att_loss:{:.3f}".format(np.mean(act_back_loss_stack),
                                                                            np.mean(att_loss_stack)))
        print("train_feat_loss: {:.3f} train_loss:{:.3f}".format(np.mean(feat_loss_stack), np.mean(loss_stack)))
        print("train acc:{:.3f}".format(train_acc))
        return train_log_dict
funcao/funcao-zip.py | robertoweller/python | 0 | 6623823 | <reponame>robertoweller/python
def ziP(*iterables):
    """Yield tuples pairing the i-th element of every iterable.

    Pure-Python equivalent of the builtin zip(); stops at the shortest input:
    ziP('ABCD', 'xy') --> ('A', 'x') ('B', 'y')
    """
    _missing = object()  # unique marker: distinguishes exhaustion from a real None
    streams = [iter(source) for source in iterables]
    while streams:
        batch = []
        for stream in streams:
            item = next(stream, _missing)
            if item is _missing:
                return  # shortest input exhausted -> stop cleanly
            batch.append(item)
        yield tuple(batch)
# Demo: pair each number with a letter, mirroring zip(l_A, l_B).
l_A = [1, 2, 3]
l_B = ["A", "B", "C"]
myList = ziP(l_A, l_B)
print(list(myList))
def ziP(*iterables):
    """Pure-Python reimplementation of the builtin zip(); stops at the shortest input."""
    # zip('ABCD', 'xy') --> Ax By
    sentinel = object()
    iterators = [iter(it) for it in iterables]
    while iterators:
        result = []
        for it in iterators:
            # next() returns the sentinel (never a legitimate element) on exhaustion.
            elem = next(it, sentinel)
            if elem is sentinel:
                return
            result.append(elem)
        yield tuple(result)
# Demo data: numbers zipped against letters.
l_A = [1, 2, 3]
l_B = ["A", "B", "C"]
myList = ziP(l_A, l_B)
print(list(myList)) | en | 0.389012 | # zip('ABCD', 'xy') --> Ax By | 4.080132 | 4 |
game_stats.py | plmanish/Alien-Invasion | 0 | 6623824 | <reponame>plmanish/Alien-Invasion
class GameStats():
    """Track statistics for Alien Invasion."""

    def __init__(self, ai_settings):
        """Initialize statistics and load the persisted high score.

        ai_settings: settings object providing ``ships_limit``.
        """
        self.ai_settings = ai_settings
        self.reset_stats()
        # Start Alien Invasion in an inactive state (waits for Play).
        self.game_active = False
        # High score should never be reset; it persists in highest_score.txt.
        # 'with' guarantees the handle is closed, and a missing file on the
        # very first run now means "no high score yet" instead of a crash.
        try:
            with open("highest_score.txt", "r") as file:
                raw_score = file.readline()
        except FileNotFoundError:
            raw_score = ""
        self.high_score = int(raw_score) if raw_score.strip() else 0

    def reset_stats(self):
        """Initialize statistics that can change during the game."""
        self.ships_left = self.ai_settings.ships_limit
        self.score = 0
        self.level = 1
| class GameStats():
"""Track statistics for Alien Invasion."""
    def __init__(self, ai_settings):
        """Initialize statistics."""
        self.ai_settings = ai_settings
        self.reset_stats()
        # Start Alien Invasion in an active state.
        # (game_active is False until the player starts a game.)
        self.game_active = False
        # High score should never be reset.
        # NOTE(review): open() without a context manager, and a missing
        # highest_score.txt raises FileNotFoundError on first run -- consider
        # try/with here.
        file = open("highest_score.txt", "r")
        self.high_score = file.readline()
        file.close()
        if len(self.high_score) == 0:
            self.high_score = 0
        else:
            self.high_score = int(self.high_score)
    def reset_stats(self):
        """Initialize statistics that can change during the game."""
        # Called at construction and whenever a new game starts.
        self.ships_left = self.ai_settings.ships_limit
        self.score = 0
        self.level = 1
operbench/models/base.py | lirixiang123/oper_bench | 0 | 6623825 | <reponame>lirixiang123/oper_bench
"""
@file: base
@author: <EMAIL>
@date: 2020/03/11
@desc:
"""
from flask_sqlalchemy import SQLAlchemy
# Shared SQLAlchemy handle; the model modules below attach their tables to it.
db = SQLAlchemy()
# Imported for side effects: each module registers its models against `db`.
from . import cmdb
from . import user
from . import ops_tools
| """
@file: base
@author: <EMAIL>
@date: 2020/03/11
@desc:
"""
from flask_sqlalchemy import SQLAlchemy
# Single shared SQLAlchemy instance for the whole package.
db = SQLAlchemy()
# Side-effect imports: model modules register themselves on `db`.
from . import cmdb
from . import user
from . import ops_tools
tests/test_all.py | kuviokelluja/DefuseZip | 0 | 6623826 | import sys
import tempfile
from pathlib import Path
from shutil import copy
import pytest
from DefuseZip.loader import DefuseZip
class Test_all:
    """Integration tests for DefuseZip.scan / safe_extract on the sample archives."""

    DANGEROUS = True
    SAFE = False

    # (archive filename, expected DefuseZip.is_dangerous() verdict)
    testdata = [
        ("LICENSE.zip", SAFE),
        ("single.zip", SAFE),
        ("double_nested.zip", SAFE),
        ("travelsal.zip", DANGEROUS),
        ("medium_zipbomb.zip", DANGEROUS),
        ("big_zipbomb.zip", DANGEROUS),
        ("bigger_zipbomb.zip", DANGEROUS),
        ("huge_zipbomb.zip", DANGEROUS),
        ("zblg_BAMSOFTWARE.zip", DANGEROUS)
        # ,('zbxl_BAMSOFTWARE.zip', DANGEROUS)
    ]

    @staticmethod
    def _defuse(path):
        """Build a DefuseZip around *path* with the limits shared by every test.

        Extracted because the identical keyword block was repeated nine times.
        """
        return DefuseZip(
            path,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )

    def test_LICENCE_no_travelsal(self):
        """A benign archive must not be flagged for path traversal."""
        file = Path(__file__).parent / "example_zips" / "LICENSE.zip"
        defusezip = self._defuse(file)
        defusezip.scan()
        assert not defusezip.has_travelsal()

    def test_travelsal_dangerous(self):
        """A traversal archive must be flagged dangerous."""
        file = Path(__file__).parent / "example_zips" / "travelsal.zip"
        defusezip = self._defuse(file)
        defusezip.scan()
        assert defusezip.is_dangerous()

    @pytest.mark.parametrize("filename,expected", testdata)
    def test_is_safe(self, filename: str, expected: bool):
        """is_dangerous() matches the expected verdict for each sample archive."""
        file = Path(__file__).parent / "example_zips" / filename
        defusezip = self._defuse(file)
        defusezip.scan()
        assert defusezip.is_dangerous() == expected

    # (filename, expected exception, create-the-file-first flag)
    testdata2 = [
        ("nonexistant.zip", FileNotFoundError, False),
        ("exists_for_a_while.zip", FileNotFoundError, True),
    ]

    @pytest.mark.parametrize("filename, expected, create", testdata2)
    def test_not_found(self, filename: str, expected: bool, create: bool):
        """Missing archives raise FileNotFoundError at construction or scan time."""
        zfile = Path(__file__).parent / "example_zips" / filename
        if create:
            cp = Path(zfile.parent / "single.zip")
            copy(cp, zfile)
        with pytest.raises(FileNotFoundError):
            defusezip = self._defuse(zfile)
            # For the "exists for a while" case the constructor succeeds;
            # deleting the file afterwards makes scan() raise instead.
            if create:
                zfile.unlink()
            defusezip.scan()

    def test_output_safe(self, capsys):
        """output() reports a safe archive as not dangerous."""
        file = Path(__file__).parent / "example_zips" / "LICENSE.zip"
        defusezip = self._defuse(file)
        defusezip.scan()
        defusezip.output()
        captured = capsys.readouterr()
        assert "Dangerous = False" in captured.out

    def test_safe_extract(self):
        """safe_extract() unpacks a safe archive (or raises on Windows)."""
        file = Path(__file__).parent / "example_zips" / "single.zip"
        retval = False
        defusezip = self._defuse(file)
        defusezip.scan()
        if sys.platform == "win32":
            # On Windows safe_extract raises before anything is written, so
            # the NotImplementedError itself is the assertion; the flags are
            # forced True to satisfy the shared checks below.
            with pytest.raises(NotImplementedError):
                with tempfile.TemporaryDirectory() as tmpdir:
                    retval = defusezip.safe_extract(tmpdir, max_cpu_time=60)
            ex = True
            retval = True
        else:
            with tempfile.TemporaryDirectory() as tmpdir:
                retval = defusezip.safe_extract(tmpdir, max_cpu_time=60)
                ex = any(Path(tmpdir).iterdir())
        assert ex
        assert retval

    def test_output_dangerous(self, capsys):
        """output() reports a traversal archive as dangerous."""
        file = Path(__file__).parent / "example_zips" / "travelsal.zip"
        defusezip = self._defuse(file)
        defusezip.scan()
        defusezip.output()
        captured = capsys.readouterr()
        assert "Dangerous = True" in captured.out

    def test_no_scan(self, capsys):
        """safe_extract() without a prior scan() must raise."""
        if sys.platform == "win32":
            # safe_extract is unsupported on Windows; nothing to verify here.
            # (Plain return instead of `return True`: returning a value from a
            # test triggers pytest's return-not-None warning.)
            return
        file = Path(__file__).parent / "example_zips" / "travelsal.zip"
        defusezip = self._defuse(file)
        with pytest.raises(Exception):
            defusezip.safe_extract(Path.cwd())

    def test_extract_deleted_file(self, capsys):
        """Extracting after the archive was deleted raises FileNotFoundError."""
        if sys.platform == "win32":
            # safe_extract is unsupported on Windows; see test_no_scan.
            return
        zfile = Path(__file__).parent / "example_zips" / "deleted.zip"
        cp = Path(zfile.parent / "single.zip")
        copy(cp, zfile)
        defusezip = self._defuse(zfile)
        defusezip.scan()
        zfile.unlink()
        with pytest.raises(FileNotFoundError):
            with tempfile.TemporaryDirectory() as tmpdir:
                defusezip.safe_extract(Path(tmpdir))
| import sys
import tempfile
from pathlib import Path
from shutil import copy
import pytest
from DefuseZip.loader import DefuseZip
class Test_all:
    """Integration tests for DefuseZip.scan / safe_extract on the sample archives."""
    DANGEROUS = True
    SAFE = False
    # (archive filename, expected DefuseZip.is_dangerous() verdict)
    testdata = [
        ("LICENSE.zip", SAFE),
        ("single.zip", SAFE),
        ("double_nested.zip", SAFE),
        ("travelsal.zip", DANGEROUS),
        ("medium_zipbomb.zip", DANGEROUS),
        ("big_zipbomb.zip", DANGEROUS),
        ("bigger_zipbomb.zip", DANGEROUS),
        ("huge_zipbomb.zip", DANGEROUS),
        ("zblg_BAMSOFTWARE.zip", DANGEROUS)
        # ,('zbxl_BAMSOFTWARE.zip', DANGEROUS)
    ]

    def test_LICENCE_no_travelsal(self):
        """A benign archive must not be flagged for path traversal."""
        file = Path(__file__).parent / "example_zips" / "LICENSE.zip"
        defusezip = DefuseZip(
            file,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )
        defusezip.scan()
        assert not defusezip.has_travelsal()

    def test_travelsal_dangerous(self):
        """A traversal archive must be flagged dangerous."""
        file = Path(__file__).parent / "example_zips" / "travelsal.zip"
        defusezip = DefuseZip(
            file,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )
        defusezip.scan()
        assert defusezip.is_dangerous()

    @pytest.mark.parametrize("filename,expected", testdata)
    def test_is_safe(self, filename: str, expected: bool):
        """is_dangerous() matches the expected verdict for each sample archive."""
        file = Path(__file__).parent / "example_zips" / filename
        defusezip = DefuseZip(
            file,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )
        defusezip.scan()
        assert defusezip.is_dangerous() == expected

    # (filename, expected exception, create-the-file-first flag)
    testdata2 = [
        ("nonexistant.zip", FileNotFoundError, False),
        ("exists_for_a_while.zip", FileNotFoundError, True),
    ]

    @pytest.mark.parametrize("filename, expected, create", testdata2)
    def test_not_found(self, filename: str, expected: bool, create: bool):
        """Missing archives raise FileNotFoundError at construction or scan time."""
        zfile = Path(__file__).parent / "example_zips" / filename
        if create:
            cp = Path(zfile.parent / "single.zip")
            copy(cp, zfile)
        with pytest.raises(FileNotFoundError):
            defusezip = DefuseZip(
                zfile,
                nested_levels_limit=100,
                killswitch_seconds=5,
                nested_zips_limit=100000,
                ratio_threshold=1032,
            )
            # When the file was created, the constructor succeeds; deleting it
            # here makes the subsequent scan() raise instead.
            if create:
                zfile.unlink()
            defusezip.scan()

    def test_output_safe(self, capsys):
        """output() reports a safe archive as not dangerous."""
        file = Path(__file__).parent / "example_zips" / "LICENSE.zip"
        defusezip = DefuseZip(
            file,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )
        defusezip.scan()
        defusezip.output()
        captured = capsys.readouterr()
        assert "Dangerous = False" in captured.out

    def test_safe_extract(self):
        """safe_extract() unpacks a safe archive (or raises on Windows)."""
        file = Path(__file__).parent / "example_zips" / "single.zip"
        retval = False
        defusezip = DefuseZip(
            file,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )
        defusezip.scan()
        if sys.platform == "win32":
            with pytest.raises(NotImplementedError):
                with tempfile.TemporaryDirectory() as tmpdir:
                    retval = defusezip.safe_extract(tmpdir, max_cpu_time=60)
                    # NOTE(review): the two lines below never run, because
                    # safe_extract raises NotImplementedError first.
                    dest = Path(tmpdir)
                    ex = any(dest.iterdir())
            # expected value to true, because the real test on windows is NotImplementedError
            ex = True
            retval = True
        else:
            with tempfile.TemporaryDirectory() as tmpdir:
                retval = defusezip.safe_extract(tmpdir, max_cpu_time=60)
                dest = Path(tmpdir)
                ex = any(dest.iterdir())
        assert ex
        assert retval

    def test_output_dangerous(self, capsys):
        """output() reports a traversal archive as dangerous."""
        file = Path(__file__).parent / "example_zips" / "travelsal.zip"
        defusezip = DefuseZip(
            file,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )
        defusezip.scan()
        defusezip.output()
        captured = capsys.readouterr()
        assert "Dangerous = True" in captured.out

    def test_no_scan(self, capsys):
        """safe_extract() without a prior scan() must raise."""
        if sys.platform == "win32":
            assert True
            # NOTE(review): returning a value from a test triggers pytest's
            # return-not-None warning; a bare `return` would be cleaner.
            return True
        file = Path(__file__).parent / "example_zips" / "travelsal.zip"
        defusezip = DefuseZip(
            file,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )
        with pytest.raises(Exception):
            defusezip.safe_extract(Path.cwd())

    def test_extract_deleted_file(self, capsys):
        """Extracting after the archive was deleted raises FileNotFoundError."""
        if sys.platform == "win32":
            assert True
            # NOTE(review): see test_no_scan -- returning from a test is flagged
            # by pytest.
            return True
        zfile = Path(__file__).parent / "example_zips" / "deleted.zip"
        cp = Path(zfile.parent / "single.zip")
        copy(cp, zfile)
        defusezip = DefuseZip(
            zfile,
            nested_levels_limit=100,
            killswitch_seconds=5,
            nested_zips_limit=100000,
            ratio_threshold=1032,
        )
        defusezip.scan()
        zfile.unlink()
        with pytest.raises(FileNotFoundError):
            with tempfile.TemporaryDirectory() as tmpdir:
                defusezip.safe_extract(Path(tmpdir))
| en | 0.494085 | # ,('zbxl_BAMSOFTWARE.zip', DANGEROUS) # expected value to true, because the real test on windows is NotImplementedError | 2.304082 | 2 |
Task/Non-decimal-radices-Input/Python/non-decimal-radices-input.py | mullikine/RosettaCodeData | 5 | 6623827 | <filename>Task/Non-decimal-radices-Input/Python/non-decimal-radices-input.py
# REPL transcript (not a runnable script): int(text, base) interprets '100'
# in each radix 2..20; the unprefixed lines are the interpreter's output.
>>> text = '100'
>>> for base in range(2,21):
	print ("String '%s' in base %i is %i in base 10"
               % (text, base, int(text, base)))

	
String '100' in base 2 is 4 in base 10
String '100' in base 3 is 9 in base 10
String '100' in base 4 is 16 in base 10
String '100' in base 5 is 25 in base 10
String '100' in base 6 is 36 in base 10
String '100' in base 7 is 49 in base 10
String '100' in base 8 is 64 in base 10
String '100' in base 9 is 81 in base 10
String '100' in base 10 is 100 in base 10
String '100' in base 11 is 121 in base 10
String '100' in base 12 is 144 in base 10
String '100' in base 13 is 169 in base 10
String '100' in base 14 is 196 in base 10
String '100' in base 15 is 225 in base 10
String '100' in base 16 is 256 in base 10
String '100' in base 17 is 289 in base 10
String '100' in base 18 is 324 in base 10
String '100' in base 19 is 361 in base 10
String '100' in base 20 is 400 in base 10
| <filename>Task/Non-decimal-radices-Input/Python/non-decimal-radices-input.py
# Interpreter-session transcript: parse '100' in radices 2..20 via int(text, base).
>>> text = '100'
>>> for base in range(2,21):
	print ("String '%s' in base %i is %i in base 10"
               % (text, base, int(text, base)))

	
String '100' in base 2 is 4 in base 10
String '100' in base 3 is 9 in base 10
String '100' in base 4 is 16 in base 10
String '100' in base 5 is 25 in base 10
String '100' in base 6 is 36 in base 10
String '100' in base 7 is 49 in base 10
String '100' in base 8 is 64 in base 10
String '100' in base 9 is 81 in base 10
String '100' in base 10 is 100 in base 10
String '100' in base 11 is 121 in base 10
String '100' in base 12 is 144 in base 10
String '100' in base 13 is 169 in base 10
String '100' in base 14 is 196 in base 10
String '100' in base 15 is 225 in base 10
String '100' in base 16 is 256 in base 10
String '100' in base 17 is 289 in base 10
String '100' in base 18 is 324 in base 10
String '100' in base 19 is 361 in base 10
String '100' in base 20 is 400 in base 10
| none | 1 | 3.760882 | 4 | |
stubs.min/System/Diagnostics/__init___parts/PresentationTraceSources.py | ricardyn/ironpython-stubs | 1 | 6623828 | class PresentationTraceSources(object):
""" Provides debug tracing support that is specifically targeted for Windows Presentation Foundation (WPF) applications. """
@staticmethod
def GetTraceLevel(element):
"""
GetTraceLevel(element: object) -> PresentationTraceLevel
Gets the value of the System.Diagnostics.PresentationTraceSources.TraceLevel�
attached property for a specified element.
element: The element from which the property value is read.
Returns: The System.Diagnostics.PresentationTraceSources.TraceLevel property value for
the element.
"""
pass
@staticmethod
def Refresh():
"""
Refresh()
Refreshes trace sources,by forcing the app.config file to be re-read.
"""
pass
@staticmethod
def SetTraceLevel(element,traceLevel):
"""
SetTraceLevel(element: object,traceLevel: PresentationTraceLevel)
Sets the value of the System.Diagnostics.PresentationTraceSources.TraceLevel�
attached property to a specified element.
element: The element to which the attached property is written.
traceLevel: The needed System.Diagnostics.PresentationTraceLevel value.
"""
pass
AnimationSource=None
DataBindingSource=None
DependencyPropertySource=None
DocumentsSource=None
FreezableSource=None
HwndHostSource=None
MarkupSource=None
NameScopeSource=None
ResourceDictionarySource=None
RoutedEventSource=None
ShellSource=None
TraceLevelProperty=None
__all__=[
'GetTraceLevel',
'Refresh',
'SetTraceLevel',
'TraceLevelProperty',
]
| class PresentationTraceSources(object):
""" Provides debug tracing support that is specifically targeted for Windows Presentation Foundation (WPF) applications. """
@staticmethod
def GetTraceLevel(element):
"""
GetTraceLevel(element: object) -> PresentationTraceLevel
Gets the value of the System.Diagnostics.PresentationTraceSources.TraceLevel�
attached property for a specified element.
element: The element from which the property value is read.
Returns: The System.Diagnostics.PresentationTraceSources.TraceLevel property value for
the element.
"""
pass
@staticmethod
def Refresh():
"""
Refresh()
Refreshes trace sources,by forcing the app.config file to be re-read.
"""
pass
@staticmethod
def SetTraceLevel(element,traceLevel):
"""
SetTraceLevel(element: object,traceLevel: PresentationTraceLevel)
Sets the value of the System.Diagnostics.PresentationTraceSources.TraceLevel�
attached property to a specified element.
element: The element to which the attached property is written.
traceLevel: The needed System.Diagnostics.PresentationTraceLevel value.
"""
pass
AnimationSource=None
DataBindingSource=None
DependencyPropertySource=None
DocumentsSource=None
FreezableSource=None
HwndHostSource=None
MarkupSource=None
NameScopeSource=None
ResourceDictionarySource=None
RoutedEventSource=None
ShellSource=None
TraceLevelProperty=None
__all__=[
'GetTraceLevel',
'Refresh',
'SetTraceLevel',
'TraceLevelProperty',
]
| en | 0.662457 | Provides debug tracing support that is specifically targeted for Windows Presentation Foundation (WPF) applications. GetTraceLevel(element: object) -> PresentationTraceLevel
Gets the value of the System.Diagnostics.PresentationTraceSources.TraceLevel�
attached property for a specified element.
element: The element from which the property value is read.
Returns: The System.Diagnostics.PresentationTraceSources.TraceLevel property value for
the element. Refresh()
Refreshes trace sources,by forcing the app.config file to be re-read. SetTraceLevel(element: object,traceLevel: PresentationTraceLevel)
Sets the value of the System.Diagnostics.PresentationTraceSources.TraceLevel�
attached property to a specified element.
element: The element to which the attached property is written.
traceLevel: The needed System.Diagnostics.PresentationTraceLevel value. | 1.940104 | 2 |
capture/cf/server.py | JohnDMcMaster/pr0ntools | 38 | 6623829 | <filename>capture/cf/server.py
#!/usr/bin/python
'''
For now this has very narrow focus of taking in a directory, serving it, and then terminating
Eventually this should become a service that can register projects in different directories
Do not assume that the two computers have any connection between them other than the socket
-Do not share file paths
-Do not open additional sockets
Initially client is expected to be a PyQt GUI
Eventually the client should be a web application (maybe Django)
'''
import argparse
from multiprocessing import Process, Queue
from Queue import Empty
import time
import os
import shutil
import glob
import traceback
import multiprocessing
import json
from util import add_bool_arg
from SimpleXMLRPCServer import SimpleXMLRPCServer
from xmlrpclib import Binary
import datetime
class Server(object):
    """XML-RPC server that hands out image post-processing jobs.

    Scans ``indir`` for completed capture jobs (directories containing an
    ``out.png``) and serves them to clients via the ``job_req`` /
    ``job_done`` RPC calls.  Python 2 code.
    """
    def __init__(self, indir, verbose=False):
        self.running = True
        self.server = None
        self.indir = indir        # root directory scanned for jobs
        self.verbose = verbose    # controls XML-RPC request logging
        # Unallocated
        self.todo = set()
        # Client has requested but not completed
        self.outstanding = {}
        self.completed = set()
    def add_dir(self, indir):
        """Scan indir for job directories and queue any that look complete."""
        # out.png means it should have completed successfully
        # alternatively open every json file and see if it looks okay
        print 'Scanning for new jobs: %s' % indir
        for fn in glob.glob(indir + '/*/out.png'):
            base = os.path.dirname(fn)
            print ' Adding: %s' % base
            self.todo.add(base)
        print 'Scan complete'
    def run(self):
        """Build the initial job list and serve XML-RPC requests forever."""
        print 'Building job list'
        self.add_dir(self.indir)
        print 'Starting server'
        # NOTE(review): binds using the module-level ``args`` namespace, not
        # instance state -- the class only works when run from this script.
        server = SimpleXMLRPCServer((args.bind, args.port), logRequests=self.verbose, allow_none=True)
        server.register_introspection_functions()
        server.register_multicall_functions()
        #server.register_instance(self.rpc)
        server.register_function(self.job_req, "job_req")
        server.register_function(self.job_done, "job_done")
        server.serve_forever()
    '''
    RPC
    '''
    def job_req(self):
        """RPC: allocate one job to the caller.

        Returns a dict with the job name, PNG/image payloads (as
        xmlrpclib.Binary) and the parsed out.json, or None when no work is
        available.  Exceptions are printed server-side and re-raised so the
        client sees an XML-RPC fault.
        """
        try:
            if args.reserve and len(self.todo) == 0:
                # --reserve mode: recycle the whole job list once it drains
                print 'reserve: reloading'
                self.outstanding = {}
                self.completed = set()
                self.add_dir(self.indir)
            '''
            In order to process the client needs:
            -Output image (out.png)
            -Image for grid (cropped or original if not rotating)
            -Offsets into the original image (out.json)
            '''
            try:
                base = self.todo.pop()
            except KeyError:
                # No jobs to hand out
                print 'WARNING: client requested job but no jobs'
                return None
            print 'Allocating %s' % base
            j = json.load(open(os.path.join(base, 'out.json')))
            if j['pass'] != True:
                raise Exception("Bad job %s" % base)
            ret = {
                'name': base,
                'png': Binary(open(os.path.join(base, j['png'])).read()),
                'img': Binary(open(os.path.join(base, j['img'])).read()),
                'json': j,
            }
            self.outstanding[base] = {
                'ret': ret,
                # so can timeout clients that don't complete jobs
                'tstart': time.time(),
            }
            return ret
        except:
            traceback.print_exc()
            raise
    '''
    new_png may be None indicating the job was rejected
    In this case msg must be set
    Otherwise msg is optional
    '''
    def job_done(self, base, new_png, msg):
        """RPC: record a finished (or rejected) job and persist its results.

        Raises KeyError (as an XML-RPC fault) if ``base`` was never handed
        out via job_req.
        """
        try:
            print 'Completed: %s: %s' % (base, new_png is not None)
            submit = self.outstanding[base]
            print 'Time: %0.1f' % (time.time() - submit['tstart'],)
            if new_png is not None:
                open(os.path.join(base, 'sweep.png'), 'w').write(new_png.data)
                open(os.path.join(base, 'sweep.txt'), 'w').write(msg)
            self.completed.add(base)
            del self.outstanding[base]
        except:
            traceback.print_exc()
            raise
if __name__ == '__main__':
    # CLI entry point: parse options and run the job server until killed.
    parser = argparse.ArgumentParser(description='Grid auto-bitmap test')
    # ord('pr') = 28786
    parser.add_argument('--port', type=int, default=28786, help='TCP port number')
    parser.add_argument('--bind', default='localhost', help='Address to bind to')
    add_bool_arg(parser, '--debug', default=False)
    add_bool_arg(parser, '--reserve', default=False)
    parser.add_argument('dir', help='Directory to nom')
    # NOTE: Server.run()/job_req() read this module-level ``args`` directly.
    args = parser.parse_args()
    s = Server(args.dir, args.debug)
    s.run()
| <filename>capture/cf/server.py
#!/usr/bin/python
'''
For now this has very narrow focus of taking in a directory, serving it, and then terminating
Eventually this should become a service that can register projects in different directories
Do not assume that the two computers have any connection between them other than the socket
-Do not share file paths
-Do not open additional sockets
Initially client is expected to be a PyQt GUI
Eventually the client should be a web application (maybe Django)
'''
import argparse
from multiprocessing import Process, Queue
from Queue import Empty
import time
import os
import shutil
import glob
import traceback
import multiprocessing
import json
from util import add_bool_arg
from SimpleXMLRPCServer import SimpleXMLRPCServer
from xmlrpclib import Binary
import datetime
class Server(object):
def __init__(self, indir, verbose=False):
self.running = True
self.server = None
self.indir = indir
self.verbose = verbose
# Unallocated
self.todo = set()
# Client has requested but not completed
self.outstanding = {}
self.completed = set()
def add_dir(self, indir):
# out.png means it should have completed successfully
# alternatively open every json file and see if it looks okay
print 'Scanning for new jobs: %s' % indir
for fn in glob.glob(indir + '/*/out.png'):
base = os.path.dirname(fn)
print ' Adding: %s' % base
self.todo.add(base)
print 'Scan complete'
def run(self):
print 'Building job list'
self.add_dir(self.indir)
print 'Starting server'
server = SimpleXMLRPCServer((args.bind, args.port), logRequests=self.verbose, allow_none=True)
server.register_introspection_functions()
server.register_multicall_functions()
#server.register_instance(self.rpc)
server.register_function(self.job_req, "job_req")
server.register_function(self.job_done, "job_done")
server.serve_forever()
'''
RPC
'''
def job_req(self):
try:
if args.reserve and len(self.todo) == 0:
print 'reserve: reloading'
self.outstanding = {}
self.completed = set()
self.add_dir(self.indir)
'''
In order to process the client needs:
-Output image (out.png)
-Image for grid (cropped or original if not rotating)
-Offsets into the original image (out.json)
'''
try:
base = self.todo.pop()
except KeyError:
# No jobs to hand out
print 'WARNING: client requested job but no jobs'
return None
print 'Allocating %s' % base
j = json.load(open(os.path.join(base, 'out.json')))
if j['pass'] != True:
raise Exception("Bad job %s" % base)
ret = {
'name': base,
'png': Binary(open(os.path.join(base, j['png'])).read()),
'img': Binary(open(os.path.join(base, j['img'])).read()),
'json': j,
}
self.outstanding[base] = {
'ret': ret,
# so can timeout clients that don't complete jobs
'tstart': time.time(),
}
return ret
except:
traceback.print_exc()
raise
'''
new_png may be None indicating the job was rejected
In this case msg must be set
Otherwise msg is optional
'''
def job_done(self, base, new_png, msg):
try:
print 'Completed: %s: %s' % (base, new_png is not None)
submit = self.outstanding[base]
print 'Time: %0.1f' % (time.time() - submit['tstart'],)
if new_png is not None:
open(os.path.join(base, 'sweep.png'), 'w').write(new_png.data)
open(os.path.join(base, 'sweep.txt'), 'w').write(msg)
self.completed.add(base)
del self.outstanding[base]
except:
traceback.print_exc()
raise
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Grid auto-bitmap test')
# ord('pr') = 28786
parser.add_argument('--port', type=int, default=28786, help='TCP port number')
parser.add_argument('--bind', default='localhost', help='Address to bind to')
add_bool_arg(parser, '--debug', default=False)
add_bool_arg(parser, '--reserve', default=False)
parser.add_argument('dir', help='Directory to nom')
args = parser.parse_args()
s = Server(args.dir, args.debug)
s.run()
| en | 0.927522 | #!/usr/bin/python For now this has very narrow focus of taking in a directory, serving it, and then terminating Eventually this should become a service that can register projects in different directories Do not assume that the two computers have any connection between them other than the socket -Do not share file paths -Do not open additional sockets Initially client is expected to be a PyQt GUI Eventually the client should be a web application (maybe Django) # Unallocated # Client has requested but not completed # out.png means it should have completed successfully # alternatively open every json file and see if it looks okay #server.register_instance(self.rpc) RPC In order to process the client needs: -Output image (out.png) -Image for grid (cropped or original if not rotating) -Offsets into the original image (out.json) # No jobs to hand out # so can timeout clients that don't complete jobs new_png may be None indicating the job was rejected In this case msg must be set Otherwise msg is optional # ord('pr') = 28786 | 2.639213 | 3 |
scripts/ExtractBagFile/ReadBagExtended.py | Wuselwog/bus-stop-detection | 0 | 6623830 | <gh_stars>0
# -*- coding: utf-8 -*-
"""
Extract images and GPS from a rosbag.
"""
import os
from os.path import isfile, join
import argparse
import cv2
import rosbag, rospy
from sensor_msgs.msg import Image, NavSatFix
from cv_bridge import CvBridge
from exif import set_gps_location
def write_images(img_buffer, args):
    """Flush buffered frames to disk.

    Each buffer entry is a (path, cv2 image, GPS fix) triple; when
    ``args.gps_save`` is set the fix is embedded as EXIF in the written file.
    """
    for image_dir, cv_img, last_gps in img_buffer:
        cv2.imwrite(image_dir, cv_img)
        if args.gps_save:
            set_gps_location(image_dir, last_gps.latitude,
                             last_gps.longitude, last_gps.altitude)
def main():
    """Parse command-line arguments and run the bag extraction."""
    # latitude, longitude and width in degrees of break areas
    washington_depot_loc = [40.224142, -80.216757, 90. / 1.11 / 100000.]
    pittsburgh_pause_loc = [40.446020, -79.988753, 90. / 1.11 / 100000.]
    gas_station_loc = [40.17822, -80.26139, 50. / 1.11 / 100000.]
    # washington_pause_loc = [40.172611, -80.244531]
    # NOTE(review): pause_locs is assembled but never used or passed on.
    pause_locs = [washington_depot_loc, pittsburgh_pause_loc, gas_station_loc]
    parser = argparse.ArgumentParser(description="Extract images and GPS from a rosbag.")
    parser.add_argument(
        "-f", "--folder", default='.', help="The folder from which all Ros Bags should get read")
    parser.add_argument(
        "-i", "--input", nargs='+', type=str, default=[], help="Input ROS bags")
    #parser.add_argument(
    #    "-i", "--input", default='./test.bag', help="Input ROS bag")
    parser.add_argument(
        "-c", "--cam-id", nargs='+', type=int, default=[3,], help="Selected camera IDs to extract")
    parser.add_argument(
        "-o", "--output", default='./output', help="Output dir")
    parser.add_argument(
        "-g", "--gps-save", action='store_true', help="Whether to save GPS as exif info of the images")
    parser.add_argument(
        "-t", "--time", nargs='+', type=int, default=[0, ], help="Selected time to extract n frames before")
    parser.add_argument(
        "-n", "--num_images", type=int, default=0, help="Amount of frames that should be extracted")
    # parser.add_argument(
    #    "-r", "--recurse", action='store_true', help="Extra")
    args = parser.parse_args()
    bag_files = args.input
    folder = args.folder
    output_dir = args.output
    frames = args.time
    num_images = args.num_images
    extract(bag_files, output_dir, folder, frames, num_images, args.gps_save, args.cam_id)
def extract(bag_files, output_dir, folder, frames, num_images, gps_save, cam_id):
    """Pull camera frames (and optionally GPS EXIF tags) out of ROS bags.

    For every timestamp in ``frames`` (seconds), images within
    ``num_images`` seconds on either side are decoded from the selected
    camera topics and written as .jpg files to ``output_dir``.

    bag_files:  explicit list of bag paths; when empty, every ``*.bag`` in
                ``folder`` is processed instead
    output_dir: directory the frames are written to (created if needed)
    folder:     directory scanned for bags when ``bag_files`` is empty
    frames:     list of integer timestamps (seconds) to extract around
    num_images: half-width of the extraction window, in seconds
    gps_save:   if True, subscribe to /fix and embed GPS EXIF in each image
    cam_id:     iterable of camera IDs whose topics are extracted
    """
    os.makedirs(output_dir, exist_ok=True)
    topics = ['/fix'] if gps_save else []
    # topics.append('/velocity')
    # Fix: the original loop variable shadowed the ``cam_id`` parameter.
    for cid in cam_id:
        topics.append('/camera{}/image_raw/compressed'.format(cid))
    if len(bag_files) == 0:
        # No explicit bags given: take every *.bag file in the folder.
        # (Dropped the dead ``imgs =`` alias from the original.)
        bag_files = sorted(join(folder, f) for f in os.listdir(folder)
                           if isfile(join(folder, f)) and f[-4:] == ".bag")
    bridge = CvBridge()
    # State kept for the (currently commented-out) bus-stop detection logic.
    bus_stopped = False
    img_buffer = []
    velocity_threshold = 4
    frame_idx = 0
    current_frame = frames[0]
    print("Looking for img ", current_frame)
    found_image = False
    finished = False
    for num, bag_file in enumerate(bag_files):
        print(num, " / ", len(bag_files))
        print("Extract images from {} for topics {}".format(bag_file, topics))
        bag = rosbag.Bag(bag_file, "r")
        # info_dict = yaml.load(bag._get_yaml_info())
        # print(info_dict)
        found_image = True
        while (found_image):
            found_image = False
            if gps_save:
                LAST_GPS = NavSatFix()
                print(LAST_GPS)
            velocity = 0
            # Only replay the window around the current target timestamp.
            for topic, msg, t in bag.read_messages(topics=topics, start_time=rospy.Time(current_frame - num_images), end_time=rospy.Time(current_frame + num_images)):
                if 'velocity' in topic:
                    print(velocity)
                    # velocity = msg.velocity
                    # if velocity <= 0.2:
                    #     bus_stopped = True
                    # elif velocity > velocity_threshold:
                    #     if bus_stopped:
                    #         write_images(img_buffer, args)
                    #         img_buffer.clear()
                    #     bus_stopped = False
                elif 'image_raw' in topic:
                    # Check if the bus is currently doing a break
                    # if abs(t.secs - frame) > num_images or t.secs > frame:
                    #     continue
                    cv_img = bridge.compressed_imgmsg_to_cv2(msg, desired_encoding="passthrough")
                    time_stamps = '_{:0>10d}_{:0>9d}'.format(t.secs, t.nsecs)
                    image_filename = topic[1:8] + time_stamps + '.jpg'
                    image_dir = os.path.join(output_dir, image_filename)
                    # img_buffer.append((image_dir, cv_img, LAST_GPS))
                    cv2.imwrite(image_dir, cv_img)
                    if gps_save:
                        set_gps_location(image_dir, LAST_GPS.latitude, LAST_GPS.longitude, LAST_GPS.altitude)
                    # Advance to the next requested timestamp once this one
                    # produced at least one frame.
                    if not found_image and frame_idx + 1 < len(frames):
                        frame_idx += 1
                        found_image = True
                        current_frame = frames[frame_idx]
                        print("Looking next for img ", current_frame)
                    elif not found_image and not finished:
                        print("Found all images")
                        finished = True
                elif 'fix' in topic:
                    # Remember the latest GPS fix for tagging later frames.
                    LAST_GPS = msg
                    # print(LAST_GPS)
            if finished:
                bag.close()
                return
        bag.close()
    # if bus_stopped:
    #     write_images(img_buffer, args)
    return
# Script entry point: parse CLI arguments and run the extraction.
if __name__ == '__main__':
    main()
| # -*- coding: utf-8 -*-
"""
Extract images and GPS from a rosbag.
"""
import os
from os.path import isfile, join
import argparse
import cv2
import rosbag, rospy
from sensor_msgs.msg import Image, NavSatFix
from cv_bridge import CvBridge
from exif import set_gps_location
def write_images(img_buffer, args):
for img in img_buffer:
image_dir, cv_img, LAST_GPS = img
# img_buffer.append(image_dir, cv_img, LAST_GPS)
cv2.imwrite(image_dir, cv_img)
if args.gps_save:
set_gps_location(image_dir, LAST_GPS.latitude, LAST_GPS.longitude, LAST_GPS.altitude)
def main():
# latitude, longitude and width in degrees of break areas
washington_depot_loc = [40.224142, -80.216757, 90. / 1.11 / 100000.]
pittsburgh_pause_loc = [40.446020, -79.988753, 90. / 1.11 / 100000.]
gas_station_loc = [40.17822, -80.26139, 50. / 1.11 / 100000.]
# washington_pause_loc = [40.172611, -80.244531]
pause_locs = [washington_depot_loc, pittsburgh_pause_loc, gas_station_loc]
parser = argparse.ArgumentParser(description="Extract images and GPS from a rosbag.")
parser.add_argument(
"-f", "--folder", default='.', help="The folder from which all Ros Bags should get read")
parser.add_argument(
"-i", "--input", nargs='+', type=str, default=[], help="Input ROS bags")
#parser.add_argument(
# "-i", "--input", default='./test.bag', help="Input ROS bag")
parser.add_argument(
"-c", "--cam-id", nargs='+', type=int, default=[3,], help="Selected camera IDs to extract")
parser.add_argument(
"-o", "--output", default='./output', help="Output dir")
parser.add_argument(
"-g", "--gps-save", action='store_true', help="Whether to save GPS as exif info of the images")
parser.add_argument(
"-t", "--time", nargs='+', type=int, default=[0, ], help="Selected time to extract n frames before")
parser.add_argument(
"-n", "--num_images", type=int, default=0, help="Amount of frames that should be extracted")
# parser.add_argument(
# "-r", "--recurse", action='store_true', help="Extra")
args = parser.parse_args()
bag_files = args.input
folder = args.folder
output_dir = args.output
frames = args.time
num_images = args.num_images
extract(bag_files, output_dir, folder, frames, num_images, args.gps_save, args.cam_id)
def extract(bag_files, output_dir, folder, frames, num_images, gps_save, cam_id):
os.makedirs(output_dir, exist_ok=True)
topics = ['/fix'] if gps_save else []
# topics.append('/velocity')
for cam_id in cam_id:
topics.append('/camera{}/image_raw/compressed'.format(cam_id))
if len(bag_files) == 0:
bag_files = imgs = sorted([join(folder, f) for f in os.listdir(folder) if isfile(join(folder, f)) and f[-4:] == ".bag"])
bridge = CvBridge()
bus_stopped = False
img_buffer = []
velocity_threshold = 4
frame_idx = 0
current_frame = frames[0]
print("Looking for img ", current_frame)
found_image = False
finished = False
for num, bag_file in enumerate(bag_files):
print(num, " / ", len(bag_files))
print("Extract images from {} for topics {}".format(bag_file, topics))
bag = rosbag.Bag(bag_file, "r")
# info_dict = yaml.load(bag._get_yaml_info())
# print(info_dict)
found_image = True
while (found_image):
found_image = False
if gps_save:
LAST_GPS = NavSatFix()
print(LAST_GPS)
velocity = 0
for topic, msg, t in bag.read_messages(topics=topics, start_time=rospy.Time(current_frame - num_images), end_time=rospy.Time(current_frame + num_images)):
if 'velocity' in topic:
print(velocity)
# velocity = msg.velocity
# if velocity <= 0.2:
# bus_stopped = True
# elif velocity > velocity_threshold:
# if bus_stopped:
# write_images(img_buffer, args)
# img_buffer.clear()
# bus_stopped = False
elif 'image_raw' in topic:
# Check if the bus is currently doing a break
# if abs(t.secs - frame) > num_images or t.secs > frame:
# continue
cv_img = bridge.compressed_imgmsg_to_cv2(msg, desired_encoding="passthrough")
time_stamps = '_{:0>10d}_{:0>9d}'.format(t.secs, t.nsecs)
image_filename = topic[1:8] + time_stamps + '.jpg'
image_dir = os.path.join(output_dir, image_filename)
# img_buffer.append((image_dir, cv_img, LAST_GPS))
cv2.imwrite(image_dir, cv_img)
if gps_save:
set_gps_location(image_dir, LAST_GPS.latitude, LAST_GPS.longitude, LAST_GPS.altitude)
if not found_image and frame_idx + 1 < len(frames):
frame_idx += 1
found_image = True
current_frame = frames[frame_idx]
print("Looking next for img ", current_frame)
elif not found_image and not finished:
print("Found all images")
finished = True
elif 'fix' in topic:
LAST_GPS = msg
# print(LAST_GPS)
if finished:
bag.close()
return
bag.close()
# if bus_stopped:
# write_images(img_buffer, args)
return
if __name__ == '__main__':
main() | en | 0.399337 | # -*- coding: utf-8 -*- Extract images and GPS from a rosbag. # img_buffer.append(image_dir, cv_img, LAST_GPS) # latitude, longitude and width in degrees of break areas # washington_pause_loc = [40.172611, -80.244531] #parser.add_argument( # "-i", "--input", default='./test.bag', help="Input ROS bag") # parser.add_argument( # "-r", "--recurse", action='store_true', help="Extra") # topics.append('/velocity') # info_dict = yaml.load(bag._get_yaml_info()) # print(info_dict) # velocity = msg.velocity # if velocity <= 0.2: # bus_stopped = True # elif velocity > velocity_threshold: # if bus_stopped: # write_images(img_buffer, args) # img_buffer.clear() # bus_stopped = False # Check if the bus is currently doing a break # if abs(t.secs - frame) > num_images or t.secs > frame: # continue # img_buffer.append((image_dir, cv_img, LAST_GPS)) # print(LAST_GPS) # if bus_stopped: # write_images(img_buffer, args) | 2.851255 | 3 |
msg.py | takamitsu-iida/webex-teams-practice-2 | 0 | 6623831 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import json
import logging
import os
import sys
from jinja2 import Environment, FileSystemLoader
import redis
import requests
requests.packages.urllib3.disable_warnings()
logger = logging.getLogger(__name__)
def here(path=''):
    """Return *path* resolved as an absolute path relative to this file's directory."""
    base_dir = os.path.dirname(__file__)
    return os.path.abspath(os.path.join(base_dir, path))
if not here('./lib') in sys.path:
sys.path.append(here('./lib'))
from botscript import bot, redis_url
# name and directory path of this application
app_name = os.path.splitext(os.path.basename(__file__))[0]
app_home = here('.')
conf_dir = os.path.join(app_home, 'conf')
data_dir = os.path.join(app_home, 'data')
card_dir = os.path.join(app_home, 'static', 'cards')
def send_text(text=None, to_person_email=None):
    """Send a plain-text message through the bot, omitting unset fields."""
    params = {key: value
              for key, value in (('text', text),
                                 ('to_person_email', to_person_email))
              if value}
    bot.send_message(**params)
def send_card(text=None, card_name=None, to_person_email=None):
    """Send an adaptive card attachment.

    Returns the bot API response, or None when the card file is missing or
    cannot be parsed.
    """
    card = get_card_content(card_name)
    if card is None:
        return None
    params = {key: value
              for key, value in (('text', text),
                                 ('to_person_email', to_person_email))
              if value}
    params['attachments'] = [card]
    return bot.send_message(**params)
def send_image(text=None, image_filename=None, to_person_email=None):
    """Send a local image file as a message attachment via the bot."""
    return bot.send_image(text=text, image_filename=image_filename, to_person_email=to_person_email)
def store_message(send_result):
    """Cache a sent message (Webex API response dict) in Redis.

    Stores the dict as a Redis hash keyed by the message id; entries expire
    after 10 minutes.  No-op when send_result is None.
    """
    if send_result is not None:
        if 'attachments' in send_result:
            # Hash fields must be flat scalars; attachments is nested JSON.
            del send_result['attachments']
        # print(json.dumps(send_result, ensure_ascii=False, indent=2))
        message_id = send_result.get('id')
        conn = redis.StrictRedis.from_url(redis_url, decode_responses=True)
        # store as hash
        # NOTE(review): hmset() is deprecated in newer redis-py in favour of
        # hset(name, mapping=...) -- confirm the pinned redis version.
        conn.hmset(message_id, send_result)
        conn.expire(message_id, 600) # time to live is 10 min
def show_redis_message_list():
    """Debug helper: print every key in the Redis DB, then each stored hash as JSON."""
    conn = redis.StrictRedis.from_url(redis_url, decode_responses=True)
    keys = conn.keys(pattern='*')
    for key in keys:
        print(key)
    for key in keys:
        stored = conn.hgetall(key)
        print(json.dumps(stored, ensure_ascii=False, indent=2))
def get_card_content(card_name):
    """Load an adaptive-card JSON file from ``card_dir``.

    Returns a Webex attachment dict wrapping the card content, or None when
    the file is missing or cannot be read/parsed.
    """
    card_path = os.path.join(card_dir, card_name)
    if not os.path.isfile(card_path):
        logger.error("card file is not found: %s", card_path)
        return None
    try:
        with open(card_path) as f:
            payload = json.load(f)
    except (IOError, json.JSONDecodeError) as e:
        logger.exception(e)
        return None
    return {
        'contentType': "application/vnd.microsoft.card.adaptive",
        'content': payload
    }
def send_weather_card(to_person_email=None):
    """Build the weather adaptive card and send it; silently returns on failure."""
    card = get_weather_card()
    if card is None:
        return
    bot.send_message(text="weather",
                     to_person_email=to_person_email,
                     attachments=[card])
def get_weather_card():
    """Render the weather Jinja2 card template with live weather data.

    Returns a Webex attachment dict, or None when the weather fetch fails.
    """
    env = Environment(loader=FileSystemLoader(card_dir))
    template = env.get_template('weather.j2')
    data = get_weather_data()
    if data is None:
        return None
    # The rendered template output is itself JSON describing the card.
    rendered = template.render(data)
    content = json.loads(rendered)
    return {
        'contentType': "application/vnd.microsoft.card.adaptive",
        'content': content
    }
def get_weather_data():
    """get weather information as json data.

    http://weather.livedoor.com/weather_hacks/webservice

    Returns a dict with 'city', 'title', 'description', 'today' and
    'tomorrow' keys (see the example below the function), or None when the
    HTTP request fails.

    NOTE(review): the livedoor weather service has reportedly been shut
    down -- verify the endpoint still responds before relying on this.
    """
    # pylint: disable=broad-except
    city = '140010' # Yokohama
    api_path = 'http://weather.livedoor.com/forecast/webservice/json/v1?city={}'.format(city)
    get_result = None
    try:
        get_result = requests.get(api_path)
    except Exception:
        # Best effort: any network failure falls through to the None check.
        pass
    if get_result is None or not get_result.ok:
        print("failed")
        return None
    json_data = get_result.json()
    # data structures are described in http://weather.livedoor.com/weather_hacks/webservice
    def normalize(fcst):
        # Flatten one forecast entry, tolerating missing temperature/image.
        r = {}
        r['dateLabel'] = fcst.get('dateLabel', '-')
        r['date'] = fcst.get('date', '1970-01-01')
        r['telop'] = fcst.get('telop', '-')
        temp = fcst.get('temperature', {})
        r['temp_min'] = '-' if temp is None or temp.get('min') is None else temp.get('min', {}).get('celsius', '-')
        r['temp_max'] = '-' if temp is None or temp.get('max') is None else temp.get('max', {}).get('celsius', '-')
        image = fcst.get('image', {})
        r['img_url'] = '' if image is None else image.get('url', '')
        r['img_title'] = '-' if image is None else image.get('title', '-')
        return r
    fcst_today = json_data.get('forecasts', [{}, {}])[0]
    fcst_today = normalize(fcst_today)
    fcst_tomorrow = json_data.get('forecasts', [{}, {}])[1]
    fcst_tomorrow = normalize(fcst_tomorrow)
    city = json_data.get('location', {}).get('city', '-')
    title = json_data.get('title', '-')
    description = json_data.get('description', {}).get('text', '-')
    return {
        'city': city, # e.g. "横浜"
        'title': title, # e.g. "神奈川県 横浜 の天気"
        'description': description,
        'today': fcst_today,
        'tomorrow': fcst_tomorrow
    }
# {
# "city": "横浜",
# "title": "神奈川県 横浜 の天気",
# "description": " 関東の東海上を、気圧の谷が東へ進んでいます。...",
# "today": {
# "dateLabel": "今日",
# "date": "2019-12-31",
# "telop": "晴れ",
# "temp_min": "-",
# "temp_max": "18",
# "img_url": "http://weather.livedoor.com/img/icon/1.gif",
# "img_title": "晴れ"
# },
# "tomorrow": {
# "dateLabel": "明日",
# "date": "2020-01-01",
# "telop": "晴時々曇",
# "temp_min": "5",
# "temp_max": "11",
# "img_url": "http://weather.livedoor.com/img/icon/2.gif",
# "img_title": "晴時々曇"
# }
# }
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO)
    def main():
        """Ad-hoc manual test driver: sends a sample message to one user."""
        # The recipient address comes from the environment, not hard-coded.
        to_person_email = os.environ.get('to_person_email')
        if to_person_email is None:
            sys.exit('failed to read to_person_email from os.environ')
        # send_text(text='はい!', to_person_email=to_person_email)
        # send_card(text='INPUT CARD', card_name='command.json', to_person_email=to_person_email)
        # send_result = send_card(text='CHOICE CARD', card_name='choice.json', to_person_email=to_person_email)
        # store_message(send_result)
        # show_redis_message_list()
        # NOTE(review): hard-coded local image path -- only works on the
        # author's machine.
        send_image(text='image', image_filename='/Users/iida/python/CF-F10/test/Sortable-master/st/face-01.jpg', to_person_email=to_person_email)
        # print(json.dumps(get_weather(), ensure_ascii=False, indent=2))
        # send_weather_card(to_person_email=to_person_email)
        return 0
    sys.exit(main())
| #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# pylint: disable=missing-docstring
import json
import logging
import os
import sys
from jinja2 import Environment, FileSystemLoader
import redis
import requests
requests.packages.urllib3.disable_warnings()
logger = logging.getLogger(__name__)
def here(path=''):
return os.path.abspath(os.path.join(os.path.dirname(__file__), path))
if not here('./lib') in sys.path:
sys.path.append(here('./lib'))
from botscript import bot, redis_url
# name and directory path of this application
app_name = os.path.splitext(os.path.basename(__file__))[0]
app_home = here('.')
conf_dir = os.path.join(app_home, 'conf')
data_dir = os.path.join(app_home, 'data')
card_dir = os.path.join(app_home, 'static', 'cards')
def send_text(text=None, to_person_email=None):
kwargs = {}
if text:
kwargs.update({'text': text})
if to_person_email:
kwargs.update({'to_person_email': to_person_email})
bot.send_message(**kwargs)
def send_card(text=None, card_name=None, to_person_email=None):
kwargs = {}
if text:
kwargs.update({'text': text})
if to_person_email:
kwargs.update({'to_person_email': to_person_email})
contents = get_card_content(card_name)
if contents is None:
return None
kwargs.update({'attachments': [contents]})
return bot.send_message(**kwargs)
def send_image(text=None, image_filename=None, to_person_email=None):
return bot.send_image(text=text, image_filename=image_filename, to_person_email=to_person_email)
def store_message(send_result):
if send_result is not None:
if 'attachments' in send_result:
del send_result['attachments']
# print(json.dumps(send_result, ensure_ascii=False, indent=2))
message_id = send_result.get('id')
conn = redis.StrictRedis.from_url(redis_url, decode_responses=True)
# store as hash
conn.hmset(message_id, send_result)
conn.expire(message_id, 600) # time to live is 10 min
def show_redis_message_list():
conn = redis.StrictRedis.from_url(redis_url, decode_responses=True)
# show keys in db
keys = conn.keys(pattern='*')
for k in keys:
print(k)
# show values in db
for k in keys:
data = conn.hgetall(k)
print(json.dumps(data, ensure_ascii=False, indent=2))
def get_card_content(card_name):
card_path = os.path.join(card_dir, card_name)
if not os.path.isfile(card_path):
logger.error("card file is not found: %s", card_path)
return None
try:
with open(card_path) as f:
card = json.load(f)
return {
'contentType': "application/vnd.microsoft.card.adaptive",
'content': card
}
except (IOError, json.JSONDecodeError) as e:
logger.exception(e)
return None
def send_weather_card(to_person_email=None):
kwargs = {
'text': "weather",
'to_person_email': to_person_email
}
contents = get_weather_card()
if contents is None:
return
kwargs.update({'attachments': [contents]})
bot.send_message(**kwargs)
def get_weather_card():
env = Environment(loader=FileSystemLoader(card_dir))
template = env.get_template('weather.j2')
data = get_weather_data()
if data is None:
return None
rendered = template.render(data)
content = json.loads(rendered)
return {
'contentType': "application/vnd.microsoft.card.adaptive",
'content': content
}
def get_weather_data():
"""get weather information as json data.
http://weather.livedoor.com/weather_hacks/webservice
"""
# pylint: disable=broad-except
city = '140010' # Yokohama
api_path = 'http://weather.livedoor.com/forecast/webservice/json/v1?city={}'.format(city)
get_result = None
try:
get_result = requests.get(api_path)
except Exception:
pass
if get_result is None or not get_result.ok:
print("failed")
return None
json_data = get_result.json()
# data structures are described in http://weather.livedoor.com/weather_hacks/webservice
def normalize(fcst):
r = {}
r['dateLabel'] = fcst.get('dateLabel', '-')
r['date'] = fcst.get('date', '1970-01-01')
r['telop'] = fcst.get('telop', '-')
temp = fcst.get('temperature', {})
r['temp_min'] = '-' if temp is None or temp.get('min') is None else temp.get('min', {}).get('celsius', '-')
r['temp_max'] = '-' if temp is None or temp.get('max') is None else temp.get('max', {}).get('celsius', '-')
image = fcst.get('image', {})
r['img_url'] = '' if image is None else image.get('url', '')
r['img_title'] = '-' if image is None else image.get('title', '-')
return r
fcst_today = json_data.get('forecasts', [{}, {}])[0]
fcst_today = normalize(fcst_today)
fcst_tomorrow = json_data.get('forecasts', [{}, {}])[1]
fcst_tomorrow = normalize(fcst_tomorrow)
city = json_data.get('location', {}).get('city', '-')
title = json_data.get('title', '-')
description = json_data.get('description', {}).get('text', '-')
return {
'city': city, # "横浜"
'title': title, # "神奈川県 横浜 の天気"
'description': description,
'today': fcst_today,
'tomorrow': fcst_tomorrow
}
# {
# "city": "横浜",
# "title": "神奈川県 横浜 の天気",
# "description": " 関東の東海上を、気圧の谷が東へ進んでいます。...",
# "today": {
# "dateLabel": "今日",
# "date": "2019-12-31",
# "telop": "晴れ",
# "temp_min": "-",
# "temp_max": "18",
# "img_url": "http://weather.livedoor.com/img/icon/1.gif",
# "img_title": "晴れ"
# },
# "tomorrow": {
# "dateLabel": "明日",
# "date": "2020-01-01",
# "telop": "晴時々曇",
# "temp_min": "5",
# "temp_max": "11",
# "img_url": "http://weather.livedoor.com/img/icon/2.gif",
# "img_title": "晴時々曇"
# }
# }
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
def main():
to_person_email = os.environ.get('to_person_email')
if to_person_email is None:
sys.exit('failed to read to_person_email from os.environ')
# send_text(text='はい!', to_person_email=to_person_email)
# send_card(text='INPUT CARD', card_name='command.json', to_person_email=to_person_email)
# send_result = send_card(text='CHOICE CARD', card_name='choice.json', to_person_email=to_person_email)
# store_message(send_result)
# show_redis_message_list()
send_image(text='image', image_filename='/Users/iida/python/CF-F10/test/Sortable-master/st/face-01.jpg', to_person_email=to_person_email)
# print(json.dumps(get_weather(), ensure_ascii=False, indent=2))
# send_weather_card(to_person_email=to_person_email)
return 0
sys.exit(main())
| en | 0.460032 | #!/usr/bin/env python3 # -*- coding: utf-8 -*- # pylint: disable=missing-docstring # name and directory path of this application # print(json.dumps(send_result, ensure_ascii=False, indent=2)) # store as hash # time to live is 10 min # show keys in db # show values in db get weather information as json data. http://weather.livedoor.com/weather_hacks/webservice # pylint: disable=broad-except # Yokohama # data structures are described in http://weather.livedoor.com/weather_hacks/webservice # "横浜" # "神奈川県 横浜 の天気" # { # "city": "横浜", # "title": "神奈川県 横浜 の天気", # "description": " 関東の東海上を、気圧の谷が東へ進んでいます。...", # "today": { # "dateLabel": "今日", # "date": "2019-12-31", # "telop": "晴れ", # "temp_min": "-", # "temp_max": "18", # "img_url": "http://weather.livedoor.com/img/icon/1.gif", # "img_title": "晴れ" # }, # "tomorrow": { # "dateLabel": "明日", # "date": "2020-01-01", # "telop": "晴時々曇", # "temp_min": "5", # "temp_max": "11", # "img_url": "http://weather.livedoor.com/img/icon/2.gif", # "img_title": "晴時々曇" # } # } # send_text(text='はい!', to_person_email=to_person_email) # send_card(text='INPUT CARD', card_name='command.json', to_person_email=to_person_email) # send_result = send_card(text='CHOICE CARD', card_name='choice.json', to_person_email=to_person_email) # store_message(send_result) # show_redis_message_list() # print(json.dumps(get_weather(), ensure_ascii=False, indent=2)) # send_weather_card(to_person_email=to_person_email) | 1.981457 | 2 |
FeatureServer/DataSource/Twitter.py | AstunTechnology/featureserver | 55 | 6623832 | <reponame>AstunTechnology/featureserver<gh_stars>10-100
from FeatureServer.DataSource import DataSource
from vectorformats.Feature import Feature
from FeatureServer.Exceptions.NoGeometryException import NoGeometryException
import oauth2 as oauth
import urllib
import urlparse
import simplejson
import math
class Twitter (DataSource):
api = None
geo_keys = ['coordinates', 'geo', 'place']
def __init__(self, name, consumer_key, consumer_secret, token_key, token_secret, srid_out = 4326, attributes="*", encoding = "utf-8", **args):
DataSource.__init__(self, name, **args)
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.token_key = token_key
self.token_secret = token_secret
self.srid_out = srid_out
self.encoding = encoding
self.attributes = attributes
self.api = TwitterAPI(self.consumer_key, self.consumer_secret, self.token_key, self.token_secret)
def select (self, action):
features = []
if action.id is not None:
content = self.api.request('https://api.twitter.com/1.1/statuses/show.json?include_my_retweet=true&include_entities=true&id=' + str(action.id), "GET")
try:
features.append(self.encode_tweet(simplejson.loads(content)))
except Exception as e:
''' '''
else:
if hasattr(self, 'screen_name'):
content = self.api.request('https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=' + self.screen_name, "GET")
features = self.encode_user_tweets(simplejson.loads(content))
elif hasattr(self, 'user_id'):
content = self.api.request('https://api.twitter.com/1.1/statuses/user_timeline.json?user_id=' + self.user_id, "GET")
features = self.encode_user_tweets(simplejson.loads(content))
else:
params = {'count':'100'}
geocode = ''
if action.bbox:
# latitude, longitude
center = "%f,%f" % tuple([ (action.bbox[1] + action.bbox[3]) / 2, (action.bbox[0] + action.bbox[2]) / 2 ])
dLat = math.radians((action.bbox[3] - action.bbox[1]))
dLon = math.radians((action.bbox[2] - action.bbox[0]))
lat1 = math.radians(action.bbox[1])
lat2 = math.radians(action.bbox[3])
a = math.sin(dLat/2) * math.sin(dLat/2) + math.sin(dLon/2) * math.sin(dLon/2) * math.cos(lat1) * math.cos(lat2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = 6371 * c
radius = "%ikm" % math.ceil(d/2)
params['geocode'] = center + ',' + radius
params['q'] = self.query
query = urllib.urlencode(params)
content = self.api.request('https://api.twitter.com/1.1/search/tweets.json?' + query, "GET")
features = self.encode_search_tweets(simplejson.loads(content))
return features
def encode_search_tweets(self, tweets):
features = []
for tweet in tweets['statuses']:
try:
features.append(self.encode_tweet(tweet))
except Exception as e:
continue
return features
def encode_user_tweets(self, tweets):
features = []
for tweet in tweets:
try:
features.append(self.encode_tweet(tweet))
except Exception as e:
continue
return features
def encode_tweet(self, tweet):
try:
geom = self.get_geometry(tweet)
except:
raise
props = {}
node_names = self.get_node_names(tweet)
for attribute in node_names:
keys = attribute.split(".")
value = tweet
for key in keys:
if value[key] is None:
break
value = value[key]
if type(value) is not dict and type(value) is not list:
if type(value) is unicode:
props[attribute] = value
else:
props[attribute] = unicode(str(value), self.encoding)
return Feature( id=tweet["id"], geometry=geom, geometry_attr="geometry", srs=self.srid_out, props=props )
def get_geometry(self, tweet):
if tweet["coordinates"] is not None:
return tweet["coordinates"]
# geo field is deprecated. Should be removed
if tweet["geo"] is not None:
return tweet["geo"]
if tweet["place"] is not None:
if tweet["place"]["bounding_box"] is not None:
return tweet["place"]["bounding_box"]
raise NoGeometryException(locator="Twitter", layer=self.name)
def get_node_names(self, tweet):
nodes = []
if self.attributes == '*':
for key in tweet.keys():
if key not in self.geo_keys:
childs = self.get_nodes(key, tweet[key], key)
nodes.extend(childs)
else:
nodes = self.attributes.split(",")
return nodes
def get_nodes(self, key, tweet, path):
nodes = []
if type(tweet) is dict:
for key in tweet.keys():
if key not in self.geo_keys:
childs = self.get_nodes(key, tweet[key], "%s.%s" % (path, key))
nodes.extend(childs)
else:
nodes.append("%s" % path)
return nodes
class TwitterAPI(object):
settings = {
'request_token_url' : 'https://api.twitter.com/oauth/request_token',
'authorize_url' : 'https://api.twitter.com/oauth/authorize',
'access_token_url' : 'https://api.twitter.com/oauth/access_token'
}
client = None
def __init__(self, consumer_key, consumer_secret, token_key, token_secret):
consumer = oauth.Consumer(key = consumer_key, secret = consumer_secret)
token = oauth.Token(key = token_key, secret = token_secret)
self.client = oauth.Client(consumer, token)
def request(self, url, http_method = "GET", post_body = "", http_headers = {}):
resp, content = self.client.request(url, method = http_method, body = post_body, headers = http_headers)
return content
| from FeatureServer.DataSource import DataSource
from vectorformats.Feature import Feature
from FeatureServer.Exceptions.NoGeometryException import NoGeometryException
import oauth2 as oauth
import urllib
import urlparse
import simplejson
import math
class Twitter (DataSource):
api = None
geo_keys = ['coordinates', 'geo', 'place']
def __init__(self, name, consumer_key, consumer_secret, token_key, token_secret, srid_out = 4326, attributes="*", encoding = "utf-8", **args):
DataSource.__init__(self, name, **args)
self.consumer_key = consumer_key
self.consumer_secret = consumer_secret
self.token_key = token_key
self.token_secret = token_secret
self.srid_out = srid_out
self.encoding = encoding
self.attributes = attributes
self.api = TwitterAPI(self.consumer_key, self.consumer_secret, self.token_key, self.token_secret)
def select (self, action):
features = []
if action.id is not None:
content = self.api.request('https://api.twitter.com/1.1/statuses/show.json?include_my_retweet=true&include_entities=true&id=' + str(action.id), "GET")
try:
features.append(self.encode_tweet(simplejson.loads(content)))
except Exception as e:
''' '''
else:
if hasattr(self, 'screen_name'):
content = self.api.request('https://api.twitter.com/1.1/statuses/user_timeline.json?screen_name=' + self.screen_name, "GET")
features = self.encode_user_tweets(simplejson.loads(content))
elif hasattr(self, 'user_id'):
content = self.api.request('https://api.twitter.com/1.1/statuses/user_timeline.json?user_id=' + self.user_id, "GET")
features = self.encode_user_tweets(simplejson.loads(content))
else:
params = {'count':'100'}
geocode = ''
if action.bbox:
# latitude, longitude
center = "%f,%f" % tuple([ (action.bbox[1] + action.bbox[3]) / 2, (action.bbox[0] + action.bbox[2]) / 2 ])
dLat = math.radians((action.bbox[3] - action.bbox[1]))
dLon = math.radians((action.bbox[2] - action.bbox[0]))
lat1 = math.radians(action.bbox[1])
lat2 = math.radians(action.bbox[3])
a = math.sin(dLat/2) * math.sin(dLat/2) + math.sin(dLon/2) * math.sin(dLon/2) * math.cos(lat1) * math.cos(lat2)
c = 2 * math.atan2(math.sqrt(a), math.sqrt(1-a))
d = 6371 * c
radius = "%ikm" % math.ceil(d/2)
params['geocode'] = center + ',' + radius
params['q'] = self.query
query = urllib.urlencode(params)
content = self.api.request('https://api.twitter.com/1.1/search/tweets.json?' + query, "GET")
features = self.encode_search_tweets(simplejson.loads(content))
return features
def encode_search_tweets(self, tweets):
features = []
for tweet in tweets['statuses']:
try:
features.append(self.encode_tweet(tweet))
except Exception as e:
continue
return features
def encode_user_tweets(self, tweets):
features = []
for tweet in tweets:
try:
features.append(self.encode_tweet(tweet))
except Exception as e:
continue
return features
def encode_tweet(self, tweet):
try:
geom = self.get_geometry(tweet)
except:
raise
props = {}
node_names = self.get_node_names(tweet)
for attribute in node_names:
keys = attribute.split(".")
value = tweet
for key in keys:
if value[key] is None:
break
value = value[key]
if type(value) is not dict and type(value) is not list:
if type(value) is unicode:
props[attribute] = value
else:
props[attribute] = unicode(str(value), self.encoding)
return Feature( id=tweet["id"], geometry=geom, geometry_attr="geometry", srs=self.srid_out, props=props )
def get_geometry(self, tweet):
if tweet["coordinates"] is not None:
return tweet["coordinates"]
# geo field is deprecated. Should be removed
if tweet["geo"] is not None:
return tweet["geo"]
if tweet["place"] is not None:
if tweet["place"]["bounding_box"] is not None:
return tweet["place"]["bounding_box"]
raise NoGeometryException(locator="Twitter", layer=self.name)
def get_node_names(self, tweet):
nodes = []
if self.attributes == '*':
for key in tweet.keys():
if key not in self.geo_keys:
childs = self.get_nodes(key, tweet[key], key)
nodes.extend(childs)
else:
nodes = self.attributes.split(",")
return nodes
def get_nodes(self, key, tweet, path):
nodes = []
if type(tweet) is dict:
for key in tweet.keys():
if key not in self.geo_keys:
childs = self.get_nodes(key, tweet[key], "%s.%s" % (path, key))
nodes.extend(childs)
else:
nodes.append("%s" % path)
return nodes
class TwitterAPI(object):
settings = {
'request_token_url' : 'https://api.twitter.com/oauth/request_token',
'authorize_url' : 'https://api.twitter.com/oauth/authorize',
'access_token_url' : 'https://api.twitter.com/oauth/access_token'
}
client = None
def __init__(self, consumer_key, consumer_secret, token_key, token_secret):
consumer = oauth.Consumer(key = consumer_key, secret = consumer_secret)
token = oauth.Token(key = token_key, secret = token_secret)
self.client = oauth.Client(consumer, token)
def request(self, url, http_method = "GET", post_body = "", http_headers = {}):
resp, content = self.client.request(url, method = http_method, body = post_body, headers = http_headers)
return content | en | 0.495402 | # latitude, longitude # geo field is deprecated. Should be removed | 2.917656 | 3 |
nsniff/widget.py | matham/nsniff | 0 | 6623833 | import numpy as np
from typing import List, Dict, Optional, Tuple
from matplotlib import cm
from kivy_trio.to_trio import kivy_run_in_async, mark, KivyEventCancelled
from pymoa_remote.threading import ThreadExecutor
from base_kivy_app.app import app_error
from kivy_garden.graph import Graph, ContourPlot, LinePlot
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, \
NumericProperty, ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.factory import Factory
from kivy.uix.widget import Widget
from kivy_garden.graph import Graph
from nsniff.device import StratuscentSensor, VirtualStratuscentSensor, \
StratuscentBase
__all__ = ('DeviceDisplay', )
class SniffGraph(Graph):
    """Graph that adds two interactions on top of kivy_garden's ``Graph``:

    - a floating label that follows the mouse and shows the data value under
      the cursor (label text is supplied by the owning
      :class:`DeviceDisplay`), and
    - click/drag selection that is forwarded to the owning display so it can
      set the per-channel display range from the selected span.
    """

    # the DeviceDisplay whose data this graph shows; set by the owner/kv
    dev_display: 'DeviceDisplay' = None
    # lazily created floating label that shows the value under the mouse
    pos_label: Widget = None
    # whether this graph is currently shown; the hover label is suppressed
    # while the graph is not visible
    visible = BooleanProperty(False)
    # True for the 3D (channel x time heatmap) graph, False for the 2D graph
    is_3d = True
    def _scale_percent_pos(self, pos):
        # Convert a position in this widget's coordinate space into (x, y)
        # fractions of the plotting (view) area; values outside [0, 1] mean
        # the position is outside the plot area.
        w, h = self.view_size
        x, y = pos
        x -= self.x + self.view_pos[0]
        y -= self.y + self.view_pos[1]
        x = x / w if w else 0
        y = y / h if h else 0
        return x, y
    def show_pos_label(self):
        """Creates the hover label if needed and attaches it to the window."""
        label = self.pos_label
        if label is None:
            label = self.pos_label = Factory.GraphPosLabel()
        if label.parent is None:
            from kivy.core.window import Window
            Window.add_widget(label)
    def hide_pos_label(self):
        """Detaches the hover label from the window, if it is attached."""
        label = self.pos_label
        if label is not None and label.parent is not None:
            from kivy.core.window import Window
            Window.remove_widget(label)
    def on_kv_post(self, base_widget):
        # Track the mouse globally so the hover label can follow it.
        # NOTE(review): this binding is never unbound, so the widget stays
        # referenced by the Window for the app's lifetime -- confirm intended.
        from kivy.core.window import Window
        Window.fbind('mouse_pos', self._set_hover_label)
    def _set_hover_label(self, *args):
        """Shows/updates or hides the hover label for the mouse position."""
        from kivy.core.window import Window
        pos = self.to_parent(*self.to_widget(*Window.mouse_pos))
        # Hide when the graph is not visible, when another window-level
        # widget (e.g. a popup) other than the label itself is on top, or
        # when the mouse is outside this widget.
        if not self.visible or \
                len(Window.children) > 1 and \
                Window.children[0] is not self.pos_label or \
                not self.collide_point(*pos):
            self.hide_pos_label()
            return
        x, y = self._scale_percent_pos(pos)
        if x > 1 or x < 0 or y > 1 or y < 0:
            # inside the widget but outside the actual plotting area
            self.hide_pos_label()
            return
        self.show_pos_label()
        text = self.dev_display.get_data_from_graph_pos(x, y, self.is_3d)
        if text:
            self.pos_label.text = text
            x_pos, y_pos = Window.mouse_pos
            # offset from the cursor, clamped so the label stays on screen
            self.pos_label.pos = min(
                x_pos + dp(20), Window.width - dp(200)), y_pos + dp(20)
        else:
            self.hide_pos_label()
    def on_touch_down(self, touch):
        # Start a potential range selection if the touch is inside the plot.
        if super().on_touch_down(touch):
            return True
        if not self.collide_point(*touch.pos):
            return False
        x, y = self._scale_percent_pos(touch.pos)
        if x > 1 or x < 0 or y > 1 or y < 0:
            return False
        # remember the fractional start position for on_touch_up
        touch.ud[f'sniff_graph.{self.uid}'] = x, y
        touch.grab(self)
        return True
    def on_touch_up(self, touch):
        # Complete a range selection started in on_touch_down.
        if super().on_touch_up(touch):
            return True
        opos = touch.ud.get(f'sniff_graph.{self.uid}', None)
        if opos is not None:
            touch.ungrab(self)
            cpos = None
            if self.collide_point(*touch.pos):
                x, y = self._scale_percent_pos(touch.pos)
                if x > 1 or x < 0 or y > 1 or y < 0:
                    cpos = None
                else:
                    cpos = x, y
            # forward whichever drag end point(s) landed inside the plot
            if opos or cpos:
                self.dev_display.set_range_from_pos(opos, cpos, self.is_3d)
                return True
        return False
class DeviceDisplay(BoxLayout):
    """Widget that runs a single Stratuscent sensor and displays its data.

    The incoming 32-channel samples are accumulated in a numpy buffer and
    rendered both as per-channel line plots in a 2D graph and as a
    channel-by-time colormap image in a 3D (heatmap) graph.
    """

    __events__ = ('on_data_update', )

    # properties that are saved to / restored from the app config
    _config_props_ = (
        'com_port', 'virtual', 'log_z', 'auto_range', 'global_range',
        'range_chan', 'n_channels')

    # serial port name of the physical sensor
    com_port: str = StringProperty('')

    # the sensor device instance while running, otherwise None
    device: Optional[StratuscentBase] = ObjectProperty(
        None, allownone=True, rebind=True)

    # when True a VirtualStratuscentSensor is created instead of hardware
    virtual = BooleanProperty(False)

    # number of sensor channels
    n_channels = 32

    # timestamp of the first sample of the current run
    t0 = NumericProperty(0)

    # timestamp of the most recent sample
    t = NumericProperty(0)

    # display window start time; None means from the first sample
    t_start = NumericProperty(None, allownone=True)

    # display window end time; None means up to the latest sample
    t_end = NumericProperty(None, allownone=True)

    # when set (and t_start is None), only the last t_last seconds are shown
    t_last = NumericProperty(None, allownone=True)

    # set to True to make run_device's read loop exit
    done = False

    # the heatmap graph widget
    graph_3d: Graph = None

    # image plot living in graph_3d
    plot_3d: ContourPlot = None

    # the line-plot graph widget
    graph_2d: Graph = None

    # one line plot per channel, in channel order
    plots_2d: List[LinePlot] = []

    # sample buffer of shape (n_channels + 1, capacity); the last row holds
    # timestamps. Only the first num_points columns are valid; capacity
    # grows by doubling in process_data
    _data: Optional[np.ndarray] = None

    # number of valid columns in _data
    num_points: int = NumericProperty(0)

    # when True values are mapped logarithmically to color/height
    log_z = BooleanProperty(False)

    # when True the min/max range is recomputed from visible data every draw
    auto_range = BooleanProperty(True)

    # texture for the color scale legend
    scale_tex = ObjectProperty(None, allownone=True)

    # when True a single global min/max is shared by all channels
    global_range = BooleanProperty(False)

    # per-channel display minimum, shape (n_channels, 1); None until computed
    min_val: Optional[np.ndarray] = None

    # per-channel display maximum, shape (n_channels, 1); None until computed
    max_val: Optional[np.ndarray] = None

    # which channel a manual (drag) range selection applies to: 'mouse',
    # 'all', or a 1-based channel number as a string
    range_chan: str = StringProperty('mouse')

    # per-channel visibility of the 2D line plots
    active_channels = ListProperty([True, ] * n_channels)

    # per-channel stat/control widgets created by add_channel_selection
    channels_stats = []

    _draw_trigger = None

    _t_trigger = None

    # line colors, one per channel
    _plot_colors = []

    # event marker plots as ([plots in graph_2d], [plots in graph_3d])
    _event_plots: Tuple[List[LinePlot], List[LinePlot]] = ([], [])

    _event_plots_trigger = None

    def __init__(self, **kwargs):
        # two qualitative colormaps give 40 distinct colors for 32 channels.
        # NOTE(review): cm.get_cmap is deprecated in matplotlib >= 3.7;
        # switch to matplotlib.colormaps[...] once older versions are dropped
        self._plot_colors = cm.get_cmap('tab20').colors + \
            cm.get_cmap('tab20b').colors
        super().__init__(**kwargs)
        self._event_plots = [], []
        self._event_plots_trigger = Clock.create_trigger(
            self._move_events_to_top)

        # any change to a display option schedules a re-draw of the data
        self._draw_trigger = Clock.create_trigger(self.draw_data)
        self.fbind('log_z', self.recompute_bar)
        self.fbind('log_z', self._draw_trigger)
        self.fbind('auto_range', self._draw_trigger)
        self.fbind('global_range', self._draw_trigger)
        self.fbind('t_start', self._draw_trigger)
        self.fbind('t_end', self._draw_trigger)
        self.fbind('t_last', self._draw_trigger)
        self.fbind('active_channels', self._draw_trigger)

        # any change to the time window schedules re-scaling of the x-axes
        self._t_trigger = Clock.create_trigger(self._set_graph_t_axis)
        self.fbind('t_start', self._t_trigger)
        self.fbind('t_end', self._t_trigger)
        self.fbind('t_last', self._t_trigger)
        self.fbind('t0', self._t_trigger)
        self.fbind('t', self._t_trigger)

    def _set_graph_t_axis(self, *args):
        """Recomputes the x-axis limits of both graphs from t0/t and the
        user-selected t_start/t_end/t_last window.
        """
        xmax = self.t_end if self.t_end is not None else self.t
        if self.t_start is not None:
            xmin = self.t_start
        elif self.t_last is not None:
            xmin = xmax - self.t_last
        else:
            xmin = self.t0

        if xmin > xmax:
            xmin = xmax

        self.graph_2d.xmin = xmin
        self.graph_2d.xmax = xmax
        # the 3D graph is clamped to the actually sampled time span
        self.graph_3d.xmin = max(min(xmin, self.t), self.t0)
        self.graph_3d.xmax = max(min(xmax, self.t), self.t0)

    def _move_events_to_top(self, *args):
        """Re-adds all event marker plots so they draw above the data."""
        plots2, plots3 = self._event_plots
        graph2 = self.graph_2d
        graph3 = self.graph_3d
        for plot in plots2:
            graph2.remove_plot(plot)
            graph2.add_plot(plot)
        for plot in plots3:
            graph3.remove_plot(plot)
            graph3.add_plot(plot)

    def on_data_update(self, instance):
        """Default handler for the ``on_data_update`` event."""
        pass

    def create_plot(self, graph_3d, graph_2d):
        """Creates the heatmap plot and the per-channel line plots in the
        given graph widgets. Must be called before any data is displayed.
        """
        self.graph_3d = graph_3d
        self.plot_3d = plot = ContourPlot()
        # nearest-neighbor filtering keeps sample cells sharp in the heatmap
        plot.mag_filter = 'nearest'
        plot.min_filter = 'nearest'
        graph_3d.add_plot(plot)
        self.recompute_bar()

        self.graph_2d = graph_2d
        self.plots_2d = plots = []
        for i in range(self.n_channels):
            plot = LinePlot(color=self._plot_colors[i], line_width=dp(2))
            graph_2d.add_plot(plot)
            plots.append(plot)

    def show_hide_channel(self, channel, visible):
        """Shows or hides the 2D line plot of the given channel (0-based)."""
        self.active_channels[channel] = visible
        if visible:
            self.graph_2d.add_plot(self.plots_2d[channel])
            # keep the event markers drawn above the re-added plot
            self._event_plots_trigger()
        else:
            self.graph_2d.remove_plot(self.plots_2d[channel])

    def recompute_bar(self, *args):
        """Re-creates the colorbar legend texture for linear or log scale."""
        tex = self.scale_tex = Texture.create(size=(250, 1), colorfmt='rgb')
        tex.mag_filter = tex.min_filter = 'linear'

        if self.log_z:
            # log-spaced sample points, normalized back into [0, 1]
            points = (np.logspace(0, 1, 250, endpoint=True) - 1) / 9
        else:
            points = np.linspace(0, 1, 250, endpoint=True)
        data = cm.get_cmap()(points, bytes=True)[:, :3]
        tex.blit_buffer(data.tobytes(), colorfmt='rgb', bufferfmt='ubyte')

    def process_data(self, device: StratuscentBase):
        """Appends the device's newest sample to the buffer and triggers a
        redraw. The buffer capacity doubles whenever it fills up.
        """
        self.dispatch('on_data_update', self)
        if self._data is None:
            self.t0 = device.timestamp
            # np.float was deprecated in numpy 1.20 and removed in 1.24;
            # the builtin float dtype is equivalent on all versions
            self._data = np.empty(
                (len(device.sensors_data) + 1, 10), dtype=float)

        data = self._data
        self.t = device.timestamp
        data[:self.n_channels, self.num_points] = device.sensors_data
        data[self.n_channels, self.num_points] = device.timestamp
        self.num_points += 1

        s = data.shape[1]
        if self.num_points == s:
            # buffer is full: double the capacity
            self._data = np.concatenate(
                (data,
                 np.empty((len(device.sensors_data) + 1, s), dtype=float)),
                axis=1
            )
        self._draw_trigger()

    def time_to_index(self, t):
        """Returns the buffer column index closest to time ``t``, clamped
        to the valid range (0 when there is no data yet).
        """
        if self._data is None:
            return 0
        n = self.num_points
        t0 = self.t0
        total_t = self._data[self.n_channels, n - 1] - t0
        if not total_t:
            return 0
        return max(min(int(n * (t - t0) / total_t), n - 1), 0)

    def get_data_from_graph_pos(self, x_frac, y_frac, plot_3d):
        """Returns the hover-label text for a fractional graph position,
        or None when there's no data to show.
        """
        data = self.get_visible_data()
        if data is None:
            return

        n = data.shape[1]
        i = min(int(x_frac * n), n - 1)
        # NOTE(review): t is derived from the 3D graph's axis even for the
        # 2D graph; the two axes can differ when t_start/t_end extend past
        # the sampled data -- confirm this is intended
        t = (self.graph_3d.xmax - self.graph_3d.xmin) * x_frac + \
            self.graph_3d.xmin

        if plot_3d:
            # vertical position selects the channel on the heatmap
            channel = min(int(y_frac * self.n_channels), self.n_channels - 1)
            value = data[channel, i]
            return f'{t:0.1f}, {channel + 1}, {value:0.1f}'

        if self.range_chan in ('mouse', 'all'):
            if self.log_z:
                # invert the log display mapping to get the linear fraction
                y_frac = (np.power(10, y_frac) - 1) / 9
            y = (self.graph_2d.ymax - self.graph_2d.ymin) * y_frac + \
                self.graph_2d.ymin
            return f'{t:0.1f}, {y:0.3f}'

        channel = int(self.range_chan)
        value = data[channel - 1, i]
        return f'{t:0.1f}, {channel}, {value:0.1f}'

    def get_data_indices_range(self):
        """Returns the (start, end) column indices of the visible window."""
        s = 0
        if self.t_start:
            s = self.time_to_index(self.t_start)

        e = self.num_points
        if self.t_end:
            e = self.time_to_index(self.t_end) + 1

        # t_last only applies when no explicit start time is set
        if not self.t_start and self.t_last:
            if self.t_end:
                s = self.time_to_index(self.t_end - self.t_last)
            else:
                s = self.time_to_index(self.t - self.t_last)
        return s, e

    def get_visible_data(self):
        """Returns the visible slice of the buffer, or None if no data."""
        data = self._data
        if data is None:
            return None
        s, e = self.get_data_indices_range()
        return data[:, s:e]

    def draw_data(self, *args):
        """Recomputes and redraws the 2D line plots and the 3D heatmap
        image from the currently visible slice of the data buffer.
        """
        data = self.get_visible_data()
        if data is None:
            return
        n_channels = self.n_channels

        # np.bool was deprecated in numpy 1.20 and removed in 1.24; the
        # builtin bool dtype is equivalent on all versions
        inactive_channels = np.logical_not(
            np.asarray(self.active_channels, dtype=bool))

        if self.auto_range or self.min_val is None or self.max_val is None:
            min_val = self.min_val = np.min(
                data[:n_channels, :], axis=1, keepdims=True)
            max_val = self.max_val = np.max(
                data[:n_channels, :], axis=1, keepdims=True)

            for widget, mn, mx in zip(
                    self.channels_stats, min_val[:, 0], max_val[:, 0]):
                widget.min_val = mn.item()
                widget.max_val = mx.item()
        else:
            min_val = self.min_val
            max_val = self.max_val

        if self.global_range:
            # reduce to scalar
            min_val[:, 0] = np.min(min_val)
            max_val[:, 0] = np.max(max_val)

        zero_range = min_val[:, 0] == max_val[:, 0]
        scaled_data = np.clip(data[:n_channels, :], min_val, max_val) - min_val
        max_val = max_val - min_val
        # hidden channels and channels with no range are drawn as zero
        scaled_data[inactive_channels, :] = 0
        scaled_data[zero_range, :] = 0
        not_zero = np.logical_not(np.logical_or(zero_range, inactive_channels))

        times = data[n_channels, :].tolist()
        log_z = self.log_z
        for i, plot in enumerate(self.plots_2d):
            if not_zero[i]:
                d = scaled_data[i, :] / max_val[i, 0]
                if log_z:
                    # keep values in [.1, 1] so they stay visible under the
                    # log color/axis mapping
                    d = d * .9 + .1
                plot.points = list(zip(times, d.tolist()))
            else:
                plot.points = []

        if np.any(not_zero):
            if log_z:
                # min val will be 1 (log 1 == 0)
                max_val = np.log10(max_val + 1)
                scaled_data[not_zero] = np.log10(scaled_data[not_zero] + 1)

            scaled_data[not_zero] /= max_val[not_zero]

        np_data = cm.get_cmap()(scaled_data, bytes=True)
        self.plot_3d.rgb_data = np_data[:, :, :3]

    def set_range_from_pos(self, open_pos, close_pos, plot_3d):
        """Sets the display min/max from a drag selection on a graph.

        ``open_pos``/``close_pos`` are the fractional (x, y) drag end points
        (either may be None when that end fell outside the plot) and
        ``plot_3d`` says which graph the drag happened on.
        """
        data = self.get_visible_data()
        if data is None or self.min_val is None or self.max_val is None:
            return
        chan = self.range_chan

        # convert the horizontal drag span into buffer column indices
        n = data.shape[1]
        s = 0
        e = n - 1
        if open_pos is not None:
            x, y = open_pos
            s = min(int(x * n), n - 1)
        if close_pos is not None:
            x, y = close_pos
            e = min(int(x * n), n - 1)
        if s > e:
            s, e = e, s
        e += 1

        if chan == 'all' or chan == 'mouse' and not plot_3d:
            # set every channel's range from the selected time span
            self.min_val = np.min(
                data[:self.n_channels, s:e], axis=1, keepdims=True)
            self.max_val = np.max(
                data[:self.n_channels, s:e], axis=1, keepdims=True)

            for widget, mn, mx in zip(
                    self.channels_stats, self.min_val[:, 0],
                    self.max_val[:, 0]):
                widget.min_val = mn.item()
                widget.max_val = mx.item()
        else:
            if chan == 'mouse':
                # on the 3D graph the vertical position selects the channel
                _, y = open_pos or close_pos
                i = min(int(y * self.n_channels), self.n_channels - 1)
            else:
                i = int(chan) - 1

            self.min_val[i, 0] = np.min(data[i, s:e])
            self.max_val[i, 0] = np.max(data[i, s:e])

            widget = self.channels_stats[i]
            widget.min_val = self.min_val[i, 0].item()
            widget.max_val = self.max_val[i, 0].item()

        self._draw_trigger()

    async def run_device(self):
        """Opens the device and streams samples until ``done`` is set."""
        async with ThreadExecutor() as executor:
            async with executor.remote_instance(self.device, 'sensor'):
                async with self.device as device:
                    async with device.read_sensor_values() as aiter:
                        async for _ in aiter:
                            if self.done:
                                break
                            self.process_data(device)

    @app_error
    @kivy_run_in_async
    def start(self):
        """Resets all state and runs the device until :meth:`stop`."""
        # drop the event markers from any previous run
        for graph, plots in zip(
                (self.graph_2d, self.graph_3d), self._event_plots):
            for plot in plots:
                graph.remove_plot(plot)
        self._event_plots = [], []

        self._data = None
        self.num_points = 0
        self.t0 = 0
        self.done = False
        self.min_val = self.max_val = None
        self.t_start = None
        self.t_end = None
        self.t = 0

        if self.virtual:
            cls = VirtualStratuscentSensor
        else:
            cls = StratuscentSensor
        self.device = cls(com_port=self.com_port)

        try:
            yield mark(self.run_device)
        except KivyEventCancelled:
            pass
        finally:
            self.device = None

    @app_error
    def stop(self):
        """Requests that the read loop in :meth:`run_device` exits."""
        self.done = True

    def add_channel_selection(self, container):
        """Creates one ChannelControl widget per channel in ``container``."""
        ChannelControl = Factory.ChannelControl
        channels = self.channels_stats = []
        for i in range(self.n_channels):
            widget = ChannelControl()
            widget.dev = self
            widget.channel = i
            widget.plot_color = self._plot_colors[i]
            container.add_widget(widget)
            channels.append(widget)

    def set_channel_min_val(self, channel, value):
        """Manually sets the display minimum of a channel (0-based)."""
        if self.min_val is None:
            return
        value = float(value)
        self.min_val[channel, 0] = value
        self._draw_trigger()

    def set_channel_max_val(self, channel, value):
        """Manually sets the display maximum of a channel (0-based)."""
        if self.max_val is None:
            return
        value = float(value)
        self.max_val[channel, 0] = value
        self._draw_trigger()

    @staticmethod
    def get_data_header():
        """Returns the column header of the sensor's data rows."""
        return StratuscentBase.get_data_header()

    def add_event(self, t, name):
        """Adds a vertical event marker at time ``t`` to both graphs.

        NOTE(review): ``name`` is currently unused by the markers -- confirm
        whether it should be displayed.
        """
        p = LinePlot(color=(0, 0, 0), line_width=dp(3))
        p.points = [(t, .1), (t, 1)]
        self.graph_2d.add_plot(p)
        self._event_plots[0].append(p)

        p = LinePlot(color=(0, 0, 0), line_width=dp(3))
        p.points = [(t, 0), (t, self.graph_3d.ymax)]
        self.graph_3d.add_plot(p)
        self._event_plots[1].append(p)
| import numpy as np
from typing import List, Dict, Optional, Tuple
from matplotlib import cm
from kivy_trio.to_trio import kivy_run_in_async, mark, KivyEventCancelled
from pymoa_remote.threading import ThreadExecutor
from base_kivy_app.app import app_error
from kivy_garden.graph import Graph, ContourPlot, LinePlot
from kivy.metrics import dp
from kivy.properties import ObjectProperty, StringProperty, BooleanProperty, \
NumericProperty, ListProperty
from kivy.uix.boxlayout import BoxLayout
from kivy.clock import Clock
from kivy.graphics.texture import Texture
from kivy.factory import Factory
from kivy.uix.widget import Widget
from kivy_garden.graph import Graph
from nsniff.device import StratuscentSensor, VirtualStratuscentSensor, \
StratuscentBase
__all__ = ('DeviceDisplay', )
class SniffGraph(Graph):
    """A ``Graph`` with mouse interactivity.

    Shows a floating readout of the value under the cursor and lets the
    user drag out a span that the owning ``DeviceDisplay`` converts into a
    display range.
    """

    # owning display that supplies hover text and consumes drag selections
    dev_display: 'DeviceDisplay' = None
    # floating readout label, created on first use
    pos_label: Widget = None
    # hover readout is suppressed while the graph is not visible
    visible = BooleanProperty(False)
    # distinguishes the heatmap (3D) graph from the line-plot (2D) graph
    is_3d = True
    def _scale_percent_pos(self, pos):
        """Map a widget-space position to fractions of the plot view area."""
        width, height = self.view_size
        frac_x, frac_y = pos
        frac_x -= self.x + self.view_pos[0]
        frac_y -= self.y + self.view_pos[1]
        frac_x = frac_x / width if width else 0
        frac_y = frac_y / height if height else 0
        return frac_x, frac_y
    def show_pos_label(self):
        """Ensure the hover label exists and is attached to the window."""
        if self.pos_label is None:
            self.pos_label = Factory.GraphPosLabel()
        if self.pos_label.parent is None:
            from kivy.core.window import Window
            Window.add_widget(self.pos_label)
    def hide_pos_label(self):
        """Detach the hover label from the window if it is attached."""
        label = self.pos_label
        if label is None or label.parent is None:
            return
        from kivy.core.window import Window
        Window.remove_widget(label)
    def on_kv_post(self, base_widget):
        # follow the mouse globally to drive the hover readout
        from kivy.core.window import Window
        Window.fbind('mouse_pos', self._set_hover_label)
    def _set_hover_label(self, *args):
        """Refresh the hover readout for the current mouse position."""
        from kivy.core.window import Window
        pos = self.to_parent(*self.to_widget(*Window.mouse_pos))
        # a window-level widget other than the label itself covers us
        covered = len(Window.children) > 1 and \
            Window.children[0] is not self.pos_label
        if not self.visible or covered or not self.collide_point(*pos):
            self.hide_pos_label()
            return
        frac_x, frac_y = self._scale_percent_pos(pos)
        if frac_x > 1 or frac_x < 0 or frac_y > 1 or frac_y < 0:
            self.hide_pos_label()
            return
        self.show_pos_label()
        text = self.dev_display.get_data_from_graph_pos(
            frac_x, frac_y, self.is_3d)
        if not text:
            self.hide_pos_label()
            return
        self.pos_label.text = text
        mouse_x, mouse_y = Window.mouse_pos
        # offset from the cursor, clamped to stay inside the window
        self.pos_label.pos = (
            min(mouse_x + dp(20), Window.width - dp(200)), mouse_y + dp(20))
    def on_touch_down(self, touch):
        # begin a drag selection when the touch lands inside the plot area
        if super().on_touch_down(touch):
            return True
        if not self.collide_point(*touch.pos):
            return False
        frac_x, frac_y = self._scale_percent_pos(touch.pos)
        if frac_x > 1 or frac_x < 0 or frac_y > 1 or frac_y < 0:
            return False
        touch.ud[f'sniff_graph.{self.uid}'] = frac_x, frac_y
        touch.grab(self)
        return True
    def on_touch_up(self, touch):
        # finish a drag selection begun in on_touch_down
        if super().on_touch_up(touch):
            return True
        start = touch.ud.get(f'sniff_graph.{self.uid}', None)
        if start is None:
            return False
        touch.ungrab(self)
        end = None
        if self.collide_point(*touch.pos):
            frac_x, frac_y = self._scale_percent_pos(touch.pos)
            if not (frac_x > 1 or frac_x < 0 or frac_y > 1 or frac_y < 0):
                end = frac_x, frac_y
        if start or end:
            self.dev_display.set_range_from_pos(start, end, self.is_3d)
            return True
        return False
class DeviceDisplay(BoxLayout):
__events__ = ('on_data_update', )
_config_props_ = (
'com_port', 'virtual', 'log_z', 'auto_range', 'global_range',
'range_chan', 'n_channels')
com_port: str = StringProperty('')
device: Optional[StratuscentBase] = ObjectProperty(
None, allownone=True, rebind=True)
virtual = BooleanProperty(False)
n_channels = 32
t0 = NumericProperty(0)
t = NumericProperty(0)
t_start = NumericProperty(None, allownone=True)
t_end = NumericProperty(None, allownone=True)
t_last = NumericProperty(None, allownone=True)
done = False
graph_3d: Graph = None
plot_3d: ContourPlot = None
graph_2d: Graph = None
plots_2d: List[LinePlot] = []
_data: Optional[np.ndarray] = None
num_points: int = NumericProperty(0)
log_z = BooleanProperty(False)
auto_range = BooleanProperty(True)
scale_tex = ObjectProperty(None, allownone=True)
global_range = BooleanProperty(False)
min_val: Optional[np.ndarray] = None
max_val: Optional[np.ndarray] = None
range_chan: str = StringProperty('mouse')
active_channels = ListProperty([True, ] * n_channels)
channels_stats = []
_draw_trigger = None
_t_trigger = None
_plot_colors = []
_event_plots: Tuple[List[LinePlot], List[LinePlot]] = ([], [])
_event_plots_trigger = None
def __init__(self, **kwargs):
self._plot_colors = cm.get_cmap('tab20').colors + \
cm.get_cmap('tab20b').colors
super().__init__(**kwargs)
self._event_plots = [], []
self._event_plots_trigger = Clock.create_trigger(
self._move_events_to_top)
self._draw_trigger = Clock.create_trigger(self.draw_data)
self.fbind('log_z', self.recompute_bar)
self.fbind('log_z', self._draw_trigger)
self.fbind('auto_range', self._draw_trigger)
self.fbind('global_range', self._draw_trigger)
self.fbind('t_start', self._draw_trigger)
self.fbind('t_end', self._draw_trigger)
self.fbind('t_last', self._draw_trigger)
self.fbind('active_channels', self._draw_trigger)
self._t_trigger = Clock.create_trigger(self._set_graph_t_axis)
self.fbind('t_start', self._t_trigger)
self.fbind('t_end', self._t_trigger)
self.fbind('t_last', self._t_trigger)
self.fbind('t0', self._t_trigger)
self.fbind('t', self._t_trigger)
def _set_graph_t_axis(self, *args):
xmax = self.t_end if self.t_end is not None else self.t
if self.t_start is not None:
xmin = self.t_start
elif self.t_last is not None:
xmin = xmax - self.t_last
else:
xmin = self.t0
if xmin > xmax:
xmin = xmax
self.graph_2d.xmin = xmin
self.graph_2d.xmax = xmax
self.graph_3d.xmin = max(min(xmin, self.t), self.t0)
self.graph_3d.xmax = max(min(xmax, self.t), self.t0)
def _move_events_to_top(self, *args):
plots2, plots3 = self._event_plots
graph2 = self.graph_2d
graph3 = self.graph_3d
for plot in plots2:
graph2.remove_plot(plot)
graph2.add_plot(plot)
for plot in plots3:
graph3.remove_plot(plot)
graph3.add_plot(plot)
def on_data_update(self, instance):
    """Default handler for the ``on_data_update`` event, dispatched by
    process_data once per incoming sample; subscribers bind to it."""
    pass
def create_plot(self, graph_3d, graph_2d):
    """Attach this controller to its two graph widgets: create the 3d
    contour plot and one 2d line plot per channel."""
    self.graph_3d = graph_3d
    self.plot_3d = plot = ContourPlot()
    # nearest-neighbour filtering keeps individual samples crisp
    plot.mag_filter = 'nearest'
    plot.min_filter = 'nearest'
    graph_3d.add_plot(plot)
    self.recompute_bar()
    self.graph_2d = graph_2d
    self.plots_2d = plots = []
    for i in range(self.n_channels):
        plot = LinePlot(color=self._plot_colors[i], line_width=dp(2))
        graph_2d.add_plot(plot)
        plots.append(plot)
def show_hide_channel(self, channel, visible):
    """Toggle the 2d line plot of ``channel`` on or off."""
    self.active_channels[channel] = visible
    plot = self.plots_2d[channel]
    if not visible:
        self.graph_2d.remove_plot(plot)
    else:
        self.graph_2d.add_plot(plot)
        # event markers must be re-stacked above the re-added plot
        self._event_plots_trigger()
def recompute_bar(self, *args):
    """Regenerate the 250x1 colour-scale texture shown beside the graph,
    using a log-spaced gradient when log_z is enabled."""
    tex = self.scale_tex = Texture.create(size=(250, 1), colorfmt='rgb')
    tex.mag_filter = tex.min_filter = 'linear'
    if self.log_z:
        # map [0, 1] through the same log curve applied to the data
        points = (np.logspace(0, 1, 250, endpoint=True) - 1) / 9
    else:
        points = np.linspace(0, 1, 250, endpoint=True)
    # default matplotlib colormap, keep RGB bytes only (drop alpha)
    data = cm.get_cmap()(points, bytes=True)[:, :3]
    tex.blit_buffer(data.tobytes(), colorfmt='rgb', bufferfmt='ubyte')
def process_data(self, device: StratuscentBase):
    """Append one sample from ``device`` to the buffer and schedule a redraw.

    The buffer has one row per sensor channel plus a final row holding the
    timestamps; when full its capacity is doubled.
    """
    self.dispatch('on_data_update', self)
    if self._data is None:
        self.t0 = device.timestamp
        # np.float was removed in NumPy 1.24; the builtin float is the
        # equivalent dtype (float64).
        self._data = np.empty(
            (len(device.sensors_data) + 1, 10), dtype=float)
    data = self._data
    self.t = device.timestamp
    data[:self.n_channels, self.num_points] = device.sensors_data
    data[self.n_channels, self.num_points] = device.timestamp
    self.num_points += 1
    s = data.shape[1]
    if self.num_points == s:
        # buffer full: double the capacity
        self._data = np.concatenate(
            (data,
             np.empty((len(device.sensors_data) + 1, s), dtype=float)),
            axis=1
        )
    self._draw_trigger()
def time_to_index(self, t):
    """Map a time value onto the nearest sample index, clamped to
    [0, num_points - 1]; returns 0 when no data exists or the recorded
    span is zero.

    Assumes samples are roughly uniformly spaced between t0 and the last
    recorded timestamp.
    """
    if self._data is None:
        return 0
    count = self.num_points
    start = self.t0
    span = self._data[self.n_channels, count - 1] - start
    if not span:
        return 0
    idx = int(count * (t - start) / span)
    return min(max(idx, 0), count - 1)
def get_data_from_graph_pos(self, x_frac, y_frac, plot_3d):
    """Return a status string describing the data under the pointer.

    ``x_frac``/``y_frac`` are the pointer position as fractions of the
    graph area; ``plot_3d`` selects which graph the pointer is over.
    Returns None when no data is visible yet.
    """
    data = self.get_visible_data()
    if data is None:
        return
    n = data.shape[1]
    i = min(int(x_frac * n), n - 1)
    t = (self.graph_3d.xmax - self.graph_3d.xmin) * x_frac + \
        self.graph_3d.xmin
    if plot_3d:
        # on the contour plot the y axis is the channel index
        channel = min(int(y_frac * self.n_channels), self.n_channels - 1)
        value = data[channel, i]
        return f'{t:0.1f}, {channel + 1}, {value:0.1f}'
    if self.range_chan in ('mouse', 'all'):
        if self.log_z:
            # undo the log display scaling back to a linear fraction
            y_frac = (np.power(10, y_frac) - 1) / 9
        y = (self.graph_2d.ymax - self.graph_2d.ymin) * y_frac + \
            self.graph_2d.ymin
        return f'{t:0.1f}, {y:0.3f}'
    # range_chan names a specific 1-based channel
    channel = int(self.range_chan)
    value = data[channel - 1, i]
    return f'{t:0.1f}, {channel}, {value:0.1f}'
def get_data_indices_range(self):
    """Return the (start, end) sample-index window selected by
    t_start/t_end/t_last; ``end`` is exclusive.

    NOTE(review): truthiness tests mean a boundary of exactly 0 is
    treated as "not set" -- presumably intended; confirm with callers.
    """
    to_index = self.time_to_index
    start = to_index(self.t_start) if self.t_start else 0
    end = to_index(self.t_end) + 1 if self.t_end else self.num_points
    if self.t_last and not self.t_start:
        # anchor the trailing window at t_end, or at "now" if unset
        anchor = self.t_end if self.t_end else self.t
        start = to_index(anchor - self.t_last)
    return start, end
def get_visible_data(self):
    """Return the slice of the buffer inside the visible time window,
    or None when nothing has been recorded yet."""
    if self._data is None:
        return None
    start, end = self.get_data_indices_range()
    return self._data[:, start:end]
def draw_data(self, *args):
    """Redraw the 2d line plots and the 3d contour image from the visible
    window, scaling each channel into [0, 1] (optionally log-scaled)."""
    data = self.get_visible_data()
    if data is None:
        return
    n_channels = self.n_channels
    # np.bool was removed in NumPy 1.24; the builtin bool is the
    # correct dtype here.
    inactive_channels = np.logical_not(
        np.asarray(self.active_channels, dtype=bool))
    if self.auto_range or self.min_val is None or self.max_val is None:
        min_val = self.min_val = np.min(
            data[:n_channels, :], axis=1, keepdims=True)
        max_val = self.max_val = np.max(
            data[:n_channels, :], axis=1, keepdims=True)
        # keep the per-channel stat widgets in sync with the new range
        for widget, mn, mx in zip(
                self.channels_stats, min_val[:, 0], max_val[:, 0]):
            widget.min_val = mn.item()
            widget.max_val = mx.item()
    else:
        min_val = self.min_val
        max_val = self.max_val
    if self.global_range:
        # reduce to scalar
        min_val[:, 0] = np.min(min_val)
        max_val[:, 0] = np.max(max_val)
    zero_range = min_val[:, 0] == max_val[:, 0]
    scaled_data = np.clip(data[:n_channels, :], min_val, max_val) - min_val
    max_val = max_val - min_val
    # hidden channels and degenerate ranges draw as zero
    scaled_data[inactive_channels, :] = 0
    scaled_data[zero_range, :] = 0
    not_zero = np.logical_not(np.logical_or(zero_range, inactive_channels))
    times = data[n_channels, :].tolist()
    log_z = self.log_z
    for i, plot in enumerate(self.plots_2d):
        if not_zero[i]:
            d = scaled_data[i, :] / max_val[i, 0]
            if log_z:
                # shift into [.1, 1] so log-scaled values stay visible
                d = d * .9 + .1
            plot.points = list(zip(times, d.tolist()))
        else:
            plot.points = []
    if np.any(not_zero):
        if log_z:
            # min val will be 1 (log 1 == 0)
            max_val = np.log10(max_val + 1)
            scaled_data[not_zero] = np.log10(scaled_data[not_zero] + 1)
        scaled_data[not_zero] /= max_val[not_zero]
    np_data = cm.get_cmap()(scaled_data, bytes=True)
    self.plot_3d.rgb_data = np_data[:, :, :3]
def set_range_from_pos(self, open_pos, close_pos, plot_3d):
    """Set the display range from a drag selection on a graph.

    ``open_pos``/``close_pos`` are (x, y) fractions of the graph area (either
    may be None); ``plot_3d`` tells which graph the drag happened on.
    Depending on range_chan this updates all channels or a single one.
    """
    data = self.get_visible_data()
    if data is None or self.min_val is None or self.max_val is None:
        return
    chan = self.range_chan
    n = data.shape[1]
    s = 0
    e = n - 1
    if open_pos is not None:
        x, y = open_pos
        s = min(int(x * n), n - 1)
    if close_pos is not None:
        x, y = close_pos
        e = min(int(x * n), n - 1)
    # normalise so s <= e; e becomes exclusive
    if s > e:
        s, e = e, s
    e += 1
    if chan == 'all' or chan == 'mouse' and not plot_3d:
        # per-channel range from the selected time span, for all channels
        self.min_val = np.min(
            data[:self.n_channels, s:e], axis=1, keepdims=True)
        self.max_val = np.max(
            data[:self.n_channels, s:e], axis=1, keepdims=True)
        for widget, mn, mx in zip(
                self.channels_stats, self.min_val[:, 0],
                self.max_val[:, 0]):
            widget.min_val = mn.item()
            widget.max_val = mx.item()
    else:
        if chan == 'mouse':
            # on the contour plot the y coordinate picks the channel
            _, y = open_pos or close_pos
            i = min(int(y * self.n_channels), self.n_channels - 1)
        else:
            # range_chan names a specific 1-based channel
            i = int(chan) - 1
        self.min_val[i, 0] = np.min(data[i, s:e])
        self.max_val[i, 0] = np.max(data[i, s:e])
        widget = self.channels_stats[i]
        widget.min_val = self.min_val[i, 0].item()
        widget.max_val = self.max_val[i, 0].item()
    self._draw_trigger()
async def run_device(self):
    """Stream samples from the sensor until ``done`` is set.

    The device runs on a thread executor; every value yielded by the
    device's async iterator is forwarded to process_data.
    """
    async with ThreadExecutor() as executor:
        async with executor.remote_instance(self.device, 'sensor'):
            async with self.device as device:
                async with device.read_sensor_values() as aiter:
                    async for _ in aiter:
                        if self.done:
                            break
                        self.process_data(device)
@app_error
@kivy_run_in_async
def start(self):
    """Reset all state and start streaming from a (possibly virtual) sensor.

    Runs as a kivy_run_in_async generator: ``yield mark(...)`` suspends
    until run_device finishes or the request is cancelled.
    """
    # drop event markers left over from any previous run
    for graph, plots in zip(
            (self.graph_2d, self.graph_3d), self._event_plots):
        for plot in plots:
            graph.remove_plot(plot)
    self._event_plots = [], []
    # reset the data buffer, ranges and time window
    self._data = None
    self.num_points = 0
    self.t0 = 0
    self.done = False
    self.min_val = self.max_val = None
    self.t_start = None
    self.t_end = None
    self.t = 0
    if self.virtual:
        cls = VirtualStratuscentSensor
    else:
        cls = StratuscentSensor
    self.device = cls(com_port=self.com_port)
    try:
        yield mark(self.run_device)
    except KivyEventCancelled:
        pass
    finally:
        # always release the device, even on cancellation
        self.device = None
@app_error
def stop(self):
    """Ask the read loop in run_device to exit after the next sample."""
    self.done = True
def add_channel_selection(self, container):
    """Create one ChannelControl widget per channel, add each to
    ``container`` and remember them in channels_stats."""
    control_cls = Factory.ChannelControl
    self.channels_stats = []
    for channel in range(self.n_channels):
        control = control_cls()
        control.dev = self
        control.channel = channel
        control.plot_color = self._plot_colors[channel]
        container.add_widget(control)
        self.channels_stats.append(control)
def set_channel_min_val(self, channel, value):
    """Manually override the display minimum of one channel and redraw.

    No-op until a range has been computed (min_val is None before the
    first draw)."""
    if self.min_val is None:
        return
    self.min_val[channel, 0] = float(value)
    self._draw_trigger()
def set_channel_max_val(self, channel, value):
    """Manually override the display maximum of one channel and redraw.

    No-op until a range has been computed (max_val is None before the
    first draw)."""
    if self.max_val is None:
        return
    self.max_val[channel, 0] = float(value)
    self._draw_trigger()
@staticmethod
def get_data_header():
    """Return the data header as defined by the device base class."""
    return StratuscentBase.get_data_header()
def add_event(self, t, name):
    """Draw a vertical black event marker at time ``t`` on both graphs.

    ``name`` is currently unused here -- presumably reserved for
    labelling; TODO confirm with callers.
    """
    for graph, markers, y0, y1 in (
            (self.graph_2d, self._event_plots[0], .1, 1),
            (self.graph_3d, self._event_plots[1], 0, self.graph_3d.ymax)):
        marker = LinePlot(color=(0, 0, 0), line_width=dp(3))
        marker.points = [(t, y0), (t, y1)]
        graph.add_plot(marker)
        markers.append(marker)
| en | 0.816254 | # reduce to scalar # min val will be 1 (log 1 == 0) | 1.934966 | 2 |
practical-penguins/trivia_tavern/trivia_runner/models.py | Vthechamp22/summer-code-jam-2021 | 40 | 6623834 | import random
import string
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from trivia_builder.models import TriviaQuiz, TriviaQuestion
from phonenumber_field.modelfields import PhoneNumberField
class Player(models.Model):
    """A participant (team) in one active trivia quiz session."""

    team_name = models.CharField(max_length=24, default='')
    phone_number = models.CharField(max_length=12)
    # Model name needs to be in quotes according to
    # https://docs.djangoproject.com/en/3.0/ref/models/fields/#foreignkey
    active_quiz = models.ForeignKey('ActiveTriviaQuiz', on_delete=models.CASCADE)

    def get_answers(self):
        """Build a per-question feedback summary for this player."""
        lines = []
        answer_set = Answer.objects.filter(player=self)
        for number, answer in enumerate(answer_set, start=1):
            if answer.is_correct():
                lines.append(
                    f'Question {number}: your answer: {answer.value} is correct\n')
            else:
                lines.append(
                    f'Question {number}: your answer: {answer.value} '
                    f'does not match {answer.question.question_answer}\n')
        return ''.join(lines)

    def __str__(self):
        return f'{self.phone_number} playing {self.active_quiz.trivia_quiz.name}'
class Answer(models.Model):
    """One player's submitted answer to one quiz question."""

    # Raw text the player submitted.
    value = models.CharField(max_length=500, default='')
    player = models.ForeignKey(Player, on_delete=models.CASCADE)
    question = models.ForeignKey(TriviaQuestion, on_delete=models.CASCADE)

    def is_correct(self):
        """Case-insensitive comparison against the stored correct answer."""
        return self.value.upper() == self.question.question_answer.upper()
def gen_session_code():
    """Generate a 6-character uppercase-alphanumeric session join code.

    Uses ``secrets`` rather than ``random``: join codes act as access
    tokens for a quiz session, so they should come from a CSPRNG.
    """
    import secrets  # local import keeps this fix self-contained
    alphabet = string.ascii_uppercase + string.digits
    return ''.join(secrets.choice(alphabet) for _ in range(6))
class ActiveTriviaQuiz(models.Model):
    """A live quiz session: one quiz, a join code, a master and players."""

    trivia_quiz = models.ForeignKey(TriviaQuiz, on_delete=models.CASCADE)
    # Short join code handed to players; generated once, never edited.
    session_code = models.CharField(max_length=6, unique=True,
                                    default=gen_session_code, editable=False)
    # Index of the question currently being asked.
    current_question_index = models.IntegerField(default=0)
    session_master = models.ForeignKey(User, on_delete=models.CASCADE, related_name='quiz_master')
    start_time = models.DateTimeField(default=timezone.now)
    players = models.ManyToManyField(Player, related_name='quiz_players')

    def __str__(self):
        return (f'Active Quiz:{self.trivia_quiz.name} '
                f'q#:{self.current_question_index} '
                f' players:{self.players.count()}'
                )
class PhoneNumber(models.Model):
    """Standalone record holding one validated phone number."""

    phone_number = PhoneNumberField()
| import random
import string
from django.contrib.auth.models import User
from django.db import models
from django.utils import timezone
from trivia_builder.models import TriviaQuiz, TriviaQuestion
from phonenumber_field.modelfields import PhoneNumberField
class Player(models.Model):
team_name = models.CharField(max_length=24, default='')
phone_number = models.CharField(max_length=12)
# Model name needs to be in quotes according to
# https://docs.djangoproject.com/en/3.0/ref/models/fields/#foreignkey
active_quiz = models.ForeignKey('ActiveTriviaQuiz', on_delete=models.CASCADE)
def get_answers(self):
answer_set = Answer.objects.filter(player=self)
answers = ""
for i, answer in enumerate(answer_set, start=1):
if answer.is_correct():
answers += f'Question {i}: your answer: {answer.value} is correct\n'
else:
answers += f'Question {i}: your answer: {answer.value} ' \
f'does not match {answer.question.question_answer}\n'
return answers
def __str__(self):
return f'{self.phone_number} playing {self.active_quiz.trivia_quiz.name}'
class Answer(models.Model):
value = models.CharField(max_length=500, default='')
player = models.ForeignKey(Player, on_delete=models.CASCADE)
question = models.ForeignKey(TriviaQuestion, on_delete=models.CASCADE)
def is_correct(self):
return self.value.upper() == self.question.question_answer.upper()
def gen_session_code():
session_code_val = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6))
return session_code_val
class ActiveTriviaQuiz(models.Model):
trivia_quiz = models.ForeignKey(TriviaQuiz, on_delete=models.CASCADE)
session_code = models.CharField(max_length=6, unique=True,
default=gen_session_code, editable=False)
current_question_index = models.IntegerField(default=0)
session_master = models.ForeignKey(User, on_delete=models.CASCADE, related_name='quiz_master')
start_time = models.DateTimeField(default=timezone.now)
players = models.ManyToManyField(Player, related_name='quiz_players')
def __str__(self):
return (f'Active Quiz:{self.trivia_quiz.name} '
f'q#:{self.current_question_index} '
f' players:{self.players.count()}'
)
class PhoneNumber(models.Model):
phone_number = PhoneNumberField()
| en | 0.737439 | # Model name needs to be in quotes according to # https://docs.djangoproject.com/en/3.0/ref/models/fields/#foreignkey #:{self.current_question_index} ' | 2.646549 | 3 |
unittests/configloaders/test_json_schema_validation.py | ONS-OpenData/gss-utils | 0 | 6623835 | <gh_stars>0
import json
from gssutils.csvcubedintegration.configloaders.jsonschemavalidation import (
validate_dict_against_schema_url,
)
def test_json_schema_validation_passes():
    """A well-formed metadata dict yields no errors against the remote
    dataset schema (v1.1.0)."""
    value: dict = json.loads(
        """
        {
            "id": "some-id",
            "published": "2020-01-01",
            "landingPage": "http://example.com/landing-page",
            "title" : "some title",
            "description" : "some description",
            "publisher" : "some publisher",
            "families" : ["some family"]
        }
    """
    )
    schema_url = "https://raw.githubusercontent.com/GSS-Cogs/family-schemas/main/dataset-schema-1.1.0.json"
    validation_errors = validate_dict_against_schema_url(value, schema_url)
    assert len(validation_errors) == 0, validation_errors
def test_json_schema_validation_fails():
    """A type error in the metadata ("description" as a number) yields
    exactly one validation error with the expected message."""
    value: dict = json.loads(
        """
        {
            "id": "some-id",
            "published": "2020-01-01",
            "landingPage": "http://example.com/landing-page",
            "title" : "some title",
            "description" : 3728,
            "publisher" : "some publisher",
            "families" : ["some family"]
        }
    """
    )
    schema_url = "https://raw.githubusercontent.com/GSS-Cogs/family-schemas/main/dataset-schema-1.1.0.json"
    validation_errors = validate_dict_against_schema_url(value, schema_url)
    assert len(validation_errors) == 1, validation_errors
    error = validation_errors[0]
    assert error.message == "3728 is not of type 'string'"
| import json
from gssutils.csvcubedintegration.configloaders.jsonschemavalidation import (
validate_dict_against_schema_url,
)
def test_json_schema_validation_passes():
value: dict = json.loads(
"""
{
"id": "some-id",
"published": "2020-01-01",
"landingPage": "http://example.com/landing-page",
"title" : "some title",
"description" : "some description",
"publisher" : "some publisher",
"families" : ["some family"]
}
"""
)
schema_url = "https://raw.githubusercontent.com/GSS-Cogs/family-schemas/main/dataset-schema-1.1.0.json"
validation_errors = validate_dict_against_schema_url(value, schema_url)
assert len(validation_errors) == 0, validation_errors
def test_json_schema_validation_fails():
value: dict = json.loads(
"""
{
"id": "some-id",
"published": "2020-01-01",
"landingPage": "http://example.com/landing-page",
"title" : "some title",
"description" : 3728,
"publisher" : "some publisher",
"families" : ["some family"]
}
"""
)
schema_url = "https://raw.githubusercontent.com/GSS-Cogs/family-schemas/main/dataset-schema-1.1.0.json"
validation_errors = validate_dict_against_schema_url(value, schema_url)
assert len(validation_errors) == 1, validation_errors
error = validation_errors[0]
assert error.message == "3728 is not of type 'string'" | en | 0.632064 | { "id": "some-id", "published": "2020-01-01", "landingPage": "http://example.com/landing-page", "title" : "some title", "description" : "some description", "publisher" : "some publisher", "families" : ["some family"] } { "id": "some-id", "published": "2020-01-01", "landingPage": "http://example.com/landing-page", "title" : "some title", "description" : 3728, "publisher" : "some publisher", "families" : ["some family"] } | 2.75659 | 3 |
question_classifier.py | Night0mistery/Knowledged_QA | 0 | 6623836 | <reponame>Night0mistery/Knowledged_QA
#!/usr/bin/env python3
# coding: utf-8
import os
import ahocorasick
from src.redis_helper import RedisHelper
from src.tireTree import Trie
from src.KeywordProcessor import KeywordProcessor
import copy
from backinfo import BackInfo
class QuestionClassifier:
    """Rule-based medical question classifier.

    Entities are recognised from dictionary files via a trie; question
    types are derived from keyword rules; per-user dialogue state is kept
    in Redis.
    """

    def __init__(self, entities, qwds_dict, question_judge_dict):
        cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])
        # redis: per-user dialogue state is stored under this key prefix
        self.prefix = 'kg_'
        self.redis = RedisHelper()
        # paths of the feature-word dictionaries, one file per entity type
        self.entities = entities
        self.path_dict = dict()
        for entity in entities:
            self.path_dict[entity] = os.path.join(cur_dir, 'dict/%s.txt' % entity)
        self.path_dict['deny'] = os.path.join(cur_dir, 'dict/deny.txt')
        # Load the feature words; entities are recognised from them.
        # Currently: disease, department, drug, food, complication,
        # diagnostic check item, drugs on sale.
        self.region_words = []
        self.deny_words = [i.strip() for i in open(self.path_dict['deny'], encoding='UTF-8') if i.strip()]
        self.wds_dict = dict()
        for entity in self.entities:
            self.wds_dict[entity] = [i.strip() for i in open(self.path_dict[entity], encoding='UTF-8') if i.strip()]
        for words in self.wds_dict.values():
            self.region_words = self.region_words + words
        self.region_words = set(self.region_words)
        # build the trie over all known entity words
        self.region_tree = Trie()
        for word in list(self.region_words):
            self.region_tree.add(word)
        # self.region_tree = self.build_actree(list(self.region_words))
        # build the word -> entity-type-list lookup
        self.wdtype_dict = self.build_wdtype_dict()
        # question (interrogative) keywords; deny words get their own type
        qwds_dict['deny'] = self.deny_words
        self.qwds_dict = qwds_dict
        # self.qwds_type = list(qwds_dict.keys())
        self.question_judge_dict = question_judge_dict
        # keyword processor (currently unused beyond construction)
        self.kp = KeywordProcessor()
        print('model init successfully!')
        return
def judge_qes(self, entity_types, key_word_types, ls_state):
# TODO 问答类型这一部分可以用flashtext加快查找速度
question_types = []
# question_type = 'others'
# 无实体有问题类型,向用户查询问题类型
if entity_types and not key_word_types:
question_types = ['no_key_word']
# 有实体无问题类型,向用户查询问题类型
elif key_word_types and not entity_types:
question_types = ['no_entity']
else:
for q_type, v in self.question_judge_dict.items():
key_word_list = v[0]
entity_type_list = v[1]
if key_word_list and entity_type_list:
flag = 1
for word in key_word_list:
if word not in key_word_types:
flag = 0
for e_type in entity_type_list:
if e_type not in entity_types:
flag = 0
# print('check entity:',q_type, flag)
if flag:
question_types.append(q_type)
"""
if question_types == []:
for q_type, v in self.question_judge_dict.items():
key_word_list = v[0]
entity_type_list = v[1]
if key_word_list == [] and entity_type_list:
flag = 1
for e_type in entity_type_list:
if e_type not in types:
flag = 0
if flag:
question_types.append(q_type)
"""
"""
# 症状
if self.check_words(self.symptom_qwds, question) and ('disease' in types):
question_type = 'disease_symptom'
question_types.append(question_type)
# 症状可能的疾病
if self.check_words(self.symptom_qwds, question) and ('symptom' in types):
question_type = 'symptom_disease'
question_types.append(question_type)
# 原因
if self.check_words(self.cause_qwds, question) and ('disease' in types):
question_type = 'disease_cause'
question_types.append(question_type)
# 并发症
if self.check_words(self.acompany_qwds, question) and ('disease' in types):
question_type = 'disease_acompany'
question_types.append(question_type)
# 推荐食品(某种疾病可以吃,不能吃)
if self.check_words(self.food_qwds, question) and 'disease' in types:
deny_status = self.check_words(self.deny_words, question)
if deny_status:
question_type = 'disease_not_food'
else:
question_type = 'disease_do_food'
question_types.append(question_type)
# 已知食物找疾病(哪些人最好(不)吃某种food)
if self.check_words(self.food_qwds + self.cure_qwds, question) and 'food' in types:
deny_status = self.check_words(self.deny_words, question)
if deny_status:
question_type = 'food_not_disease'
else:
question_type = 'food_do_disease'
question_types.append(question_type)
# 推荐药品(啥病要吃啥药)
if self.check_words(self.drug_qwds, question) and 'disease' in types:
question_type = 'disease_drug'
question_types.append(question_type)
# 药品治啥病(啥药可以治啥病)
if self.check_words(self.cure_qwds, question) and 'drug' in types:
question_type = 'drug_disease'
question_types.append(question_type)
# 疾病接受检查项目
if self.check_words(self.check_qwds, question) and 'disease' in types:
question_type = 'disease_check'
question_types.append(question_type)
# 已知检查项目查相应疾病
if self.check_words(self.check_qwds + self.cure_qwds, question) and 'check' in types:
question_type = 'check_disease'
question_types.append(question_type)
# 症状防御
if self.check_words(self.prevent_qwds, question) and 'disease' in types:
question_type = 'disease_prevent'
question_types.append(question_type)
# 疾病医疗周期
if self.check_words(self.lasttime_qwds, question) and 'disease' in types:
question_type = 'disease_lasttime'
question_types.append(question_type)
# 疾病治疗方式
if self.check_words(self.cureway_qwds, question) and 'disease' in types:
question_type = 'disease_cureway'
question_types.append(question_type)
# 疾病治愈可能性
if self.check_words(self.cureprob_qwds, question) and 'disease' in types:
question_type = 'disease_cureprob'
question_types.append(question_type)
# 疾病易感染人群
if self.check_words(self.easyget_qwds, question) and 'disease' in types:
question_type = 'disease_easyget'
question_types.append(question_type)
"""
# 没有查询到问句信息,从上一轮中拉取
if not question_types:
question_types = ls_state['question_types']
"""
# 若没有查到相关的外部查询信息,那么则将该疾病的描述信息返回
if question_types == [] and 'disease' in types:
question_types = ['disease_desc']
# 若没有查到相关的外部查询信息,那么则将该疾病的描述信息返回
if question_types == [] and 'symptom' in types:
question_types = ['symptom_disease']
"""
return question_types
def check_key_words(self, question):
keys = list()
for key, values in self.qwds_dict.items():
for value in values:
if value in question:
keys.append(key)
return keys
def classify(self, question, user_id):
    """
    Main question-classification entry point.

    Merges the entities/keywords found in ``question`` with the previous
    dialogue state loaded from Redis for ``user_id``, derives the question
    types, persists and returns the updated state dict with keys
    'args' (entities), 'key_word_types' and 'question_types'.
    """
    ls_state = self.redis.key_get(self.prefix + user_id)
    cur_state = copy.deepcopy(ls_state)
    # extract entities mentioned in the question
    question_entity_dict = self.check_entity(question)
    # extract keyword types mentioned in the question
    question_key_word_types = self.check_key_words(question)
    # no entity and no keyword: treat as chitchat, do not update state
    if not question_entity_dict and not question_key_word_types:
        return {'args': {}, 'key_word_types': [], 'question_types': ['chitchat']}
    # entities present but no keywords in this turn
    elif not question_key_word_types:
        # pull the previous turn's keyword types if available
        if ls_state['key_word_types']:
            question_key_word_types = ls_state['key_word_types']
        # keyword types missing entirely
        else:
            cur_state['key_word_types'] = []
        cur_state['args'] = question_entity_dict
    # keywords present but no entities in this turn
    elif not question_entity_dict:
        # pull the previous turn's entities if available
        if ls_state['args']:
            question_entity_dict = ls_state['args']
        # entities missing entirely
        else:
            cur_state['args'] = {}
        cur_state['key_word_types'] = question_key_word_types
    else:
        cur_state['args'] = question_entity_dict
        cur_state['key_word_types'] = question_key_word_types
    # collect the entity types involved in the question
    types = []
    for type_ in question_entity_dict.values():
        types.extend(list(type_))
    types = list(set(types))
    # derive the question types for this turn
    cur_state['question_types'] = self.judge_qes(types, question_key_word_types, ls_state)
    # persist the new state
    self.redis.key_insert(self.prefix + user_id, cur_state)
    # TODO: if ls_state == cur_state the user's turn added no new info
    # if ls_state == cur_state:
    #     return {}
    # print(cur_state)
    return cur_state
def build_wdtype_dict(self):
"""构造词对应的类型"""
wd_dict = dict()
for wd in self.region_words:
wd_dict[wd] = []
"""
if wd in self.name_wds:
wd_dict[wd].append('disease')
if wd in self.department_wds:
wd_dict[wd].append('department')
if wd in self.check_wds:
wd_dict[wd].append('check')
if wd in self.drug_wds:
wd_dict[wd].append('drug')
if wd in self.food_wds:
wd_dict[wd].append('food')
if wd in self.symptom_wds:
wd_dict[wd].append('symptom')
if wd in self.producer_wds:
wd_dict[wd].append('producer')
"""
for entity in self.entities:
if wd in self.wds_dict[entity]:
wd_dict[wd].append(entity)
return wd_dict
def build_actree(self, wordlist):
    """Build an Aho-Corasick automaton over ``wordlist`` to speed up
    matching.

    NOTE(review): __init__ builds a Trie instead (the actree call there
    is commented out), so this appears unused; kept for reference.
    """
    actree = ahocorasick.Automaton()
    for index, word in enumerate(wordlist):
        actree.add_word(word, (index, word))
    actree.make_automaton()
    return actree
def check_medical(self, question):
"""问句过滤"""
region_wds = []
for i in self.region_tree.iter(question):
wd = i[1][1]
region_wds.append(wd)
stop_wds = []
for wd1 in region_wds:
for wd2 in region_wds:
if wd1 in wd2 and wd1 != wd2:
stop_wds.append(wd1)
final_wds = [i for i in region_wds if i not in stop_wds]
final_dict = {i: self.wdtype_dict.get(i) for i in final_wds}
return final_dict
def check_entity(self, question):
entity = self.region_tree.find_entity(str(question), longest=True, drop_duplicates=True)
final_dict = {item: self.wdtype_dict.get(item) for item in entity.values()}
return final_dict
if __name__ == '__main__':
    """
    sent:豆仁饭感冒可以吃吗
    res_classify: {'args': {'豆仁饭': ['food'], '感冒': ['disease']},
    'question_types': ['disease_do_food', 'food_do_disease']}
    """
    # Simple interactive loop for manually testing the classifier;
    # the string above shows one example input and its expected output.
    backinfo = BackInfo()
    handler = QuestionClassifier(backinfo.entities, backinfo.qwds_dict, backinfo.question_judge_dict)
    while 1:
        question = input('input an question:')
        data = handler.classify(question, user_id='0000')
        print(data)
| #!/usr/bin/env python3
# coding: utf-8
import os
import ahocorasick
from src.redis_helper import RedisHelper
from src.tireTree import Trie
from src.KeywordProcessor import KeywordProcessor
import copy
from backinfo import BackInfo
class QuestionClassifier:
def __init__(self, entities, qwds_dict, question_judge_dict):
cur_dir = '/'.join(os.path.abspath(__file__).split('/')[:-1])
# redis
self.prefix = 'kg_'
self.redis = RedisHelper()
# 特征词路径
self.entities = entities
self.path_dict = dict()
for entity in entities:
self.path_dict[entity] = os.path.join(cur_dir, 'dict/%s.txt' % entity)
self.path_dict['deny'] = os.path.join(cur_dir, 'dict/deny.txt')
# 加载特征词,根据特征词确定实体
# 目前有疾病、科室、药品、实物、并发症、诊断检查项目、在售药品
self.region_words = []
self.deny_words = [i.strip() for i in open(self.path_dict['deny'], encoding='UTF-8') if i.strip()]
self.wds_dict = dict()
for entity in self.entities:
self.wds_dict[entity] = [i.strip() for i in open(self.path_dict[entity], encoding='UTF-8') if i.strip()]
for words in self.wds_dict.values():
self.region_words = self.region_words + words
self.region_words = set(self.region_words)
# 构建字典树
self.region_tree = Trie()
for word in list(self.region_words):
self.region_tree.add(word)
# self.region_tree = self.build_actree(list(self.region_words))
# 构建词典 词:类型
self.wdtype_dict = self.build_wdtype_dict()
# 问句疑问词
qwds_dict['deny'] = self.deny_words
self.qwds_dict = qwds_dict
# self.qwds_type = list(qwds_dict.keys())
self.question_judge_dict = question_judge_dict
# 构建关键词
self.kp = KeywordProcessor()
print('model init successfully!')
return
def judge_qes(self, entity_types, key_word_types, ls_state):
# TODO 问答类型这一部分可以用flashtext加快查找速度
question_types = []
# question_type = 'others'
# 无实体有问题类型,向用户查询问题类型
if entity_types and not key_word_types:
question_types = ['no_key_word']
# 有实体无问题类型,向用户查询问题类型
elif key_word_types and not entity_types:
question_types = ['no_entity']
else:
for q_type, v in self.question_judge_dict.items():
key_word_list = v[0]
entity_type_list = v[1]
if key_word_list and entity_type_list:
flag = 1
for word in key_word_list:
if word not in key_word_types:
flag = 0
for e_type in entity_type_list:
if e_type not in entity_types:
flag = 0
# print('check entity:',q_type, flag)
if flag:
question_types.append(q_type)
"""
if question_types == []:
for q_type, v in self.question_judge_dict.items():
key_word_list = v[0]
entity_type_list = v[1]
if key_word_list == [] and entity_type_list:
flag = 1
for e_type in entity_type_list:
if e_type not in types:
flag = 0
if flag:
question_types.append(q_type)
"""
"""
# 症状
if self.check_words(self.symptom_qwds, question) and ('disease' in types):
question_type = 'disease_symptom'
question_types.append(question_type)
# 症状可能的疾病
if self.check_words(self.symptom_qwds, question) and ('symptom' in types):
question_type = 'symptom_disease'
question_types.append(question_type)
# 原因
if self.check_words(self.cause_qwds, question) and ('disease' in types):
question_type = 'disease_cause'
question_types.append(question_type)
# 并发症
if self.check_words(self.acompany_qwds, question) and ('disease' in types):
question_type = 'disease_acompany'
question_types.append(question_type)
# 推荐食品(某种疾病可以吃,不能吃)
if self.check_words(self.food_qwds, question) and 'disease' in types:
deny_status = self.check_words(self.deny_words, question)
if deny_status:
question_type = 'disease_not_food'
else:
question_type = 'disease_do_food'
question_types.append(question_type)
# 已知食物找疾病(哪些人最好(不)吃某种food)
if self.check_words(self.food_qwds + self.cure_qwds, question) and 'food' in types:
deny_status = self.check_words(self.deny_words, question)
if deny_status:
question_type = 'food_not_disease'
else:
question_type = 'food_do_disease'
question_types.append(question_type)
# 推荐药品(啥病要吃啥药)
if self.check_words(self.drug_qwds, question) and 'disease' in types:
question_type = 'disease_drug'
question_types.append(question_type)
# 药品治啥病(啥药可以治啥病)
if self.check_words(self.cure_qwds, question) and 'drug' in types:
question_type = 'drug_disease'
question_types.append(question_type)
# 疾病接受检查项目
if self.check_words(self.check_qwds, question) and 'disease' in types:
question_type = 'disease_check'
question_types.append(question_type)
# 已知检查项目查相应疾病
if self.check_words(self.check_qwds + self.cure_qwds, question) and 'check' in types:
question_type = 'check_disease'
question_types.append(question_type)
# 症状防御
if self.check_words(self.prevent_qwds, question) and 'disease' in types:
question_type = 'disease_prevent'
question_types.append(question_type)
# 疾病医疗周期
if self.check_words(self.lasttime_qwds, question) and 'disease' in types:
question_type = 'disease_lasttime'
question_types.append(question_type)
# 疾病治疗方式
if self.check_words(self.cureway_qwds, question) and 'disease' in types:
question_type = 'disease_cureway'
question_types.append(question_type)
# 疾病治愈可能性
if self.check_words(self.cureprob_qwds, question) and 'disease' in types:
question_type = 'disease_cureprob'
question_types.append(question_type)
# 疾病易感染人群
if self.check_words(self.easyget_qwds, question) and 'disease' in types:
question_type = 'disease_easyget'
question_types.append(question_type)
"""
# 没有查询到问句信息,从上一轮中拉取
if not question_types:
question_types = ls_state['question_types']
"""
# 若没有查到相关的外部查询信息,那么则将该疾病的描述信息返回
if question_types == [] and 'disease' in types:
question_types = ['disease_desc']
# 若没有查到相关的外部查询信息,那么则将该疾病的描述信息返回
if question_types == [] and 'symptom' in types:
question_types = ['symptom_disease']
"""
return question_types
def check_key_words(self, question):
keys = list()
for key, values in self.qwds_dict.items():
for value in values:
if value in question:
keys.append(key)
return keys
def classify(self, question, user_id):
"""
问题分类主函数
传入用户问题、redis类、用户id
"""
ls_state = self.redis.key_get(self.prefix + user_id)
cur_state = copy.deepcopy(ls_state)
# 提取问题中的实体
question_entity_dict = self.check_entity(question)
# 提取问题中的关键词类型
question_key_word_types = self.check_key_words(question)
# 若当前句子无实体也无问题类型,判断为chitchat,不更新状态
if not question_entity_dict and not question_key_word_types:
return {'args': {}, 'key_word_types': [], 'question_types': ['chitchat']}
# 若当前句子无关键词有实体
elif not question_key_word_types:
# 拉取上轮关键词类型
if ls_state['key_word_types']:
question_key_word_types = ls_state['key_word_types']
# 关键词缺失
else:
cur_state['key_word_types'] = []
cur_state['args'] = question_entity_dict
# 若当前句子无实体有关键词
elif not question_entity_dict:
# 拉取上轮实体
if ls_state['args']:
question_entity_dict = ls_state['args']
# 实体缺失
else:
cur_state['args'] = {}
cur_state['key_word_types'] = question_key_word_types
else:
cur_state['args'] = question_entity_dict
cur_state['key_word_types'] = question_key_word_types
# 收集问句当中所涉及到的实体类型
types = []
for type_ in question_entity_dict.values():
types.extend(list(type_))
types = list(set(types))
# 更新当前问题类型
cur_state['question_types'] = self.judge_qes(types, question_key_word_types, ls_state)
# 更新状态
self.redis.key_insert(self.prefix + user_id, cur_state)
# TODO 如果ls_state == cur_state默认为用户当前句并没有提及到任何有用的信息
# if ls_state == cur_state:
# return {}
#print(cur_state)
return cur_state
def build_wdtype_dict(self):
"""构造词对应的类型"""
wd_dict = dict()
for wd in self.region_words:
wd_dict[wd] = []
"""
if wd in self.name_wds:
wd_dict[wd].append('disease')
if wd in self.department_wds:
wd_dict[wd].append('department')
if wd in self.check_wds:
wd_dict[wd].append('check')
if wd in self.drug_wds:
wd_dict[wd].append('drug')
if wd in self.food_wds:
wd_dict[wd].append('food')
if wd in self.symptom_wds:
wd_dict[wd].append('symptom')
if wd in self.producer_wds:
wd_dict[wd].append('producer')
"""
for entity in self.entities:
if wd in self.wds_dict[entity]:
wd_dict[wd].append(entity)
return wd_dict
def build_actree(self, wordlist):
"""构造actree,加速过滤"""
actree = ahocorasick.Automaton()
for index, word in enumerate(wordlist):
actree.add_word(word, (index, word))
actree.make_automaton()
return actree
def check_medical(self, question):
"""问句过滤"""
region_wds = []
for i in self.region_tree.iter(question):
wd = i[1][1]
region_wds.append(wd)
stop_wds = []
for wd1 in region_wds:
for wd2 in region_wds:
if wd1 in wd2 and wd1 != wd2:
stop_wds.append(wd1)
final_wds = [i for i in region_wds if i not in stop_wds]
final_dict = {i: self.wdtype_dict.get(i) for i in final_wds}
return final_dict
def check_entity(self, question):
entity = self.region_tree.find_entity(str(question), longest=True, drop_duplicates=True)
final_dict = {item: self.wdtype_dict.get(item) for item in entity.values()}
return final_dict
if __name__ == '__main__':
"""
sent:豆仁饭感冒可以吃吗
res_classify: {'args': {'豆仁饭': ['food'], '感冒': ['disease']},
'question_types': ['disease_do_food', 'food_do_disease']}
"""
backinfo = BackInfo()
handler = QuestionClassifier(backinfo.entities, backinfo.qwds_dict, backinfo.question_judge_dict)
while 1:
question = input('input an question:')
data = handler.classify(question, user_id='0000')
print(data) | en | 0.260549 | #!/usr/bin/env python3 # coding: utf-8 # redis # 特征词路径 # 加载特征词,根据特征词确定实体 # 目前有疾病、科室、药品、实物、并发症、诊断检查项目、在售药品 # 构建字典树 # self.region_tree = self.build_actree(list(self.region_words)) # 构建词典 词:类型 # 问句疑问词 # self.qwds_type = list(qwds_dict.keys()) # 构建关键词 # TODO 问答类型这一部分可以用flashtext加快查找速度 # question_type = 'others' # 无实体有问题类型,向用户查询问题类型 # 有实体无问题类型,向用户查询问题类型 # print('check entity:',q_type, flag) if question_types == []: for q_type, v in self.question_judge_dict.items(): key_word_list = v[0] entity_type_list = v[1] if key_word_list == [] and entity_type_list: flag = 1 for e_type in entity_type_list: if e_type not in types: flag = 0 if flag: question_types.append(q_type) # 症状 if self.check_words(self.symptom_qwds, question) and ('disease' in types): question_type = 'disease_symptom' question_types.append(question_type) # 症状可能的疾病 if self.check_words(self.symptom_qwds, question) and ('symptom' in types): question_type = 'symptom_disease' question_types.append(question_type) # 原因 if self.check_words(self.cause_qwds, question) and ('disease' in types): question_type = 'disease_cause' question_types.append(question_type) # 并发症 if self.check_words(self.acompany_qwds, question) and ('disease' in types): question_type = 'disease_acompany' question_types.append(question_type) # 推荐食品(某种疾病可以吃,不能吃) if self.check_words(self.food_qwds, question) and 'disease' in types: deny_status = self.check_words(self.deny_words, question) if deny_status: question_type = 'disease_not_food' else: question_type = 'disease_do_food' question_types.append(question_type) # 已知食物找疾病(哪些人最好(不)吃某种food) if self.check_words(self.food_qwds + self.cure_qwds, question) and 'food' in types: deny_status = self.check_words(self.deny_words, question) if deny_status: question_type = 'food_not_disease' else: question_type = 'food_do_disease' question_types.append(question_type) # 推荐药品(啥病要吃啥药) if self.check_words(self.drug_qwds, question) and 'disease' in types: question_type = 'disease_drug' 
question_types.append(question_type) # 药品治啥病(啥药可以治啥病) if self.check_words(self.cure_qwds, question) and 'drug' in types: question_type = 'drug_disease' question_types.append(question_type) # 疾病接受检查项目 if self.check_words(self.check_qwds, question) and 'disease' in types: question_type = 'disease_check' question_types.append(question_type) # 已知检查项目查相应疾病 if self.check_words(self.check_qwds + self.cure_qwds, question) and 'check' in types: question_type = 'check_disease' question_types.append(question_type) # 症状防御 if self.check_words(self.prevent_qwds, question) and 'disease' in types: question_type = 'disease_prevent' question_types.append(question_type) # 疾病医疗周期 if self.check_words(self.lasttime_qwds, question) and 'disease' in types: question_type = 'disease_lasttime' question_types.append(question_type) # 疾病治疗方式 if self.check_words(self.cureway_qwds, question) and 'disease' in types: question_type = 'disease_cureway' question_types.append(question_type) # 疾病治愈可能性 if self.check_words(self.cureprob_qwds, question) and 'disease' in types: question_type = 'disease_cureprob' question_types.append(question_type) # 疾病易感染人群 if self.check_words(self.easyget_qwds, question) and 'disease' in types: question_type = 'disease_easyget' question_types.append(question_type) # 没有查询到问句信息,从上一轮中拉取 # 若没有查到相关的外部查询信息,那么则将该疾病的描述信息返回 if question_types == [] and 'disease' in types: question_types = ['disease_desc'] # 若没有查到相关的外部查询信息,那么则将该疾病的描述信息返回 if question_types == [] and 'symptom' in types: question_types = ['symptom_disease'] 问题分类主函数 传入用户问题、redis类、用户id # 提取问题中的实体 # 提取问题中的关键词类型 # 若当前句子无实体也无问题类型,判断为chitchat,不更新状态 # 若当前句子无关键词有实体 # 拉取上轮关键词类型 # 关键词缺失 # 若当前句子无实体有关键词 # 拉取上轮实体 # 实体缺失 # 收集问句当中所涉及到的实体类型 # 更新当前问题类型 # 更新状态 # TODO 如果ls_state == cur_state默认为用户当前句并没有提及到任何有用的信息 # if ls_state == cur_state: # return {} #print(cur_state) 构造词对应的类型 if wd in self.name_wds: wd_dict[wd].append('disease') if wd in self.department_wds: wd_dict[wd].append('department') if wd in self.check_wds: 
wd_dict[wd].append('check') if wd in self.drug_wds: wd_dict[wd].append('drug') if wd in self.food_wds: wd_dict[wd].append('food') if wd in self.symptom_wds: wd_dict[wd].append('symptom') if wd in self.producer_wds: wd_dict[wd].append('producer') 构造actree,加速过滤 问句过滤 sent:豆仁饭感冒可以吃吗 res_classify: {'args': {'豆仁饭': ['food'], '感冒': ['disease']}, 'question_types': ['disease_do_food', 'food_do_disease']} | 2.307956 | 2 |
test/augmentation/test_torchaudio.py | cnheider/lhotse | 0 | 6623837 | import math
import pytest
import torch
torchaudio = pytest.importorskip('torchaudio', minversion='0.6')
from lhotse.augmentation import SoxEffectTransform, pitch, reverb, speed
SAMPLING_RATE = 16000
@pytest.fixture
def audio():
return torch.sin(2 * math.pi * torch.linspace(0, 1, 16000)).unsqueeze(0).numpy()
@pytest.mark.parametrize('effect', [reverb, pitch, speed])
def test_example_augmentation(audio, effect):
augment_fn = SoxEffectTransform(effects=effect(SAMPLING_RATE))
augmented_audio = augment_fn(audio, sampling_rate=SAMPLING_RATE)
assert augmented_audio.shape == audio.shape
assert augmented_audio != audio
def test_speed_does_not_change_num_samples(audio):
augment_fn = SoxEffectTransform(effects=speed(SAMPLING_RATE))
# Since speed() is not deterministic and between 0.9x - 1.1x, multiple invocations
# will yield either slower (more samples) or faster (less samples) signal.
# The truncation/padding is performed inside of SoxEffectTransform so the user should not
# see these changes.
for _ in range(10):
augmented_audio = augment_fn(audio, sampling_rate=SAMPLING_RATE)
assert augmented_audio.shape == audio.shape
assert augmented_audio != audio
| import math
import pytest
import torch
torchaudio = pytest.importorskip('torchaudio', minversion='0.6')
from lhotse.augmentation import SoxEffectTransform, pitch, reverb, speed
SAMPLING_RATE = 16000
@pytest.fixture
def audio():
return torch.sin(2 * math.pi * torch.linspace(0, 1, 16000)).unsqueeze(0).numpy()
@pytest.mark.parametrize('effect', [reverb, pitch, speed])
def test_example_augmentation(audio, effect):
augment_fn = SoxEffectTransform(effects=effect(SAMPLING_RATE))
augmented_audio = augment_fn(audio, sampling_rate=SAMPLING_RATE)
assert augmented_audio.shape == audio.shape
assert augmented_audio != audio
def test_speed_does_not_change_num_samples(audio):
augment_fn = SoxEffectTransform(effects=speed(SAMPLING_RATE))
# Since speed() is not deterministic and between 0.9x - 1.1x, multiple invocations
# will yield either slower (more samples) or faster (less samples) signal.
# The truncation/padding is performed inside of SoxEffectTransform so the user should not
# see these changes.
for _ in range(10):
augmented_audio = augment_fn(audio, sampling_rate=SAMPLING_RATE)
assert augmented_audio.shape == audio.shape
assert augmented_audio != audio
| en | 0.848424 | # Since speed() is not deterministic and between 0.9x - 1.1x, multiple invocations # will yield either slower (more samples) or faster (less samples) signal. # The truncation/padding is performed inside of SoxEffectTransform so the user should not # see these changes. | 2.514395 | 3 |
py-data/salmon/problems/api-related/1/correct-usages/Command.py | ualberta-smr/NFBugs | 3 | 6623838 | <filename>py-data/salmon/problems/api-related/1/correct-usages/Command.py
import json
import subprocess
from optparse import make_option
import yaml
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
from salmon.apps.monitor import models, utils
class Command(BaseCommand):
def pattern(self):
now = datetime.datetime.now()
expiration_date = now - datetime.timedelta(
minutes=settings.EXPIRE_RESULTS)
models.Results.objects.filter(timestamp__lt=expiration_date).delete()
| <filename>py-data/salmon/problems/api-related/1/correct-usages/Command.py
import json
import subprocess
from optparse import make_option
import yaml
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
from salmon.apps.monitor import models, utils
class Command(BaseCommand):
def pattern(self):
now = datetime.datetime.now()
expiration_date = now - datetime.timedelta(
minutes=settings.EXPIRE_RESULTS)
models.Results.objects.filter(timestamp__lt=expiration_date).delete()
| none | 1 | 1.924746 | 2 | |
src/orco_bot/tasks/delete_branch.py | openforceit/oca-github-bot | 1 | 6623839 | # Copyright (c) <NAME>/NV 2018
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
import re
from .. import github
from ..config import switchable
from ..github import gh_call
from ..queue import getLogger, task
_logger = getLogger(__name__)
TEST_BRANCH_REGEX = '^[0-9][0-9]?\.[0-9]-test(ing)?$'
@task()
@switchable()
def delete_branch(org, repo, branch, dry_run=False):
with github.repository(org, repo) as gh_repo:
gh_branch = gh_call(gh_repo.ref, f"heads/{branch}")
regex = re.compile(TEST_BRANCH_REGEX)
if dry_run:
_logger.info(f"DRY-RUN delete branch {branch} in {org}/{repo}")
elif regex.match(branch):
_logger.info(f"{branch} is a test branch. Not deleting it")
else:
_logger.info(f"deleting branch {branch} in {org}/{repo}")
gh_call(gh_branch.delete)
| # Copyright (c) <NAME>/NV 2018
# Distributed under the MIT License (http://opensource.org/licenses/MIT).
import re
from .. import github
from ..config import switchable
from ..github import gh_call
from ..queue import getLogger, task
_logger = getLogger(__name__)
TEST_BRANCH_REGEX = '^[0-9][0-9]?\.[0-9]-test(ing)?$'
@task()
@switchable()
def delete_branch(org, repo, branch, dry_run=False):
with github.repository(org, repo) as gh_repo:
gh_branch = gh_call(gh_repo.ref, f"heads/{branch}")
regex = re.compile(TEST_BRANCH_REGEX)
if dry_run:
_logger.info(f"DRY-RUN delete branch {branch} in {org}/{repo}")
elif regex.match(branch):
_logger.info(f"{branch} is a test branch. Not deleting it")
else:
_logger.info(f"deleting branch {branch} in {org}/{repo}")
gh_call(gh_branch.delete)
| en | 0.710522 | # Copyright (c) <NAME>/NV 2018 # Distributed under the MIT License (http://opensource.org/licenses/MIT). | 2.297283 | 2 |
backend/reinforcepy/agent/q_learning.py | DerekDick/reinforce-py | 1 | 6623840 | # Copyright 2020 <NAME> <<EMAIL>>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Agents using the Q-learning algorithm.
"""
import numpy as np
from .agent import Agent
from reinforcepy.util.random_utils import sample_from_distribution
class QLearningAgent(Agent):
def __init__(self, name="QLearningAgent", alpha=0.1, epsilon=0.1, **kwds):
super().__init__(name=name, **kwds)
self.alpha = alpha
self.epsilon = epsilon
self.reset()
def reset(self):
super().reset()
self.current_state = self.env.starting_index
self.q_2darray = np.zeros((len(self.env.state_space), len(self.env.action_space)), dtype=float) # The q(s, a) can be retrieved by calling self.q_2darray[state_index][action_index]
self.policy_2darray = np.zeros((len(self.env.state_space), len(self.env.action_space)), dtype=float) # The policy \pi(a|s) can be retrieved by calling self.policy_2darray[state_index][action_index]
def new_episode(self):
self.current_step = 0
self.current_state = self.env.starting_index
# Align the environment state
self.env.current_state = self.current_state
def take_action(self):
# Increment the current step
self.current_step += 1
# Get the index of the current state in the state space
current_state_index = self.env.state_space.index(self.current_state)
# Get the actions given state, i.e., A(s)
actions = self.env.actions_given_state(self.current_state)
# Update the policy for the current state from q(current_state, .)
self.__update_policy(current_state_index, actions)
# Sample the action from the latest policy
sampled_action = sample_from_distribution({ action: self.policy_2darray[current_state_index][self.env.action_space.index(action)] for action in actions})
sampled_action_index = self.env.action_space.index(sampled_action)
# Take the action by interacting with the environment and observe the reward and the next state
observation, reward, done, info = self.env.step(sampled_action)
state_to = observation
state_to_index = self.env.state_space.index(state_to)
# Update the q value
old_q = self.q_2darray[current_state_index][sampled_action_index]
new_q = old_q + self.alpha * (reward + self.discount * max([ self.q_2darray[state_to_index][self.env.action_space.index(a)] for a in self.env.actions_given_state(state_to) ]) - old_q)
self.q_2darray[current_state_index][sampled_action_index] = new_q
# # Calculate the two new state values
# newStateValue = sum([grid_data_list[current_state]['policy'][action] * grid_data_list[current_state]['q'][action] for action in self.env.actions_given_state(current_state)])
# Move on to the next state
self.current_state = state_to
return done
def __update_policy(self, current_state_index, actions):
# Get the optimal q value
q_list = []
for action in actions:
action_index = self.env.action_space.index(action)
q_list.append(self.q_2darray[current_state_index][action_index])
optimal_q = max(q_list)
# Count the number of actions with the optimal q value
count = 0
for action in actions:
action_index = self.env.action_space.index(action)
if self.q_2darray[current_state_index][action_index] == optimal_q:
count += 1
# Update the policy distribution
for action in actions:
action_index = self.env.action_space.index(action)
if self.q_2darray[current_state_index][action_index] == optimal_q:
self.policy_2darray[current_state_index][action] = self.epsilon / len(actions) + (1 - self.epsilon) / count
else:
self.policy_2darray[current_state_index][action_index] = self.epsilon / len(actions)
# print(grid_data_list[current_state]['policy'][action])
| # Copyright 2020 <NAME> <<EMAIL>>
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Agents using the Q-learning algorithm.
"""
import numpy as np
from .agent import Agent
from reinforcepy.util.random_utils import sample_from_distribution
class QLearningAgent(Agent):
def __init__(self, name="QLearningAgent", alpha=0.1, epsilon=0.1, **kwds):
super().__init__(name=name, **kwds)
self.alpha = alpha
self.epsilon = epsilon
self.reset()
def reset(self):
super().reset()
self.current_state = self.env.starting_index
self.q_2darray = np.zeros((len(self.env.state_space), len(self.env.action_space)), dtype=float) # The q(s, a) can be retrieved by calling self.q_2darray[state_index][action_index]
self.policy_2darray = np.zeros((len(self.env.state_space), len(self.env.action_space)), dtype=float) # The policy \pi(a|s) can be retrieved by calling self.policy_2darray[state_index][action_index]
def new_episode(self):
self.current_step = 0
self.current_state = self.env.starting_index
# Align the environment state
self.env.current_state = self.current_state
def take_action(self):
# Increment the current step
self.current_step += 1
# Get the index of the current state in the state space
current_state_index = self.env.state_space.index(self.current_state)
# Get the actions given state, i.e., A(s)
actions = self.env.actions_given_state(self.current_state)
# Update the policy for the current state from q(current_state, .)
self.__update_policy(current_state_index, actions)
# Sample the action from the latest policy
sampled_action = sample_from_distribution({ action: self.policy_2darray[current_state_index][self.env.action_space.index(action)] for action in actions})
sampled_action_index = self.env.action_space.index(sampled_action)
# Take the action by interacting with the environment and observe the reward and the next state
observation, reward, done, info = self.env.step(sampled_action)
state_to = observation
state_to_index = self.env.state_space.index(state_to)
# Update the q value
old_q = self.q_2darray[current_state_index][sampled_action_index]
new_q = old_q + self.alpha * (reward + self.discount * max([ self.q_2darray[state_to_index][self.env.action_space.index(a)] for a in self.env.actions_given_state(state_to) ]) - old_q)
self.q_2darray[current_state_index][sampled_action_index] = new_q
# # Calculate the two new state values
# newStateValue = sum([grid_data_list[current_state]['policy'][action] * grid_data_list[current_state]['q'][action] for action in self.env.actions_given_state(current_state)])
# Move on to the next state
self.current_state = state_to
return done
def __update_policy(self, current_state_index, actions):
# Get the optimal q value
q_list = []
for action in actions:
action_index = self.env.action_space.index(action)
q_list.append(self.q_2darray[current_state_index][action_index])
optimal_q = max(q_list)
# Count the number of actions with the optimal q value
count = 0
for action in actions:
action_index = self.env.action_space.index(action)
if self.q_2darray[current_state_index][action_index] == optimal_q:
count += 1
# Update the policy distribution
for action in actions:
action_index = self.env.action_space.index(action)
if self.q_2darray[current_state_index][action_index] == optimal_q:
self.policy_2darray[current_state_index][action] = self.epsilon / len(actions) + (1 - self.epsilon) / count
else:
self.policy_2darray[current_state_index][action_index] = self.epsilon / len(actions)
# print(grid_data_list[current_state]['policy'][action])
| en | 0.781042 | # Copyright 2020 <NAME> <<EMAIL>> # All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Agents using the Q-learning algorithm. # The q(s, a) can be retrieved by calling self.q_2darray[state_index][action_index] # The policy \pi(a|s) can be retrieved by calling self.policy_2darray[state_index][action_index] # Align the environment state # Increment the current step # Get the index of the current state in the state space # Get the actions given state, i.e., A(s) # Update the policy for the current state from q(current_state, .) # Sample the action from the latest policy # Take the action by interacting with the environment and observe the reward and the next state # Update the q value # # Calculate the two new state values # newStateValue = sum([grid_data_list[current_state]['policy'][action] * grid_data_list[current_state]['q'][action] for action in self.env.actions_given_state(current_state)]) # Move on to the next state # Get the optimal q value # Count the number of actions with the optimal q value # Update the policy distribution # print(grid_data_list[current_state]['policy'][action]) | 2.444518 | 2 |
src/package/constants.py | Y-oHr-N/m5-forecasting | 2 | 6623841 | import pathlib
from .utils import *
module_path = pathlib.Path(__file__)
package_dir_path = module_path.parent
src_dir_path = package_dir_path.parent
root_dir_path = src_dir_path.parent
data_dir_path = root_dir_path / "data"
raw_dir_path = data_dir_path / "raw"
calendar_path = raw_dir_path / "calendar.csv"
sales_train_validation_path = raw_dir_path / "sales_train_validation.csv"
sales_train_evaluation_path = raw_dir_path / "sales_train_evaluation.csv"
sample_submission_path = raw_dir_path / "sample_submission.csv"
sell_prices_path = raw_dir_path / "sell_prices.csv"
interim_dir_path = data_dir_path / "interim"
interim_path = interim_dir_path / "interim.parquet"
processed_dir_path = data_dir_path / "processed"
processed_path = processed_dir_path / "processed.parquet"
models_dir_path = root_dir_path / "models"
lgbm_reg_path = models_dir_path / "lgbm_reg.joblib"
prediction_path = models_dir_path / "prediction.parquet"
submission_accuracy_path = models_dir_path / "submission_accuracy.csv.gz"
submission_uncertainty_path = models_dir_path / "submission_uncertainty.csv.gz"
notebooks_dir_path = root_dir_path / "notebooks"
inputs_dir_path = notebooks_dir_path / "inputs"
outputs_dir_path = notebooks_dir_path / "outputs"
train_days = 1913
evaluation_days = 28
train_start_date = "2011-01-29"
train_end_date = "2016-04-24"
validation_start_date = "2016-04-25"
validation_end_date = "2016-05-22"
evaluation_start_date = "2016-05-23"
evaluation_end_date = "2016-06-19"
events = [
# {
# "event_name": "ChineseNewYear",
# "event_type": "Religious",
# "dates": [
# "2011-02-03",
# "2012-01-23",
# "2013-02-10",
# "2014-01-31",
# "2015-02-19",
# "2016-02-08",
# ],
# },
# {
# "event_name": "NBAFinals",
# "event_type": "Sporting",
# "dates": [
# "2011-05-31",
# "2011-06-02",
# "2011-06-05",
# "2011-06-07",
# "2011-06-09",
# "2011-06-12",
# "2012-06-12",
# "2012-06-14",
# "2012-06-17",
# "2012-06-19",
# "2012-06-21",
# "2013-06-06",
# "2013-06-09",
# "2013-06-11",
# "2013-06-13",
# "2013-06-16",
# "2013-06-18",
# "2013-06-20",
# "2014-06-05",
# "2014-06-08",
# "2014-06-10",
# "2014-06-12",
# "2014-06-15",
# "2015-06-04",
# "2015-06-07",
# "2015-06-09",
# "2015-06-11",
# "2015-06-14",
# "2015-06-16",
# "2016-06-02",
# "2016-06-05",
# "2016-06-08",
# "2016-06-10",
# "2016-06-13",
# "2016-06-16",
# "2016-06-19",
# ],
# },
# {
# "event_name": "OrthodoxPentecost",
# "event_type": "Religious",
# "dates": [
# "2011-06-12",
# "2012-06-03",
# "2013-06-23",
# "2014-06-08",
# "2015-05-31",
# "2016-06-19",
# ],
# },
# {
# "event_name": "Pentecost",
# "event_type": "Cultural",
# "dates": [
# "2011-06-12",
# "2012-05-27",
# "2013-05-19",
# "2014-06-08",
# "2015-05-24",
# "2016-05-15",
# ],
# },
# {
# "event_name": "PesachStart",
# "event_type": "Religious",
# "dates": [
# "2011-04-18",
# "2012-04-06",
# "2013-03-25",
# "2014-04-14",
# "2015-04-03",
# "2016-04-22",
# ],
# },
# {
# "event_name": "RamadanEnd",
# "event_type": "Religious",
# "dates": [
# "2011-08-29",
# "2012-08-18",
# "2013-08-07",
# "2014-07-27",
# "2015-07-16",
# "2016-07-05",
# ],
# },
]
dtype = {
"wm_yr_wk": "int16",
"year": "int16",
"month": "int8",
"wday": "int8",
"event_name_1": "category",
"event_name_2": "category",
"event_type_1": "category",
"event_type_2": "category",
"snap_CA": "bool",
"snap_TX": "bool",
"snap_WI": "bool",
"state_id": "category",
"store_id": "category",
"cat_id": "category",
"dept_id": "category",
"item_id": "category",
"sell_price": "float16",
}
for i in range(1, train_days + 1):
dtype[f"d_{i}"] = "int16"
parse_dates = ["date"]
level_ids = [
["all_id"],
["state_id"],
["store_id"],
["cat_id"],
["dept_id"],
["state_id", "cat_id"],
["state_id", "dept_id"],
["store_id", "cat_id"],
["store_id", "dept_id"],
["item_id"],
["item_id", "state_id"],
["item_id", "store_id"],
]
level_targets = [f"level_{i + 1}_sales" for i in range(12)]
target = level_targets[-1]
transformed_target = "revenue"
attrs = [
"year",
"dayofyear",
"weekofyear",
"month",
"quarter",
"day",
"weekofmonth",
"weekday",
]
agg_funcs = {
# "min": "min",
# "max": "max",
"mean": "mean",
"std": "std",
# "nunique": "nunique",
}
agg_funcs_for_ewm = {
"mean": "mean",
"std": "std",
}
agg_funcs_for_expanding = {
"min": "min",
"max": "max",
"mean": "mean",
"std": "std",
}
agg_funcs_for_rolling = {
# "min": "min",
# "max": "max",
"mean": "mean",
"std": "std",
}
periods_batch = [28]
periods_online = [7]
periods = periods_online + periods_batch
windows = [7, 14, 28, 56]
prediction_step = min(periods_online)
max_lags = max(periods_online) + max(windows) - 1
aggregate_feature_name_format = "groupby_{}_{}_{}".format
calendar_feature_name_format = "{}_{}".format
count_up_until_nonzero_feature_format = "{}_count_up_until_nonzero".format
diff_feature_name_format = "{}_diff_{}".format
expanding_feature_name_format = "groupby_{}_{}_expanding_{}".format
ewm_feature_name_format = "groupby_{}_{}_ewm_{}_{}".format
pct_change_feature_name_format = "{}_pct_change_{}".format
scaled_feature_name_format = "groupby_{}_scaled_{}".format
shift_feature_name_format = "{}_shift_{}".format
rolling_feature_name_format = "groupby_{}_{}_rolling_{}_{}".format
binary_features = [
"snap",
"is_working_day",
]
categorical_features = [
"state_id",
"store_id",
"cat_id",
"dept_id",
"item_id",
"event_name_1",
"event_name_2",
"event_type_1",
"event_type_2",
]
raw_numerical_features = ["sell_price"]
aggregate_features = [
aggregate_feature_name_format(to_str(by_col), raw_numerical_feature, agg_func_name)
for by_col in level_ids[1:11]
for raw_numerical_feature in raw_numerical_features
for agg_func_name in agg_funcs
]
calendar_features = [f"{col}_{attr}" for col in parse_dates for attr in attrs]
expanding_features = [
expanding_feature_name_format(to_str(by_col), raw_numerical_feature, agg_func_name)
for by_col in level_ids[11:]
for raw_numerical_feature in raw_numerical_features
for agg_func_name in agg_funcs_for_expanding
]
pct_change_features = [
pct_change_feature_name_format(raw_numerical_feature, i)
for raw_numerical_feature in raw_numerical_features
for i in periods
]
scaled_features = [
scaled_feature_name_format(to_str(by_col), raw_numerical_feature)
for by_col in level_ids[11:]
for raw_numerical_feature in raw_numerical_features
]
shift_features_batch = [
shift_feature_name_format(level_target, i)
for level_target in level_targets[9:]
for i in periods_batch
]
shift_features_online = [
shift_feature_name_format(level_target, i)
for level_target in level_targets[9:]
for i in periods_online
]
shift_features = shift_features_online + shift_features_batch
count_up_until_nonzero_features = [
count_up_until_nonzero_feature_format(shift_feature)
for shift_feature in shift_features_batch
]
rolling_features = [
rolling_feature_name_format(to_str(by_col), shift_feature, j, agg_func_name)
for by_col in level_ids[11:]
for shift_feature in shift_features
for j in windows
for agg_func_name in agg_funcs_for_rolling
]
numerical_features = (
["days_since_release", "moon_phase", "sell_price_ending"]
+ raw_numerical_features
+ aggregate_features
+ calendar_features
+ count_up_until_nonzero_features
+ expanding_features
+ pct_change_features
+ rolling_features
+ scaled_features
+ shift_features
)
features = binary_features + categorical_features + numerical_features
random_state = 1
lgb_params = {
"bagging_fraction": 0.8,
"bagging_freq": 1,
"feature_fraction": 0.8,
"force_row_wise": True,
"lambda_l2": 0.001,
"learning_rate": 0.03,
"metric": "None",
"min_data_in_leaf": 1_500,
"n_jobs": -1,
"num_leaves": 512,
"objective": "tweedie",
"seed": random_state,
"tweedie_variance_power": 1.2,
}
| import pathlib
from .utils import *
module_path = pathlib.Path(__file__)
package_dir_path = module_path.parent
src_dir_path = package_dir_path.parent
root_dir_path = src_dir_path.parent
data_dir_path = root_dir_path / "data"
raw_dir_path = data_dir_path / "raw"
calendar_path = raw_dir_path / "calendar.csv"
sales_train_validation_path = raw_dir_path / "sales_train_validation.csv"
sales_train_evaluation_path = raw_dir_path / "sales_train_evaluation.csv"
sample_submission_path = raw_dir_path / "sample_submission.csv"
sell_prices_path = raw_dir_path / "sell_prices.csv"
interim_dir_path = data_dir_path / "interim"
interim_path = interim_dir_path / "interim.parquet"
processed_dir_path = data_dir_path / "processed"
processed_path = processed_dir_path / "processed.parquet"
models_dir_path = root_dir_path / "models"
lgbm_reg_path = models_dir_path / "lgbm_reg.joblib"
prediction_path = models_dir_path / "prediction.parquet"
submission_accuracy_path = models_dir_path / "submission_accuracy.csv.gz"
submission_uncertainty_path = models_dir_path / "submission_uncertainty.csv.gz"
notebooks_dir_path = root_dir_path / "notebooks"
inputs_dir_path = notebooks_dir_path / "inputs"
outputs_dir_path = notebooks_dir_path / "outputs"
train_days = 1913
evaluation_days = 28
train_start_date = "2011-01-29"
train_end_date = "2016-04-24"
validation_start_date = "2016-04-25"
validation_end_date = "2016-05-22"
evaluation_start_date = "2016-05-23"
evaluation_end_date = "2016-06-19"
events = [
# {
# "event_name": "ChineseNewYear",
# "event_type": "Religious",
# "dates": [
# "2011-02-03",
# "2012-01-23",
# "2013-02-10",
# "2014-01-31",
# "2015-02-19",
# "2016-02-08",
# ],
# },
# {
# "event_name": "NBAFinals",
# "event_type": "Sporting",
# "dates": [
# "2011-05-31",
# "2011-06-02",
# "2011-06-05",
# "2011-06-07",
# "2011-06-09",
# "2011-06-12",
# "2012-06-12",
# "2012-06-14",
# "2012-06-17",
# "2012-06-19",
# "2012-06-21",
# "2013-06-06",
# "2013-06-09",
# "2013-06-11",
# "2013-06-13",
# "2013-06-16",
# "2013-06-18",
# "2013-06-20",
# "2014-06-05",
# "2014-06-08",
# "2014-06-10",
# "2014-06-12",
# "2014-06-15",
# "2015-06-04",
# "2015-06-07",
# "2015-06-09",
# "2015-06-11",
# "2015-06-14",
# "2015-06-16",
# "2016-06-02",
# "2016-06-05",
# "2016-06-08",
# "2016-06-10",
# "2016-06-13",
# "2016-06-16",
# "2016-06-19",
# ],
# },
# {
# "event_name": "OrthodoxPentecost",
# "event_type": "Religious",
# "dates": [
# "2011-06-12",
# "2012-06-03",
# "2013-06-23",
# "2014-06-08",
# "2015-05-31",
# "2016-06-19",
# ],
# },
# {
# "event_name": "Pentecost",
# "event_type": "Cultural",
# "dates": [
# "2011-06-12",
# "2012-05-27",
# "2013-05-19",
# "2014-06-08",
# "2015-05-24",
# "2016-05-15",
# ],
# },
# {
# "event_name": "PesachStart",
# "event_type": "Religious",
# "dates": [
# "2011-04-18",
# "2012-04-06",
# "2013-03-25",
# "2014-04-14",
# "2015-04-03",
# "2016-04-22",
# ],
# },
# {
# "event_name": "RamadanEnd",
# "event_type": "Religious",
# "dates": [
# "2011-08-29",
# "2012-08-18",
# "2013-08-07",
# "2014-07-27",
# "2015-07-16",
# "2016-07-05",
# ],
# },
]
dtype = {
"wm_yr_wk": "int16",
"year": "int16",
"month": "int8",
"wday": "int8",
"event_name_1": "category",
"event_name_2": "category",
"event_type_1": "category",
"event_type_2": "category",
"snap_CA": "bool",
"snap_TX": "bool",
"snap_WI": "bool",
"state_id": "category",
"store_id": "category",
"cat_id": "category",
"dept_id": "category",
"item_id": "category",
"sell_price": "float16",
}
for i in range(1, train_days + 1):
dtype[f"d_{i}"] = "int16"
parse_dates = ["date"]
level_ids = [
["all_id"],
["state_id"],
["store_id"],
["cat_id"],
["dept_id"],
["state_id", "cat_id"],
["state_id", "dept_id"],
["store_id", "cat_id"],
["store_id", "dept_id"],
["item_id"],
["item_id", "state_id"],
["item_id", "store_id"],
]
level_targets = [f"level_{i + 1}_sales" for i in range(12)]
target = level_targets[-1]
transformed_target = "revenue"
attrs = [
"year",
"dayofyear",
"weekofyear",
"month",
"quarter",
"day",
"weekofmonth",
"weekday",
]
agg_funcs = {
# "min": "min",
# "max": "max",
"mean": "mean",
"std": "std",
# "nunique": "nunique",
}
agg_funcs_for_ewm = {
"mean": "mean",
"std": "std",
}
agg_funcs_for_expanding = {
"min": "min",
"max": "max",
"mean": "mean",
"std": "std",
}
agg_funcs_for_rolling = {
# "min": "min",
# "max": "max",
"mean": "mean",
"std": "std",
}
periods_batch = [28]
periods_online = [7]
periods = periods_online + periods_batch
windows = [7, 14, 28, 56]
prediction_step = min(periods_online)
max_lags = max(periods_online) + max(windows) - 1
aggregate_feature_name_format = "groupby_{}_{}_{}".format
calendar_feature_name_format = "{}_{}".format
count_up_until_nonzero_feature_format = "{}_count_up_until_nonzero".format
diff_feature_name_format = "{}_diff_{}".format
expanding_feature_name_format = "groupby_{}_{}_expanding_{}".format
ewm_feature_name_format = "groupby_{}_{}_ewm_{}_{}".format
pct_change_feature_name_format = "{}_pct_change_{}".format
scaled_feature_name_format = "groupby_{}_scaled_{}".format
shift_feature_name_format = "{}_shift_{}".format
rolling_feature_name_format = "groupby_{}_{}_rolling_{}_{}".format
binary_features = [
"snap",
"is_working_day",
]
categorical_features = [
"state_id",
"store_id",
"cat_id",
"dept_id",
"item_id",
"event_name_1",
"event_name_2",
"event_type_1",
"event_type_2",
]
raw_numerical_features = ["sell_price"]
aggregate_features = [
aggregate_feature_name_format(to_str(by_col), raw_numerical_feature, agg_func_name)
for by_col in level_ids[1:11]
for raw_numerical_feature in raw_numerical_features
for agg_func_name in agg_funcs
]
calendar_features = [f"{col}_{attr}" for col in parse_dates for attr in attrs]
expanding_features = [
expanding_feature_name_format(to_str(by_col), raw_numerical_feature, agg_func_name)
for by_col in level_ids[11:]
for raw_numerical_feature in raw_numerical_features
for agg_func_name in agg_funcs_for_expanding
]
pct_change_features = [
pct_change_feature_name_format(raw_numerical_feature, i)
for raw_numerical_feature in raw_numerical_features
for i in periods
]
scaled_features = [
scaled_feature_name_format(to_str(by_col), raw_numerical_feature)
for by_col in level_ids[11:]
for raw_numerical_feature in raw_numerical_features
]
shift_features_batch = [
shift_feature_name_format(level_target, i)
for level_target in level_targets[9:]
for i in periods_batch
]
shift_features_online = [
shift_feature_name_format(level_target, i)
for level_target in level_targets[9:]
for i in periods_online
]
shift_features = shift_features_online + shift_features_batch
count_up_until_nonzero_features = [
count_up_until_nonzero_feature_format(shift_feature)
for shift_feature in shift_features_batch
]
rolling_features = [
rolling_feature_name_format(to_str(by_col), shift_feature, j, agg_func_name)
for by_col in level_ids[11:]
for shift_feature in shift_features
for j in windows
for agg_func_name in agg_funcs_for_rolling
]
numerical_features = (
["days_since_release", "moon_phase", "sell_price_ending"]
+ raw_numerical_features
+ aggregate_features
+ calendar_features
+ count_up_until_nonzero_features
+ expanding_features
+ pct_change_features
+ rolling_features
+ scaled_features
+ shift_features
)
features = binary_features + categorical_features + numerical_features
random_state = 1
lgb_params = {
"bagging_fraction": 0.8,
"bagging_freq": 1,
"feature_fraction": 0.8,
"force_row_wise": True,
"lambda_l2": 0.001,
"learning_rate": 0.03,
"metric": "None",
"min_data_in_leaf": 1_500,
"n_jobs": -1,
"num_leaves": 512,
"objective": "tweedie",
"seed": random_state,
"tweedie_variance_power": 1.2,
}
| ko | 0.269876 | # { # "event_name": "ChineseNewYear", # "event_type": "Religious", # "dates": [ # "2011-02-03", # "2012-01-23", # "2013-02-10", # "2014-01-31", # "2015-02-19", # "2016-02-08", # ], # }, # { # "event_name": "NBAFinals", # "event_type": "Sporting", # "dates": [ # "2011-05-31", # "2011-06-02", # "2011-06-05", # "2011-06-07", # "2011-06-09", # "2011-06-12", # "2012-06-12", # "2012-06-14", # "2012-06-17", # "2012-06-19", # "2012-06-21", # "2013-06-06", # "2013-06-09", # "2013-06-11", # "2013-06-13", # "2013-06-16", # "2013-06-18", # "2013-06-20", # "2014-06-05", # "2014-06-08", # "2014-06-10", # "2014-06-12", # "2014-06-15", # "2015-06-04", # "2015-06-07", # "2015-06-09", # "2015-06-11", # "2015-06-14", # "2015-06-16", # "2016-06-02", # "2016-06-05", # "2016-06-08", # "2016-06-10", # "2016-06-13", # "2016-06-16", # "2016-06-19", # ], # }, # { # "event_name": "OrthodoxPentecost", # "event_type": "Religious", # "dates": [ # "2011-06-12", # "2012-06-03", # "2013-06-23", # "2014-06-08", # "2015-05-31", # "2016-06-19", # ], # }, # { # "event_name": "Pentecost", # "event_type": "Cultural", # "dates": [ # "2011-06-12", # "2012-05-27", # "2013-05-19", # "2014-06-08", # "2015-05-24", # "2016-05-15", # ], # }, # { # "event_name": "PesachStart", # "event_type": "Religious", # "dates": [ # "2011-04-18", # "2012-04-06", # "2013-03-25", # "2014-04-14", # "2015-04-03", # "2016-04-22", # ], # }, # { # "event_name": "RamadanEnd", # "event_type": "Religious", # "dates": [ # "2011-08-29", # "2012-08-18", # "2013-08-07", # "2014-07-27", # "2015-07-16", # "2016-07-05", # ], # }, # "min": "min", # "max": "max", # "nunique": "nunique", # "min": "min", # "max": "max", | 1.85367 | 2 |
jinja2_loader.py | jecki/SchnelleSeite | 1 | 6623842 | """jinja2_loader.py -- loader for jinja2 templates
Copyright 2015 by <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import jinja2
import markdown
import sitetree
##############################################################################
#
# jinja2 environment filters
#
##############################################################################
def jinja2_current_date():
"""Returns the current date as YYYY-MM-DD."""
return time.strftime('%Y-%m-%d')
@jinja2.pass_environment
def jinja2_translate(env, expression):
"""Translates expression within the given jinja2 environment.
This requires that the variables 'local', 'language' and 'root' are
defined in the jinja2 environment.
"""
return sitetree.translate(expression, env.globals)
@jinja2.pass_environment
def jinja2_targetpage(env, target):
"""Returns the page basename (without ".html") of a link target.
E.g. "authors.html#Shakespeare" yields "authors"
"""
return (target.split("#")[0]).split(".")[0]
@jinja2.pass_environment
def jinja2_linktarget(env, target):
"""Makes sure that target is a proper link target."""
parts = target.split("#")
if parts[0] and not parts[0].endswith(".html"):
parts[0] += ".html"
return "#".join(parts)
@jinja2.pass_environment
def jinja2_getcontent(env, datasource):
"""Returns the content of a data source."""
return sitetree.getentry(env.globals['local'], datasource,
env.globals['language'])['content']
@jinja2.pass_environment
def jinja2_getmetadata(env, datasource, key):
"""Returns a particular item from the metadata of an entry."""
return sitetree.getentry(env.globals['local'], datasource,
env.globals['language'])['metadata'][key]
@jinja2.pass_environment
def jinja2_getitem(env, datasource, key):
"""Returns a paritcular item from a data source that is a dictionary."""
return sitetree.getitem(key, env.globals['local'], datasource,
env.globals['language'])
@jinja2.pass_environment
def jinja2_fragments(env, directory, orderby=None):
"""Returns a list of pathnames pathnames (starting from directory) of all
fragments in a directory.
Parameters:
directory(string): The directory from which the fragments shall be taken.
orderby(string): A metadata parameter which determines the order of
the fragments. Instead of supplying a function for this
parameter it may also be set in the metadata of the template
or in the "__config" file of the fragments directory. The orderby
parameter in the template metadata (if present) overrides the same
parameter in the fragment's directories' "__config" file. The
orderby argument passed to this function overrides all both.
"""
folder = env.globals['local'][directory]
order = orderby or env.globals.get('orderby') or \
env.globals['local'][directory].get('orderby')
return sitetree.collect_fragments(folder, directory, order)
@jinja2.pass_environment
def jinja2_multicast_pagename(env, subpage):
"""Returns the basename of the output page on which a particular subpage
appears.
"""
return env.globals['MC_PAGENAMES'][subpage]
def other_lang_URL(folder, basename, lang):
"""Returns a relative link from the file 'basename' in 'folder' to the
the same file in the language version 'lang'.
"""
path = []
while folder.parent:
path.append(folder.metadata['foldername'])
folder = folder.parent
path.append(lang)
path.extend(['..'] * len(path))
path.reverse()
path.append(basename + ".html")
return "/".join(path)
@jinja2.pass_environment
def jinja2_other_lang_URL(env, lang):
"""Returns the URL to a different language version of the current page.
"""
return other_lang_URL(env.globals['local'], env.globals['basename'], lang)
@jinja2.pass_environment
def jinja2_markdownify(env, text):
"""Runs 'text' through a markdown processor and returns the resultant
html.
"""
return markdown.markdown(text)
@jinja2.pass_environment
def jinja2_filepath_basename(env, filepath):
"""Returns the base name, i.e. the filename w/o path and extension, of
'filepath'. Note the semantics of this filter differ from
python's os.path.basename!.
"""
return os.path.splitext(os.path.basename(filepath))[0]
@jinja2.pass_environment
def jinja2_filepath_ext(env, filename):
"""Returns the extension of filename.
"""
return os.path.splitext(filename)[1]
@jinja2.pass_environment
def jinja2_split(env, s, ch):
"""Splits string 's' with character 'ch' as delimiter into a list of parts.
"""
return s.split(ch)
@jinja2.pass_environment
def jinja2_lower(env, s):
"""Converts string `s` to lowercase letters.
"""
return s.lower()
@jinja2.pass_environment
def jinja2_upper(env, s):
"""Converts string `s` to lowercase letters.
"""
return s.upper()
##############################################################################
#
# jinja2 loader
#
##############################################################################
class CustomJinja2Loader(jinja2.FileSystemLoader):
"""A custom jinja2 loader that returns the page templates and reads
further templates from the disk if requested.
Attributes:
data(string): The page template
"""
def __init__(self, data, template_paths):
paths = ["./"]
if template_paths:
paths.extend(template_paths)
jinja2.FileSystemLoader.__init__(self, paths)
self.data = data
def get_source(self, environment, template):
if template:
return jinja2.FileSystemLoader.get_source(self, environment,
template)
else:
return (self.data, "", lambda: True)
def jinja2_loader(text, metadata):
"""A loader for jinja2 templates.
"""
templ_paths = ""
if "config" in metadata and "template_paths" in metadata["config"]:
templ_paths = metadata["config"]["template_paths"]
env = jinja2.Environment(loader=CustomJinja2Loader(text, templ_paths))
env.globals.update(metadata)
# TODO: catch errors because of use of reserved keywords
env.globals['current_date'] = jinja2_current_date
env.filters['CONTENT'] = jinja2_getcontent
env.filters['DATA'] = jinja2_getitem
env.filters['MD'] = jinja2_getmetadata
env.filters['FRAGMENTS'] = jinja2_fragments
env.filters['MC_PAGENAME'] = jinja2_multicast_pagename
env.filters['PAGE_URL'] = jinja2_other_lang_URL
env.filters['TR'] = jinja2_translate
env.filters['LINK_TARGET'] = jinja2_linktarget
env.filters['TARGET_PAGE'] = jinja2_targetpage
env.filters['MARKDOWNIFY'] = jinja2_markdownify
env.filters['SPLIT'] = jinja2_split
env.filters['LOWER'] = jinja2_lower
env.filters['UPPER'] = jinja2_upper
env.filters['basename'] = jinja2_filepath_basename
env.filters['ext'] = jinja2_filepath_ext
templ = env.get_template("")
try:
result = templ.render() # tmpl.render(metadata)
except jinja2.exceptions.TemplateNotFound:
# TEST CODE to be removed...
print(os.getcwd())
print(os.path.abspath(os.getcwd()))
assert False
return result
| """jinja2_loader.py -- loader for jinja2 templates
Copyright 2015 by <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import jinja2
import markdown
import sitetree
##############################################################################
#
# jinja2 environment filters
#
##############################################################################
def jinja2_current_date():
"""Returns the current date as YYYY-MM-DD."""
return time.strftime('%Y-%m-%d')
@jinja2.pass_environment
def jinja2_translate(env, expression):
"""Translates expression within the given jinja2 environment.
This requires that the variables 'local', 'language' and 'root' are
defined in the jinja2 environment.
"""
return sitetree.translate(expression, env.globals)
@jinja2.pass_environment
def jinja2_targetpage(env, target):
"""Returns the page basename (without ".html") of a link target.
E.g. "authors.html#Shakespeare" yields "authors"
"""
return (target.split("#")[0]).split(".")[0]
@jinja2.pass_environment
def jinja2_linktarget(env, target):
"""Makes sure that target is a proper link target."""
parts = target.split("#")
if parts[0] and not parts[0].endswith(".html"):
parts[0] += ".html"
return "#".join(parts)
@jinja2.pass_environment
def jinja2_getcontent(env, datasource):
"""Returns the content of a data source."""
return sitetree.getentry(env.globals['local'], datasource,
env.globals['language'])['content']
@jinja2.pass_environment
def jinja2_getmetadata(env, datasource, key):
"""Returns a particular item from the metadata of an entry."""
return sitetree.getentry(env.globals['local'], datasource,
env.globals['language'])['metadata'][key]
@jinja2.pass_environment
def jinja2_getitem(env, datasource, key):
"""Returns a paritcular item from a data source that is a dictionary."""
return sitetree.getitem(key, env.globals['local'], datasource,
env.globals['language'])
@jinja2.pass_environment
def jinja2_fragments(env, directory, orderby=None):
"""Returns a list of pathnames pathnames (starting from directory) of all
fragments in a directory.
Parameters:
directory(string): The directory from which the fragments shall be taken.
orderby(string): A metadata parameter which determines the order of
the fragments. Instead of supplying a function for this
parameter it may also be set in the metadata of the template
or in the "__config" file of the fragments directory. The orderby
parameter in the template metadata (if present) overrides the same
parameter in the fragment's directories' "__config" file. The
orderby argument passed to this function overrides all both.
"""
folder = env.globals['local'][directory]
order = orderby or env.globals.get('orderby') or \
env.globals['local'][directory].get('orderby')
return sitetree.collect_fragments(folder, directory, order)
@jinja2.pass_environment
def jinja2_multicast_pagename(env, subpage):
"""Returns the basename of the output page on which a particular subpage
appears.
"""
return env.globals['MC_PAGENAMES'][subpage]
def other_lang_URL(folder, basename, lang):
"""Returns a relative link from the file 'basename' in 'folder' to the
the same file in the language version 'lang'.
"""
path = []
while folder.parent:
path.append(folder.metadata['foldername'])
folder = folder.parent
path.append(lang)
path.extend(['..'] * len(path))
path.reverse()
path.append(basename + ".html")
return "/".join(path)
@jinja2.pass_environment
def jinja2_other_lang_URL(env, lang):
"""Returns the URL to a different language version of the current page.
"""
return other_lang_URL(env.globals['local'], env.globals['basename'], lang)
@jinja2.pass_environment
def jinja2_markdownify(env, text):
"""Runs 'text' through a markdown processor and returns the resultant
html.
"""
return markdown.markdown(text)
@jinja2.pass_environment
def jinja2_filepath_basename(env, filepath):
"""Returns the base name, i.e. the filename w/o path and extension, of
'filepath'. Note the semantics of this filter differ from
python's os.path.basename!.
"""
return os.path.splitext(os.path.basename(filepath))[0]
@jinja2.pass_environment
def jinja2_filepath_ext(env, filename):
"""Returns the extension of filename.
"""
return os.path.splitext(filename)[1]
@jinja2.pass_environment
def jinja2_split(env, s, ch):
"""Splits string 's' with character 'ch' as delimiter into a list of parts.
"""
return s.split(ch)
@jinja2.pass_environment
def jinja2_lower(env, s):
"""Converts string `s` to lowercase letters.
"""
return s.lower()
@jinja2.pass_environment
def jinja2_upper(env, s):
"""Converts string `s` to lowercase letters.
"""
return s.upper()
##############################################################################
#
# jinja2 loader
#
##############################################################################
class CustomJinja2Loader(jinja2.FileSystemLoader):
"""A custom jinja2 loader that returns the page templates and reads
further templates from the disk if requested.
Attributes:
data(string): The page template
"""
def __init__(self, data, template_paths):
paths = ["./"]
if template_paths:
paths.extend(template_paths)
jinja2.FileSystemLoader.__init__(self, paths)
self.data = data
def get_source(self, environment, template):
if template:
return jinja2.FileSystemLoader.get_source(self, environment,
template)
else:
return (self.data, "", lambda: True)
def jinja2_loader(text, metadata):
"""A loader for jinja2 templates.
"""
templ_paths = ""
if "config" in metadata and "template_paths" in metadata["config"]:
templ_paths = metadata["config"]["template_paths"]
env = jinja2.Environment(loader=CustomJinja2Loader(text, templ_paths))
env.globals.update(metadata)
# TODO: catch errors because of use of reserved keywords
env.globals['current_date'] = jinja2_current_date
env.filters['CONTENT'] = jinja2_getcontent
env.filters['DATA'] = jinja2_getitem
env.filters['MD'] = jinja2_getmetadata
env.filters['FRAGMENTS'] = jinja2_fragments
env.filters['MC_PAGENAME'] = jinja2_multicast_pagename
env.filters['PAGE_URL'] = jinja2_other_lang_URL
env.filters['TR'] = jinja2_translate
env.filters['LINK_TARGET'] = jinja2_linktarget
env.filters['TARGET_PAGE'] = jinja2_targetpage
env.filters['MARKDOWNIFY'] = jinja2_markdownify
env.filters['SPLIT'] = jinja2_split
env.filters['LOWER'] = jinja2_lower
env.filters['UPPER'] = jinja2_upper
env.filters['basename'] = jinja2_filepath_basename
env.filters['ext'] = jinja2_filepath_ext
templ = env.get_template("")
try:
result = templ.render() # tmpl.render(metadata)
except jinja2.exceptions.TemplateNotFound:
# TEST CODE to be removed...
print(os.getcwd())
print(os.path.abspath(os.getcwd()))
assert False
return result
| en | 0.610142 | jinja2_loader.py -- loader for jinja2 templates Copyright 2015 by <NAME> Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. ############################################################################## # # jinja2 environment filters # ############################################################################## Returns the current date as YYYY-MM-DD. Translates expression within the given jinja2 environment. This requires that the variables 'local', 'language' and 'root' are defined in the jinja2 environment. Returns the page basename (without ".html") of a link target. E.g. "authors.html#Shakespeare" yields "authors" Makes sure that target is a proper link target. Returns the content of a data source. Returns a particular item from the metadata of an entry. Returns a paritcular item from a data source that is a dictionary. Returns a list of pathnames pathnames (starting from directory) of all fragments in a directory. Parameters: directory(string): The directory from which the fragments shall be taken. orderby(string): A metadata parameter which determines the order of the fragments. Instead of supplying a function for this parameter it may also be set in the metadata of the template or in the "__config" file of the fragments directory. The orderby parameter in the template metadata (if present) overrides the same parameter in the fragment's directories' "__config" file. The orderby argument passed to this function overrides all both. 
Returns the basename of the output page on which a particular subpage appears. Returns a relative link from the file 'basename' in 'folder' to the the same file in the language version 'lang'. Returns the URL to a different language version of the current page. Runs 'text' through a markdown processor and returns the resultant html. Returns the base name, i.e. the filename w/o path and extension, of 'filepath'. Note the semantics of this filter differ from python's os.path.basename!. Returns the extension of filename. Splits string 's' with character 'ch' as delimiter into a list of parts. Converts string `s` to lowercase letters. Converts string `s` to lowercase letters. ############################################################################## # # jinja2 loader # ############################################################################## A custom jinja2 loader that returns the page templates and reads further templates from the disk if requested. Attributes: data(string): The page template A loader for jinja2 templates. # TODO: catch errors because of use of reserved keywords # tmpl.render(metadata) # TEST CODE to be removed... | 2.498237 | 2 |
petitions/profanity.py | sosumi/IowaIdeas | 15 | 6623843 | <filename>petitions/profanity.py<gh_stars>10-100
"""
Provides functionality to see if a petition contains profanities.
Author: <NAME>
"""
import csv
import os
import re
def load_words(filename):
"""
Loads words from csv to list
"""
words = []
dirname = os.path.dirname(__file__)
csvfile = open(os.path.join(dirname, filename), 'r')
for line in csvfile:
words.append(line.strip())
csvfile.close()
return words
def has_profanity(petition_body):
profanities = load_words('profanity.csv')
petition_body = re.sub(r"<[^<]+?>", "", petition_body)
body = petition_body.split(' ')
index = 0
for word in body:
word = re.sub(r"[^a-zA-Z]+", "", word)
word = word.lower()
if word in profanities:
return True
index += 1
return False
| <filename>petitions/profanity.py<gh_stars>10-100
"""
Provides functionality to see if a petition contains profanities.
Author: <NAME>
"""
import csv
import os
import re
def load_words(filename):
"""
Loads words from csv to list
"""
words = []
dirname = os.path.dirname(__file__)
csvfile = open(os.path.join(dirname, filename), 'r')
for line in csvfile:
words.append(line.strip())
csvfile.close()
return words
def has_profanity(petition_body):
profanities = load_words('profanity.csv')
petition_body = re.sub(r"<[^<]+?>", "", petition_body)
body = petition_body.split(' ')
index = 0
for word in body:
word = re.sub(r"[^a-zA-Z]+", "", word)
word = word.lower()
if word in profanities:
return True
index += 1
return False
| en | 0.845572 | Provides functionality to see if a petition contains profanities. Author: <NAME> Loads words from csv to list | 3.540835 | 4 |
workspace/module/python-2.7/LxData/datCfg.py | no7hings/Lynxi | 2 | 6623844 | <reponame>no7hings/Lynxi
# coding:utf-8
import os
import copy
class DatUtility(object):
MOD_os = os
MOD_copy = copy
DEF_dat__datatype_pathsep = u'/'
DEF_dat__node_namespace_pathsep = u':'
DEF_dat__node_type_pathsep = u'/'
DEF_dat__node_pathsep = u'/'
DEF_dat__node_port_pathsep = u'.'
DEF_dat__node_variant_pathsep = u'@'
DEF_dat__file_extsep = u'.'
DEF_dat__file_pathsep = u'/'
DEF_dat__raw_strsep = u','
DEF_dat__compraw_strsep = u', '
DEF_dat__datatype__boolean = u'boolean'
DEF_dat__datatype__booleanarray = u'booleanarray'
DEF_dat__datatype__Integer = u'integer'
DEF_dat__datatype__integerarray = u'integerarray'
DEF_dat__datatype__float = u'float'
DEF_dat__datatype__floatarray = u'floatarray'
DEF_dat__datatype__float2 = u'float2'
DEF_dat__datatype__float2array = u'float2array'
DEF_dat__datatype__float3 = u'float3'
DEF_dat__datatype__float3array = u'float3array'
DEF_dat__datatype__float4 = u'float4'
DEF_dat__datatype__float4array = u'float4array'
DEF_dat__datatype__color2 = u'color2'
DEF_dat__datatype__color2array = u'color2array'
DEF_dat__datatype__color3 = u'color3'
DEF_dat__datatype__color3array = u'color3array'
DEF_dat__datatype__color4 = u'color4'
DEF_dat__datatype__color4array = u'color4array'
DEF_dat__datatype__vector2 = u'vector2'
DEF_dat__datatype__vector2array = u'vector2array'
DEF_dat__datatype__vector3 = u'vector3'
DEF_dat__datatype__vector3array = u'vector3array'
DEF_dat__datatype__vector4 = u'vector4'
DEF_dat__datatype__vector4array = u'vector4array'
DEF_dat__datatype__matrix33 = u'matrix33'
DEF_dat__datatype__matrix44 = u'matrix44'
DEF_dat__datatype__string = u'string'
DEF_dat__datatype__stringarray = u'stringarray'
DEF_dat__datatype__category_digit = u'digit'
DEF_dat__datatype__category_digitarray = u'digitarray'
DEF_dat__datatype__category_digit2array = u'digit2array'
DEF_dat__datatype__category_digit3array = u'digit3array'
DEF_dat__datatype__category_digit4array = u'digit4array'
DEF_dat__datatype__role__color = u'color'
DEF_dat__datatype__role__vector = u'vector'
DEF_dat__datatype__role__matrix = u'matrix'
DEF_dat__datatype__category_dict = {
DEF_dat__datatype__color2: DEF_dat__datatype__float2,
DEF_dat__datatype__color2array: DEF_dat__datatype__float2array,
DEF_dat__datatype__color3: DEF_dat__datatype__float3,
DEF_dat__datatype__color3array: DEF_dat__datatype__float3array,
DEF_dat__datatype__color4: DEF_dat__datatype__float4,
DEF_dat__datatype__color4array: DEF_dat__datatype__float4array,
DEF_dat__datatype__vector2: DEF_dat__datatype__float2,
DEF_dat__datatype__vector2array: DEF_dat__datatype__float2array,
DEF_dat__datatype__vector3: DEF_dat__datatype__float3,
DEF_dat__datatype__vector3array: DEF_dat__datatype__float3array,
DEF_dat__datatype__vector4: DEF_dat__datatype__float4,
DEF_dat__datatype__vector4array: DEF_dat__datatype__float4array
}
DEF_dat__datatype__role_dict = {
DEF_dat__datatype__color2: DEF_dat__datatype__role__color,
DEF_dat__datatype__color2array: DEF_dat__datatype__role__color,
DEF_dat__datatype__color3: DEF_dat__datatype__role__color,
DEF_dat__datatype__color3array: DEF_dat__datatype__role__color,
DEF_dat__datatype__color4: DEF_dat__datatype__role__color,
DEF_dat__datatype__color4array: DEF_dat__datatype__role__color,
DEF_dat__datatype__vector2: DEF_dat__datatype__role__vector,
DEF_dat__datatype__vector2array: DEF_dat__datatype__role__vector,
DEF_dat__datatype__vector3: DEF_dat__datatype__role__vector,
DEF_dat__datatype__vector3array: DEF_dat__datatype__role__vector,
DEF_dat__datatype__vector4: DEF_dat__datatype__role__vector,
DEF_dat__datatype__vector4array: DEF_dat__datatype__role__vector
}
DEF_dat__datatype__rawtype_pattern_dict = {
DEF_dat__datatype__float: float,
DEF_dat__datatype__floatarray: (list, float),
DEF_dat__datatype__float2array: (list, tuple, float),
DEF_dat__datatype__float3array: (list, tuple, float),
DEF_dat__datatype__float4array: (list, tuple, float)
}
DEF_dat__datatype__rawsize_pattern_dict = {
DEF_dat__datatype__float: 1,
DEF_dat__datatype__floatarray: (float(u'inf'), 1),
DEF_dat__datatype__float2array: (float(u'inf'), 2, 1),
DEF_dat__datatype__float3array: (float(u'inf'), 3, 1),
DEF_dat__datatype__float4array: (float(u'inf'), 4, 1)
}
class DatDatatype(object):
    """Flat namespace of datatype-name constants.

    Mirrors the ``DEF_dat__datatype__*`` strings from :class:`DatUtility`
    under short attribute names so callers can write ``DatDatatype.float3``
    instead of the long mangled identifier.
    """
    boolean = DatUtility.DEF_dat__datatype__boolean
    booleanarray = DatUtility.DEF_dat__datatype__booleanarray
    # NOTE(review): the source constant is spelled "Integer" (capital I),
    # unlike its lower-case siblings; kept as-is to avoid breaking callers.
    integer = DatUtility.DEF_dat__datatype__Integer
    integerarray = DatUtility.DEF_dat__datatype__integerarray
    # "float" intentionally shadows the builtin inside this class namespace only.
    float = DatUtility.DEF_dat__datatype__float
    floatarray = DatUtility.DEF_dat__datatype__floatarray
    float2 = DatUtility.DEF_dat__datatype__float2
    float2array = DatUtility.DEF_dat__datatype__float2array
    float3 = DatUtility.DEF_dat__datatype__float3
    float3array = DatUtility.DEF_dat__datatype__float3array
    float4 = DatUtility.DEF_dat__datatype__float4
    float4array = DatUtility.DEF_dat__datatype__float4array
    # Color datatypes are stored with the matching float layouts.
    color2 = DatUtility.DEF_dat__datatype__color2
    color2array = DatUtility.DEF_dat__datatype__color2array
    color3 = DatUtility.DEF_dat__datatype__color3
    color3array = DatUtility.DEF_dat__datatype__color3array
    color4 = DatUtility.DEF_dat__datatype__color4
    color4array = DatUtility.DEF_dat__datatype__color4array
    # Vector datatypes likewise share the float layouts.
    vector2 = DatUtility.DEF_dat__datatype__vector2
    vector2array = DatUtility.DEF_dat__datatype__vector2array
    vector3 = DatUtility.DEF_dat__datatype__vector3
    vector3array = DatUtility.DEF_dat__datatype__vector3array
    vector4 = DatUtility.DEF_dat__datatype__vector4
    vector4array = DatUtility.DEF_dat__datatype__vector4array
    matrix33 = DatUtility.DEF_dat__datatype__matrix33
    matrix44 = DatUtility.DEF_dat__datatype__matrix44
    string = DatUtility.DEF_dat__datatype__string
    stringarray = DatUtility.DEF_dat__datatype__stringarray
| # coding:utf-8
import os
import copy
class DatUtility(object):
    """Constant registry for the "dat" data model.

    Groups the separator characters used in path-like identifiers, the
    canonical datatype name strings, and lookup tables that map semantic
    datatypes (color/vector) onto their underlying float layouts and roles.
    """
    # Module handles cached on the class for convenient access.
    MOD_os = os
    MOD_copy = copy
    # Separator characters used when composing path-like identifiers.
    DEF_dat__datatype_pathsep = u'/'
    DEF_dat__node_namespace_pathsep = u':'
    DEF_dat__node_type_pathsep = u'/'
    DEF_dat__node_pathsep = u'/'
    DEF_dat__node_port_pathsep = u'.'
    DEF_dat__node_variant_pathsep = u'@'
    DEF_dat__file_extsep = u'.'
    DEF_dat__file_pathsep = u'/'
    DEF_dat__raw_strsep = u','
    DEF_dat__compraw_strsep = u', '
    # Canonical datatype name strings.
    DEF_dat__datatype__boolean = u'boolean'
    DEF_dat__datatype__booleanarray = u'booleanarray'
    # NOTE(review): "Integer" breaks the lower-case naming convention of its
    # siblings; renaming would break existing references, so it is kept.
    DEF_dat__datatype__Integer = u'integer'
    DEF_dat__datatype__integerarray = u'integerarray'
    DEF_dat__datatype__float = u'float'
    DEF_dat__datatype__floatarray = u'floatarray'
    DEF_dat__datatype__float2 = u'float2'
    DEF_dat__datatype__float2array = u'float2array'
    DEF_dat__datatype__float3 = u'float3'
    DEF_dat__datatype__float3array = u'float3array'
    DEF_dat__datatype__float4 = u'float4'
    DEF_dat__datatype__float4array = u'float4array'
    DEF_dat__datatype__color2 = u'color2'
    DEF_dat__datatype__color2array = u'color2array'
    DEF_dat__datatype__color3 = u'color3'
    DEF_dat__datatype__color3array = u'color3array'
    DEF_dat__datatype__color4 = u'color4'
    DEF_dat__datatype__color4array = u'color4array'
    DEF_dat__datatype__vector2 = u'vector2'
    DEF_dat__datatype__vector2array = u'vector2array'
    DEF_dat__datatype__vector3 = u'vector3'
    DEF_dat__datatype__vector3array = u'vector3array'
    DEF_dat__datatype__vector4 = u'vector4'
    DEF_dat__datatype__vector4array = u'vector4array'
    DEF_dat__datatype__matrix33 = u'matrix33'
    DEF_dat__datatype__matrix44 = u'matrix44'
    DEF_dat__datatype__string = u'string'
    DEF_dat__datatype__stringarray = u'stringarray'
    # Category labels for digit-based groupings.
    DEF_dat__datatype__category_digit = u'digit'
    DEF_dat__datatype__category_digitarray = u'digitarray'
    DEF_dat__datatype__category_digit2array = u'digit2array'
    DEF_dat__datatype__category_digit3array = u'digit3array'
    DEF_dat__datatype__category_digit4array = u'digit4array'
    # Semantic roles carried by color/vector/matrix datatypes.
    DEF_dat__datatype__role__color = u'color'
    DEF_dat__datatype__role__vector = u'vector'
    DEF_dat__datatype__role__matrix = u'matrix'
    # Maps each semantic datatype to the float layout it is stored as.
    DEF_dat__datatype__category_dict = {
        DEF_dat__datatype__color2: DEF_dat__datatype__float2,
        DEF_dat__datatype__color2array: DEF_dat__datatype__float2array,
        DEF_dat__datatype__color3: DEF_dat__datatype__float3,
        DEF_dat__datatype__color3array: DEF_dat__datatype__float3array,
        DEF_dat__datatype__color4: DEF_dat__datatype__float4,
        DEF_dat__datatype__color4array: DEF_dat__datatype__float4array,
        DEF_dat__datatype__vector2: DEF_dat__datatype__float2,
        DEF_dat__datatype__vector2array: DEF_dat__datatype__float2array,
        DEF_dat__datatype__vector3: DEF_dat__datatype__float3,
        DEF_dat__datatype__vector3array: DEF_dat__datatype__float3array,
        DEF_dat__datatype__vector4: DEF_dat__datatype__float4,
        DEF_dat__datatype__vector4array: DEF_dat__datatype__float4array
    }
    # Maps each semantic datatype to its role (color vs. vector).
    DEF_dat__datatype__role_dict = {
        DEF_dat__datatype__color2: DEF_dat__datatype__role__color,
        DEF_dat__datatype__color2array: DEF_dat__datatype__role__color,
        DEF_dat__datatype__color3: DEF_dat__datatype__role__color,
        DEF_dat__datatype__color3array: DEF_dat__datatype__role__color,
        DEF_dat__datatype__color4: DEF_dat__datatype__role__color,
        DEF_dat__datatype__color4array: DEF_dat__datatype__role__color,
        DEF_dat__datatype__vector2: DEF_dat__datatype__role__vector,
        DEF_dat__datatype__vector2array: DEF_dat__datatype__role__vector,
        DEF_dat__datatype__vector3: DEF_dat__datatype__role__vector,
        DEF_dat__datatype__vector3array: DEF_dat__datatype__role__vector,
        DEF_dat__datatype__vector4: DEF_dat__datatype__role__vector,
        DEF_dat__datatype__vector4array: DEF_dat__datatype__role__vector
    }
    # Expected nesting of Python types for each raw datatype value.
    DEF_dat__datatype__rawtype_pattern_dict = {
        DEF_dat__datatype__float: float,
        DEF_dat__datatype__floatarray: (list, float),
        DEF_dat__datatype__float2array: (list, tuple, float),
        DEF_dat__datatype__float3array: (list, tuple, float),
        DEF_dat__datatype__float4array: (list, tuple, float)
    }
    # Expected size at each nesting level; float('inf') means "any length".
    DEF_dat__datatype__rawsize_pattern_dict = {
        DEF_dat__datatype__float: 1,
        DEF_dat__datatype__floatarray: (float(u'inf'), 1),
        DEF_dat__datatype__float2array: (float(u'inf'), 2, 1),
        DEF_dat__datatype__float3array: (float(u'inf'), 3, 1),
        DEF_dat__datatype__float4array: (float(u'inf'), 4, 1)
    }
class DatDatatype(object):
    """Flat namespace of datatype-name constants.

    Mirrors the ``DEF_dat__datatype__*`` strings from :class:`DatUtility`
    under short attribute names so callers can write ``DatDatatype.float3``
    instead of the long mangled identifier.
    """
    boolean = DatUtility.DEF_dat__datatype__boolean
    booleanarray = DatUtility.DEF_dat__datatype__booleanarray
    # NOTE(review): the source constant is spelled "Integer" (capital I),
    # unlike its lower-case siblings; kept as-is to avoid breaking callers.
    integer = DatUtility.DEF_dat__datatype__Integer
    integerarray = DatUtility.DEF_dat__datatype__integerarray
    # "float" intentionally shadows the builtin inside this class namespace only.
    float = DatUtility.DEF_dat__datatype__float
    floatarray = DatUtility.DEF_dat__datatype__floatarray
    float2 = DatUtility.DEF_dat__datatype__float2
    float2array = DatUtility.DEF_dat__datatype__float2array
    float3 = DatUtility.DEF_dat__datatype__float3
    float3array = DatUtility.DEF_dat__datatype__float3array
    float4 = DatUtility.DEF_dat__datatype__float4
    float4array = DatUtility.DEF_dat__datatype__float4array
    # Color datatypes are stored with the matching float layouts.
    color2 = DatUtility.DEF_dat__datatype__color2
    color2array = DatUtility.DEF_dat__datatype__color2array
    color3 = DatUtility.DEF_dat__datatype__color3
    color3array = DatUtility.DEF_dat__datatype__color3array
    color4 = DatUtility.DEF_dat__datatype__color4
    color4array = DatUtility.DEF_dat__datatype__color4array
    # Vector datatypes likewise share the float layouts.
    vector2 = DatUtility.DEF_dat__datatype__vector2
    vector2array = DatUtility.DEF_dat__datatype__vector2array
    vector3 = DatUtility.DEF_dat__datatype__vector3
    vector3array = DatUtility.DEF_dat__datatype__vector3array
    vector4 = DatUtility.DEF_dat__datatype__vector4
    vector4array = DatUtility.DEF_dat__datatype__vector4array
    matrix33 = DatUtility.DEF_dat__datatype__matrix33
    matrix44 = DatUtility.DEF_dat__datatype__matrix44
    string = DatUtility.DEF_dat__datatype__string
stringarray = DatUtility.DEF_dat__datatype__stringarray | en | 0.786515 | # coding:utf-8 | 2.00684 | 2 |
ltable.py | LionCoder4ever/pylua | 0 | 6623845 | <reponame>LionCoder4ever/pylua<filename>ltable.py
import collections
from lmath import FloatToInteger
from lvalue import LuaValue, LUATYPE, LuaNil
class LuaDict(collections.abc.Mapping):
    """Hash part of a Lua table, mapping LuaValue keys to LuaValue values.

    String-typed keys are stored under their raw Python string so that two
    distinct LuaValue wrappers around equal strings hit the same slot.

    Fixes over the original:
    * ``collections.Mapping`` was removed in Python 3.10; the ABC now comes
      from ``collections.abc``.
    * ``__delitem__`` is provided because ``LuaTable.put`` deletes entries
      when a key is assigned nil, which a bare ``Mapping`` does not support.
    """
    def __init__(self):
        # Underlying storage: raw str keys for Lua strings, LuaValue otherwise.
        self.map = {}
    def __setitem__(self, key, value):
        if not isinstance(key, LuaValue):
            raise TypeError('key must be instance of LuaValue')
        if not isinstance(value, LuaValue):
            raise TypeError('value must be instance of LuaValue')
        if key.typeOf() is LUATYPE.LUA_TSTRING.value:
            self.map[key.value] = value
        else:
            self.map[key] = value
    def __getitem__(self, item):
        # Reading an absent field yields LuaNil (mirroring Lua semantics)
        # instead of raising KeyError.
        if item.typeOf() is LUATYPE.LUA_TSTRING.value:
            item = item.value
        return self.map.get(item, LuaNil())
    def __delitem__(self, key):
        # Unwrap string keys the same way __setitem__ stores them; deleting
        # an absent key is a no-op (Lua "assign nil" semantics).
        if isinstance(key, LuaValue) and key.typeOf() is LUATYPE.LUA_TSTRING.value:
            key = key.value
        self.map.pop(key, None)
    def __iter__(self):
        return iter(self.map)
    def __len__(self):
        return len(self.map)
class LuaArray(collections.abc.MutableSequence):
    """Array part of a Lua table: a sequence that only accepts LuaValue items.

    Fix over the original: ``collections.MutableSequence`` was removed in
    Python 3.10, so the ABC is taken from ``collections.abc``.
    """
    def __init__(self):
        self.arr = []
    def __delitem__(self, key):
        del self.arr[key]
    def __getitem__(self, item):
        return self.arr[item]
    def __len__(self):
        return len(self.arr)
    def __setitem__(self, key, value):
        LuaArray.assertValue(value)
        self.arr[key] = value
    def insert(self, index, value):
        LuaArray.assertValue(value)
        self.arr.insert(index, value)
    @staticmethod
    def assertValue(value):
        # Shared guard for all mutators: the VM must only store boxed values.
        if not isinstance(value, LuaValue):
            raise TypeError('value must be instance of LuaValue')
class LuaTable(LuaValue):
    """Lua table value backed by a dense array part plus a hash part.

    Integer keys starting at 1 live in ``self.arr`` (0-based internally);
    everything else lives in ``self.map``.  Either part is created lazily,
    so every access guards with ``hasattr``.
    """
    LFIELDS_PER_FLUSH = 50
    def __init__(self, narr: int, nrec: int):
        """Create a table; *narr*/*nrec* are capacity hints for each part."""
        super().__init__(LUATYPE.LUA_TTABLE.value, self)
        if narr > 0:
            self.arr = LuaArray()
        if nrec > 0:
            self.map = LuaDict()
    def get(self, key: LuaValue) -> LuaValue:
        """
        Return the value stored under *key*, checking the array part first.
        Integer-valued float keys are normalized to int before lookup.
        Missing keys yield LuaNil.
        :param key:
        :return:
        """
        key = self.floatToInteger(key)
        # Guard hasattr: the original crashed with AttributeError when the
        # lazily-created array/hash parts did not exist yet.
        if type(key.value) is int and hasattr(self, 'arr') and 1 <= key.value <= len(self.arr):
            return self.arr[key.value - 1]
        if hasattr(self, 'map'):
            return self.map.get(key)
        return LuaNil()
    def put(self, key, value):
        """Store ``key -> value``; assigning nil (None payload) removes it."""
        key = self.floatToInteger(key)
        if type(key.value) is int and key.value >= 1:
            if not hasattr(self, 'arr'):
                self.arr = LuaArray()
            if key.value <= len(self.arr):
                self.arr[key.value - 1] = value
                if key.value == len(self.arr) and value.value is None:
                    # A trailing nil: trim the dead tail of the array part.
                    self.shrinkArray()
                return
            if key.value == len(self.arr) + 1:
                if hasattr(self, 'map'):
                    # Drop any stale hash entry for this index.
                    del self.map[key]
                if value.value is not None:
                    self.arr.append(value)
                    self.expandArray()
                return
        if value.value is not None:
            if not hasattr(self, 'map'):
                self.map = LuaDict()
            self.map[key] = value
        elif hasattr(self, 'map'):
            # Assigning nil deletes from the hash part; no-op without one
            # (the original raised AttributeError here).
            del self.map[key]
    def floatToInteger(self, key):
        """
        Return *key* with an integer-valued float payload converted to int.
        :param key:
        :return:
        """
        if key.typeOf() is LUATYPE.LUA_TNUMBER.value and type(key.value) is float:
            keytoint, converted = FloatToInteger(key.value)
            if converted:
                key.value = keytoint
        return key
    def shrinkArray(self):
        """Strip trailing nil values from the array part.

        The original popped from the end whenever *any* scanned slot held a
        nil, which could drop live trailing elements; only the contiguous
        nil tail must be removed.
        """
        while len(self.arr) > 0 and self.arr[-1].value is None:
            self.arr.pop()
    def expandArray(self):
        """Migrate hash entries with consecutive integer keys into the array.

        Keeps pulling key ``len(arr) + 1`` out of the hash part until a gap
        is hit.  The original compared integers with ``is`` (identity, not
        equality) and mutated the dict while iterating it; both are fixed.
        """
        if not hasattr(self, 'map'):
            return
        idx = len(self.arr) + 1
        while True:
            next_key = None
            for candidate in self.map:
                # String keys are stored unwrapped; only boxed integer keys
                # can continue the array part.
                if isinstance(candidate, LuaValue) and candidate.value == idx:
                    next_key = candidate
                    break
            if next_key is None:
                break
            self.arr.append(self.map[next_key])
            # NOTE(review): relies on the hash part supporting item deletion.
            del self.map[next_key]
            idx += 1
    def len(self) -> int:
        """Lua length operator: size of the array part (0 if absent)."""
        return len(self.arr) if hasattr(self, 'arr') else 0
| import collections
from lmath import FloatToInteger
from lvalue import LuaValue, LUATYPE, LuaNil
class LuaDict(collections.abc.Mapping):
    """Hash part of a Lua table, mapping LuaValue keys to LuaValue values.

    String-typed keys are stored under their raw Python string so that two
    distinct LuaValue wrappers around equal strings hit the same slot.

    Fixes over the original:
    * ``collections.Mapping`` was removed in Python 3.10; the ABC now comes
      from ``collections.abc``.
    * ``__delitem__`` is provided because ``LuaTable.put`` deletes entries
      when a key is assigned nil, which a bare ``Mapping`` does not support.
    """
    def __init__(self):
        # Underlying storage: raw str keys for Lua strings, LuaValue otherwise.
        self.map = {}
    def __setitem__(self, key, value):
        if not isinstance(key, LuaValue):
            raise TypeError('key must be instance of LuaValue')
        if not isinstance(value, LuaValue):
            raise TypeError('value must be instance of LuaValue')
        if key.typeOf() is LUATYPE.LUA_TSTRING.value:
            self.map[key.value] = value
        else:
            self.map[key] = value
    def __getitem__(self, item):
        # Reading an absent field yields LuaNil (mirroring Lua semantics)
        # instead of raising KeyError.
        if item.typeOf() is LUATYPE.LUA_TSTRING.value:
            item = item.value
        return self.map.get(item, LuaNil())
    def __delitem__(self, key):
        # Unwrap string keys the same way __setitem__ stores them; deleting
        # an absent key is a no-op (Lua "assign nil" semantics).
        if isinstance(key, LuaValue) and key.typeOf() is LUATYPE.LUA_TSTRING.value:
            key = key.value
        self.map.pop(key, None)
    def __iter__(self):
        return iter(self.map)
    def __len__(self):
        return len(self.map)
class LuaArray(collections.abc.MutableSequence):
    """Array part of a Lua table: a sequence that only accepts LuaValue items.

    Fix over the original: ``collections.MutableSequence`` was removed in
    Python 3.10, so the ABC is taken from ``collections.abc``.
    """
    def __init__(self):
        self.arr = []
    def __delitem__(self, key):
        del self.arr[key]
    def __getitem__(self, item):
        return self.arr[item]
    def __len__(self):
        return len(self.arr)
    def __setitem__(self, key, value):
        LuaArray.assertValue(value)
        self.arr[key] = value
    def insert(self, index, value):
        LuaArray.assertValue(value)
        self.arr.insert(index, value)
    @staticmethod
    def assertValue(value):
        # Shared guard for all mutators: the VM must only store boxed values.
        if not isinstance(value, LuaValue):
            raise TypeError('value must be instance of LuaValue')
class LuaTable(LuaValue):
    """Lua table value backed by a dense array part plus a hash part.

    Integer keys starting at 1 live in ``self.arr`` (0-based internally);
    everything else lives in ``self.map``.  Either part is created lazily,
    so every access guards with ``hasattr``.
    """
    LFIELDS_PER_FLUSH = 50
    def __init__(self, narr: int, nrec: int):
        """Create a table; *narr*/*nrec* are capacity hints for each part."""
        super().__init__(LUATYPE.LUA_TTABLE.value, self)
        if narr > 0:
            self.arr = LuaArray()
        if nrec > 0:
            self.map = LuaDict()
    def get(self, key: LuaValue) -> LuaValue:
        """
        Return the value stored under *key*, checking the array part first.
        Integer-valued float keys are normalized to int before lookup.
        Missing keys yield LuaNil.
        :param key:
        :return:
        """
        key = self.floatToInteger(key)
        # Guard hasattr: the original crashed with AttributeError when the
        # lazily-created array/hash parts did not exist yet.
        if type(key.value) is int and hasattr(self, 'arr') and 1 <= key.value <= len(self.arr):
            return self.arr[key.value - 1]
        if hasattr(self, 'map'):
            return self.map.get(key)
        return LuaNil()
    def put(self, key, value):
        """Store ``key -> value``; assigning nil (None payload) removes it."""
        key = self.floatToInteger(key)
        if type(key.value) is int and key.value >= 1:
            if not hasattr(self, 'arr'):
                self.arr = LuaArray()
            if key.value <= len(self.arr):
                self.arr[key.value - 1] = value
                if key.value == len(self.arr) and value.value is None:
                    # A trailing nil: trim the dead tail of the array part.
                    self.shrinkArray()
                return
            if key.value == len(self.arr) + 1:
                if hasattr(self, 'map'):
                    # Drop any stale hash entry for this index.
                    del self.map[key]
                if value.value is not None:
                    self.arr.append(value)
                    self.expandArray()
                return
        if value.value is not None:
            if not hasattr(self, 'map'):
                self.map = LuaDict()
            self.map[key] = value
        elif hasattr(self, 'map'):
            # Assigning nil deletes from the hash part; no-op without one
            # (the original raised AttributeError here).
            del self.map[key]
    def floatToInteger(self, key):
        """
        Return *key* with an integer-valued float payload converted to int.
        :param key:
        :return:
        """
        if key.typeOf() is LUATYPE.LUA_TNUMBER.value and type(key.value) is float:
            keytoint, converted = FloatToInteger(key.value)
            if converted:
                key.value = keytoint
        return key
    def shrinkArray(self):
        """Strip trailing nil values from the array part.

        The original popped from the end whenever *any* scanned slot held a
        nil, which could drop live trailing elements; only the contiguous
        nil tail must be removed.
        """
        while len(self.arr) > 0 and self.arr[-1].value is None:
            self.arr.pop()
    def expandArray(self):
        """Migrate hash entries with consecutive integer keys into the array.

        Keeps pulling key ``len(arr) + 1`` out of the hash part until a gap
        is hit.  The original compared integers with ``is`` (identity, not
        equality) and mutated the dict while iterating it; both are fixed.
        """
        if not hasattr(self, 'map'):
            return
        idx = len(self.arr) + 1
        while True:
            next_key = None
            for candidate in self.map:
                # String keys are stored unwrapped; only boxed integer keys
                # can continue the array part.
                if isinstance(candidate, LuaValue) and candidate.value == idx:
                    next_key = candidate
                    break
            if next_key is None:
                break
            self.arr.append(self.map[next_key])
            # NOTE(review): relies on the hash part supporting item deletion.
            del self.map[next_key]
            idx += 1
    def len(self) -> int:
        """Lua length operator: size of the array part (0 if absent)."""
        return len(self.arr) if hasattr(self, 'arr') else 0
queue_/queue_stack_test.py | MilanaShhanukova/programming-2021-19fpl | 0 | 6623846 | <filename>queue_/queue_stack_test.py
"""
Programming for linguists
Tests for Queue class.
"""
import unittest
from queue_.queue_stack import QueueStack
class QueueStackTestCase(unittest.TestCase):
    """
    This Case of tests checks the functionality of the implementation of Queue
    """
    def test_new_queue_is_empty(self):
        """
        Create an empty QueueStack.
        Test that its size is 0.
        """
        queue_stack = QueueStack()
        self.assertTrue(queue_stack.empty())
        self.assertEqual(queue_stack.size(), 0)
    def test_get_element(self):
        """
        Get the front element of a queue_stack.
        Test that it equals the first element pushed (1).
        """
        data = (1, 2, 3, 4)
        queue_stack = QueueStack(data)
        self.assertEqual(queue_stack.top(), data[0])
    def test_new_queue_from_tuple(self):
        """
        Create a QueueStack from an iterable object.
        Check that the size of queue_stack equals the size of the given
        tuple, and that elements are popped in the same order they appear
        in the tuple.
        """
        data = (1, 2, 3, 4)
        queue_stack = QueueStack(data)
        self.assertFalse(queue_stack.empty())
        self.assertEqual(queue_stack.size(), len(data))
        for value in data:
            test_value = queue_stack.top()
            queue_stack.pop()
            self.assertEqual(test_value, value)
        self.assertTrue(queue_stack.empty())
        self.assertEqual(queue_stack.size(), 0)
    def test_new_queue_from_list(self):
        """
        Create a QueueStack from a list.
        Check that the size of queue_stack equals the size of the list.
        Check that the front element of the queue equals the FIRST element
        of the list.  (The original docstring wrongly said "latest element";
        the assertion checks data[0].)
        """
        data = [1, 3, 5, 7, 2, 4]
        queue_stack = QueueStack(data)
        self.assertFalse(queue_stack.empty())
        self.assertEqual(queue_stack.size(), len(data))
        self.assertEqual(queue_stack.top(), data[0])
    def test_new_queue_from_generator(self):
        """
        Create a QueueStack from a generator.
        Test that its size equals the number of generated items and that
        the front element is the first generated value.
        """
        queue_stack = QueueStack(range(10))
        self.assertFalse(queue_stack.empty())
        self.assertEqual(queue_stack.size(), 10)
        self.assertEqual(queue_stack.top(), 0)
    def test_put_element(self):
        """
        Put an element in queue_stack.
        Test that its size is 1 and that the element is at the front.
        """
        queue = QueueStack()
        queue.push(1)
        self.assertFalse(queue.empty())
        self.assertEqual(queue.size(), 1)
        self.assertEqual(queue.top(), 1)
    def test_merge_order(self):
        """
        Create two QueueStacks and merge the second into the first.
        Test that the merged queue's front element is 4, i.e. the first
        element of the second queue.
        """
        stack_1 = QueueStack([1, 2, 3])
        stack_2 = QueueStack([4, 5, 6])
        stack_1.merge(stack_2)
        self.assertEqual(stack_1.top(), 4)
| <filename>queue_/queue_stack_test.py
"""
Programming for linguists
Tests for Queue class.
"""
import unittest
from queue_.queue_stack import QueueStack
class QueueStackTestCase(unittest.TestCase):
    """
    This Case of tests checks the functionality of the implementation of Queue
    """
    def test_new_queue_is_empty(self):
        """
        Create an empty QueueStack.
        Test that its size is 0.
        """
        queue_stack = QueueStack()
        self.assertTrue(queue_stack.empty())
        self.assertEqual(queue_stack.size(), 0)
    def test_get_element(self):
        """
        Get the front element of a queue_stack.
        Test that it equals the first element pushed (1).
        """
        data = (1, 2, 3, 4)
        queue_stack = QueueStack(data)
        self.assertEqual(queue_stack.top(), data[0])
    def test_new_queue_from_tuple(self):
        """
        Create a QueueStack from an iterable object.
        Check that the size of queue_stack equals the size of the given
        tuple, and that elements are popped in the same order they appear
        in the tuple.
        """
        data = (1, 2, 3, 4)
        queue_stack = QueueStack(data)
        self.assertFalse(queue_stack.empty())
        self.assertEqual(queue_stack.size(), len(data))
        for value in data:
            test_value = queue_stack.top()
            queue_stack.pop()
            self.assertEqual(test_value, value)
        self.assertTrue(queue_stack.empty())
        self.assertEqual(queue_stack.size(), 0)
    def test_new_queue_from_list(self):
        """
        Create a QueueStack from a list.
        Check that the size of queue_stack equals the size of the list.
        Check that the front element of the queue equals the FIRST element
        of the list.  (The original docstring wrongly said "latest element";
        the assertion checks data[0].)
        """
        data = [1, 3, 5, 7, 2, 4]
        queue_stack = QueueStack(data)
        self.assertFalse(queue_stack.empty())
        self.assertEqual(queue_stack.size(), len(data))
        self.assertEqual(queue_stack.top(), data[0])
    def test_new_queue_from_generator(self):
        """
        Create a QueueStack from a generator.
        Test that its size equals the number of generated items and that
        the front element is the first generated value.
        """
        queue_stack = QueueStack(range(10))
        self.assertFalse(queue_stack.empty())
        self.assertEqual(queue_stack.size(), 10)
        self.assertEqual(queue_stack.top(), 0)
    def test_put_element(self):
        """
        Put an element in queue_stack.
        Test that its size is 1 and that the element is at the front.
        """
        queue = QueueStack()
        queue.push(1)
        self.assertFalse(queue.empty())
        self.assertEqual(queue.size(), 1)
        self.assertEqual(queue.top(), 1)
    def test_merge_order(self):
        """
        Create two QueueStacks and merge the second into the first.
        Test that the merged queue's front element is 4, i.e. the first
        element of the second queue.
        """
        stack_1 = QueueStack([1, 2, 3])
        stack_2 = QueueStack([4, 5, 6])
        stack_1.merge(stack_2)
        self.assertEqual(stack_1.top(), 4)
self.assertEqual(stack_1.top(), 4)
| en | 0.874276 | Programming for linguists Tests for Queue class. This Case of tests checks the functionality of the implementation of Queue Create an empty QueueStack. Test that its size is 0. Get an element from a queue_stack. Test that it is 1. Create a QueueStack from an iterable object. Check that the size of queue_stack equals to the size of the given tuple. Create a QueueStack from a list. Check that the size of queue_stack equals to the size of the queue. Check that the top element of queue equals to the latest element of the list. Create a QueueStack from a generator. Test that its size equals to the number provided in the generator. Put an element in queue_stack. Test that its size is 1. Create two QueueStack. Test the top of changed Stack | 3.947501 | 4 |
pycrest/test/private/test_cffi.py | Andlon/crest | 0 | 6623847 | from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from pycrest.mesh import Mesh2d
from pycrest.private.cffi import _mesh_to_flat_mesh_data, _flat_mesh_data_to_mesh
def test_mesh_flat_data_roundtrip():
    """Converting a mesh to flat data and back must preserve it."""
    # Unit square split into two triangles.
    unit_square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    triangles = [(0, 1, 3), (1, 2, 3)]
    original = Mesh2d(unit_square, triangles)
    roundtripped = _flat_mesh_data_to_mesh(_mesh_to_flat_mesh_data(original))
    assert_array_almost_equal(original.vertices, roundtripped.vertices)
    assert_array_equal(original.elements, roundtripped.elements)
| from numpy.testing import assert_array_almost_equal
from numpy.testing import assert_array_equal
from pycrest.mesh import Mesh2d
from pycrest.private.cffi import _mesh_to_flat_mesh_data, _flat_mesh_data_to_mesh
def test_mesh_flat_data_roundtrip():
    """Converting a mesh to flat data and back must preserve it."""
    # Unit square split into two triangles.
    unit_square = [(0.0, 0.0), (1.0, 0.0), (1.0, 1.0), (0.0, 1.0)]
    triangles = [(0, 1, 3), (1, 2, 3)]
    original = Mesh2d(unit_square, triangles)
    roundtripped = _flat_mesh_data_to_mesh(_mesh_to_flat_mesh_data(original))
    assert_array_almost_equal(original.vertices, roundtripped.vertices)
    assert_array_equal(original.elements, roundtripped.elements)
| none | 1 | 2.379604 | 2 | |
shipStation/Test_ServoControl.py | LBCC-SpaceClub/HAB2017 | 3 | 6623848 | <filename>shipStation/Test_ServoControl.py<gh_stars>1-10
import unittest
import ServoControl
class Test_ServoControl(unittest.TestCase):
    """Unit tests for the ServoControl tracking helpers."""
    def test_bearing(self):
        """bearing() must return the same result as original_bearing()."""
        # Two nearby reference points (decimal-degree lat/lon).
        aLat = 44.564939
        aLon = -123.241243
        bLat = 44.565973
        bLon = -123.239418
        new_bearing = ServoControl.bearing(aLat, aLon, bLat, bLon)
        old_bearing = ServoControl.original_bearing(aLat, aLon, bLat, bLon)
        self.assertEqual(new_bearing, old_bearing)
    def test_degToServo(self):
        """Smoke-test degToServo at the 360-degree wrap point."""
        testValue = 360
        # Fixed: the bare Python 2 ``print`` statement was a SyntaxError
        # under Python 3; print(...) is valid on both interpreters.
        # TODO(review): add an assertion -- this only checks it doesn't raise.
        print(ServoControl.degToServo(testValue))


if __name__ == '__main__':
    unittest.main()
| <filename>shipStation/Test_ServoControl.py<gh_stars>1-10
import unittest
import ServoControl
class Test_ServoControl(unittest.TestCase):
    """Unit tests for the ServoControl tracking helpers."""
    def test_bearing(self):
        """bearing() must return the same result as original_bearing()."""
        # Two nearby reference points (decimal-degree lat/lon).
        aLat = 44.564939
        aLon = -123.241243
        bLat = 44.565973
        bLon = -123.239418
        new_bearing = ServoControl.bearing(aLat, aLon, bLat, bLon)
        old_bearing = ServoControl.original_bearing(aLat, aLon, bLat, bLon)
        self.assertEqual(new_bearing, old_bearing)
    def test_degToServo(self):
        """Smoke-test degToServo at the 360-degree wrap point."""
        testValue = 360
        # Fixed: the bare Python 2 ``print`` statement was a SyntaxError
        # under Python 3; print(...) is valid on both interpreters.
        # TODO(review): add an assertion -- this only checks it doesn't raise.
        print(ServoControl.degToServo(testValue))


if __name__ == '__main__':
    unittest.main()
| none | 1 | 3.0546 | 3 | |
ExceptionHandlingElseFinally.py | EdgarVallejo96/pyEdureka | 0 | 6623849 | <filename>ExceptionHandlingElseFinally.py
# The else Clause
# try: run this code
# except: execute this code when there is an exception
# else: no exceptions? run this code
try:
    # a = 0 / 0 # Try this
    # NOTE(review): 0 / 0 raises ZeroDivisionError, which the AssertionError
    # handler below would NOT catch -- the program would still crash.
    a = 10
except AssertionError as error:
    print(error)
else:
    print('Executing the else clause')
# finally: runs whether or not the try block raised.
try:
    with open('file.log') as file:
        read_data = file.read()
except FileNotFoundError as fnf_error:
    print(fnf_error)
finally:
    print('This always runs, even with exceptions')
# SUMMARY
# raise: allows you to throw an exception at any time
# assert: enables you to verify if a certain condition is met and throw an exception if it isn't
# try: all statements are executed until an exception is encountered
# except: is used to catch and handle the exception(s) that are encountered in the try clause
# else: lets you code sections that should run only when no exceptions are encountered in the try clause
# finally: enables you to execute sections of code that should always run, with or without any previously encountered exceptions
| <filename>ExceptionHandlingElseFinally.py
# The else Clause
# try: run this code
# except: execute this code when there is an exception
# else: no exceptions? run this code
try:
    # a = 0 / 0 # Try this
    # NOTE(review): 0 / 0 raises ZeroDivisionError, which the AssertionError
    # handler below would NOT catch -- the program would still crash.
    a = 10
except AssertionError as error:
    print(error)
else:
    print('Executing the else clause')
# finally: runs whether or not the try block raised.
try:
    with open('file.log') as file:
        read_data = file.read()
except FileNotFoundError as fnf_error:
    print(fnf_error)
finally:
    print('This always runs, even with exceptions')
# SUMMARY
# raise: allows you to throw an exception at any time
# assert: enables you to verify if a certain condition is met and throw an exception if it isn't
# try: all statements are executed until an exception is encountered
# except: is used to catch and handle the exception(s) that are encountered in the try clause
# else: lets you code sections that should run only when no exceptions are encountered in the try clause
# finally: enables you to execute sections of code that should always run, with or without any previously encountered exceptions
| en | 0.878762 | # The else Clause # try: run this code # except: execute this code when there is an exception # else: no exceptions? run this code # a = 0 / 0 # Try this # SUMMARY # raise: allows you to throw an exception at any time # assert: enables you to verify if a certain condition is met and throw an exception if it isn't # try: all statements are executed until an exception is encountered # except: is used to catch and handle the exception(s) that are encountred in the try clase # else: lets you code sections that should run only when no exceptions are encountered in the try clause # finally: enables you to execute sections of code that should always run, with or without any previously encountered exceptions | 3.934359 | 4 |
src/sorting/__main__.py | haihala/pvl-algot2021 | 0 | 6623850 | <gh_stars>0
from sys import argv as command_line_args
from tabulate import tabulate
from bogo import bogo_benchmark, test_bogo
from stalin import stalin_benchmark, test_stalin
from bubble import bubble_benchmark, test_bubble
from insertion import insertion_benchmark, test_insertion
from quick import quick_benchmark, test_quick
from default import sorted_benchmark
def main():
    """Dispatch on CLI flags: 'test' runs the tests, 'bench' the benchmarks."""
    flags = command_line_args[1:]
    if 'test' in flags:
        tests()
    if 'bench' in flags:
        benchmarks()
def benchmarks():
    """Run every sorting benchmark and print the results as a table."""
    suites = (
        bogo_benchmark,
        stalin_benchmark,
        bubble_benchmark,
        insertion_benchmark,
        quick_benchmark,
        sorted_benchmark,
    )
    rows = [suite() for suite in suites]
    print(tabulate(rows, headers=['Algoritmi', 'Kippauspiste', '+-']))
def tests():
    """Exercise each sorting algorithm's self-test, in declaration order."""
    for run_test in (test_bogo, test_stalin, test_bubble, test_insertion, test_quick):
        run_test()
if __name__ == '__main__':
main()
| from sys import argv as command_line_args
from tabulate import tabulate
from bogo import bogo_benchmark, test_bogo
from stalin import stalin_benchmark, test_stalin
from bubble import bubble_benchmark, test_bubble
from insertion import insertion_benchmark, test_insertion
from quick import quick_benchmark, test_quick
from default import sorted_benchmark
def main():
    """Dispatch on CLI flags: 'test' runs the tests, 'bench' the benchmarks."""
    flags = command_line_args[1:]
    if 'test' in flags:
        tests()
    if 'bench' in flags:
        benchmarks()
def benchmarks():
    """Run every sorting benchmark and print the results as a table."""
    suites = (
        bogo_benchmark,
        stalin_benchmark,
        bubble_benchmark,
        insertion_benchmark,
        quick_benchmark,
        sorted_benchmark,
    )
    rows = [suite() for suite in suites]
    print(tabulate(rows, headers=['Algoritmi', 'Kippauspiste', '+-']))
def tests():
    """Exercise each sorting algorithm's self-test, in declaration order."""
    for run_test in (test_bogo, test_stalin, test_bubble, test_insertion, test_quick):
        run_test()
if __name__ == '__main__':
main() | none | 1 | 2.605301 | 3 |