id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
# Author: <NAME> <<EMAIL>>
#
# Licence: BSD 3-clause
"""Run single-trial mass-univariate analyses in source space for each subject
separately"""
import numpy as np
from mne.minimum_norm import apply_inverse, apply_inverse_epochs
from conditions import analyses
from config import load, save, bad_mri, subjects_id
from base import nested_analysis

# Inverse-solution parameters, shared by evoked and single-epoch projections.
inv_params = dict(lambda2=1.0 / (2 ** 3.0),
                  method='dSPM',
                  pick_ori='normal',
                  verbose=False)

for meg_subject, subject in zip(range(1, 21), subjects_id):
    # load single subject effects (across trials); skip subjects without MRI
    if subject in bad_mri:
        continue
    epochs = load('epochs_decim', subject=meg_subject, preload=True)
    events = load('behavior', subject=meg_subject)
    epochs.apply_baseline((None, 0))
    epochs.pick_types(meg=True, eeg=False, eog=False)
    # Setup source data container
    evoked = epochs.average()
    inv = load('inv', subject=meg_subject)
    stc = apply_inverse(evoked, inv, **inv_params)
    # run each analysis within subject
    for analysis in analyses:
        # source transforming should be applied as early as possible,
        # but memory is limited, so process the epochs in time chunks
        coefs = list()
        n_chunk = 20
        for time in np.array_split(epochs.times, n_chunk):
            stcs = apply_inverse_epochs(epochs.copy().crop(time[0], time[-1]),
                                        inv, **inv_params)
            stcs_data = np.array([ii.data for ii in stcs])
            # then we apply the same function as for the sensor analysis
            # NOTE: nested_analysis is overkill here since this is only a
            # 1-level analysis
            coef, sub = nested_analysis(
                stcs_data, events, analysis['condition'],
                function=analysis.get('erf_function', None),
                query=analysis.get('query', None),
                single_trial=analysis.get('single_trial', False),
                y=analysis.get('y', None),
                n_jobs=-1)
            coefs.append(coef)
        # Reuse the evoked STC container: stack coefficients over time chunks.
        stc._data = np.hstack(coefs)
        # Save all_evokeds
        save([stc, sub, analysis], 'evoked_source', subject=meg_subject,
             analysis=analysis['name'], overwrite=True)
        # Clean memory
        for ii in stcs:
            ii._data = None
        del stcs, stcs_data, coef, sub
    epochs._data = None
    del epochs
| StarcoderdataPython |
3257462 | <filename>diff/operation_type.py
from enum import Enum


class OperationType(Enum):
    """Kinds of operations a schema/object diff can describe.

    ``*_PROPERTY`` members refer to changes of an object's properties,
    the plain members to the object itself.
    """

    UNKNOWN = 0
    ADD = 1
    CHANGE = 2
    REMOVE = 3
    ADD_PROPERTY = 4
    CHANGE_PROPERTY = 5
    REMOVE_PROPERTY = 6
| StarcoderdataPython |
99388 |
import joblib
import numpy as np
import pandas as pd
import streamlit as st
APP_FILE = "app.py"
MODEL_JOBLIB_FILE = "model.joblib"
def main():
    """This function runs/ orchestrates the Machine Learning App Registry"""
    st.markdown(
        """
        # Machine Learning App
        The main objective of this app is building a customer segmentation based on credit card
        payments behavior during the last six months to define marketing strategies.
        You can find the source code for this project in the following [Github repository](https://github.com/andreshugueth/credit_card_clustering).
        """
    )
    html_temp = """
    <div style="text-align: right"> <strong> Author: </strong> <a href=https://www.linkedin.com/in/carlosbarros7/ target="_blank"><NAME></a> </div>
    """
    st.markdown(html_temp, unsafe_allow_html=True)
    st.markdown('## Dataset')
    # Sample data is loaded once at module import (see `show_data` below).
    if st.checkbox('Show sample data'):
        st.write(show_data)
    customer_predictor()
def customer_predictor():
    """## Customer predictor
    A user may have to input data about the customer's finances to predict which cluster he belongs to.
    """
    st.markdown("## Customer segmentation model based on credit behavior")
    balance = st.number_input("Balance")
    purchases = st.number_input("Purchases")
    cash_advance = st.number_input("Cash Advance")
    credit_limit = st.number_input("Credit Limit")
    payments = st.number_input("Payments")
    prediction = 0
    if st.button("Predict"):
        # Model is only loaded on demand to keep app start-up fast.
        model = joblib.load(MODEL_JOBLIB_FILE)
        features = [balance, purchases, cash_advance, credit_limit, payments]
        final_features = [np.array(features)]
        prediction = model.predict(final_features)
        st.balloons()
        st.success(f"The client belongs to the cluster: {prediction[0]:.0f}")
        # Per-cluster interpretation text.
        if prediction[0] == 0:
            st.markdown("""
            These kinds of customers pay a minimum amount in advance and their payment is proportional to the
            movement of their purchases, this means that they are good customers **paying the debts** :hand: they incur
            with their credit cards.
            """)
        if prediction[0] == 1:
            st.markdown("""
            In this group are presented the customers who pay the **most in advance before** :ok_hand: the loan starts with
            a balanced balance statement because their purchases are minimal compared to the other groups,
            also it is the **second-best paying**. :hand:
            """)
        if prediction[0] == 2:
            st.markdown("""
            Customers in this cluster pay the minimum amount in advance, however, it is the **group that buys the most**:gift: :sunglasses:,
            and it is also the **group that pays the most** :moneybag:. In other words, these types of customers are quite
            active regarding the number of purchases they make with their credit cards.
            """)
        if prediction[0] == 3:
            st.markdown("""These clients are the ones with the highest balance status, in addition to that,
            they are the second group that pays the most in advance before starting their credit.
            However, they are the customers who make the **least purchases** :sleepy: and following the same idea,
            they are the seconds when it comes to making payments on the debt with their credit card. This
            makes sense since they have an amount of the loan provided in advance. It can be concluded that
            they are **conservative** and **meticulous** customers when buying. :expressionless:""")
        if prediction[0] == 4:
            st.markdown("""
            This group of customers has **low-frequency usage** :sleeping: of their credit cards since it is the second group that
            purchases the least, in addition to that, they are customers who pay well in proportion to
            their purchases. As for the advance payment before starting the loan, it is minimal compared to the other groups.
            """)
@st.cache
def load_data():
    """Load, clean and column-sort the sample dataset (cached by streamlit)."""
    data = pd.read_csv('final_data.csv')
    # Drop the CSV's unnamed index column and order columns alphabetically.
    data = data.drop(['Unnamed: 0'], axis=1)
    data = data.sort_index(axis=1)
    return data
# Loaded once at import so the checkbox in main() can display it instantly.
show_data = load_data()

if __name__ == "__main__":
    main()
| StarcoderdataPython |
__version__ = "1.0.2"

# Post-processing / analysis subpackages, re-exported at package level.
from . import andromeda
from . import preproc
from . import conf
from . import fits
from . import frdiff
from . import leastsq
from . import llsg
from . import medsub
from . import negfc
from . import nmf
from . import pca
from . import metrics
from . import specfit
from . import stats
from . import var
# Convenience star-imports: dataset container, post-processing entry points
# and the ds9 viewer become available directly on the top-level package.
from .hci_dataset import *
from .hci_postproc import *
from .vip_ds9 import *
| StarcoderdataPython |
3370220 | <reponame>LinkGeoML/LGM-Classification
import numpy as np
import pandas as pd
import geopandas as gpd
from shapely.geometry import Point
from shapely.wkt import loads
import itertools
import os
from collections import Counter
import pickle
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.feature_selection import SelectPercentile, chi2
import adjacency_features as af
import textual_features as tf
# import geometric_features as gf
# import matching as m
import osm_utilities as osm_ut
import writers as wrtrs
from config import config
# Registry tables driving generic feature extraction:
#   feature_module_map      - which module implements each feature
#   features_getter_map     - name of the getter function inside that module
#   features_params_map     - config attribute holding the feature's parameter grid
#   features_getter_args_map - argument names (keys of the args dict) each getter takes
feature_module_map = {
    'classes_in_radius_bln': af,
    'classes_in_radius_cnt': af,
    'classes_in_street_and_radius_bln': af,
    'classes_in_street_and_radius_cnt': af,
    'classes_in_neighbors_bln': af,
    'classes_in_neighbors_cnt': af,
    'classes_in_street_radius_bln': af,
    'classes_in_street_radius_cnt': af,
    'similarity_per_class': tf,
    'top_k_terms': tf,
    'top_k_trigrams': tf,
    'top_k_fourgrams': tf,
}

# Getter function names follow the convention 'get_<feature name>'.
features_getter_map = {name: 'get_' + name for name in feature_module_map}

features_params_map = {
    'classes_in_radius_bln': 'classes_in_radius_thr',
    'classes_in_radius_cnt': 'classes_in_radius_thr',
    'classes_in_street_and_radius_bln': 'classes_in_street_and_radius_thr',
    'classes_in_street_and_radius_cnt': 'classes_in_street_and_radius_thr',
    'classes_in_neighbors_bln': 'classes_in_neighbors_thr',
    'classes_in_neighbors_cnt': 'classes_in_neighbors_thr',
    'classes_in_street_radius_bln': 'classes_in_street_radius_thr',
    'classes_in_street_radius_cnt': 'classes_in_street_radius_thr',
    'top_k_terms': 'top_k_terms_pct',
    'top_k_trigrams': 'top_k_trigrams_pct',
    'top_k_fourgrams': 'top_k_fourgrams_pct',
}

features_getter_args_map = {
    'classes_in_radius_bln': ('poi_gdf', 'poi_index_path', 'nlabels', 'label_map', 'param'),
    'classes_in_radius_cnt': ('poi_gdf', 'poi_index_path', 'nlabels', 'label_map', 'param'),
    'classes_in_street_and_radius_bln': ('poi_gdf', 'street_gdf', 'pois_by_street', 'nlabels', 'label_map', 'geometry_map', 'param'),
    'classes_in_street_and_radius_cnt': ('poi_gdf', 'street_gdf', 'pois_by_street', 'nlabels', 'label_map', 'geometry_map', 'param'),
    'classes_in_neighbors_bln': ('poi_gdf', 'poi_index_path', 'nlabels', 'label_map', 'param'),
    'classes_in_neighbors_cnt': ('poi_gdf', 'poi_index_path', 'nlabels', 'label_map', 'param'),
    'classes_in_street_radius_bln': ('poi_gdf', 'street_gdf', 'nlabels', 'label_map', 'geometry_map', 'param'),
    'classes_in_street_radius_cnt': ('poi_gdf', 'street_gdf', 'nlabels', 'label_map', 'geometry_map', 'param'),
    'similarity_per_class': ('poi_gdf', 'textual_index_path', 'nlabels'),
    'top_k_terms': ('poi_gdf', 'names', 'param'),
    'top_k_trigrams': ('poi_gdf', 'names', 'param'),
    'top_k_fourgrams': ('poi_gdf', 'names', 'param'),
}
def load_poi_gdf(poi_fpath):
    """
    Loads pois in *poi_fpath* into a geopandas.GeoDataFrame and project their \
    geometries.

    Args:
        poi_fpath (str): Path to file containing the pois

    Returns:
        geopandas.GeoDataFrame
    """
    poi_df = pd.read_csv(poi_fpath)
    poi_df['geometry'] = poi_df.apply(
        lambda x: Point(x[config.lon_col], x[config.lat_col]), axis=1)
    poi_gdf = gpd.GeoDataFrame(poi_df, geometry='geometry')
    # Reproject from the source CRS to web mercator (metric distances).
    poi_gdf.crs = {'init': f'epsg:{config.poi_crs}'}
    poi_gdf = poi_gdf.to_crs({'init': 'epsg:3857'})
    # Cache projected coords as plain columns for fast access later.
    poi_gdf['lon'] = poi_gdf.apply(lambda p: p.geometry.coords[0][0], axis=1)
    poi_gdf['lat'] = poi_gdf.apply(lambda p: p.geometry.coords[0][1], axis=1)
    return poi_gdf
def encode_labels(poi_gdf, encoder=None):
    """
    Encodes target column with integer values.

    Args:
        poi_gdf (geopandas.GeoDataFrame): The GeoDataFrame containing the \
            column to be encoded
        encoder (sklearn.preprocessing.LabelEncoder, optional): The label \
            encoder to be utilized

    Returns:
        tuple:
            geopandas.GeoDataFrame: The GeoDataFrame with the encoded column
            sklearn.preprocessing.LabelEncoder: The label encoder utilized
    """
    if encoder is None:
        encoder = LabelEncoder()
        poi_gdf['label'] = encoder.fit_transform(poi_gdf[config.label_col])
    else:
        # Drop pois whose label was unseen at fit time before transforming.
        poi_gdf = poi_gdf[poi_gdf[config.label_col].isin(encoder.classes_)].reset_index(drop=True)
        poi_gdf['label'] = encoder.transform(poi_gdf[config.label_col])
    return poi_gdf, encoder
def load_street_gdf(street_fpath):
    """
    Loads streets in *street_fpath* into a geopandas.GeoDataFrame and project \
    their geometries.

    Args:
        street_fpath (str): Path to file containing the streets

    Returns:
        geopandas.GeoDataFrame
    """
    street_df = pd.read_csv(street_fpath)
    # Geometries are stored as WKT strings; parse them with shapely.
    street_df['geometry'] = street_df['geometry'].apply(lambda x: loads(x))
    street_gdf = gpd.GeoDataFrame(street_df, geometry='geometry')
    street_gdf.crs = {'init': f'epsg:{config.osm_crs}'}
    street_gdf = street_gdf.to_crs({'init': 'epsg:3857'})
    return street_gdf
# def load_poly_gdf(poly_fpath):
# poly_df = pd.read_csv(poly_fpath)
# poly_df['geometry'] = poly_df['geometry'].apply(lambda x: loads(x))
# poly_gdf = gpd.GeoDataFrame(poly_df, geometry='geometry')
# poly_gdf.crs = {'init': f'epsg:{config.osm_crs}'}
# poly_gdf = poly_gdf.to_crs({'init': 'epsg:3857'})
# return poly_gdf
def get_bbox_coords(poi_gdf):
    """
    Returns a bounding box containing all *poi_gdf*'s pois.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois

    Returns:
        tuple: The bounding box coords as (south, west, north, east)
    """
    # Bounds must be expressed in the OSM CRS for the Overpass query.
    poi_gdf = poi_gdf.to_crs({'init': f'epsg:{config.osm_crs}'})
    min_lon, min_lat, max_lon, max_lat = poi_gdf.geometry.total_bounds
    return (min_lat, min_lon, max_lat, max_lon)
def get_required_external_files(poi_gdf, feature_sets_path):
    """
    Checks if external files are required and if so, downloads them using the \
    Overpass API.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains pois in order to define \
            the area to query with Overpass API
        feature_sets_path (str): Path to store the downloaded elements

    Returns:
        None
    """
    # Street geometries are only needed by the street-based adjacency features.
    street_features = (
        'classes_in_street_and_radius_bln',
        'classes_in_street_and_radius_cnt',
        'classes_in_street_radius_bln',
        'classes_in_street_radius_cnt',
    )
    if any(f in config.included_adjacency_features for f in street_features):
        osm_ut.download_osm_streets(get_bbox_coords(poi_gdf), feature_sets_path)
    # if config.included_geometric_features:
    #     osm_ut.download_osm_polygons(get_bbox_coords(poi_gdf), feature_sets_path)
    return
def ngrams(n, word):
    """
    Generator of all *n*-grams of *word*.

    Args:
        n (int): The length of character ngrams to be extracted
        word (str): The word of which the ngrams are to be extracted

    Yields:
        str: ngram
    """
    # A word of length L has L-n+1 ngrams. The original upper bound
    # len(word)-n-1 was off by two and dropped the last two ngrams.
    for i in range(len(word) - n + 1):
        yield word[i:i+n]
def get_top_k(names, k, mode='term'):
    """
    Extracts the top *k* % terms or ngrams of *names*, based on *mode*.

    Args:
        names (list): Contains the names to be considered
        k (float): Percentage of top terms or ngrams to be considered
        mode (str, optional): May be 'term', 'trigram' or 'fourgram'

    Returns:
        list: Contains the top k terms or ngrams
    """
    if mode == 'trigram':
        cnt = Counter(ngram for word in names for ngram in ngrams(3, word))
    elif mode == 'fourgram':
        cnt = Counter(ngram for word in names for ngram in ngrams(4, word))
    else:
        cnt = Counter(names)
    # k is a fraction of the number of *distinct* terms/ngrams.
    return [t[0] for t in cnt.most_common(int(len(cnt) * k))]
def normalize_features(X, train_idxs, scaler=None):
    """
    Normalize features to [0, 1].

    Args:
        X (numpy.ndarray): Features array to be normalized
        train_idxs (numpy.ndarray): Contains the train indexes
        scaler (sklearn.preprocessing.MinMaxScaler, optional): Scaler to be \
            utilized

    Returns:
        tuple:
            numpy.ndarray: The normalized features array
            sklearn.preprocessing.MinMaxScaler: The scaler utilized
    """
    if scaler is None:
        # Fit on the train rows only, then apply to train and test rows.
        scaler = MinMaxScaler()
        X_ = scaler.fit_transform(X[train_idxs])
        for idx, i in enumerate(train_idxs):
            X[i] = X_[idx]
        # Set membership keeps this O(n) instead of O(n^2).
        train_set = set(train_idxs)
        test_idxs = [r for r in range(len(X)) if r not in train_set]
        if test_idxs:
            X_ = scaler.transform(X[test_idxs])
            for idx, i in enumerate(test_idxs):
                X[i] = X_[idx]
    else:
        # Pre-fitted scaler: transform everything (deployment path).
        X = scaler.transform(X)
    return X, scaler
def get_pois_by_street(poi_gdf, street_gdf):
    """
    Matches each poi in *poi_gdf* to its nearest street.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains pois to be matched to \
            a street
        street_gdf (geopandas.GeoDataFrame): Contains streets to search among \
            them for the nearest to each poi

    Returns:
        dict: Has streets ids as keys and a list containing the pois which \
            belong to each street as values
    """
    street_index = street_gdf.sindex
    pois_by_street = dict((s, []) for s in range(len(street_gdf)))
    for poi in poi_gdf.itertuples():
        poi_coords = (poi.lon, poi.lat)
        # Spatial index gives candidate streets; pick the truly nearest one.
        candidates = list(street_index.nearest(poi_coords))
        poi_point = Point(poi_coords)  # build once, not per candidate
        nearest = candidates[np.argmin([
            poi_point.distance(street_gdf.iloc[c]['geometry'])
            for c in candidates
        ])]
        pois_by_street[nearest].append(poi.Index)
    return pois_by_street
def create_args_dict(poi_gdf, train_idxs, required_args, read_path, write_path):
    """
    Initializes and prepares structures required during features extraction.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois for which \
            features will be created
        train_idxs (numpy.ndarray): Contains the train indexes
        required_args (set): Contains the names of the required args
        read_path (str): Path to read from
        write_path (str): Path to write to

    Returns:
        dict: Containing arguments names as keys and their corresponding \
            structures as values
    """
    args = {'poi_gdf': poi_gdf, 'nlabels': poi_gdf['label'].nunique()}
    # Only build the (potentially expensive) structures that are required.
    if 'label_map' in required_args:
        args['label_map'] = poi_gdf.iloc[train_idxs]['label'].values.tolist()
    if 'geometry_map' in required_args:
        args['geometry_map'] = list(poi_gdf.iloc[train_idxs]['geometry'].values)
    if 'poi_index_path' in required_args:
        args['poi_index_path'] = write_path + '/poi_index.pkl'
        af.create_poi_index(poi_gdf.iloc[train_idxs].reset_index(), args['poi_index_path'])
    if 'street_gdf' in required_args:
        street_csv_path = read_path + '/osm_streets.csv'
        args['street_gdf'] = load_street_gdf(street_csv_path)
        args['pois_by_street'] = get_pois_by_street(poi_gdf.iloc[train_idxs].reset_index(), args['street_gdf'])
    if 'textual_index_path' in required_args:
        args['textual_index_path'] = write_path + '/textual_index'
        tf.create_textual_index(poi_gdf.iloc[train_idxs].reset_index(), args['textual_index_path'])
    if 'names' in required_args:
        args['names'] = ' '.join(list(poi_gdf.iloc[train_idxs][config.name_col])).split()
    return args
def create_single_feature(f, args, train_idxs, norm, scaler):
    """
    Creates the features array given a feature's name *f*.

    Args:
        f (str): Feature name to be created
        args (dict): Containing the required arguments for feature *f*
        train_idxs (numpy.ndarray): Contains the train indexes
        norm (boolean): Indicating whether the feature should be normalized \
            or not
        scaler (sklearn.preprocessing.MinMaxScaler): The scaler to be utilized

    Returns:
        tuple:
            numpy.ndarray: The features array of feature *f*
            sklearn.preprocessing.MinMaxScaler: The scaler utilized
    """
    # Dispatch to the registered getter with its declared arguments.
    X = getattr(feature_module_map[f], features_getter_map[f])(
        *[args[arg] for arg in features_getter_args_map[f]])
    if scaler is not None:
        # Deployment path: reuse the scaler fitted during training.
        return normalize_features(X, None, scaler)
    elif norm is True:
        return normalize_features(X, train_idxs)
    else:
        return X, None
def create_single_features(poi_gdf, train_idxs, fold_path):
    """
    Creates all the included features arrays and saves them in *fold_path*.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois for which the \
            features will be created
        train_idxs (numpy.ndarray): Contains the train indexes
        fold_path (str): Path to save features arrays

    Returns:
        None
    """
    os.makedirs(fold_path + '/tmp')
    included_features = config.included_adjacency_features + config.included_textual_features
    required_args = set([arg for f in included_features for arg in features_getter_args_map[f]])
    args = create_args_dict(poi_gdf, train_idxs, required_args, os.path.dirname(fold_path), fold_path)
    for f in included_features:
        norm = f in config.normalized_features
        if f not in features_params_map:
            # Parameterless feature: single array.
            X, _ = create_single_feature(f, args, train_idxs, norm, None)
            np.save(fold_path + f'/tmp/{f}.npy', X)
        else:
            # One array per candidate parameter value (grid search support).
            for p in getattr(config, features_params_map[f]):
                args['param'] = p
                X, _ = create_single_feature(f, args, train_idxs, norm, None)
                np.save(fold_path + f'/tmp/{f}_{p}.npy', X)
    return
def create_concatenated_features(poi_gdf, train_idxs, test_idxs, fold_path):
    """
    Loads a list of included features arrays in order to concatenate them \
    into the final X_train and X_test arrays. Then saves these arrays as well \
    as the corresponding y_train and y_test arrays. Finally, writes the \
    included features configuration into a file.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois for which the \
            features will be created
        train_idxs (numpy.ndarray): Contains the train indexes
        test_idxs (numpy.ndarray): Contains the test indexes
        fold_path (str): Path to save features arrays

    Returns:
        None
    """
    included_features = config.included_adjacency_features + config.included_textual_features
    params_names = list(set([features_params_map[f] for f in included_features if f in features_params_map]))
    params_vals = [getattr(config, param) for param in params_names]
    y = poi_gdf['label']
    # One (X_train, X_test) pair per combination of feature parameters.
    for idx, params in enumerate(itertools.product(*params_vals)):
        features_params = dict(zip(params_names, params))
        Xs = []
        for f in included_features:
            if f in features_params_map:
                p = features_params[features_params_map[f]]
                Xs.append(np.load(fold_path + f'/tmp/{f}_{p}.npy'))
            else:
                Xs.append(np.load(fold_path + f'/tmp/{f}.npy'))
        X = np.hstack(Xs)
        # X = SelectPercentile(chi2, percentile=75).fit_transform(X, y)
        X_train, X_test = X[train_idxs], X[test_idxs]
        np.save(fold_path + f'/X_train_{idx}.npy', X_train)
        np.save(fold_path + f'/X_test_{idx}.npy', X_test)
    # Labels do not depend on the parameter combination: save them once.
    y_train, y_test = y[train_idxs], y[test_idxs]
    np.save(fold_path + '/y_train.npy', y_train)
    np.save(fold_path + '/y_test.npy', y_test)
    path = os.path.dirname(fold_path)
    wrtrs.write_feature_params_info(path + '/params_per_feature_set.csv', params_names, params_vals)
    return
def create_finetuned_features(poi_gdf, features_info, best_feature_params, features_path, results_path):
    """
    Creates and saves the X_train features array for the model_training step.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois for which the \
            features will be created
        features_info (list): Containing the features (and whether they \
            should be normalized or not) to be extracted
        best_feature_params (dict): Containing the best found features \
            parameters values
        features_path (str): Path in order to read required external files \
            (like osm streets file)
        results_path (str): Path to write to

    Returns:
        numpy.ndarray: The features array for model_training step
    """
    included_features = [f[0] for f in features_info]
    required_args = set([arg for f in included_features for arg in features_getter_args_map[f]])
    # All pois are "train" here: the final model is fitted on everything.
    args = create_args_dict(poi_gdf, np.arange(len(poi_gdf)), required_args, features_path, results_path + '/pickled_objects')
    Xs = []
    for f in features_info:
        feat, norm = f[0], f[1]
        if feat in features_params_map:
            args['param'] = best_feature_params[features_params_map[feat]]
        X, scaler = create_single_feature(feat, args, np.arange(len(poi_gdf)), norm, None)
        if norm is True:
            # Persist the fitted scaler for reuse at deployment time.
            with open(results_path + '/pickled_objects' + f'/{feat}_scaler.pkl', 'wb') as fh:
                pickle.dump(scaler, fh)
        Xs.append(X)
    X = np.hstack(Xs)
    np.save(results_path + '/X_train.npy', X)
    return X
def create_test_args_dict(test_poi_gdf, required_args, read_path1, read_path2):
    """
    Instantiate and prepare structures required during features extraction in \
    model_deployment step.

    Args:
        test_poi_gdf (geopandas.GeoDataFrame): Contains the pois for which \
            features will be created
        required_args (set): Contains the names of the required args
        read_path1 (str): Path to features_extraction step results
        read_path2 (str): Path to model_training step results

    Returns:
        dict: Containing arguments names as keys and their corresponding \
            structures as values
    """
    # Reload the training pois and the fitted label encoder: several features
    # are defined relative to the training data, not the incoming test pois.
    train_poi_gdf = load_poi_gdf(read_path1 + '/train_poi_gdf.csv')
    with open(read_path1 + '/encoder.pkl', 'rb') as fh:
        encoder = pickle.load(fh)
    train_poi_gdf, _ = encode_labels(train_poi_gdf, encoder)
    args = {'poi_gdf': test_poi_gdf, 'nlabels': train_poi_gdf['label'].nunique()}
    if 'label_map' in required_args:
        args['label_map'] = train_poi_gdf['label'].values.tolist()
    if 'geometry_map' in required_args:
        args['geometry_map'] = list(train_poi_gdf['geometry'].values)
    if 'poi_index_path' in required_args:
        args['poi_index_path'] = read_path2 + '/poi_index.pkl'
    if 'street_gdf' in required_args:
        street_csv_path = read_path1 + '/osm_streets.csv'
        args['street_gdf'] = load_street_gdf(street_csv_path)
        args['pois_by_street'] = get_pois_by_street(train_poi_gdf, args['street_gdf'])
    if 'textual_index_path' in required_args:
        args['textual_index_path'] = read_path2 + '/textual_index'
    if 'names' in required_args:
        args['names'] = ' '.join(list(train_poi_gdf[config.name_col])).split()
    return args
def create_test_features(poi_gdf, features, features_path, model_training_path, results_path):
    """
    Creates and saves the X_test features array for the model_deployment step.

    Args:
        poi_gdf (geopandas.GeoDataFrame): Contains the pois for which the \
            features will be created
        features (list): Containing the features (as well as their best found \
            configuration) to be extracted
        features_path (str): Path to features_extraction step results
        model_training_path (str): Path to model_training step results
        results_path (str): Path to write to

    Returns:
        numpy.ndarray: The features array for model_deployment step
    """
    included_features = [f[0] for f in features]
    required_args = set([arg for f in included_features for arg in features_getter_args_map[f]])
    args = create_test_args_dict(poi_gdf, required_args, features_path, model_training_path + '/pickled_objects')
    Xs = []
    for f in features:
        feat, _, param_value, norm = f[0], f[1], f[2], f[3]
        if feat in features_params_map:
            # Adjacency params are ints (counts/metres), textual are floats.
            args['param'] = int(param_value) if feature_module_map[feat] == af else float(param_value)
        if norm is True:
            # Reuse the scaler fitted during model_training.
            with open(model_training_path + '/pickled_objects' + f'/{feat}_scaler.pkl', 'rb') as fh:
                scaler = pickle.load(fh)
            X, _ = create_single_feature(feat, args, None, norm, scaler)
        else:
            X, _ = create_single_feature(feat, args, None, norm, None)
        Xs.append(X)
    X = np.hstack(Xs)
    np.save(results_path + '/X_test.npy', X)
    return X
| StarcoderdataPython |
3382881 | #!/usr/bin/python
"""Test module for QSimov."""
import doki
import numpy as np
import qsimov as qj
import random as rnd
import sys
# import webbrowser as wb
from operator import add
from qsimov.samples.djcircuit import DJAlgCircuit
def Bal(n, controlId=0):
    """Return Deutsch-Jozsa oracle for balanced function."""
    gate = qj.QGate(n, "Balanced")
    # Flip the result qubit controlled on one input qubit -> balanced f.
    gate.add_operation("X", targets=n-1, controls=controlId)
    return gate
def Const(n, twice=False):
    """Return Deutsch-Jozsa oracle for constant function."""
    gate = qj.QGate(n, "Constant")
    gate.add_operation("X", targets=n-1)
    if twice:
        # Applying X twice yields the other constant function (identity).
        gate.add_operation("X", targets=n-1)
    return gate
def TeleportationCircuit(gate):
    """Return teleportation algorithm circuit.

    Positional arguments:
        gate: gate to apply to the qubit that is going to be sent
              (so we don't send a 0 or a 1)
    Return:
        QCircuit with teleportation algorithm
    """
    qc = qj.QCircuit(3, "Teleportation", ancilla=(0, 0))
    # Create a Bell pair between qubits 1 (Alice) and 2 (Bob).
    qc.add_operation("H", targets=[1])
    qc.add_operation("X", targets=[2], controls=[1])
    # This is where we work with the qubit Q we want to send:
    # apply the gate given as a parameter.
    qc.add_operation(gate, targets=0)
    # Once everything we want to do to the qubit is done,
    # we prepare it for sending:
    # apply a C-NOT with Q as control and B (qubit 1) as target.
    qc.add_operation("X", targets=1, controls=0)
    # Apply a Hadamard gate on Q.
    qc.add_operation("H", targets=[0])
    # Measure Q and qubit 1; Bob applies the classically-controlled fixes.
    qc.add_operation("MEASURE", targets=(0, 1))
    qc.add_operation("X", targets=2, controls=1)
    qc.add_operation("Z", targets=2, controls=0)
    return qc
def entangle_gate():
    """Return a QGate that creates a Bell pair."""
    e = qj.QGate(2, "Entangle")
    e.add_operation("H", targets=0)
    e.add_operation("X", targets=1, controls=0)
    return e
def entangle_system(s, id, id2):
    """Entangle specified qubits of a system."""
    aux = s.apply_gate("H", targets=id)
    res = aux.apply_gate("X", targets=id2, controls=id)
    # Free the intermediate state; the caller owns `s` and the result.
    aux.free()
    return res
def inversion_tests(verbose=False):
    """Test gate inversion."""
    if verbose:
        print(" Testing gate inversion...")
    e = entangle_gate()  # TODO: Intensive inversion test
    # invert() must reverse the operation list.
    ei = e.invert()
    if e.get_operations() != ei.get_operations()[::-1]:
        if verbose:
            print(e.get_operations())
            print(ei.get_operations())
            print(ei.get_operations()[::-1])
            print(e.get_operations() == ei.get_operations()[::-1])
            print([e.get_operations()[i] != ei.get_operations()[::-1][i]
                   for i in range(len(e.get_operations()))])
            print(" <NAME> visited your simulator...")
        raise AssertionError("Failed invert test")
    # dagger() must behave the same way as invert().
    ed = e.dagger()
    if e.get_operations() != ed.get_operations()[::-1]:
        if verbose:
            print(e.get_operations())
            print(ed.get_operations())
            print(ed.get_operations()[::-1])
            print(e.get_operations() != ed.get_operations()[::-1])
            print([e.get_operations()[i] != ed.get_operations()[::-1][i]
                   for i in range(len(e.get_operations()))])
            print(" <NAME> visited your simulator...")
        raise AssertionError("Failed dagger test")
    # Applying a gate followed by its dagger must leave the state unchanged.
    r = qj.QRegistry(2)
    er = qj.QRegistry(2)
    s = qj.QSystem(2)
    es = qj.QSystem(2)
    aux = r.apply_gate(e)
    r.free()
    r = aux.apply_gate(ed)
    aux.free()
    aux = s.apply_gate(e)
    s.free()
    s = aux.apply_gate(ed)
    aux.free()
    if any(r.get_state() != er.get_state()) \
            or any(s.get_state() != es.get_state()):
        if verbose:
            print(r.get_state())
            print(er.get_state())
            print(s.get_state())
            print(es.get_state())
            print(r.get_state() == er.get_state())
            print(s.get_state() == es.get_state())
            print(" <NAME> visited your simulator...")
        r.free()
        er.free()
        s.free()
        es.free()
        raise AssertionError("Error comparing gate+inversion result")
    # Fix: release the registries on the success path too (they leaked before).
    r.free()
    er.free()
    s.free()
    es.free()
    if verbose:
        print(" Noice")
def entangle_test(QItem, id1, id2, verbose):
    """Check manual entangling matches the Entangle gate for qubits id1, id2."""
    e = entangle_gate()
    r = QItem(3)
    rg = QItem(3)
    # Entangle by hand on r, via the composite gate on rg.
    aux = entangle_system(r, id1, id2)
    r.free()
    r = aux
    aux = rg.apply_gate(e, targets=[id1, id2])
    rg.free()
    rg = aux
    if any(r.get_state() != rg.get_state()):
        if verbose:
            print(r.get_state())
            print(rg.get_state())
            print(r.get_state() == rg.get_state())
            print(" <NAME> visited your simulator...")
        r.free()
        rg.free()
        raise AssertionError(f"Error entangling {id1} and {id2}")
    r.free()
    rg.free()
def entangle_tests(verbose=False, useSystem=False):
    """Test entanglement."""
    if verbose:
        print(" Testing entanglement gate...")
    # Exercise both backends depending on the flag.
    QItem = qj.QSystem if useSystem else qj.QRegistry
    entangle_test(QItem, 0, 1, verbose)
    entangle_test(QItem, 0, 2, verbose)
    entangle_test(QItem, 1, 2, verbose)
    if verbose:
        print(" Noice")
def one_gate_tests(nq, verbose=False, QItem=qj.QRegistry):
    """Test application of one gate of one qubit.

    Applies a random U gate to each qubit via both doki (low level) and
    QSimov (high level) and checks the resulting states match.
    """
    if verbose:
        print(" One qubit gate tests:")
    size = 2**nq
    for qubit_id in range(nq):  # renamed from `id` (shadowed builtin)
        invert = bool(rnd.randint(0, 1))
        rands = np.random.rand(3) * 2 * np.pi - np.pi
        gate = "U(" + ",".join([str(angle)
                                for angle in rands]) + ")"
        if invert:
            gate += "-1"
        sgate = qj.SimpleGate(gate)
        if verbose:
            print(" Testing gate " + gate + " to qubit " + str(qubit_id) + "...")
        b = QItem(nq, verbose=False)
        a = doki.registry_new(nq, False)
        a2 = doki.registry_apply(a, sgate.gate, [qubit_id], set(), set(),
                                 -1, False)
        del a
        if verbose:
            print(" Doki done")
        b2 = b.apply_gate(gate, targets=qubit_id)
        del b
        if verbose:
            print(" QSimov done")
        a2_state = np.array([doki.registry_get(a2, i, False, False)
                             for i in range(size)])
        b2_state = b2.get_state()
        if not np.allclose(a2_state, b2_state):
            if verbose:
                print("Expected:", a2_state)
                print("Received:", b2_state)
                print(a2_state == b2_state)
                print("Gate:", sgate.matrix)
                print(" Michael Bay visited your simulator...")
            del a2
            del b2
            raise AssertionError("Error comparing states after applying gate")
        if verbose:
            print(" Noice")
        del a2
        del b2
def TwoU_np(angle1_1, angle1_2, angle1_3,
            angle2_1, angle2_2, angle2_3,
            invert):
    """Return numpy two qubit gate that may entangle.

    Built as U1 on the first qubit followed by a controlled-U2 on the
    second qubit (order reversed when *invert* is set).
    """
    gate1str = f"U({angle1_1},{angle1_2},{angle1_3})"
    gate2str = f"U({angle2_1},{angle2_2},{angle2_3})"
    if invert:
        gate1str += "-1"
        gate2str += "-1"
    gate1aux = qj.SimpleGate(gate1str)
    gate2aux = qj.SimpleGate(gate2str)
    # U1 acts on the first qubit, identity on the second.
    gate1 = np.kron(gate1aux.matrix, np.eye(2, dtype=complex))
    # Controlled-U2: embed U2 in the bottom-right 2x2 block.
    gate2 = np.eye(4, dtype=complex)
    gate2[2, 2] = gate2aux.matrix[0, 0]
    gate2[2, 3] = gate2aux.matrix[0, 1]
    gate2[3, 2] = gate2aux.matrix[1, 0]
    gate2[3, 3] = gate2aux.matrix[1, 1]
    if not invert:
        return gate2.dot(gate1)
    else:
        # Inverse composition is applied in the opposite order.
        return gate1.dot(gate2)
def _add_two_U():
    """Add the TwoU gate to the list of available gates."""
    qj.add_gate("TwoU", TwoU_np, 6, 6, has_invert_arg=True,
                is_own_inverse=False, overwrite=True)
def two_gate_tests(nq, verbose=False, QItem=qj.QRegistry):
    """Test application of one gate of two qubits.

    For every ordered pair of distinct qubits, applies a random
    (optionally inverted) TwoU gate with both doki and QItem and asserts
    that the resulting state vectors match.

    Positional arguments:
        nq: number of qubits of the structures under test (at least 2)
    Keyworded arguments:
        verbose: whether to print progress information
        QItem: high level structure type to compare against doki
    Raises:
        ValueError: if nq < 2
        AssertionError: if the doki and QItem states differ
    """
    if verbose:
        print("  Two qubit gate tests:")
    if nq < 2:
        raise ValueError("Can't apply 2 qubit gates to 1 qubit structure")
    _add_two_U()  # make sure the TwoU gate is registered
    size = 2**nq
    for id1 in range(nq):
        for id2 in range(nq):
            if id1 == id2:
                continue
            invert = bool(rnd.randint(0, 1))
            rands = np.random.rand(6) * 2 * np.pi - np.pi
            gate = "TwoU(" + ",".join([str(angle)
                                       for angle in rands]) + ")"
            if invert:
                gate += "-1"
            sgate = qj.SimpleGate(gate)
            if verbose:
                print(f"   Testing gate {gate} to qubits {id1} and {id2}...")
            b = QItem(nq, verbose=False)
            a = doki.registry_new(nq, False)
            # NOTE(review): the last doki argument is `verbose` here,
            # while one_gate_tests passes False -- confirm intended.
            a2 = doki.registry_apply(a, sgate.gate, [id1, id2], set(), set(),
                                     -1, verbose)
            del a
            if verbose:
                print("    Doki done")
            b2 = b.apply_gate(gate, targets=[id1, id2])
            del b
            if verbose:
                print("    QSimov done")
            a2_state = np.array([doki.registry_get(a2, i, False, False)
                                 for i in range(size)])
            b2_state = b2.get_state()
            if not np.allclose(a2_state, b2_state):
                if verbose:
                    print("Expected:", a2_state)
                    print("Received:", b2_state)
                    print(b2.qubitMap)
                    print(b2.regs[b2.qubitMap[0]][0].get_state())
                    print(b2.regs[b2.qubitMap[1]][0].get_state())
                    print(a2_state == b2_state)
                    print("          Michael Bay visited your simulator...")
                del a2
                del b2
                raise AssertionError("Error comparing states after " +
                                     "applying gate")
            if verbose:
                print("    Noice")
            del a2
            del b2
def controlled_gate_tests(nq, verbose=False, QItem=qj.QRegistry):
    """Test application of controlled gates.

    For each target qubit, randomly assigns every other qubit the role
    of control (+1), anticontrol (-1) or neither (0), applies a random
    U gate with both doki and QItem, and asserts the states match.

    Positional arguments:
        nq: number of qubits of the structures under test
    Keyworded arguments:
        verbose: whether to print progress information
        QItem: high level structure type to compare against doki
    Raises:
        AssertionError: if the doki and QItem states differ
    """
    if verbose:
        print("  Controlled gate tests:")
    size = 2**nq
    for id in range(nq):  # NOTE: loop variable shadows the id() builtin
        # One role per non-target qubit: 1 control, -1 anticontrol, 0 none
        isControl = [rnd.randint(-1, 1) for i in range(nq - 1)]
        # The `i if i < id else i + 1` shift skips the target qubit id
        controls = {i if i < id else i + 1
                    for i in range(nq - 1) if isControl[i] == 1}
        anticontrols = {i if i < id else i + 1
                        for i in range(nq - 1) if isControl[i] == -1}
        invert = bool(rnd.randint(0, 1))
        rands = np.random.rand(3) * 2 * np.pi - np.pi
        gate = "U(" + ",".join([str(angle)
                                for angle in rands]) + ")"
        if invert:
            gate += "-1"
        sgate = qj.SimpleGate(gate)
        if verbose:
            print(f"   Testing gate {gate} to qubit {id} and {isControl} ...")
            # print("    Gate: " + str(numpygate))
        b = QItem(nq, verbose=False)
        a = doki.registry_new(nq, False)
        a2 = doki.registry_apply(a, sgate.gate, [id], controls, anticontrols,
                                 -1, False)
        del a
        if verbose:
            print("    Doki done")
        b2 = b.apply_gate(gate, targets=id,
                          controls=controls, anticontrols=anticontrols)
        del b
        if verbose:
            print("    QSimov done")
        a2_state = np.array([doki.registry_get(a2, i, False, False)
                             for i in range(size)])
        b2_state = b2.get_state()
        if not np.allclose(a2_state, b2_state):
            if verbose:
                print("Expected:", a2_state)
                print("Received:", b2_state)
                print(a2_state == b2_state)
                print("Target:", id)
                print("Cs:", controls)
                print("ACs:", anticontrols)
                print("          Michael Bay visited your simulator...")
            del a2
            del b2
            raise AssertionError("Error comparing states after applying gate")
        if verbose:
            print("    Noice")
        del a2
        del b2
def measure_registry_tests(nq, verbose=False):
    """Test measurement with QRegistry.

    For each qubit id: flips it with an X gate, measures it (expecting
    1), measures the remaining qubits (expecting all 0), and checks the
    sizes of the collapsed registries along the way.

    Positional arguments:
        nq: number of qubits of the registry under test
    Keyworded arguments:
        verbose: whether to print progress information
    Raises:
        AssertionError: if any measurement result or registry size is wrong
    """
    if verbose:
        print("  Measure QRegistry tests:")
    for id in range(nq):  # NOTE: loop variable shadows the id() builtin
        reg = qj.QRegistry(nq)
        reg2 = reg.apply_gate("X", targets=id)
        del reg
        reg = reg2
        aux1, mes = reg.measure({id})
        if nq > 1:
            # Measure every remaining qubit; all should read 0
            aux2, mes2 = aux1.measure({i for i in range(nq) if i != id})
            if aux2 is None:
                raise AssertionError("registry is never None after measure")
        else:
            aux2 = None
            mes2 = None
        del reg
        if (not mes[id]
                or (mes2 is not None and any(mes2))
                or aux1 is None
                or aux1.num_qubits != nq-1
                or aux1.num_bits != 1
                or (aux2 is not None
                    and (aux2.num_qubits != 0 or aux2.num_bits != nq))):
            if verbose:
                print("M1:", mes)
                print("M2:", mes2)
                print("Check1:", not mes[id])
                # BUG FIX: guard mes2 being None (nq == 1 case); the old
                # code called any(None) here and raised TypeError instead
                # of printing the diagnostic.
                print("Check2:", mes2 is not None and any(mes2))
                print("Check3:", aux1.num_qubits != nq-1)
                print("Check4:", aux2 is not None)
                print("          <NAME> visited your simulator...")
            del aux1
            del aux2
            raise AssertionError("Error measuring states")
        del aux1
        del aux2
    if verbose:
        print("    Noice")
def compare_state(r, state, rdm0, rdm1, srt0=1, srt1=1, verbose=False):
    """Compare states, reduced density matrices and reduced traces.

    Positional arguments:
        r: QRegistry
        state: numpy array with the expected state of r
        rdm0: numpy array with the expected reduced density matrix after
            tracing out qubit 0.
        rdm1: numpy array with the expected reduced density matrix after
            tracing out qubit 1.
    Keyworded arguments:
        srt0: expected value for squared reduced trace after tracing out 0
        srt1: expected value for squared reduced trace after tracing out 1
        verbose: if messages with extra information should be printed
    Returns:
        True if every quantity matches the expectation, False otherwise.
    """
    if not np.allclose(r.get_state(), state):
        if verbose:
            print(r.get_state())
            print(state)
            print(r.get_state() == state)
            print("          <NAME> visited your simulator...")
        return False
    # Expected density matrix |state><state|. GENERALIZED: np.outer works
    # for a state of any length (the old code hard-coded reshape((4, 1)),
    # i.e. exactly two qubits) and conjugates the bra, which also makes
    # the expectation correct for complex amplitudes. Identical result
    # for the real-valued states used in this file.
    dm = np.outer(state, np.conj(state))
    qdm = r.density_matrix()
    if not np.allclose(qdm[:], dm):
        if verbose:
            print(r.density_matrix()[:])
            print(dm)
            print(r.density_matrix()[:] == dm)
            print("          <NAME> visited your simulator...")
        return False
    qrdm0 = qdm.partial_trace(0)
    if not np.allclose(qrdm0[:], rdm0):
        if verbose:
            print("RDM0")
            print(qrdm0[:])
            print(rdm0)
            print(qrdm0[:] == rdm0)
            print("          <NAME> visited your simulator...")
        return False
    qrdm1 = qdm.partial_trace(1)
    if not np.allclose(qrdm1[:], rdm1):
        if verbose:
            print("RDM1")
            print(qrdm1[:])
            print(rdm1)
            print(qrdm1[:] == rdm1)
            print("          <NAME> visited your simulator...")
        return False
    # Squared reduced traces: tr(rho^2) == 1 iff the reduced state is pure
    qsrt0 = (qrdm0 @ qrdm0).trace()
    if not np.allclose(qsrt0, srt0):
        if verbose:
            print("SRT0")
            print(qrdm0[:])
            print(qsrt0)
            print(srt0)
            print(qsrt0 == srt0)
            print("          <NAME> visited your simulator...")
        return False
    qsrt1 = (qrdm1 @ qrdm1).trace()
    if not np.allclose(qsrt1, srt1):
        if verbose:
            print("SRT1")
            print(qsrt1)
            print(srt1)
            print(qsrt1 == srt1)
            print("          <NAME> visited your simulator...")
        return False
    return True
def tool_test(verbose=False):
    """Test QRegistry states, density matrix, reduced dm and reduced trace.

    Walks a 2-qubit registry through |00> -> H on qubit 0 -> CNOT,
    checking the state, density matrix, reduced density matrices and
    squared reduced traces at every step via compare_state.

    Keyworded arguments:
        verbose: whether to print progress information
    Raises:
        AssertionError: if any step produces unexpected values
    """
    if verbose:
        print("  Tools for QRegistry:")
    reg = qj.QRegistry(2)
    # Step 1: fresh registry must be |00>, both reduced states pure
    state = np.array([1, 0, 0, 0])
    rdm0 = np.array([1, 0, 0, 0]).reshape((2, 2))
    rdm1 = rdm0[:]  # NOTE: a view of rdm0, fine since it is never mutated
    if not compare_state(reg, state, rdm0, rdm1, verbose=verbose):
        del reg
        raise AssertionError("Error on first step checking tools")
    del state
    del rdm0
    del rdm1
    # Step 2: Hadamard on qubit 0 -> (|00> + |01>)/sqrt(2)
    reg2 = reg.apply_gate("H", targets=0)
    del reg
    reg = reg2
    state = np.array([1/np.sqrt(2), 1/np.sqrt(2), 0, 0])
    rdm0 = np.array([1, 0, 0, 0]).reshape((2, 2))
    rdm1 = np.array([0.5, 0.5, 0.5, 0.5]).reshape((2, 2))
    if not compare_state(reg, state, rdm0, rdm1, verbose=verbose):
        del reg
        raise AssertionError("Error on second step checking tools")
    del state
    del rdm0
    del rdm1
    # Step 3: CNOT -> Bell state; reduced states are maximally mixed,
    # so the squared reduced traces drop to 0.5
    reg2 = reg.apply_gate("X", targets=1, controls=0)
    del reg
    reg = reg2
    state = np.array([1/np.sqrt(2), 0, 0, 1/np.sqrt(2)])
    rdm0 = np.eye(2) * 0.5
    rdm1 = rdm0[:]
    if not compare_state(reg, state, rdm0, rdm1, srt0=0.5, srt1=0.5,
                         verbose=verbose):
        del reg
        raise AssertionError("Error on third step checking tools")
    if verbose:
        print("    Noice")
    del reg
def measure_system_tests(nq, entangle=False, remove=False, verbose=False):
    """Test measurement with QSystem.

    Optionally entangles neighbouring qubit pairs with CNOTs, flips one
    qubit, then measures and checks both the measured values and which
    qubits remain usable afterwards.

    Positional arguments:
        nq: number of qubits of the system under test
    Keyworded arguments:
        entangle: whether to apply CNOTs to pairs of qubits first
        remove: unused; kept for interface compatibility
        verbose: whether to print progress information
    Raises:
        AssertionError: if any measurement or usability check fails
    """
    if verbose:
        print(f"  Measure QSystem tests with entangle={entangle}:")
    for id in range(nq):  # NOTE: loop variable shadows the id() builtin
        reg = qj.QSystem(nq)
        if entangle:
            # BUG FIX: the registry returned by apply_gate must be kept
            # after EVERY gate. The old code reassigned reg2 inside the
            # loop but only swapped reg once afterwards, so all CNOTs
            # except the last were discarded.
            for control in range(1, nq, 2):
                reg2 = reg.apply_gate("X", targets=control-1,
                                      controls=control)
                del reg
                reg = reg2
            if nq % 2 == 1:
                reg2 = reg.apply_gate("X", targets=nq-2, controls=nq-1)
                del reg
                reg = reg2
        reg2 = reg.apply_gate("X", targets=id)
        del reg
        reg = reg2
        aux1, mes = reg.measure({id})
        # BUG FIX: define aux2/mes2 even when nq == 1 so the verbose
        # error report below cannot raise NameError.
        aux2 = None
        mes2 = None
        if nq > 1:
            aux2, mes2 = aux1.measure({i for i in range(nq) if i != id})
        del reg
        if (not mes[id]
                or (nq > 1 and any(mes2[i] for i in range(nq) if i != id))
                or aux1.usable[id]
                or not all(aux1.usable[i] for i in range(nq) if i != id)
                or (nq > 1 and any(aux2.usable))):
            if verbose:
                print("M1:", mes)
                print("M2:", mes2)
                print("Check1:", not mes[id])
                print("Check2:", (nq > 1 and any(mes2[i]
                                                 for i in range(nq)
                                                 if i != id)))
                print("Check3:", aux1.usable[id])
                print("Check4:", not all(aux1.usable[i]
                                         for i in range(nq) if i != id))
                print("Check5:", (nq > 1 and any(aux2.usable)))
                print("          <NAME> visited your simulator...")
            aux1.free()
            if nq > 1:
                aux2.free()
            raise AssertionError("Error measuring states")
        aux1.free()
        if nq > 1:
            aux2.free()
    if verbose:
        print("    Noice")
def add_operation_tests(qdesign, verbose=False):
    """Test add_line method of the given qstruct object.

    Adds a single controlled X operation to a 5-qubit design and checks
    that exactly one operation was recorded with the expected gate,
    targets, controls and anticontrols.
    """
    if verbose:
        print("  add_line tests with " + qdesign.__name__ + ":")
    design = qdesign(5, "Test")
    controls = {1, 3}
    anticontrols = {2, 4}
    design.add_operation("X", targets=0, controls=controls,
                         anticontrols=anticontrols)
    operations = design.get_operations()
    if len(operations) != 1:
        raise AssertionError(f"Wrong operations list size: {len(operations)}")
    op_data = operations[0]
    matches = (op_data["gate"]._str == "X"
               and op_data["targets"] == [0]
               and op_data["controls"] == controls
               and op_data["anticontrols"] == anticontrols)
    if not matches:
        if verbose:
            print(op_data)
            print("          Michael Bay visited your simulator...")
        raise AssertionError("Wrong operation added")
    if verbose:
        print("    Noice")
def _deutsch_aux(executor, nq, gate):
    """Run Deutsch-Jozsa via the circuit and by hand; return both results.

    Executes DJAlgCircuit through the executor, then replays the same
    algorithm manually on a QSystem, so the caller can compare the two
    measurement outcomes.

    Positional arguments:
        executor: Drewom executor used to run the prebuilt circuit
        nq: total number of qubits (x1..xn plus the ancilla y)
        gate: oracle gate applied to all qubits
    Returns:
        (mes, mes2): circuit measurement and manual measurement
    """
    circuit = DJAlgCircuit(nq, gate)
    mess = executor.execute(circuit)
    mes = mess[0][0]
    reg2 = qj.QSystem(nq)  # Qubits (x1, ..., xn, y) initialized to zero
    aux = reg2.apply_gate("X", targets=nq-1)  # Qubit y set to one
    del reg2
    reg2 = aux
    # Apply Hadamard to all qubits
    for i in range(nq):
        aux = reg2.apply_gate("H", targets=i)
        del reg2
        reg2 = aux
    # Applied U (oracle)
    aux = reg2.apply_gate(gate, targets=[i for i in range(nq)])
    del reg2
    reg2 = aux
    # Applied Hadamard to (x1, ..., xn), nothing applied to y qubit
    for i in range(nq - 1):
        aux = reg2.apply_gate("H", targets=i)
        del reg2
        reg2 = aux
    # We measure (x1, ..., xn) qubits
    aux, mes2 = reg2.measure({i for i in range(nq - 1)})
    del reg2
    del aux
    # If any qubit (x1, ..., xn) is 1, balanced. Otherwise constant.
    return mes, mes2
def deutschTests(nq, verbose=False, useSystem=False, optimize=False):
    """Test Deutsch-Jozsa algorithm for the specified number of qubits.

    Runs the algorithm with every balanced oracle Bal(nq, id) (expecting
    a nonzero measurement) and with both constant oracles (expecting an
    all-zero measurement), comparing the circuit result against the
    manual replay in every case.

    Keyworded arguments:
        verbose: whether to print progress information
        useSystem: whether the executor should use QSystem over QRegistry
        optimize: unused; kept for interface compatibility
    Raises:
        AssertionError: if circuit and manual results disagree or the
            constant/balanced classification is wrong
    """
    if verbose:
        print("  Deutsch circuit (" + (qj.QSystem.__name__ if useSystem
                                       else qj.QRegistry.__name__) + "):")
    executor = qj.Drewom(qmachine="doki",
                         extra={"num_threads": -1,
                                "random_generator": np.random.rand,
                                "use_system": useSystem})
    # Balanced oracles: some measured qubit must be 1
    for id in range(nq - 1):
        gate = Bal(nq, id)
        mes, mes2 = _deutsch_aux(executor, nq, gate)
        if not mes == mes2 or not any(mes):
            if verbose:
                print(mes)
                print(mes2)
                print(mes == mes2)
                print("          <NAME> visited your simulator...")
            raise AssertionError("Error checking DJ results")
    # Constant oracles (identity and "twice"): all measured qubits 0
    for id in range(2):
        gate = Const(nq, twice=(id == 1))
        mes, mes2 = _deutsch_aux(executor, nq, gate)
        if not mes == mes2 or any(mes):
            if verbose:
                print(mes)
                print(mes2)
                print(mes == mes2)
                print("          <NAME> visited your simulator...")
            raise AssertionError("Error checking DJ results")
    if verbose:
        print("    Noice")
def teleportation_tests(verbose=False, useSystem=False, optimize=False):
    """Execute teleportation algorithm related tests.

    Prepares a random single-qubit state with a random U gate, runs the
    teleportation circuit, and checks that the teleported qubit's state
    matches the state obtained by applying the gate directly.

    Keyworded arguments:
        verbose: whether to print progress information
        useSystem: whether the executor should use QSystem over QRegistry
        optimize: unused; kept for interface compatibility
    Raises:
        AssertionError: if the teleported state differs from the expected
    """
    rands = np.random.rand(3) * 2 * np.pi - np.pi
    gate = "U(" + ",".join([str(angle)
                            for angle in rands]) + ")"
    # NOTE(review): initialValue is printed but never used below --
    # confirm whether TeleportationCircuit should receive it.
    initialValue = rnd.randrange(2)
    if verbose:
        print("  Teleportation circuit (" + (qj.QSystem.__name__ if useSystem
                                             else qj.QRegistry.__name__) + "):")
        print("   Gate: " + gate)
        print("   Initial value: " + str(initialValue))
    executor = qj.Drewom(qmachine="doki",
                         extra={"num_threads": -1,
                                "random_generator": np.random.rand,
                                "use_system": useSystem,
                                "return_struct": True})
    circuit = TeleportationCircuit(gate)
    mess = executor.execute(circuit)
    reg, mes = mess[0]
    # Reference: apply the same gate directly to a fresh 1-qubit registry
    reg2 = qj.QRegistry(1)
    aux = reg2.apply_gate(gate)
    del reg2
    reg2 = aux
    if not np.allclose(reg.get_state(), reg2.get_state()):
        if verbose:
            print("Ops:", circuit.get_operations())
            print(reg.get_state())
            print(reg2.get_state())
            print(reg.get_state() == reg2.get_state())
            print(mes)
            print("          <NAME> visited your simulator...")
        del reg
        del reg2
        raise AssertionError("Error checking teleportation result!")
    else:
        if verbose:
            print("    Noice")
        del reg
        del reg2
def all_gate_tests(seed=None, verbose=False):
    """Execute all gate tests.

    Seeds every random generator when a seed is given, then runs
    gate_tests for each supported gate and returns the 15 results in a
    fixed order.
    """
    if seed is not None:
        qj.set_seed(seed)
        rnd.seed(seed)
        np.random.seed(seed)
    # (gate name, has inverse variant, number of angle arguments),
    # in the same order as the returned result list.
    specs = [("H", False, 0),
             ("X", False, 0),
             ("Y", False, 0),
             ("Z", False, 0),
             ("SqrtX", True, 0),
             ("RX", True, 1),
             ("RY", True, 1),
             ("RZ", True, 1),
             ("R", True, 1),          # Phase shift gate
             ("RUnity", True, 1),     # Roots of unity gate
             ("HalfDeutsch", True, 1),
             ("U", True, 3),
             ("U3", True, 3),
             ("U2", True, 2),
             ("U1", True, 1)]
    return [gate_tests(name, verbose=verbose, hasInv=has_inv, nArgs=n_args)
            for name, has_inv, n_args in specs]
def data_structure_tests(minqubits, maxqubits, seed=None, verbose=False,
                         QItem=qj.QRegistry):
    """Execute all data structure tests.

    Runs the gate and measurement test suites for every qubit count in
    [minqubits, maxqubits], against the given structure type.

    Positional arguments:
        minqubits: smallest number of qubits to test
        maxqubits: largest number of qubits to test (inclusive)
    Keyworded arguments:
        seed: optional seed for random and numpy generators
        verbose: whether to print progress information
        QItem: structure type under test (qj.QRegistry or qj.QSystem)
    """
    if not (seed is None):
        rnd.seed(seed)
        np.random.seed(seed)
    for nq in range(minqubits, maxqubits + 1):
        if verbose:
            print("Testing with " + str(nq) + " qubit " + QItem.__name__)
        one_gate_tests(nq, verbose=verbose, QItem=QItem)
        if nq >= 2:  # two-qubit gates need at least two qubits
            two_gate_tests(nq, verbose=verbose, QItem=QItem)
        controlled_gate_tests(nq, verbose=verbose, QItem=QItem)
        # Measurement semantics differ between QRegistry and QSystem
        if QItem == qj.QRegistry:
            measure_registry_tests(nq, verbose=verbose)
        else:
            measure_system_tests(nq, entangle=False, verbose=verbose)
            if nq >= 2:
                measure_system_tests(nq, entangle=True, verbose=verbose)
    if QItem == qj.QRegistry:
        # get_state, density_matrix,
        # reduced_density_matrix and reduced_trace tests
        tool_test(verbose=verbose)
def high_level_tests(minqubits, maxqubits, seed=None, verbose=False):
    """Test high level structures: QGate and QCircuit.

    Covers gate inversion, entanglement through QGate, the
    Deutsch-Jozsa and teleportation algorithms, and control handling of
    add_operation for both QGate and QCircuit.

    Positional arguments:
        minqubits: smallest number of qubits (unused by some subtests)
        maxqubits: largest number of qubits for the circuit tests
    Keyworded arguments:
        seed: optional seed for random and numpy generators
        verbose: whether to print progress information
    """
    if not (seed is None):
        rnd.seed(seed)
        np.random.seed(seed)
    if verbose:
        print("Testing QGate inversion and application")
    # Dagger/Inversion QGate tests
    inversion_tests(verbose=verbose)
    # Entanglement QGate with QRegistry tests
    entangle_tests(verbose=verbose, useSystem=False)
    # Entanglement QGate with QSystem tests
    entangle_tests(verbose=verbose, useSystem=True)
    if maxqubits > 1:  # Deutsch-Jozsa needs at least 2 qubits
        for nq in range(2, maxqubits + 1):
            if verbose:
                print("Testing Deutsch with " + str(nq) + " qubit circuits")
            # Deutsch-Josza algorithm with QRegistry tests
            deutschTests(nq, verbose=verbose, useSystem=False)
            # Deutsch-Josza algorithm with QSystem tests
            deutschTests(nq, verbose=verbose, useSystem=True)
    # Teleportation algorithm with QRegistry tests
    teleportation_tests(verbose=verbose, useSystem=False)
    # Teleportation algorithm with QSystem tests
    teleportation_tests(verbose=verbose, useSystem=True)
    # Control and anticontrol check for QGate
    add_operation_tests(qj.QGate, verbose=verbose)
    # Control and anticontrol check for QCircuit
    add_operation_tests(qj.QCircuit, verbose=verbose)
def main():
    """Execute all tests.

    Command line: <minqubits> <maxqubits> [seed] [verbose]. Validates the
    arguments, seeds the generators, and runs the data structure and
    high level test suites.
    """
    argv = sys.argv[1:]
    if 2 <= len(argv) <= 4:
        minqubits = int(argv[0])
        if minqubits < 1:
            print("minimum number of qubits must be at least 1")
            return  # BUG FIX: old code kept running with invalid input
        maxqubits = int(argv[1])
        if maxqubits < minqubits:
            print("minimum number of qubits cannot be greater than maximum")
            return  # BUG FIX: old code kept running with invalid input
        verbose = False
        seed = None
        if len(argv) >= 3:
            seed = int(argv[2])
        else:
            seed = rnd.randrange(2**32 - 1)
        if len(argv) == 4:
            # BUG FIX: bool(argv[3]) is True for ANY non-empty string,
            # including "False" and "0"; parse common falsy spellings.
            verbose = argv[3].lower() not in ("", "0", "false", "no")
        print("Seed:", seed)
        # print("\tTesting Gates...")
        # all_gate_tests(seed=seed, verbose=verbose)
        print("\tTesting QRegistry...")
        data_structure_tests(minqubits, maxqubits, seed=seed, verbose=verbose,
                             QItem=qj.QRegistry)
        print("\tTesting QSystem...")
        data_structure_tests(minqubits, maxqubits, seed=seed, verbose=verbose,
                             QItem=qj.QSystem)
        print("\tTesting QGate and QCircuit...")
        high_level_tests(minqubits, maxqubits, seed=seed, verbose=verbose)
        print("PEACE AND TRANQUILITY")
    else:
        # BUG FIX: the last option was missing its closing '>'
        print("Syntax: " + sys.argv[0] + " <minimum number of qubits (min 1)>",
              "<maximum number of qubits> <seed (optional)> " +
              "<verbose (optional)>")


if __name__ == "__main__":
    main()
| StarcoderdataPython |
126155 | <reponame>rexor12/gacha
from .dict_utils import get_or_add
from .float_utils import isclose | StarcoderdataPython |
3287542 | """
Copyright (c) 2022 Huawei Technologies Co.,Ltd.
openGauss is licensed under Mulan PSL v2.
You can use this software according to the terms and conditions of the Mulan PSL v2.
You may obtain a copy of Mulan PSL v2 at:
http://license.coscl.org.cn/MulanPSL2
THIS SOFTWARE IS PROVIDED ON AN "AS IS" BASIS, WITHOUT WARRANTIES OF ANY KIND,
EITHER EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO NON-INFRINGEMENT,
MERCHANTABILITY OR FIT FOR A PARTICULAR PURPOSE.
See the Mulan PSL v2 for more details.
"""
"""
Case Type : 数据库系统
Case Name : 设置recovery_min_apply_delay为非法值
Description :
1.设置recovery_min_apply_delay参数为-1
2.设置recovery_min_apply_delay参数为4294967295
3.设置recovery_min_apply_delay参数为小数
4.设置recovery_min_apply_delay参数为字母符号
Expect :
1.设置失败
2.设置失败
3.设置失败
4.设置失败
History :
"""
import unittest
from yat.test import Node
from testcase.utils.Logger import Logger
from testcase.utils.Constant import Constant
from testcase.utils.CommonSH import CommonSH
class RecoveryDelay(unittest.TestCase):
    """Check that illegal values for recovery_min_apply_delay are rejected.

    Tries to set the GUC parameter to a negative number, an overflowing
    number, a decimal and an alphabetic string; each attempt must fail.
    """
    # Connection to the primary node defined in the YAT environment.
    db_primary_user_node = Node(node='PrimaryDbUser')
    # Shell helper used to run gs_guc / SQL against the primary node.
    commshpri = CommonSH('PrimaryDbUser')
    def setUp(self):
        # Prepare the logger and shared constants before each test.
        self.log = Logger()
        self.log.info("-----------this is setup-----------")
        self.log.info("---Opengauss_Function_Recovery_Delay_Case0007 start---")
        self.constant = Constant()
    def test_recovery_delay(self):
        # Case 1: negative value via gs_guc set -- must be rejected.
        self.log.info('-----设置recovery_min_apply_delay参数为-1----')
        result = self.commshpri.execute_gsguc(
            'set', self.constant.GSGUC_SUCCESS_MSG,
            'recovery_min_apply_delay=-1')
        self.assertFalse(result)
        # Case 2: value above the INT_MAX limit via gs_guc reload.
        self.log.info('-----设置recovery_min_apply_delay参数为2147483648----')
        result = self.commshpri.execute_gsguc(
            'reload', self.constant.GSGUC_SUCCESS_MSG,
            'recovery_min_apply_delay=2147483648')
        self.assertFalse(result)
        # Case 3: decimal value via ALTER SYSTEM SET -- must not report ALTER.
        self.log.info('-----设置recovery_min_apply_delay参数为小数----')
        sql = f"alter SYSTEM set recovery_min_apply_delay to '1.5' "
        result = self.commshpri.execut_db_sql(sql)
        self.log.info(result)
        self.assertNotIn('ALTER', result)
        # Case 4: alphabetic value via gs_guc reload -- must be rejected.
        self.log.info('-----设置recovery_min_apply_delay参数为字母符号----')
        result = self.commshpri.execute_gsguc(
            'reload', self.constant.GSGUC_SUCCESS_MSG,
            "recovery_min_apply_delay='qw_fr'")
        self.assertFalse(result)
    def tearDown(self):
        # No cluster state to restore: every attempted change fails.
        self.log.info('------------this is tearDown-------------')
        self.log.info("---Opengauss_Function_Recovery_Delay_Case0007 end--")
3252890 | # Copyright 2019-present Kensho Technologies, LLC.
"""Int value conversion.
This module is a utility for reasoning about intervals when computing filter selectivities,
and generating parameters for pagination. Since integers are the easiest type to deal with
in this context, when we encounter a different type we represent it as an int, do all the
computation in the integer domain, and transfer the computation back into the original domain.
In order to be able to reason about value intervals and successor/predecessor values, we
make sure these mappings to integers are increasing bijective functions.
This kind of mapping is easy to do for int, uuid and datetime types, but not possible for other
types, like string. When the need for other types arises, the precise interface for range
reasoning can be defined and implemented separately for each type.
"""
import datetime
from typing import Any
from uuid import UUID
from ..schema import is_meta_field
from ..schema.schema_info import QueryPlanningSchemaInfo, UUIDOrdering
from .helpers import (
get_uuid_ordering,
is_date_field_type,
is_datetime_field_type,
is_int_field_type,
is_uuid4_type,
)
# UUIDs are defined in RFC-4122 as a 128-bit identifier. This means that the minimum UUID value
# (represented as a natural number) is 0, and the maximal value is 2^128-1.
MIN_UUID_INT = 0
MAX_UUID_INT = 2 ** 128 - 1
DATETIME_EPOCH_TZ_NAIVE = datetime.datetime(1970, 1, 1)
def swap_uuid_prefix_and_suffix(uuid_string: str) -> str:
    """Swap the first 12 and last 12 hex digits of a uuid string.

    Different databases implement uuid comparison differently (see
    UUIDOrdering). This helper implements the LastSixBytesFirst ordering
    in terms of the LeftToRight ordering: applying it twice returns the
    original string.

    Args:
        uuid_string: uuid string in the standard 8-4-4-4-12 format

    Returns:
        the input with the first and last 12 hex digits swapped
    """
    pieces = uuid_string.split("-")
    lengths = tuple(map(len, pieces))
    if lengths != (8, 4, 4, 4, 12):
        raise AssertionError(f"Unexpected segment lengths {lengths} in {uuid_string}")
    prefix_a, prefix_b, mid_a, mid_b, suffix = pieces
    # The 12-digit suffix becomes the 8+4 prefix, and the 8+4 prefix
    # becomes the 12-digit suffix; the middle segments stay in place.
    return "-".join([suffix[:8], suffix[8:], mid_a, mid_b, prefix_a + prefix_b])
def field_supports_range_reasoning(
    schema_info: QueryPlanningSchemaInfo, vertex_class: str, property_field: str
) -> bool:
    """Return whether range reasoning is supported. See module docstring for definition."""
    if is_meta_field(property_field):
        return False
    # Range reasoning only works for types with an increasing bijection
    # to the integers: uuid4, int, datetime and date.
    supported_type_checks = (
        is_uuid4_type,
        is_int_field_type,
        is_datetime_field_type,
        is_date_field_type,
    )
    return any(
        type_check(schema_info, vertex_class, property_field)
        for type_check in supported_type_checks
    )
def convert_int_to_field_value(
    schema_info: QueryPlanningSchemaInfo, vertex_class: str, property_field: str, int_value: int
) -> Any:
    """Return the given integer's corresponding property field value.

    See module docstring for details. The int_value is expected to be in the range of
    convert_field_value_to_int.

    Args:
        schema_info: QueryPlanningSchemaInfo
        vertex_class: str, name of vertex class to which the property field belongs.
        property_field: str, name of property field that the value refers to.
        int_value: int, integer value which will be represented as a property field value.

    Returns:
        Any, the given integer's corresponding property field value.

    Raises:
        ValueError, if the given int_value is outside the range of valid values for the given
        property field.
    """
    # The branches below are the inverses of convert_field_value_to_int.
    if is_int_field_type(schema_info, vertex_class, property_field):
        return int_value
    elif is_datetime_field_type(schema_info, vertex_class, property_field):
        # Integers represent datetimes as tz-naive microseconds since the Unix epoch.
        return DATETIME_EPOCH_TZ_NAIVE + datetime.timedelta(microseconds=int_value)
    elif is_date_field_type(schema_info, vertex_class, property_field):
        # Integers represent dates as proleptic Gregorian ordinals (day 1 = 0001-01-01).
        return datetime.date.fromordinal(int_value)
    elif is_uuid4_type(schema_info, vertex_class, property_field):
        if not MIN_UUID_INT <= int_value <= MAX_UUID_INT:
            raise AssertionError(
                "Integer value {} could not be converted to UUID, as it "
                "is not in the range of valid UUIDs {} - {}: {} {}".format(
                    int_value, MIN_UUID_INT, MAX_UUID_INT, vertex_class, property_field
                )
            )
        uuid_string = str(UUID(int=int(int_value)))
        # The database's uuid comparison order decides whether the hex
        # digits are used as-is or with prefix/suffix swapped.
        ordering = get_uuid_ordering(schema_info, vertex_class, property_field)
        if ordering == UUIDOrdering.LeftToRight:
            return uuid_string
        elif ordering == UUIDOrdering.LastSixBytesFirst:
            return swap_uuid_prefix_and_suffix(uuid_string)
        else:
            raise AssertionError(
                f"Unexpected ordering for {vertex_class}.{property_field}: {ordering}"
            )
    elif field_supports_range_reasoning(schema_info, vertex_class, property_field):
        # Sanity check: every type claiming range-reasoning support must
        # have a branch above; reaching here means the two disagree.
        raise AssertionError(
            "Could not represent int {} as {} {}, but should be able to.".format(
                int_value, vertex_class, property_field
            )
        )
    else:
        raise NotImplementedError(
            "Could not represent int {} as {} {}.".format(int_value, vertex_class, property_field)
        )
def convert_field_value_to_int(
    schema_info: QueryPlanningSchemaInfo, vertex_class: str, property_field: str, value: Any
) -> int:
    """Return the integer representation of a property field value.

    Inverse of convert_int_to_field_value; the mapping is an increasing
    bijection for each supported type (see module docstring).
    """
    if is_int_field_type(schema_info, vertex_class, property_field):
        return value
    elif is_datetime_field_type(schema_info, vertex_class, property_field):
        # Microseconds since the Unix epoch. NOTE(review): tzinfo is simply
        # dropped, so aware values are treated as if their wall time were
        # UTC -- confirm callers normalize to UTC first.
        return (value.replace(tzinfo=None) - DATETIME_EPOCH_TZ_NAIVE) // datetime.timedelta(
            microseconds=1
        )
    elif is_date_field_type(schema_info, vertex_class, property_field):
        # Proleptic Gregorian ordinal (day 1 = 0001-01-01).
        return value.toordinal()
    elif is_uuid4_type(schema_info, vertex_class, property_field):
        ordering = get_uuid_ordering(schema_info, vertex_class, property_field)
        if ordering == UUIDOrdering.LeftToRight:
            return UUID(value).int
        elif ordering == UUIDOrdering.LastSixBytesFirst:
            # Undo the prefix/suffix swap before reading the 128-bit int.
            return UUID(swap_uuid_prefix_and_suffix(value)).int
        else:
            raise AssertionError(
                f"Unexpected ordering for {vertex_class}.{property_field}: {ordering}"
            )
    elif field_supports_range_reasoning(schema_info, vertex_class, property_field):
        # Sanity check mirroring convert_int_to_field_value.
        raise AssertionError(
            "Could not represent {} {} value {} as int, but should be able to".format(
                vertex_class, property_field, value
            )
        )
    else:
        raise NotImplementedError(
            "Could not represent {} {} value {} as int.".format(vertex_class, property_field, value)
        )
| StarcoderdataPython |
1664077 | #!/usr/bin/python3
import os
import sys
import argparse
import gensim.models
import pickle
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy
import numpy as np
import os
import sys
import argparse
import gensim.models
import pickle
from nltk.tokenize import sent_tokenize, word_tokenize
import numpy
import numpy as np
from scipy.spatial import procrustes
from scipy.spatial.distance import cosine
# parse command line args
# NOTE(review): the --topn help text duplicates the smoothing description,
# and `type = bool` on --csv makes ANY non-empty value (even "False")
# parse as True -- known argparse pitfall; confirm intended usage.
parser = argparse.ArgumentParser(description = "Processes semantic drift over time.")
parser.add_argument("--input", "-i", default = "./preprocessed/", help = "the directory containing the text files", metavar = "input_dir")
parser.add_argument("--output", "-o", default = "./output/", help = "the directory into which to place the embedding and result files", metavar = "output_dir")
parser.add_argument("--smoothing", "-s", type = int, default = 1, help = "the amount of smoothing, in years")
parser.add_argument("--topn", "-t", type = int, default = 10, help = "the amount of smoothing, in years")
parser.add_argument("--csv", "-c", type = bool, default = False, help = "output .csv files with detailed information on each word")
parser.add_argument("--dimensionality", "-d", type = int, default = 50, help = "dimensionality to use for embeddings")
parser.add_argument("start_year", type = int, help = "the year from which to start calculating drift")
parser.add_argument("end_year", type = int, help = "the year until which to calculate drift")
ns = parser.parse_args()
start_year = ns.start_year
end_year = ns.end_year
window_len = ns.smoothing
input_dir = ns.input
output_dir = ns.output
dimensionality = ns.dimensionality
csv = ns.csv
top_n = ns.topn
# map each time window to a sentence list and an embedding model
sentence_sets = {}
models = {}
if end_year < start_year :
    print("Fatal: End year must be after start year", file = sys.stderr)
    sys.exit(2)
# make models
# For each year, read <year>.txt, tokenize it into sentences, spread the
# sentences over the next `window_len` year buckets (smoothing), then train
# and save a word2vec model on the current year's accumulated bucket.
print("Making models...", end = "\r")
year_range = end_year + 1 - start_year
i = 1
for year in range(start_year, end_year + 1) :
    try :
        input = open(input_dir + str(year) + ".txt")
        # normalize, split by sentences
        text = input.read()
        text = text.lower()
        sentences = sent_tokenize(text)
        sentences = [word_tokenize(sent) for sent in sentences]
        # add these sentences to every set in the time window
        for y in range(year, year + window_len) :
            if y not in sentence_sets :
                sentence_sets[y] = []
            sentence_sets[y] += sentences
    except :
        # NOTE(review): bare except also hides tokenizer errors, not just
        # missing files -- consider narrowing to FileNotFoundError.
        print("Could not find data for %d (%d.txt); skipping" % (year, year))
    # make embedding model regardless of whether data for this year was found (use windows)
    # however, there must be something in the set or else this won't work; fail if empty
    if len(sentence_sets[year]) == 0 :
        print("Fatal: No data in window for %d" % (year), file = sys.stderr)
        sys.exit(1)
    else :
        model = gensim.models.Word2Vec(sentence_sets[year], size = dimensionality, window = 5, min_count = 5, workers = 4)
        model.save("%s%d+%dx%d.word2vec" % (output_dir, year, window_len, dimensionality))
        models[year] = model.wv
    # clear sentence set from memory
    del(sentence_sets[year])
    print("Making models (%d/%d)" % (i, year_range), end = "\r")
    i += 1
print()
del(sentence_sets)
# # intermittent load due to errors
# print("Loading models...", end = "\r")
# for year in range(start_year, end_year + 1) :
#     try :
#         model = gensim.models.Word2Vec.load("%s%d+%dx%d.word2vec" % (output_dir, year, window_len, dimensionality))
#         models[year] = model.wv
#         del(model)
#         print("Loading models (%d - %d)" % (start_year, year), end = "\r")
#     except :
#         print("Fatal: No model found for %d (%s%d+%dx%d.word2vec)" % (year, output_dir, year, window_len, dimensionality), file = sys.stderr)
#         sys.exit(4)
# print()
# consider only words that are in all models
# (drift can only be compared for words every yearly model has learned)
print("Finding overlap...", end = "\r")
base = list(models.values())[0].vocab
wordset = set()
i = 1
p = 0  # last progress percentage printed, to avoid redundant updates
for word in base :
    add = True
    for model in models.values() :
        if word not in model :
            add = False
            break
    if add :
        wordset.add(word)
    i += 1
    if (100 * i // len(base)) > p :
        p = 100 * i // len(base)
        print("Finding overlap (%d%%; %d words)" % (p, len(wordset)), end = "\r")
print()
# save overlap set
output = open(output_dir + "overlap-%d-%d+%dx%d" % (start_year, end_year, window_len, dimensionality), "wb")
pickle.dump(wordset, output)
output.close()
# For every pair of consecutive years, align the two embedding spaces with
# a Procrustes transformation and report the disparity / mean cosine
# similarity between the aligned word vectors.
for year in range(start_year, end_year):
    model1 = models[year]
    model2 = models[year + 1]
    # Normalisation : same length
    # model1.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
    # model2.delete_temporary_training_data(replace_word_vectors_with_normalized=True)
    i = 1
    p = 0
    # NOTE(review): dict_metric is re-created on every loop iteration but
    # only populated by the later per-word loop below, so only the last
    # (empty) instance is ever used -- confirm whether the per-word metric
    # was meant to be computed per year pair instead.
    dict_metric = dict()
    mat1 = list()
    mat2 = list()
    for word in wordset:
        # Aligning the learned embedding spaces using Procrustes transformation
        mat1 += [model1[word]]
        mat2 += [model2[word]]
        i += 1
        if (100 * i // len(wordset)) > p:
            p = (100 * i // len(wordset))
            print("Calculating drift (%d%%)" % (p), end="\r")
    print()
    mat1 = np.array(mat1)
    mat2 = np.array(mat2)
    # Using the distance of the words in the aligned space as a metric of shift
    mtx1, mtx2, disparity = procrustes(mat1, mat2)
    disparity /= dimensionality * len(wordset)
    #similarity = np.sum([cosine(mtx1[row], mtx2[row]) for row in range(len(wordset))]) / len(wordset)
    # Cosine similarity using cosine function from our library
    # (scipy's cosine() is a DISTANCE, hence the 1 - ... below)
    similarity = np.sum([1 - cosine(mtx1[row], mtx2[row]) for row in range(len(wordset))]) / len(wordset)
    # Cosine similarity using cos_sim function that I implemented
    # similarity = np.sum([cos_sim(mtx1[row], mtx2[row]) for row in range(len(wordset))]) / len(wordset)
    print("cosine SIMILARITY")
    print(disparity, similarity)
def cos_sim(l1, l2):
    """Takes 2 vectors l1, l2 and returns the cosine similarity according
    to the definition of the dot product
    """
    # dot(a, b) / (|a| * |b|), written as a single expression
    return np.dot(l1, l2) / (np.linalg.norm(l1) * np.linalg.norm(l2))
# Per-word drift metric: for each word, gather the union of its top_n
# neighbours across all years, build a (years x neighbours) similarity
# table, and score the word by the sum of per-neighbour standard
# deviations over time (higher = more drift in its neighbourhood).
i = 1
p = 0
for word in wordset:
    union = set()
    rows = dict()
    for year in range(start_year, end_year + 1) :
        similar = models[year].most_similar(positive = [word], topn = top_n)
        union |= set([e[0] for e in similar])
        rows[year] = dict(similar)
    # Fill in similarities for neighbours missing from a given year
    # (0 when the neighbour is not in that year's vocabulary).
    for year in rows :
        for w in union :
            if w not in rows[year] :
                if w in models[year] :
                    rows[year][w] = models[year].similarity(word, w)
                else :
                    rows[year][w] = 0
    cols = numpy.array([[row[val] for val in sorted(row)] for row in list(rows.values())])
    dict_metric[word] = numpy.sum([numpy.std(row) for row in numpy.rot90(cols)])
    # write exhaustive data to csv
    if csv :
        try :
            with open("%s%s-%s-%s+%sx%dt%d.csv" % (output_dir, word, start_year, end_year, window_len, dimensionality, top_n), "w") as output :
                print(",%s" % (",".join(map(str, range(start_year, end_year + 1)))), file = output)
                for word in union :
                    print(word, file = output, end = ",")
                    print(",".join(map(str, [rows[year][word] for year in range(start_year, end_year + 1)])), file = output)
                print("", file = output)
            output.close()
        except :
            print("Error: could not write file %s%s-%s-%s+%sx%dt%d.csv; skipping" % (output_dir, word, start_year, end_year, window_len, dimensionality, top_n), file = sys.stderr)
    i += 1
    if (100 * i // len(wordset)) > p :
        p = (100 * i // len(wordset))
        print("Calculating drift (%d%%)" % (p), end = "\r")
print()
# sort list
# Ascending by metric, so the biggest drifters are at the END of the list.
print("Sorting...", end = "\r")
drifters = sorted(dict_metric, key = dict_metric.get)
print("Sorted    ")
# save sorted list
output = open(output_dir + "sorted-%s-%s+%sx%dt%d" % (start_year, end_year, window_len, dimensionality, top_n), "wb")
pickle.dump(drifters, output)
output.close()
# save metric dict
output = open(output_dir + "metric-%s-%s+%sx%dt%d" % (start_year, end_year, window_len, dimensionality, top_n), "wb")
pickle.dump(dict_metric, output)
output.close()
print()
print("Best:")
for word in drifters[-30:] :
    print("\t%s\t%d" % (word, dict_metric[word]))
print (drifters)
# Bot metadata constants. The "<NAME>" token is a placeholder left by the
# dataset anonymizer -- presumably the bot author's name; confirm before use.
description = "Hi!! its me <NAME>, made for fun and moderation!"
# Repository URL for the project (intentionally left empty).
github = ""
1757617 | #from collections import namedtuple
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from PyTorchDisentanglement.models.base import BaseModel
import PyTorchDisentanglement.models.model_loader as ml
class Ensemble(BaseModel):
    """Sequential ensemble: each member model consumes the latent encodings
    produced by the previous member; losses and update dicts are collected
    per member."""

    def setup_model(self):
        """Build every sub-model declared in ``self.params.ensemble_params``,
        propagating the shared dataset geometry into each member's params."""
        self.models = []
        for model_idx, params in enumerate(self.params.ensemble_params):
            # Propagate dataset-wide settings into each member's params.
            params.epoch_size = self.params.epoch_size
            params.num_val_images = self.params.num_val_images
            params.num_test_images = self.params.num_test_images
            params.data_shape = self.params.data_shape
            model = ml.load_model(params.model_type)
            model.setup(params, self.logger)
            model.to(params.device)
            #model.print_update = self.print_update
            self.models.append(model)

    def forward(self, x):
        """Thread ``x`` through the chain of sub-model encoders.

        BUG FIX: the loop originally iterated over the undefined global name
        ``models`` (NameError at runtime); it must use ``self.models``.
        """
        for model in self.models:
            x = model.get_encodings(x)  # pre-classifier or pre-generator latent encodings
        return x

    def setup_optimizer(self):
        """Give each member its own optimizer and multi-step LR scheduler."""
        for model in self.models:
            model.optimizer = self.get_optimizer(
                optimizer_params=model.params,
                trainable_variables=model.parameters())
            model.scheduler = torch.optim.lr_scheduler.MultiStepLR(
                model.optimizer,
                milestones=model.params.optimizer.milestones,
                gamma=model.params.optimizer.lr_decay_rate)

    def get_ensemble_losses(self, input_tuple):
        """Return a list with one loss per member.

        NOTE(review): ``x`` is encoded by a member *before* that member's loss
        is computed, so each loss sees the member's own encodings — confirm
        this ordering is intended.
        """
        x = input_tuple[0]
        losses = []
        for model in self.models:
            x = model.get_encodings(x)
            losses.append(model.get_total_loss((x, input_tuple[1])))
        return losses

    def get_total_loss(self, input_tuple):
        """Sum of all per-member losses as a scalar tensor."""
        total_loss = self.get_ensemble_losses(input_tuple)
        return torch.stack(total_loss, dim=0).sum()

    def generate_update_dict(self, input_data, input_labels=None, batch_step=0):
        """Merge each member's update dict, namespacing keys by model type."""
        update_dict = super(Ensemble, self).generate_update_dict(input_data,
            input_labels, batch_step)
        x = input_data
        for model in self.models:
            model_update_dict = model.generate_update_dict(x, input_labels, batch_step)
            for key, value in model_update_dict.items():
                key = model.params.model_type+"_"+key
                update_dict[key] = value
            x = model.get_encodings(x)
        return update_dict
| StarcoderdataPython |
import torch
import torch.nn.functional as F

from linear_models import *
class Reshape(torch.nn.Module):
    """Module wrapper around ``Tensor.view`` so a reshape can be placed
    inside an ``nn.Sequential`` pipeline."""

    def __init__(self, *args):
        super().__init__()
        # Target shape exactly as passed; may contain -1 wildcards.
        self.shape = args

    def forward(self, x):
        """Return a view of ``x`` with the configured shape."""
        return x.view(*self.shape)
class NeuralNetworkRegression(LinearRegression):
    """Fully-connected regression MLP with a symmetric 10-20-30-40-30-20-10
    hidden-layer pyramid and ReLU activations."""

    def __init__(self, n_features, n_labels):
        super().__init__(n_features, n_labels)
        widths = [n_features, 10, 20, 30, 40, 30, 20, 10]
        modules = []
        for w_in, w_out in zip(widths, widths[1:]):
            modules.append(torch.nn.Linear(w_in, w_out))
            modules.append(torch.nn.ReLU())
        # Final projection to the label dimension (no activation: regression).
        modules.append(torch.nn.Linear(10, n_labels))
        self.layers = torch.nn.Sequential(*modules)
class NeuralNetworkClassification(LogisticRegression):
    """Fully-connected classification MLP: same 10-20-30-40-30-20-10 pyramid
    as the regression variant, with a Softmax over the class dimension."""

    def __init__(self, n_features, n_labels):
        super().__init__(n_features, n_labels)
        widths = [n_features, 10, 20, 30, 40, 30, 20, 10]
        modules = []
        for w_in, w_out in zip(widths, widths[1:]):
            modules.append(torch.nn.Linear(w_in, w_out))
            modules.append(torch.nn.ReLU())
        modules.append(torch.nn.Linear(10, n_labels))
        # Class probabilities along dim 1.
        modules.append(torch.nn.Softmax(1))
        self.layers = torch.nn.Sequential(*modules)
class CNNClassifier(LogisticRegression):
    """LeNet-style CNN for 28x28 single-channel inputs (e.g. MNIST).

    The base class is initialised with dummy 1x1 dimensions; the real
    architecture lives entirely in ``self.layers``.
    """

    def __init__(self):
        super().__init__(1, 1)
        feature_extractor = [
            torch.nn.Conv2d(1, 10, kernel_size=5),
            torch.nn.MaxPool2d(2),
            torch.nn.ReLU(),
            torch.nn.Conv2d(10, 20, kernel_size=5),
            torch.nn.Dropout(),
            torch.nn.MaxPool2d(2),
            torch.nn.ReLU(),
        ]
        classifier = [
            # 20 channels x 4 x 4 spatial = 320 flattened features.
            Reshape(-1, 320),
            torch.nn.Linear(320, 50),
            torch.nn.ReLU(),
            torch.nn.Dropout(),
            torch.nn.Linear(50, 10),
            torch.nn.LogSoftmax(1),
        ]
        self.layers = torch.nn.Sequential(*feature_extractor, *classifier)
105625 | <reponame>LIU2016/Demo<filename>language/python/DeepNudeImage/DeepNude_software_itself/color.py
def checkcolor():
    """Return the fixed RGB "check" colour as a fresh, mutable list."""
    return [0xFF, 0xF0, 0xFF]
def newcolor(a, b):
    """Return the fixed channel value 255.

    NOTE(review): both parameters are currently ignored; they are kept for
    interface compatibility with existing callers.  (The dataset-separator
    text that was fused onto the return line has been removed — it made the
    function a syntax error.)
    """
    return 255
3339293 | <reponame>BrynjarGeir/AdventOFCode2021
# Minimal demo script: ask for the user's name and echo it back.
# (Stray dataset-separator text fused to the print line has been removed —
# it made the line a syntax error.)
d = input('What is your name?')
print(d)
3241121 | <reponame>Athenian-ComputerScience-Fall2020/tic-tac-toe-maleich
# Collaborators (including web sites where you got help: (enter none if you didn't need help)
#
# A note on style: Dictionaries can be defined before or after functions.
# Game board: keys combine row (t)op/(m)iddle/(b)ottom with column
# (l)eft/(m)iddle/(r)ight; values are 'X', 'O' or a single space for empty.
board = {'tl': ' ', 'tm': ' ', 'tr': ' ',
         'ml': ' ', 'mm': ' ', 'mr': ' ',
         'bl': ' ', 'bm': ' ', 'br': ' '}
def print_board(gb):
    """Render the 3x3 board to stdout with --+--+-- separators between rows."""
    rows = (('tl', 'tm', 'tr'), ('ml', 'mm', 'mr'), ('bl', 'bm', 'br'))
    rendered = ['|'.join(gb[cell] for cell in row) for row in rows]
    print(('\n' + '--+--+--' + '\n').join(rendered))
def check_win(gb):
    """Return True if any row, column or diagonal holds three equal non-blank
    marks, else False.

    Replaces the original eight-branch elif chain with a table of winning
    lines, and returns an explicit False (the original fell off the end and
    returned None) — both are falsy, so callers are unaffected.
    """
    lines = (
        ('tl', 'tm', 'tr'), ('ml', 'mm', 'mr'), ('bl', 'bm', 'br'),  # rows
        ('tl', 'ml', 'bl'), ('tm', 'mm', 'bm'), ('tr', 'mr', 'br'),  # columns
        ('tl', 'mm', 'br'), ('tr', 'mm', 'bl'),                      # diagonals
    )
    for a, b, c in lines:
        if gb[a] == gb[b] == gb[c] != ' ':
            return True
    return False
def player_turn():
    """Prompt the O player for a board key, place the mark, and re-print."""
    # NOTE(review): leftover debug print — presumably should be removed.
    print("hi")
    print_board(board)
    print("O goes next. Choose a space")
    # NOTE(review): the input is not validated; an unknown key silently adds
    # a new entry to the board dict instead of rejecting the move — confirm.
    move = input()
    board[move] = "O"
    print_board(board)
| StarcoderdataPython |
#! /usr/bin/env python3
'''
Created on 02-Dec-2020
@author: anita-1372
'''
import argparse
import libvirt
import json
if __name__ == '__main__':
    # Monitoring-plugin entry point: collect KVM/libvirt host metrics and
    # print them as a single JSON document on stdout.
    conn = None
    data = {}
    try:
        parser = argparse.ArgumentParser()
        parser.add_argument('--host', help='kvm host to connect', nargs='?', default='qemu:///system')
        parser.add_argument('--plugin_version', help='plugin template version', type=int, nargs='?', default=1)
        # NOTE(review): argparse `type=bool` treats any non-empty string
        # (including "False") as True — confirm intended behaviour.
        parser.add_argument('--heartbeat', help='alert if monitor does not send data', type=bool, nargs='?', default=True)
        args = parser.parse_args()
        conn = libvirt.openReadOnly(args.host)
    except Exception as e:
        # Report the failure inside the emitted JSON instead of crashing.
        # NOTE(review): if the exception fires before `args` is bound (e.g.
        # inside add_argument), the references below raise NameError — verify.
        data['status'] = 0
        data['msg'] = str(e)
        data['heartbeat_required'] = args.heartbeat
        data['plugin_version'] = args.plugin_version
    if conn is not None :
        data['host'] = conn.getHostname()
        '''
        System host details
        '''
        cpu_model, mem_in_mb, active_cpus, cpu_freq_in_hz, numa_node_count, sockets_per_node, cores_per_socket, threads_per_core = conn.getInfo()
        data['cpu_model'] = cpu_model
        data['mem_in_mb'] = mem_in_mb
        data['active_cpus'] = active_cpus
        data['cpu_freq_in_hz'] = cpu_freq_in_hz
        data['numa_node_count'] = numa_node_count
        data['sockets_per_node'] = sockets_per_node
        data['cores_per_socket'] = cores_per_socket
        data['threads_per_core'] = threads_per_core
        '''
        System virtualization details
        '''
        data['virtualization_type'] = conn.getType()
        data['version'] = conn.getVersion()
        data['libvirt_version'] = conn.getLibVersion()
        data['uri'] = conn.getURI()
        data['conn_encrypted'] = True if conn.isEncrypted() else False
        data['conn_secure'] = True if conn.isSecure() else False
        '''
        System memory details
        '''
        #memory_params = conn.getMemoryParameters()
        #for param in memory_params:
        #    data[param] = memory_params[param]
        mem_stats = conn.getMemoryStats(libvirt.VIR_NODE_MEMORY_STATS_ALL_CELLS)
        for param in mem_stats:
            data[param+'_memory'] = mem_stats[param]
        #memlist = conn.getCellsFreeMemory(0, numa_node_count)
        #cell = 0
        #for cellfreemem in memlist:
        #    data['node_'+str(cell)+'_free_mem_in_kb'] = cellfreemem
        #    cell += 1
        '''
        System cpu utilization details
        '''
        stats = conn.getCPUStats(0)
        data['kernel'] = stats['kernel']
        data['iowait_time'] = stats['iowait']
        data['user_time'] = stats['user']
        data['idle_time'] = stats['idle']
        #print(conn.getSysinfo())
        '''
        count details
        '''
        data['vms_count'] = len(conn.listAllDomains())
        data['networks_count'] = len(conn.listAllNetworks())
        data['storage_pool_count'] = len(conn.listAllStoragePools())
        conn.close()
    print(json.dumps(data, indent=4, sort_keys=True))
| StarcoderdataPython |
from foreign import StataReader, genfromdta, savetxt
from table import SimpleTable, csv2st
from scikits.statsmodels import NoseWrapper as Tester

# Module-level test runner, exposed as `<package>.test`.
test = Tester().test
| StarcoderdataPython |
"""
Definition of ListNode
class ListNode(object):
def __init__(self, val, next=None):
self.val = val
self.next = next
"""
class Solution:
    """
    @param head: The first node of linked list.
    @param x: an integer
    @return: a ListNode
    """
    def partition(self, head, x):
        """Stable-partition a linked list around x.

        Nodes with val < x keep their relative order and precede all nodes
        with val >= x (which also keep their order).  Uses two dummy-headed
        sublists that are stitched together at the end.
        """
        before_head = ListNode(0)
        after_head = ListNode(0)
        before_tail, after_tail = before_head, after_head
        node = head
        while node is not None:
            if node.val < x:
                before_tail.next = node
                before_tail = node
            else:
                after_tail.next = node
                after_tail = node
            node = node.next
        # Terminate the >= x sublist, then append it to the < x sublist.
        after_tail.next = None
        before_tail.next = after_head.next
        return before_head.next
| StarcoderdataPython |
# BSD 3-Clause License
#
# Copyright (c) 2017,
# All rights reserved.
# Copyright 2020 Huawei Technologies Co., Ltd
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import torch
class MultiEpochsDataLoader(torch.utils.data.DataLoader):
    """DataLoader whose worker processes persist across epochs.

    The stock DataLoader tears down and respawns its workers every epoch.
    Wrapping the batch sampler in an endless `_RepeatSampler` and reusing a
    single underlying iterator keeps the workers alive for the loader's
    whole lifetime.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Temporarily clear the name-mangled `__initialized` guard so the
        # batch_sampler attribute can be replaced after construction.
        self._DataLoader__initialized = False
        self.batch_sampler = _RepeatSampler(self.batch_sampler)
        self._DataLoader__initialized = True
        # Single iterator shared by all epochs (this is what keeps workers up).
        self.iterator = super().__iter__()

    def __len__(self):
        # Number of batches in ONE epoch, i.e. the wrapped sampler's length.
        return len(self.batch_sampler.sampler)

    def __iter__(self):
        # Yield exactly one epoch's worth of batches from the shared iterator.
        for _ in range(len(self)):
            yield next(self.iterator)
class _RepeatSampler(object):
    """Sampler wrapper that re-iterates the wrapped sampler forever.

    Args:
        sampler (Sampler)
    """

    def __init__(self, sampler):
        self.sampler = sampler

    def __iter__(self):
        # Restart the wrapped sampler each time it is exhausted; a fresh
        # iterator is taken per pass so dynamic samplers stay correct.
        while True:
            for index in self.sampler:
                yield index
import simpleprocess
import externals
class Server(simpleprocess.SimpleProcess):
    """Wrapper around a running (java) Minecraft server process."""

    def __init__(self, cmdline, cwd = "."):
        super().__init__(cmdline, cwd)
        self.weather = "unknown"   # last weather value pushed to the server
        self.daytime = 0
        self.type = "java"

    def say(self, msg):
        """Broadcast msg in chat, emitting one `say` command per line."""
        # '@' would be interpreted as a selector, so substitute a lookalike;
        # strip carriage returns before splitting into lines.
        sanitized = msg.replace("@", "\ufe6b").replace("\r", "")
        commands = ["say " + line + "\r\n" for line in sanitized.split("\n")]
        externals.minecraft.send("".join(commands))

    def set_weather(self, new_weather):
        """Send a weather change only when it differs from the cached state."""
        if self.weather == new_weather:
            return
        self.weather = new_weather
        self.send("weather " + new_weather + "\r\n")

    def summon(self, entity_type, position = None, options = None):
        """Spawn an entity, optionally at an explicit position string."""
        print("Summoning entity type \"" + entity_type + "\" at " + str(position))
        command = "summon " + entity_type
        if position is not None:
            command += " " + position
        self.send(command + "\r\n")

    def announcement(self, title, subtitle = None):
        """Show a title (and optional subtitle) to the nearest player."""
        if subtitle is not None:
            self.send("title @p subtitle {\"text\": \"" + subtitle + "\"}\r\n")
        self.send("title @p title {\"text\": \"" + title + "\"}\r\n")
| StarcoderdataPython |
1762774 | <filename>examples/study.cases/openspiel/run-dvracer.py
#!/usr/bin/env python3
import os
import sys
sys.path.append('./_model')
from env import *
import argparse
# CLI arguments for the dVRACER / OpenSpiel reinforcement-learning example.
parser = argparse.ArgumentParser()
parser.add_argument('--env', help='Specifies which environment to run.', required=True)
parser.add_argument(
    '--engine',
    help='NN backend to use',
    default='OneDNN',
    required=False)
parser.add_argument(
    '--maxGenerations',
    help='Maximum Number of generations to run',
    default=1000,
    required=False)
parser.add_argument(
    '--optimizer',
    help='Optimizer to use for NN parameter updates',
    default='Adam',
    required=False)
parser.add_argument(
    '--learningRate',
    help='Learning rate for the selected optimizer',
    default=1e-3,
    required=False)
parser.add_argument(
    '--concurrentEnvironments',
    help='Number of environments to run concurrently',
    default=1,
    required=False)
# NOTE(review): this argument is parsed but never used below — the final
# check is hard-coded against 100. Confirm which threshold is intended.
parser.add_argument(
    '--testRewardThreshold',
    help='Threshold for the testing MSE, under which the run will report an error',
    default=150,
    required=False)
args = parser.parse_args()
print("Running Leduc example with arguments:")
print(args)
####### Defining Korali Problem
import korali
k = korali.Engine()
e = korali.Experiment()
### Initializing Openspiel environment
initEnvironment(e, args.env)
### Defining the Openspiel game's configuration
e["Problem"]["Training Reward Threshold"] = 300
e["Problem"]["Policy Testing Episodes"] = 40
e["Problem"]["Actions Between Policy Updates"] = 5
### Defining Agent Configuration
e["Solver"]["Type"] = "Agent / Discrete / dVRACER"
e["Solver"]["Mode"] = "Training"
e["Solver"]["Episodes Per Generation"] = 10
e["Solver"]["Experiences Between Policy Updates"] = 1
e["Solver"]["Learning Rate"] = float(args.learningRate)
e["Solver"]["Mini Batch"]["Size"] = 256
e["Solver"]["Concurrent Environments"] = int(args.concurrentEnvironments)
### Defining Experience Replay configuration
e["Solver"]["Experience Replay"]["Start Size"] = 4096
e["Solver"]["Experience Replay"]["Maximum Size"] = 65536
### Setting Experience Replay and REFER settings
e["Solver"]["Experience Replay"]["Off Policy"]["Annealing Rate"] = 5.0e-8
e["Solver"]["Experience Replay"]["Off Policy"]["Cutoff Scale"] = 5.0
e["Solver"]["Experience Replay"]["Off Policy"]["REFER Beta"] = 0.3
e["Solver"]["Experience Replay"]["Off Policy"]["Target"] = 0.1
e["Solver"]["State Rescaling"]["Enabled"] = True
e["Solver"]["Reward"]["Rescaling"]["Enabled"] = True
### Configuring the neural network and its hidden layers
e["Solver"]["Neural Network"]["Engine"] = args.engine
e["Solver"]["Neural Network"]["Optimizer"] = args.optimizer
### Configuring the neural network and its hidden layers
e["Solver"]["Neural Network"]["Hidden Layers"][0]["Type"] = "Layer/Linear"
e["Solver"]["Neural Network"]["Hidden Layers"][0]["Output Channels"] = 128
e["Solver"]["Neural Network"]["Hidden Layers"][1]["Type"] = "Layer/Activation"
e["Solver"]["Neural Network"]["Hidden Layers"][1]["Function"] = "Elementwise/Tanh"
e["Solver"]["Neural Network"]["Hidden Layers"][2]["Type"] = "Layer/Linear"
e["Solver"]["Neural Network"]["Hidden Layers"][2]["Output Channels"] = 128
e["Solver"]["Neural Network"]["Hidden Layers"][3]["Type"] = "Layer/Activation"
e["Solver"]["Neural Network"]["Hidden Layers"][3]["Function"] = "Elementwise/Tanh"
### Defining Termination Criteria
e["Solver"]["Termination Criteria"]["Max Generations"] = args.maxGenerations
e["Solver"]["Termination Criteria"]["Testing"]["Target Average Reward"] = 250
### Setting file output configuration
e["File Output"]["Enabled"] = False
e["Console Output"]["Verbosity"] = "Detailed"
### Running Experiment
k.run(e)
### Now we run a few test samples and check their reward
e["Solver"]["Mode"] = "Testing"
e["Solver"]["Testing"]["Sample Ids"] = list(range(5))
k.run(e)
# `np` is expected to come from `from env import *` at the top of the file.
averageTestReward = np.average(e["Solver"]["Testing"]["Reward"])
print("Average Reward: " + str(averageTestReward))
if (averageTestReward < 100):
  print("Openspiel example did not reach minimum testing average.")
  exit(-1)
| StarcoderdataPython |
164710 | from typing import List
class JuneFifth:
    """
    2020/06/12 LeetCode 15. 3Sum

    Given an integer array nums, return all unique triplets
    [nums[i], nums[j], nums[k]] such that they sum to zero.

    Example: nums = [-1, 0, 1, 2, -1, -4] -> [[-1, 0, 1], [-1, -1, 2]]
    """

    @staticmethod
    def threeSum(nums: List[int]) -> List[List[int]]:
        """Sort-then-two-pointer scan; sorts `nums` in place.

        1116ms/40.99%/16.4MB/9.64%
        """
        triplets = []
        if not nums or len(nums) < 3:
            return triplets
        nums.sort()
        n = len(nums)
        for anchor in range(n):
            # Once the smallest element is positive no triplet can sum to 0.
            if nums[anchor] > 0:
                return triplets
            # Skip duplicate anchors to avoid duplicate triplets.
            if anchor > 0 and nums[anchor] == nums[anchor - 1]:
                continue
            lo, hi = anchor + 1, n - 1
            while lo < hi:
                total = nums[anchor] + nums[lo] + nums[hi]
                if total == 0:
                    triplets.append([nums[anchor], nums[lo], nums[hi]])
                    # Step both pointers past any duplicates of the pair.
                    while lo < hi and nums[lo] == nums[lo + 1]:
                        lo += 1
                    while lo < hi and nums[hi] == nums[hi - 1]:
                        hi -= 1
                    lo += 1
                    hi -= 1
                elif total > 0:
                    hi -= 1   # sum too large: shrink the big end
                else:
                    lo += 1   # sum too small: grow the middle value
        return triplets
class JuneNinth:
    """
    2020/06/19 LeetCode 125. Valid Palindrome

    Given a string, determine whether it is a palindrome considering only
    alphanumeric characters and ignoring case.  The empty string is defined
    to be a valid palindrome.

    Example 1: "A man, a plan, a canal: Panama" -> True
    Example 2: "race a car" -> False
    """

    @staticmethod
    def isPalindrome_two_pointer(s: str) -> bool:
        """
        51ms/81.52%/13.7MB/55.56%
        Two-pointer scan that skips non-alphanumeric characters in place.

        BUG FIX: both solutions were originally named ``isPalindrome``, so
        this one was silently shadowed by the second definition; it has been
        renamed so both remain callable.  The public ``isPalindrome`` below
        keeps the exact behaviour callers previously saw.
        """
        s, left, right = s.lower(), 0, len(s) - 1
        while left < right:
            while left < right and not s[left].isalnum():
                left += 1
            while right > left and not s[right].isalnum():
                right -= 1
            if s[left] != s[right]:
                return False
            left, right = left + 1, right - 1
        return True

    @staticmethod
    def isPalindrome(s: str) -> bool:
        """
        52ms/81.52%/14.4MB/37.04%
        Strip everything except letters and digits, then compare the string
        with its reversal.
        """
        s = ''.join(filter(str.isalnum, s.lower()))
        return s == s[::-1]
# Ad-hoc manual check: "OP" is not a palindrome, so this prints False.
if __name__ == '__main__':
    print(JuneNinth.isPalindrome("OP"))
| StarcoderdataPython |
import cv2

from tracker import KCFTracker
def tracker(cam, frame, bbox):
    """Track `bbox` through the video stream `cam` with a KCF tracker.

    Draws the tracked box and an FPS readout on each frame until ESC is
    pressed or the stream ends, then releases the capture and windows.

    Fixes: the local tracker instance no longer shadows this function's own
    name, and the previously-ignored `ok` flag from cam.read() is checked so
    a closed stream stops the loop instead of crashing on a None frame.
    """
    kcf = KCFTracker(True, True, True)  # (hog, fixed_window, multi_scale)
    kcf.init(bbox, frame)
    while True:
        ok, frame = cam.read()
        if not ok:
            # End of stream / camera unplugged: stop cleanly.
            break
        timer = cv2.getTickCount()
        bbox = list(map(int, kcf.update(frame)))
        fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
        # Tracking success
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
        # Put FPS
        cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
        cv2.imshow("Tracking", frame)
        # Exit if ESC pressed
        k = cv2.waitKey(1) & 0xff
        if k == 27:
            break
    cam.release()
    cv2.destroyAllWindows()
if __name__ == '__main__':
    # Open the default webcam, let the user draw the initial ROI, then track it.
    video = cv2.VideoCapture(0)
    # ok, frame = video.read()
    ok, frame = video.read()
    bbox = cv2.selectROI('Select ROI', frame, False)
    # selectROI returns (x, y, w, h); any zero component means the selection
    # was cancelled or degenerate, so quit without tracking.
    if min(bbox) == 0: exit(0)
    tracker(video, frame, bbox)
| StarcoderdataPython |
3280326 | <filename>venv/lib/python3.8/site-packages/vsts/task_agent/v4_0/models/publish_task_group_metadata.py
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# Generated file, DO NOT EDIT
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------------------------
from msrest.serialization import Model
class PublishTaskGroupMetadata(Model):
    """PublishTaskGroupMetadata.
    :param comment:
    :type comment: str
    :param parent_definition_revision:
    :type parent_definition_revision: int
    :param preview:
    :type preview: bool
    :param task_group_id:
    :type task_group_id: str
    :param task_group_revision:
    :type task_group_revision: int
    """

    # NOTE: this file is auto-generated (see header); limit edits to comments.
    # msrest serialization map: attribute name -> wire key and type.
    _attribute_map = {
        'comment': {'key': 'comment', 'type': 'str'},
        'parent_definition_revision': {'key': 'parentDefinitionRevision', 'type': 'int'},
        'preview': {'key': 'preview', 'type': 'bool'},
        'task_group_id': {'key': 'taskGroupId', 'type': 'str'},
        'task_group_revision': {'key': 'taskGroupRevision', 'type': 'int'}
    }

    def __init__(self, comment=None, parent_definition_revision=None, preview=None, task_group_id=None, task_group_revision=None):
        super(PublishTaskGroupMetadata, self).__init__()
        self.comment = comment
        self.parent_definition_revision = parent_definition_revision
        self.preview = preview
        self.task_group_id = task_group_id
        self.task_group_revision = task_group_revision
| StarcoderdataPython |
189762 | <reponame>e-m-albright/CS682
"""
Explore performance of traditional 2d convolutional networks on a flattened view of the brain scans
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision.models import resnet
from src.data.ml import Dataset
from src.utils import hyper, train, reload, plot
def optimizer(model, learning_rate: float = 1e-2):
    """Build an SGD optimizer with Nesterov momentum for `model`'s parameters."""
    sgd_kwargs = dict(lr=learning_rate, momentum=0.9, nesterov=True)
    return optim.SGD(model.parameters(), **sgd_kwargs)
def criterion():
    """Return the classification loss used for training (cross-entropy)."""
    loss_fn = nn.CrossEntropyLoss()
    return loss_fn
def model(*args, **kwargs):
    """Construct a single-channel ResNet-18 with 2 output classes.

    The [2, 2, 2, 2] layer counts with BasicBlock match the ResNet-18
    configuration; every parameter is explicitly marked trainable.
    """
    net = SingleChannelResNet(
        resnet.BasicBlock,  # block
        [2, 2, 2, 2],  # layers
        num_classes=2,
        zero_init_residual=False,
        groups=1,
        width_per_group=64,
        replace_stride_with_dilation=None,
        norm_layer=None
    )

    # Ensure every parameter participates in backprop (may already be the case).
    for p in net.parameters():
        p.requires_grad = True

    return net
# -----------------------------------------
# Sourced from torchvision.models.resnet
#
# Code pulled in for convenience of customization
# especially in altering the number of channels
# -----------------------------------------
class SingleChannelResNet(nn.Module):
    """torchvision-style ResNet whose first convolution accepts ONE input
    channel (instead of 3), for grayscale data such as flattened brain scans.
    Apart from conv1's in_channels, the code mirrors torchvision.models.resnet.
    """

    def __init__(self, block, layers, num_classes=1000, zero_init_residual=False,
                 groups=1, width_per_group=64, replace_stride_with_dilation=None,
                 norm_layer=None):
        super(SingleChannelResNet, self).__init__()
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        self._norm_layer = norm_layer

        self.inplanes = 64
        self.dilation = 1
        if replace_stride_with_dilation is None:
            # each element in the tuple indicates if we should replace
            # the 2x2 stride with a dilated convolution instead
            replace_stride_with_dilation = [False, False, False]
        if len(replace_stride_with_dilation) != 3:
            raise ValueError("replace_stride_with_dilation should be None "
                             "or a 3-element tuple, got {}".format(replace_stride_with_dilation))
        self.groups = groups
        self.base_width = width_per_group
        # Single-channel stem: in_channels=1 is the only change vs torchvision.
        self.conv1 = nn.Conv2d(1, self.inplanes, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = norm_layer(self.inplanes)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2,
                                       dilate=replace_stride_with_dilation[0])
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2,
                                       dilate=replace_stride_with_dilation[1])
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2,
                                       dilate=replace_stride_with_dilation[2])
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        # Standard Kaiming init for convs, unit/zero init for norm layers.
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

        # Zero-initialize the last BN in each residual branch,
        # so that the residual branch starts with zeros, and each residual block behaves like an identity.
        # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677
        if zero_init_residual:
            for m in self.modules():
                if isinstance(m, resnet.Bottleneck):
                    nn.init.constant_(m.bn3.weight, 0)
                elif isinstance(m, resnet.BasicBlock):
                    nn.init.constant_(m.bn2.weight, 0)

    def _make_layer(self, block, planes, blocks, stride=1, dilate=False):
        """Stack `blocks` residual blocks for one stage, adding a 1x1-conv
        downsample shortcut when stride or channel count changes."""
        norm_layer = self._norm_layer
        downsample = None
        previous_dilation = self.dilation
        if dilate:
            self.dilation *= stride
            stride = 1
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                resnet.conv1x1(self.inplanes, planes * block.expansion, stride),
                norm_layer(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample, self.groups,
                            self.base_width, previous_dilation, norm_layer))
        self.inplanes = planes * block.expansion
        for _ in range(1, blocks):
            layers.append(block(self.inplanes, planes, groups=self.groups,
                                base_width=self.base_width, dilation=self.dilation,
                                norm_layer=norm_layer))

        return nn.Sequential(*layers)

    def _forward(self, x):
        # Stem -> 4 residual stages -> global average pool -> classifier.
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.fc(x)

        return x

    # Allow for accessing forward method in a inherited class
    forward = _forward
def run(iargs):
    """Train the 2-D convolutional model according to the parsed CLI args.

    Optionally loads pre-trained weights, plots loss/accuracy curves, and
    saves the result; returns the trained model.
    """
    dimensions = '2d'
    learning_rate = 1e-3 if iargs.learning_rate is None else iargs.learning_rate

    # Run name encodes the main hyper-parameters for artifact file names;
    # it doesn't fully convey the setup but recalls most details.
    name = "conv_{}_s{}_lr{}_e{}".format(
        dimensions,
        iargs.subjects,
        learning_rate,
        iargs.epochs,
    )

    dataset = Dataset(dimensions=dimensions, limit=iargs.subjects)
    net = model()
    if iargs.load:
        reload.load(net, iargs.load)

    losses, accuracies = train.train(
        net,
        optimizer(net, learning_rate=learning_rate),
        criterion(),
        dataset,
        epochs=iargs.epochs,
        print_frequency=iargs.print_freq,
    )

    if iargs.plot:
        train_loader, _, _ = dataset.get_loaders()
        plot.plot_loss(losses, len(train_loader), name)
        plot.plot_accuracies(accuracies, name)

    if iargs.save:
        reload.save(net, name)

    return net
| StarcoderdataPython |
102724 | <reponame>megvii-model/RLNAS
import os
class config:
    """Static NAS search-space configuration (DARTS-style: 14 layers/edges).

    The dataset-separator text fused onto the last attribute line has been
    removed — it made the class body a syntax error.
    """
    # Basic configration
    layers = 14
    edges = 14
    model_input_size_imagenet = (1, 3, 224, 224)

    # Candidate operators
    blocks_keys = [
        'none',
        'max_pool_3x3',
        'avg_pool_3x3',
        'skip_connect',
        'sep_conv_3x3',
        'sep_conv_5x5',
        'dil_conv_3x3',
        'dil_conv_5x5'
    ]
    op_num = len(blocks_keys)

    # Operators encoding: index of each operator within blocks_keys.
    NONE = 0
    MAX_POOLING_3x3 = 1
    AVG_POOL_3x3 = 2
    SKIP_CONNECT = 3
    SEP_CONV_3x3 = 4
    SEP_CONV_5x5 = 5
    DIL_CONV_3x3 = 6
    DIL_CONV_5x5 = 7
# -*- coding: utf-8 -*-
import os.path
import json
import logging
import time
from google.protobuf.message import DecodeError
from requests.exceptions import ConnectionError
from googleplay_api import googleplay
import config
# TODO
# Handle ip, account ban
class PackageError(Exception):
def __init__(self, value):
super(Exception, self).__init__()
self.value = value
def __str__(self):
return repr(self.value)
class VersionCodeAPI(googleplay.GooglePlayAPI):
    """Google Play client that enumerates which versionCodes of an app are
    still purchasable (Python 2 code: note the use of `xrange` below)."""

    # Highest versionCode this tool is willing to scan down from.
    MAX_VC = 40347
    # Delay between store requests, to be gentle on the API.
    INTERVAL_SECS = 2
    # Max consecutive protobuf DecodeErrors before giving up on a check.
    MAX_ERR_COUNT = 5

    def __init__(self, androidId, lang, log_path):
        super(VersionCodeAPI, self).__init__(androidId, lang)
        self.logger = create_logger(log_path)

    def purchase(self, pkg_name, vc, ot=1):
        """purchase
        Fetch download url and download cookie for an app (pkg_name).
        :param pkg_name: Package name of the app
        :param vc: versionCode
        :param ot: offerType
        """
        self.logger.debug("purchase(pkg_name={}, vc={}, ot={}".format(pkg_name, vc, ot))
        path = "purchase"
        data = "ot=%d&doc=%s&vc=%d" % (ot, pkg_name, vc)
        message = self.executeRequestApi2(path, data)
        return message.payload.buyResponse

    def __check_vc_exists(self, pkg_name, vc):
        """Return True iff a purchase response for (pkg_name, vc) is non-empty,
        retrying on DecodeError up to MAX_ERR_COUNT times."""
        decode_err_count = 0
        while True:
            try:
                buy_res = self.purchase(pkg_name, vc)
            except DecodeError as e:
                decode_err_count += 1
                if decode_err_count > VersionCodeAPI.MAX_ERR_COUNT:
                    self.logger.exception("DecodeError exceeds max error count")
                    raise e
            else:
                break
        # Empty serialized response means the versionCode is not offered.
        vc_exists = len(buy_res.SerializeToString()) != 0
        self.logger.debug("check {}:{} exists, {}".format(pkg_name, vc, vc_exists))
        return vc_exists

    def __fetch_latest_vc(self, pkg_name):
        """__fetch_latest_vc
        Fetch the versionCode of the latest version of an app (pkg_name)
        :param pkg_name:
        """
        m = self.details(pkg_name)
        doc = m.docV2
        # A versionCode of 0 means the package does not exist in the store.
        latest_vc = doc.details.appDetails.versionCode
        return latest_vc

    def fetch_existing_vcs(self, pkg_name):
        """Return all purchasable versionCodes of pkg_name, newest first.

        Raises PackageError when the package is missing or its latest
        versionCode exceeds MAX_VC.  NOTE: probes every candidate code from
        the latest down to 1, sleeping INTERVAL_SECS between requests, so
        this is deliberately slow (O(latest_vc) network calls).
        """
        self.logger.info("fetch existing vcs, {}".format(pkg_name))
        latest_vc = self.__fetch_latest_vc(pkg_name)
        pkg_name_exists = latest_vc != 0
        if not pkg_name_exists:
            msg = "{} does not exist".format(pkg_name)
            self.logger.info(msg)
            raise PackageError(msg)

        if latest_vc > VersionCodeAPI.MAX_VC:
            msg = "{} exceeds max versionCode".format(latest_vc)
            self.logger.info(msg)
            raise PackageError(msg)

        # Test all the possible versionCodes to check if they exist or not
        existing_vcs = list()
        for vc in xrange(latest_vc, 0, -1):
            vc_exists = self.__check_vc_exists(pkg_name, vc)
            time.sleep(VersionCodeAPI.INTERVAL_SECS)
            if vc_exists:
                existing_vcs.append(vc)

        return existing_vcs
def dump_existing_vcs(api, pkg_name):
    """Fetch all purchasable versionCodes of `pkg_name` and write them as a
    JSON list to the configured per-package file.

    Best-effort: any network, store or decode error silently skips the
    package (matching the original behaviour).
    """
    try:
        vcs = api.fetch_existing_vcs(pkg_name)
    except (ConnectionError, PackageError, DecodeError):
        # Give up if an error occurs.
        return

    with open(config.VC_FILENAME_FORMAT.format(pkg_name), "w") as out:
        out.write(json.dumps(vcs, indent=4))
def create_logger(log_path):
    """Build the module logger: timestamped INFO+ records to `log_path`,
    plain DEBUG+ records to the console."""
    logger = logging.getLogger(__name__)
    logger.setLevel(config.LOGGER_LEVEL)

    file_handler = logging.FileHandler(log_path)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(logging.Formatter("%(asctime)s - %(message)s"))
    logger.addHandler(file_handler)

    console_handler = logging.StreamHandler()
    console_handler.setLevel(logging.DEBUG)
    logger.addHandler(console_handler)
    return logger
def load_pkg_names_from_json(filename):
    """Read a JSON file and return its content (a list of package names)."""
    with open(filename, "r") as handle:
        return json.load(handle)
def main():
    """Entry point: authenticate against Google Play with the configured
    account, then dump each configured package's existing versionCodes."""
    api = VersionCodeAPI(config.ANDROID_DEVICE_ID, config.LANG, config.LOG_PATH)
    api.login(config.GOOGLE_LOGIN, config.GOOGLE_PASSWORD)

    pkg_names = load_pkg_names_from_json(config.PKG_FILEPATH)
    for pkg_name in pkg_names:
        dump_existing_vcs(api, pkg_name)

if __name__ == "__main__":
    main()
| StarcoderdataPython |
3282496 | <reponame>Floplosion05/MerossIot
from meross_iot.utilities.lock import lock_factory
class AtomicCounter(object):
    """Integer counter whose mutations are serialised by a reentrant lock."""

    def __init__(self, initialValue):
        self._lock = lock_factory.build_rlock()
        self._val = initialValue

    def inc(self):
        """Atomically add one and return the new value."""
        with self._lock:
            self._val = self._val + 1
            return self._val

    def dec(self):
        """Atomically subtract one and return the new value."""
        with self._lock:
            self._val = self._val - 1
            return self._val

    def get(self):
        """Return the current value while holding the lock."""
        with self._lock:
            return self._val
3217838 | <filename>functions/scheduler/call.py
# Copyright 2018 U.C. Berkeley RISE Lab
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import random
import uuid
import time
import zmq
from include.functions_pb2 import *
from include.serializer import *
from include import server_utils as sutils
from include.shared import *
from . import utils
sys_random = random.SystemRandom()
def call_function(func_call_socket, pusher_cache, executors, key_ip_map,
                  executor_status_map, running_counts, backoff):
    """Receive one FunctionCall from *func_call_socket*, pick an executor
    (ip, tid) for it, and forward the serialized call to that executor.

    Replies to the caller with a GenericResponse carrying the response id
    under which the result will be made available.
    """
    call = FunctionCall()
    call.ParseFromString(func_call_socket.recv())

    # Make sure the caller always gets a response id back, generating one
    # when the client did not supply it.
    if not call.HasField('resp_id'):
        call.resp_id = str(uuid.uuid4())

    # Deserialize the arguments and keep only the FluentReference ones;
    # these are the keys whose cache locality drives executor choice.
    refs = list(filter(lambda arg: type(arg) == FluentReference,
                map(lambda arg: get_serializer(arg.type).load(arg.body),
                    call.args)))

    ip, tid = _pick_node(executors, key_ip_map, refs, running_counts, backoff)
    sckt = pusher_cache.get(utils._get_exec_address(ip, tid))
    sckt.send(call.SerializeToString())

    # The chosen thread is now busy: drop it from the available set and
    # record when we last dispatched onto it.
    executors.discard((ip, tid))
    executor_status_map[(ip, tid)] = time.time()

    r = GenericResponse()
    r.success = True
    r.response_id = call.resp_id

    func_call_socket.send(r.SerializeToString())
def call_dag(call, pusher_cache, dags, func_locations, key_ip_map,
             running_counts, backoff):
    """Build a DagSchedule for *call*, push it to every involved executor,
    then fire a 'BEGIN' trigger at each source function.

    Returns the generated schedule id.
    """
    dag, sources = dags[call.name]

    schedule = DagSchedule()
    schedule.id = str(uuid.uuid4())
    schedule.dag.CopyFrom(dag)
    schedule.consistency = NORMAL

    if call.HasField('response_address'):
        schedule.response_address = call.response_address

    # Pick an executor location for every function in the DAG, preferring
    # nodes that have the function's referenced keys cached.
    for fname in dag.functions:
        locations = func_locations[fname]
        args = call.function_args[fname].args

        refs = list(filter(lambda arg: type(arg) == FluentReference,
                    map(lambda arg: get_serializer(arg.type).load(arg.body),
                        args)))
        loc = _pick_node(locations, key_ip_map, refs, running_counts, backoff)
        schedule.locations[fname] = loc[0] + ':' + str(loc[1])

        # copy over arguments into the dag schedule
        arg_list = schedule.arguments[fname]
        arg_list.args.extend(args)

    # Send each executor the schedule, customised with its own target
    # function and that function's trigger list (predecessors or 'BEGIN').
    for func in schedule.locations:
        loc = schedule.locations[func].split(':')
        ip = utils._get_queue_address(loc[0], loc[1])
        schedule.target_function = func

        triggers = sutils._get_dag_predecessors(dag, func)
        if len(triggers) == 0:
            triggers.append('BEGIN')

        schedule.ClearField('triggers')
        schedule.triggers.extend(triggers)

        sckt = pusher_cache.get(ip)
        sckt.send(schedule.SerializeToString())

    # Kick off execution by triggering every source function of the DAG.
    for source in sources:
        trigger = DagTrigger()
        trigger.id = schedule.id
        trigger.source = 'BEGIN'
        trigger.target_function = source

        ip = sutils._get_dag_trigger_address(schedule.locations[source])
        sckt = pusher_cache.get(ip)
        sckt.send(trigger.SerializeToString())

    return schedule.id
def _pick_node(valid_executors, key_ip_map, refs, running_counts, backoff):
    """Choose an (ip, tid) executor for a call.

    Prefers the node holding the most of the call's referenced keys in its
    cache; with 20% probability (or when nothing relevant is cached) falls
    back to a random valid executor. The pick time is recorded in
    *running_counts* under the chosen (ip, tid).

    Fix: the candidate-filtering pass (backoff + overload shedding) was
    duplicated verbatim; the first copy was dead code because the second
    immediately re-initialised ``executors`` from *valid_executors*. The
    duplicate and the unused local ``reason`` have been removed.
    """
    # Construct a map which maps from IP addresses to the number of
    # relevant arguments they have cached. For the time being, we will
    # just pick the machine that has the most number of keys cached.
    arg_map = {}

    executors = set(valid_executors)
    # Skip executors that are backing off, as long as at least one
    # alternative remains.
    for executor in backoff:
        if len(executors) > 1:
            executors.discard(executor)

    # Shed executors that have accumulated too many recent dispatches.
    keys = list(running_counts.keys())
    sys_random.shuffle(keys)
    for key in keys:
        if len(running_counts[key]) > 1000 and len(executors) > 1:
            executors.discard(key)

    executor_ips = [e[0] for e in executors]
    for ref in refs:
        if ref.key in key_ip_map:
            ips = key_ip_map[ref.key]

            for ip in ips:
                # only choose this cached node if its a valid executor for
                # our purposes
                if ip in executor_ips:
                    if ip not in arg_map:
                        arg_map[ip] = 0
                    arg_map[ip] += 1

    max_ip = None
    max_count = 0
    for ip in arg_map.keys():
        if arg_map[ip] > max_count:
            max_count = arg_map[ip]
            max_ip = ip

    # pick a random thread from our potential executors that is on that IP
    # address; we also route some requests to a random valid node
    if max_ip:
        candidates = list(filter(lambda e: e[0] == max_ip, executors))
        max_ip = sys_random.choice(candidates)

    # This only happens if max_ip is never set, and that means that
    # there were no machines with any of the keys cached. In this case,
    # we pick a random IP that was in the set of IPs that was running
    # most recently.
    if not max_ip or sys_random.random() < 0.20:
        max_ip = sys_random.sample(executors, 1)[0]

    if max_ip not in running_counts:
        running_counts[max_ip] = set()
    running_counts[max_ip].add(time.time())

    return max_ip
| StarcoderdataPython |
# All size constants in this module are expressed in bits.
__all__ = [
    "BYTE_BITSIZE"
    , "OPERAND_MAX_BITSIZE"
    , "SUPPORTED_READ_BITSIZES"
]

# Note, for code readability only
BYTE_BITSIZE = 8
OPERAND_MAX_BITSIZE = 64

# Note, descending order is needed to correctly calculate the size of readings
SUPPORTED_READ_BITSIZES = (64, 32, 16, 8)
| StarcoderdataPython |
3209287 | #!Measurement
'''
baseline:
after: true
before: false
counts: 120
detector: H1
mass: 34.2
settling_time: 15
default_fits: nominal
multicollect:
counts: 400
detector: H1
isotope: Ar40
peakcenter:
after: true
before: false
detector: H1
isotope: Ar40
detectors:
- H1
- AX
- CDD
equilibration:
inlet: R
outlet: O
inlet_delay: 3
eqtime: 20
use_extraction_eqtime: True
whiff:
eqtime: 4
counts: 1
abbreviated_count_ratio: 0.25
conditionals:
- action: run_remainder
teststr: Ar40.cur<=100
attr: Ar40
- action: pump
teststr: Ar40.cur>100
attr: Ar40
'''
# Detectors activated for this measurement (pychron DSL script).
ACTIVE_DETECTORS=('H2','H1','AX','L1','L2', 'CDD')
#FITS=('Ar41:linear','Ar40:linear', 'Ar39:parabolic','Ar38:parabolic','Ar37:parabolic','Ar36:parabolic')


def main():
    """Pychron measurement sequence with a 'whiff' pre-check.

    A short equilibration and whiff measurement decide (via the
    conditionals in the module docstring) whether to admit the remainder
    of the gas ('run_remainder') or pump it away and restart the
    measurement ('pump') before the main multicollection.

    NOTE(review): indentation was reconstructed from a whitespace-stripped
    source; in particular ``equil(eqtime)`` is assumed to belong to the
    'pump' branch — confirm against the original script.
    """
    #simulate CO2 analysis
    #open('T')
    #sleep(5)
    #close('L')
    #display information with info(msg)
    info('unknown measurement script')

    if mx.peakcenter.before:
        peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)

    #open a plot panel for this detectors
    activate_detectors(*ACTIVE_DETECTORS)

    if mx.baseline.before:
        baselines(ncounts=mx.baseline.counts,mass=mx.baseline.mass, detector=mx.baseline.detector)

    #position mass spectrometer
    position_magnet(mx.multicollect.isotope, detector=mx.multicollect.detector)

    #gas is staged behind inlet
    #make a pipette volume
    close('S')
    sleep(1)

    meqtime = mx.whiff.eqtime
    equil(meqtime, False)
    result = whiff(ncounts=mx.whiff.counts, conditionals=mx.whiff.conditionals)
    info('Whiff result={}'.format(result))
    wab=1.0
    if result=='run_remainder':
        # Admit the remaining gas and finish equilibrating for the full time.
        open('R')
        open('S')
        sleep(eqtime-meqtime)
        close('R')
        post_equilibration()
    elif result=='pump':
        # Discard the gas: reset, pump out spectrometer and sniff volume,
        # then re-stage and re-equilibrate before measuring.
        reset_measurement(ACTIVE_DETECTORS)
        activate_detectors(*ACTIVE_DETECTORS)
        #pump out spectrometer and sniff volume
        open('R')
        open(mx.equilibration.outlet)
        sleep(15)
        #close(mx.equilibration.outlet)
        close('R')
        sleep(1)
        open('S')
        sleep(2)
        close('T')
        sleep(2)
        close(mx.equilibration.outlet)
        equil(eqtime)

    multicollect(ncounts=mx.multicollect.counts*wab, integration_time=1)

    if mx.baseline.after:
        baselines(ncounts=mx.baseline.counts*wab, mass=mx.baseline.mass, detector=mx.baseline.detector,
                  settling_time=mx.baseline.settling_time)
    if mx.peakcenter.after:
        activate_detectors(*mx.peakcenter.detectors, **{'peak_center':True})
        peak_center(detector=mx.peakcenter.detector,isotope=mx.peakcenter.isotope)
    info('finished measure script')
def equil(eqt, do_post=True, set_tzero=True):
    """Equilibrate the spectrometer for *eqt* seconds and sniff meanwhile.

    ``do_post`` controls whether the post-equilibration script runs
    afterwards; ``set_tzero`` resets the measurement time origin once the
    inlet opens. Default fits are applied at the end.
    """
    #post equilibration script triggered after eqtime elapsed
    #equilibrate is non blocking
    #so use either a sniff of sleep as a placeholder until eq finished
    equilibrate(eqtime=eqt, do_post_equilibration=do_post,
                inlet=mx.equilibration.inlet, outlet=mx.equilibration.outlet)
    if set_tzero:
        #equilibrate returns immediately after the inlet opens
        set_time_zero(0)
    # NOTE(review): reconstructed indentation places sniff() at function
    # level (runs regardless of set_tzero) — confirm against the original.
    sniff(eqt)
    #set default regression
    set_fits()
    set_baseline_fits()
#========================EOF==============================================================
| StarcoderdataPython |
3365671 | <reponame>flexiooss/hotballoon-shed<gh_stars>0
import shutil
from pathlib import Path
from cmd.Directories import Directories
from cmd.Tasks.Task import Task
from cmd.Tasks.Tasks import Tasks
from cmd.package.modules.Module import Module
from cmd.package.modules.ModulesHandler import ModulesHandler
class CleanSources(Task):
    """Task that deletes the generated-sources directory of a package and,
    unless restricted to the current module, of all of its sub-modules."""

    NAME = Tasks.CLEAN_SOURCES

    def __modules_clean(self):
        """Run CleanSources recursively on every sub-module of the package."""
        if not self.package.config().has_modules():
            return
        handler: ModulesHandler = ModulesHandler(self.package)
        for child in handler.modules:
            CleanSources(self.options, child.package, child.package.cwd).process()

    def process(self):
        """Remove the generated directory, then recurse unless --module-only."""
        print('CLEAN SOURCES: ' + self.package.name())
        generated = Path(self.cwd.as_posix() + ('/' + Directories.GENERATED))
        if generated.is_dir():
            shutil.rmtree(generated.as_posix())
            print('**** CLEAN : generated')
        if self.options.module_only is not True:
            self.__modules_clean()
| StarcoderdataPython |
1745452 | from django.contrib import admin
from .models import VoiceCall
class VoiceCallAdmin(admin.ModelAdmin):
    """Admin configuration for VoiceCall records."""
    # Columns shown in the admin change-list view.
    list_display = ['id', 'shortcode', 'created_at', 'msisdn', 'duration',
                    'reason']


admin.site.register(VoiceCall, VoiceCallAdmin)
| StarcoderdataPython |
3306079 | import uuid
import requests
import requests_mock
import simplejson as json
from chaoscloud.api import client_session
from chaoscloud.api import urls
from chaoscloud.api.execution import initialize_execution, publish_execution, \
fetch_execution, publish_event
# Fake API endpoint targeted by all mocked requests in this module.
ENDPOINT = "https://console.chaosiq.io"


def test_execution_not_created_when_experiment_is_invalid_type(
        organizations, default_org_id,
        default_team_id):
    """The endpoint's 422 validation error is passed through to the caller."""
    experiment_id = str(uuid.uuid4())

    # the remote endpoint cannot deal with anything but an experiment
    experiment = {
        "extensions": [
            {
                "name": "chaosiq",
                "experiment_id": experiment_id
            }
        ]
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, with_executions=True)
        m.post(
            url, status_code=422, json=[
                {
                    "loc": ["a_dict"],
                    "msg": "value is not a valid dict",
                    "type": "type_error.dict"
                }
            ],
            headers={
                "content-type": "application/json"
            }
        )
        with client_session(ENDPOINT, organizations) as s:
            r = initialize_execution(s, experiment, {})
            assert r.status_code == 422


def test_create_execution(organizations, default_org_id, default_team_id):
    """A 201 creation injects the returned execution id into the extension."""
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    experiment = {
        "title": "Hello there",
        "extensions": [
            {
                "name": "chaosiq",
                "experiment_id": experiment_id
            }
        ]
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, with_executions=True)
        m.post(
            url, status_code=201,
            json={
                "id": x_id,
            },
            headers={
                "content-type": "application/json",
                "content-location": "{}/{}".format(url, x_id)
            }
        )
        with client_session(ENDPOINT, organizations) as s:
            r = initialize_execution(s, experiment, {})
            assert r.status_code == 201

            # we injected the execution_id
            assert experiment["extensions"][0]["execution_id"] == x_id


def test_cannot_create_execution_on_requests_connection_timeout(
        organizations, default_org_id,
        default_team_id):
    """A connection timeout during creation yields None, not an exception."""
    experiment_id = str(uuid.uuid4())
    experiment = {
        "title": "Hello there",
        "extensions": [
            {
                "name": "chaosiq",
                "experiment_id": experiment_id
            }
        ]
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, with_executions=True)
        m.post(
            url,
            exc=requests.exceptions.ConnectTimeout
        )
        with client_session(ENDPOINT, organizations) as s:
            r = initialize_execution(s, experiment, {})
            assert r is None


def test_cannot_create_execution_from_unknown_experiment_id(
        organizations, default_org_id,
        default_team_id):
    """A 422 response must not inject any execution id into the extension."""
    experiment_id = str(uuid.uuid4())
    experiment = {
        "title": "Hello there",
        "extensions": [
            {
                "name": "chaosiq",
                "experiment_id": experiment_id
            }
        ]
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, with_executions=True)
        m.post(
            url, status_code=422,
            json=[],
            headers={
                "content-type": "application/json"
            }
        )
        with client_session(ENDPOINT, organizations) as s:
            r = initialize_execution(s, experiment, {})
            assert r.status_code == 422
            assert "execution_id" not in experiment["extensions"][0]


def test_cannot_update_execution_with_invalid_execution_id(organizations,
                                                           default_org_id,
                                                           default_team_id):
    """Publishing to an unknown execution id surfaces the endpoint's 404."""
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    journal = {
        "experiment": {
            "extensions": [
                {
                    "name": "chaosiq",
                    "execution_id": x_id,
                    "experiment_id": experiment_id
                }
            ]
        }
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, execution_id=x_id)
        m.put(
            url, status_code=404,
            headers={
                "content-type": "text/plain"
            }
        )
        with client_session(ENDPOINT, organizations) as s:
            r = publish_execution(s, journal)
            assert r.status_code == 404


def test_update_execution(organizations, default_org_id, default_team_id):
    """A successful journal publication returns the endpoint's 204."""
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    journal = {
        "experiment": {
            "extensions": [
                {
                    "name": "chaosiq",
                    "execution_id": x_id,
                    "experiment_id": experiment_id
                }
            ]
        }
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, x_id)
        m.put(url, status_code=204)
        with client_session(ENDPOINT, organizations) as s:
            r = publish_execution(s, journal)
            assert r.status_code == 204
def test_cannot_update_execution_on_request_connection_timeout(
        organizations, default_org_id, default_team_id):
    """A connection timeout while publishing the journal yields None.

    Fix: ``urls.full`` was called as (org, experiment, team, x_id), which
    swaps team and experiment relative to every sibling test's
    (org, team, experiment, x_id) ordering; the mocked URL therefore did
    not match the one the client targets.
    """
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    journal = {
        "experiment": {
            "extensions": [
                {
                    "name": "chaosiq",
                    "execution_id": x_id,
                    "experiment_id": experiment_id
                }
            ]
        }
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, x_id)
        m.put(url, exc=requests.exceptions.ConnectTimeout)
        with client_session(ENDPOINT, organizations) as s:
            r = publish_execution(s, journal)
            assert r is None
def test_fetch_execution(organizations, default_org_id, default_team_id):
    """Fetching an execution whose ids are in the journal returns a 200."""
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    journal = {
        "experiment": {
            "extensions": [
                {
                    "name": "chaosiq",
                    "execution_id": x_id,
                    "experiment_id": experiment_id
                }
            ]
        }
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, x_id)
        m.get(url, json=journal)
        with client_session(ENDPOINT, organizations) as s:
            r = fetch_execution(s, journal)
            assert r.status_code == 200
def test_cannot_fetch_execution_on_request_connection_timeout(organizations,
                                                              default_org_id,
                                                              default_team_id):
    """A connection timeout while fetching an execution yields None.

    Fix: ``urls.full`` arguments were ordered (org, experiment, team, x_id);
    reordered to (org, team, experiment, x_id) to match the sibling tests
    and the URL actually requested by the client.
    """
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    journal = {
        "experiment": {
            "extensions": [
                {
                    "name": "chaosiq",
                    "execution_id": x_id,
                    "experiment_id": experiment_id
                }
            ]
        }
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, x_id)
        m.get(url, exc=requests.exceptions.ConnectTimeout)
        with client_session(ENDPOINT, organizations) as s:
            r = fetch_execution(s, journal)
            assert r is None
def test_cannot_fetch_execution_non_published_experiment(organizations,
                                                         default_org_id,
                                                         default_team_id):
    """Fetching with an experiment lacking extensions makes no request.

    Fix: ``urls.full`` arguments were ordered (org, experiment, team, x_id);
    reordered to (org, team, experiment, x_id) for consistency with the
    sibling tests (the URL is never hit here, as asserted by call_count).
    """
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    journal = {
        "experiment": {}
    }

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, x_id)
        m.get(url, exc=requests.exceptions.ConnectTimeout)
        with client_session(ENDPOINT, organizations) as s:
            r = fetch_execution(s, journal)
            assert r is None
            assert m.call_count == 0
def test_publish_event(organizations, default_org_id, default_team_id):
    """Published events are CloudEvents-shaped JSON posted to the events URL."""
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    extensions = [
        {
            "name": "chaosiq",
            "execution_id": x_id,
            "experiment_id": experiment_id
        }
    ]
    activity = {}
    run = {}

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, x_id, with_events=True)
        m.post(url, status_code=201)
        with client_session(ENDPOINT, organizations) as s:
            publish_event(
                s, "start-experiment", activity, None, None, extensions, None,
                run)

            # Inspect the request body: it must carry the CloudEvents
            # envelope fields alongside the payload.
            r = json.loads(m.last_request.body)
            assert r["specversion"] in ["0.3", "1.0"]
            assert r["datacontenttype"] == "application/json"
            assert r["type"] == "start-experiment"
            assert r["source"] == "chaosiq-cloud"
            assert "id" in r
            assert "time" in r
            assert "data" in r
def test_cannot_publish_event_non_published_execution(organizations,
                                                      default_org_id,
                                                      default_team_id):
    """Without a chaosiq extension no event request is ever made.

    Fix: ``urls.full`` arguments were ordered (org, experiment, team, x_id);
    reordered to (org, team, experiment, x_id) for consistency with the
    sibling tests (the URL is never hit here, as asserted by call_count).
    """
    experiment_id = str(uuid.uuid4())
    x_id = str(uuid.uuid4())
    extensions = []
    activity = {}
    run = {}

    with requests_mock.mock() as m:
        url = urls.full(
            urls.base(ENDPOINT), default_org_id, default_team_id,
            experiment_id, x_id, with_events=True)
        m.post(url, status_code=201)
        with client_session(ENDPOINT, organizations) as s:
            publish_event(
                s, "start-experiment", activity, None, None, extensions, None,
                run)
            assert m.call_count == 0
def test_initialize_execution_requires_experiment_id():
    """Initialization short-circuits to None when the experiment is empty."""
    assert initialize_execution(None, {}, {}) is None
| StarcoderdataPython |
4817304 | import pickle
import torch
import torch.nn as nn
def load_vocab(path):
    """Unpickle and return the vocabulary object stored at *path*."""
    with open(path, 'rb') as source:
        return pickle.load(source)
def save_vocab(vocab, path):
    """Serialise *vocab* to *path* using pickle."""
    with open(path, 'wb') as sink:
        pickle.dump(vocab, sink)
def count_parameters(model: nn.Module):
    """Return the number of trainable (requires_grad) scalars in *model*."""
    trainable = (p.numel() for p in model.parameters() if p.requires_grad)
    return sum(trainable)
def init_weights(model: nn.Module):
    """Xavier-uniform initialise *model*'s weight tensor in place.

    Modules without a ``weight`` attribute, or whose weight is not at
    least 2-D (e.g. 1-D bias-like tensors), are left untouched.
    """
    has_matrix_weight = hasattr(model, 'weight') and model.weight.dim() > 1
    if has_matrix_weight:
        nn.init.xavier_uniform_(model.weight.data)
def get_device() -> torch.device:
    """Return the CUDA device when available, otherwise the CPU device.

    Fix: the return annotation said ``str`` but ``torch.device(...)`` is
    returned; the annotation now matches the actual return type.
    """
    return torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def epoch_time(start_time, end_time):
    """Split the elapsed interval into whole ``(minutes, seconds)``."""
    elapsed = end_time - start_time
    minutes = int(elapsed / 60)
    seconds = int(elapsed - minutes * 60)
    return minutes, seconds
def get_lr(optimizer: torch.optim.Optimizer):
    """Return the learning rate of *optimizer*'s first parameter group.

    Fix: the parameter annotation was ``torch.optim`` (a module, not a
    type); it is now ``torch.optim.Optimizer``. Returns None for an
    optimizer with no parameter groups, as before.
    """
    for param_group in optimizer.param_groups:
        return param_group['lr']
158895 | # Lists
courses=['History','Math','Physics','Compsci']
courses_2=['Football','Basketball']
print("Slicing Examples")
print(len(courses))
print(courses)
print(courses[-1])
print(courses[0:3])
print(courses[::-1])
print("\nAdd")
courses.append('Art')
print(courses)
print("\nInsert at the beginning")
courses.insert(0,'Science')
print(courses)
#Not what we want
#courses.insert(0,courses_2)
#print(courses)
#print(courses[0])
print("\nAppend we use to add individual items , Extend we use to add another list in the form of individual items")
courses.extend(courses_2)
print(courses)
print("\nremove")
courses.remove('Math')
print(courses)
print("\nTo remove the last value")
popped=courses.pop()
print(courses)
print (f"popped value is {popped}")
print("Reverse")
courses.reverse()
print(courses)
print("\nSort")
courses.sort()
print(courses)
num=[4,21,54,1,34]
num.sort()
print(num)
print("\nSort in descending Order")
courses.sort(reverse=True)
num.sort(reverse=True)
print(courses)
print(num)
print("\nSorting the list without altering the original list using the function sorted other than the sort method")
Sorted_courses=sorted(courses)
print(courses)
print(Sorted_courses)
print("\nFinding Min and Max Values")
print(min(num))
print(max(num))
print(sum(num))
print("\nFinding the Index\n")
print(courses.index('Compsci'))
print('Print("Art in Courses")')
print('Art' in courses)
print("\nUsing for Loop")
for course in courses:
print(course)
# TO get the index also
print("\nGetting Index value also using for loop")
for index,course in enumerate(courses):
print(index,course)
print("\nTO start with 1 as the index")
print("\n")
for index,course in enumerate(courses,start=1):
print(index,course)
print("\nturning the list into a string separated by some character")
course_str=' - '.join(courses)
print(course_str)
print("\nConverting the string back to List")
new_list=course_str.split(' - ')
print(new_list)
| StarcoderdataPython |
53741 | <filename>tilse/util/sentence_segmentation.py<gh_stars>0
import syntok.segmenter as segmenter
def sentence_segmenter(document):
    """Segment *document* into sentences with syntok.

    Each sentence is rebuilt from its token values, every token followed
    by a single space (which roughly reproduces the input, except for
    hyphenated word-breaks and "n't" contractions normalised by the
    tokenizer). Sentences are joined with newlines; paragraph boundaries
    are not otherwise preserved.
    """
    rebuilt = []
    for paragraph in segmenter.process(document):
        for sentence in paragraph:
            rebuilt.append("".join(token.value + " " for token in sentence))
    return "\n".join(rebuilt)
3308511 | <reponame>fga-gpp-mds/2018.1-Cris-Down
from django.db import models
from django.utils.translation import ugettext_lazy as _
from django.contrib.auth.models import Group, Permission, ContentType
from django.db.models import Q
from django.core.exceptions import ValidationError
from django.db.models.signals import post_delete
from django.dispatch import receiver
from django.contrib.auth.models import Group
from ..utils.validators import validate_cpf
from .model_user import User, BaseUserDelete
from .model_patient import Patient
from .model_responsible import Responsible
class Employee(BaseUserDelete, models.Model):
    """Profile that marks a non-specialist User as a staff employee.

    Saving forces the related user into staff status and into the
    'Employees' group, which is granted add/change permissions on
    Patient and Responsible the first time the group is created.
    """
    user = models.OneToOneField(
        User,
        on_delete=models.CASCADE,
        limit_choices_to=Q(has_specialization=False),
        verbose_name=_('User')
    )
    cpf = models.CharField(
        help_text=_("Please, enter a valid CPF" +
                    " in the following format: XXX.XXX.XXX-XX"),
        unique=True,
        validators=[validate_cpf],
        max_length=14
    )
    # this is separated from the list because of Django standars
    # if we leave like this we can access the choices from outside
    # example: employee.SPEECH_THERAPHY
    # note: those texts aren't using _() because they are not meant
    # to be translated norshown to the user
    SECRETAY = "SEC"
    ADMINISTRATION = "ADM"
    OTHER = "OTH"
    DEPARTAMENT_CHOICES = (
        (SECRETAY, _('Secretary')),
        (ADMINISTRATION, _('Administration')),
        (OTHER, _('Other')),
    )
    departament = models.CharField(
        _('Departament'),
        null=False,
        choices=DEPARTAMENT_CHOICES,
        help_text=_("The departament where this user works."),
        max_length=30
    )

    # const representig the name of the group wich this model
    # will add to the related user
    GROUP_NAME = "Employees"

    def __str__(self):
        return (self.user.get_username() +
                " - " +
                self.get_departament_display())

    def clean(self, *args, **kwargs):
        """Reject changing the related user of an existing employee."""
        try:
            user_db = Employee.objects.get(id=self.id).user
            if self.user != user_db:
                raise ValidationError(
                    _("Don't change users"))
            else:
                pass
        except Employee.DoesNotExist:
            pass
        self.user.clean()

    def save(self, *args, **kwargs):
        """Grant staff status and group membership, then persist."""
        # we wan't to add the required permissions to the
        # related user, before saving
        self.user.is_staff = True
        try:
            employee_group = Group.objects.get(name=Employee.GROUP_NAME)
        except Group.DoesNotExist:
            # NOTE(review): indentation reconstructed — the permission
            # grants are assumed to run only when the group is first
            # created (inside this except branch); confirm against the
            # original file.
            employee_group = Group.objects.create(name=Employee.GROUP_NAME)
            set_permissions(
                Patient,
                employee_group,
                ['change', 'add']
            )
            set_permissions(
                Responsible,
                employee_group,
                ['change', 'add']
            )
        self.user.groups.add(employee_group)
        self.user.clean()
        self.user.save()
        self.clean()
        super().save(*args, **kwargs)

    class Meta:
        verbose_name = _('Employee')
        verbose_name_plural = _('Employees')
def set_permissions(model, group, permissions_to_add):
    """Grant *group* the requested model-level permissions on *model*.

    ``permissions_to_add`` may contain any of 'add', 'delete' and
    'change'; each listed action adds the matching Django permission for
    the model's content type.
    """
    content_type = ContentType.objects.get_for_model(model)
    for action in ('add', 'delete', 'change'):
        if action in permissions_to_add:
            group.permissions.add(Permission.objects.get(
                content_type=content_type,
                codename__startswith=action + '_')
            )
| StarcoderdataPython |
3219783 | <filename>model-optimizer/extensions/middle/GroupNorm_test.py
"""
Copyright (C) 2018-2021 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import unittest
from extensions.middle.GroupNorm import GroupNormToMVN
from mo.front.common.partial_infer.utils import float_array, int64_array
from mo.utils.ir_engine.compare_graphs import compare_graphs
from mo.utils.unittest.graph import build_graph, result, build_graph_with_edge_attrs, connect, \
regular_op_with_shaped_data, valued_const_with_data, connect_data
shape = int64_array([1, 3, 5, 2])

# Graph before the transformation:
# Parameter -> GroupNorm(gamma, beta, num_groups=3) -> Result
nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}),
         **valued_const_with_data('gamma', float_array([0.5])),
         **valued_const_with_data('beta', float_array([0.5])),
         **regular_op_with_shaped_data('group_norm', shape,
                                       {'op': 'GroupNorm', 'name': 'group_norm', 'num_groups': 3, 'eps': 1e-9}),
         **result('result')
         }

edges = [*connect('input:0', '0:group_norm'),
         *connect('gamma', '1:group_norm'),
         *connect('beta', '2:group_norm'),
         *connect('group_norm:0', 'result'),
         ]

# Expected graph after GroupNormToMVN: GroupNorm decomposed into
# shape arithmetic + Reshape -> MVN -> Reshape -> Mul(gamma) -> Add(beta).
ref_nodes = {**regular_op_with_shaped_data('input', shape, {'type': 'Parameter', 'op': 'Parameter'}),
             **regular_op_with_shaped_data('shape1', int64_array([4]), {'op': 'ShapeOf'}),
             **regular_op_with_shaped_data('shape2', int64_array([4]), {'op': 'ShapeOf'}),
             **regular_op_with_shaped_data('shape3', int64_array([1]), {'op': 'ShapeOf'}),
             **regular_op_with_shaped_data('hcast1', int64_array([4]), {'op': 'Cast'}),
             **regular_op_with_shaped_data('cast2', int64_array([2]), {'op': 'Cast'}),
             **regular_op_with_shaped_data('cast3', int64_array([4]), {'op': 'Cast'}),
             **regular_op_with_shaped_data('gather1', int64_array([2]), {'op': 'Gather'}),
             **regular_op_with_shaped_data('gather2', int64_array([1]), {'op': 'Gather'}),
             **regular_op_with_shaped_data('gather3', int64_array([1]), {'op': 'Gather'}),
             **regular_op_with_shaped_data('mul1', int64_array([1]), {'op': 'Mul'}),
             **regular_op_with_shaped_data('mul2', int64_array([1]), {'op': 'Mul'}),
             **regular_op_with_shaped_data('mul3', shape, {'op': 'Mul'}),
             **regular_op_with_shaped_data('concat', int64_array([4]), {'op': 'Concat'}),
             **regular_op_with_shaped_data('reshape1', int64_array([3, 1, 5, 2]), {'op': 'Reshape'}),
             **regular_op_with_shaped_data('reshape2', shape, {'op': 'Reshape'}),
             **regular_op_with_shaped_data('squeeze', int64_array([]), {'op': 'Squeeze'}),
             **regular_op_with_shaped_data('range', int64_array([3]), {'op': 'Range'}),
             **regular_op_with_shaped_data('mvn', int64_array([3, 1, 5, 2]), {'op': 'MVN'}),
             **regular_op_with_shaped_data('add', shape, {'op': 'Add'}),
             **valued_const_with_data('shape/axis1', int64_array(0)),
             **valued_const_with_data('shape/ind1', int64_array([2, 3])),
             **valued_const_with_data('shape/axis2', int64_array(0)),
             **valued_const_with_data('shape/ind2', int64_array([0])),
             **valued_const_with_data('shape/axis3', int64_array(0)),
             **valued_const_with_data('shape/ind3', int64_array([1])),
             **valued_const_with_data('gn/rec', float_array([1./3])),
             **valued_const_with_data('group', int64_array([3])),
             **valued_const_with_data('squeeze/axis', int64_array([0])),
             **valued_const_with_data('range/start', int64_array(1)),
             **valued_const_with_data('range/step', int64_array(1)),
             **valued_const_with_data('gamma', float_array([[[[0.5]]]])),
             **valued_const_with_data('beta', float_array([[[[0.5]]]])),
             **result('result')
             }

ref_edges = [*connect('input', '0:reshape1'),
             *connect('input', 'shape1', skip_data=True),
             *connect('shape1:0', '0:gather1'),
             *connect('shape1:0', 'hcast1', skip_data=True),
             *connect('shape/ind1', '1:gather1'),
             *connect('shape/axis1', '2:gather1'),
             *connect('gather1', 'cast2'),
             *connect('hcast1', '0:gather3'),
             *connect('hcast1', '0:gather2', skip_data=True),
             *connect('shape/ind2', '1:gather2'),
             *connect('shape/axis2', '2:gather2'),
             *connect('gather2', '0:mul2'),
             *connect('group', '1:mul2'),
             *connect('shape/ind3', '1:gather3'),
             *connect('shape/axis3', '2:gather3'),
             *connect('gather3', '0:mul1'),
             *connect('gn/rec', '1:mul1'),
             *connect('mul2', '0:concat'),
             *connect('mul1', '1:concat'),
             *connect('cast2', '2:concat'),
             *connect('concat', 'cast3'),
             *connect('cast3', '1:reshape1'),
             *connect('reshape1', 'shape2'),
             *connect('shape2', 'shape3'),
             *connect('shape3', '0:squeeze'),
             *connect('squeeze/axis', '1:squeeze'),
             *connect('range/start', '0:range'),
             *connect('squeeze', '1:range'),
             *connect('range/step', '2:range'),
             *connect('reshape1', '0:mvn', skip_data=True),
             *connect('range', '1:mvn'),
             *connect('mvn', '0:reshape2'),
             *connect('shape1:0', '1:reshape2', skip_data=True),
             *connect('reshape2', '0:mul3'),
             *connect('gamma', '1:mul3'),
             *connect('mul3', '0:add'),
             *connect('beta', '1:add'),
             *connect('add', 'result')
             ]
class GroupNormToMVNTest(unittest.TestCase):
    """Checks that GroupNormToMVN rewrites the GroupNorm graph into the
    expected MVN-based reference graph."""

    def test_group_norm_1(self):
        graph = build_graph(nodes, edges)
        graph_ref = build_graph(ref_nodes, ref_edges)
        graph.graph['layout'] = 'NCHW'

        GroupNormToMVN().find_and_replace_pattern(graph)
        graph.clean_up()

        # Compare the transformed graph against the reference, anchored at
        # the 'result' node.
        (flag, resp) = compare_graphs(graph, graph_ref, 'result')
        self.assertTrue(flag, resp)
| StarcoderdataPython |
120531 | <filename>src/discord_bot/bot.py
from discord import Intents
from discord.ext.commands import Bot
from . import settings
from typing import TYPE_CHECKING
if TYPE_CHECKING:
from discord.ext.commands import Context
def build_bot() -> "Bot[Context]":
    """Construct the Discord bot with all intents and register every
    cog class listed in the settings."""
    bot: "Bot[Context]" = Bot(
        command_prefix=settings.COMMAND_PREFIX, intents=Intents.all())
    for cog_class in settings.COGS:
        bot.add_cog(cog_class(bot))
    return bot
| StarcoderdataPython |
3851 | <gh_stars>0
# Copyright 2018 The Forseti Security Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests the LogSinkRulesEngine."""
import unittest
import mock
from tests.unittest_utils import ForsetiTestCase
from tests.unittest_utils import get_datafile_path
from google.cloud.forseti.common.gcp_type.billing_account import BillingAccount
from google.cloud.forseti.common.gcp_type.folder import Folder
from google.cloud.forseti.common.gcp_type.log_sink import LogSink
from google.cloud.forseti.common.gcp_type.organization import Organization
from google.cloud.forseti.common.gcp_type.project import Project
from google.cloud.forseti.scanner.audit import log_sink_rules_engine as lsre
from google.cloud.forseti.scanner.audit.errors import InvalidRulesSchemaError
class LogSinkRulesEngineTest(ForsetiTestCase):
    """Tests for the LogSinkRulesEngine."""
    def setUp(self):
        """Set up GCP resources for tests."""
        # Work on the module under test directly, and mock out its logger
        # so test runs stay quiet.
        self.lsre = lsre
        self.lsre.LOGGER = mock.MagicMock()
        # Set up resources in the following hierarchy:
        #             +-----> billing_acct_abcd
        #             |
        #             |
        #             +-----------------------> proj-1
        #             |
        #             |
        # org_234 +-----> folder_56 +-----> proj-2
        #             |
        #             |
        #             +-----------------------> proj-3
        self.org_234 = Organization(
            '234',
            display_name='Organization 234',
            full_name='organization/234/',
            data='fake_org_data_234')
        self.billing_acct_abcd = BillingAccount(
            'ABCD-1234',
            display_name='Billing Account ABCD',
            full_name='organization/234/billingAccount/ABCD-1234/',
            data='fake_billing_account_data_abcd')
        self.folder_56 = Folder(
            '56',
            display_name='Folder 56',
            full_name='organization/234/folder/56/',
            data='fake_folder_data456456')
        self.proj_1 = Project(
            'proj-1',
            project_number=11223344,
            display_name='My project 1',
            parent=self.org_234,
            full_name='organization/234/project/proj-1/',
            data='fake_project_data_2341')
        self.proj_2 = Project(
            'proj-2',
            project_number=223344,
            display_name='My project 2',
            parent=self.folder_56,
            full_name='organization/234/folder/56/project/proj-2/',
            data='fake_project_data_4562')
        self.proj_3 = Project(
            'proj-3',
            project_number=33445566,
            display_name='My project 3',
            parent=self.org_234,
            full_name='organization/234/project/proj-3/',
            data='fake_project_data_1233')
    def get_engine_with_valid_rules(self):
        """Create a rule engine build with a valid rules file."""
        rules_local_path = get_datafile_path(
            __file__, 'log_sink_test_valid_rules.yaml')
        rules_engine = self.lsre.LogSinkRulesEngine(
            rules_file_path=rules_local_path)
        rules_engine.build_rule_book()
        return rules_engine
    def test_build_rule_book_from_local_yaml_file_works(self):
        """Tests that a RuleBook is built correctly with a yaml file."""
        rules_engine = self.get_engine_with_valid_rules()
        # Creates 'self' rules for 6 resources and 'children' rules for 2
        # (counts match the fixture log_sink_test_valid_rules.yaml).
        self.assertEqual(
            6, len(rules_engine.rule_book.resource_rules_map['self']))
        self.assertEqual(
            2, len(rules_engine.rule_book.resource_rules_map['children']))
        self_rule_resources = []
        for resource in rules_engine.rule_book.resource_rules_map['self']:
            self_rule_resources.append(resource.name)
        expected_rule_resources = [
            'billingAccounts/ABCD-1234', 'folders/56', 'organizations/234',
            'projects/proj-1', 'projects/proj-2', 'projects/proj-3']
        self.assertEqual(expected_rule_resources, sorted(self_rule_resources))
        child_rule_resources = []
        for resource in rules_engine.rule_book.resource_rules_map['children']:
            child_rule_resources.append(resource.name)
        expected_rule_resources = ['folders/56', 'organizations/234']
        self.assertEqual(expected_rule_resources, sorted(child_rule_resources))
    def test_build_rule_book_invalid_applies_to_fails(self):
        """Tests that a rule with invalid applies_to type cannot be created."""
        rules_local_path = get_datafile_path(
            __file__, 'log_sink_test_invalid_rules.yaml')
        rules_engine = self.lsre.LogSinkRulesEngine(
            rules_file_path=rules_local_path)
        with self.assertRaises(InvalidRulesSchemaError):
            rules_engine.build_rule_book()
    def test_project_with_no_violations(self):
        """Tests that no violations are produced for a correct project."""
        rules_engine = self.get_engine_with_valid_rules()
        # proj-1 needs an Audit Log sink.
        log_sinks = [
            LogSink(
                sink_id='audit_logs_to_bq',
                destination=('bigquery.googleapis.com/projects/my-audit-logs/'
                             'datasets/proj_1_logs'),
                sink_filter='logName:"logs/cloudaudit.googleapis.com"',
                include_children=False,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.proj_1,
                raw_json='_SINK_1_'
            ),
            LogSink(
                sink_id='compute_logs_saver',
                destination=('bigquery.googleapis.com/projects/proj_1/'
                             'datasets/compute_logs'),
                sink_filter='resource.type="gce_instance"',
                include_children=False,
                writer_identity=('serviceAccount:<PASSWORD>@'
                                 'gcp-sa-logging.iam.gserviceaccount.com'),
                parent=self.proj_1,
                raw_json='_SINK_2_'
            )
        ]
        actual_violations = rules_engine.find_violations(
            self.proj_1, log_sinks)
        self.assertEqual(set(), actual_violations)
    def test_folder_with_no_violations(self):
        """Tests that no violations are produced for a correct folder."""
        rules_engine = self.get_engine_with_valid_rules()
        # Rules disallow any folder-level LogSinks.
        actual_violations = rules_engine.find_violations(self.folder_56, [])
        self.assertEqual(set(), actual_violations)
    def test_billing_account_with_no_violations(self):
        """Tests that no violations are produced for a correct billing acct."""
        rules_engine = self.get_engine_with_valid_rules()
        log_sinks = [
            LogSink(
                sink_id='billing_logs',
                destination=('bigquery.googleapis.com/projects/my-audit-logs/'
                             'datasets/billing_logs'),
                sink_filter='',
                include_children=False,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.billing_acct_abcd,
                raw_json='__SINK_1__'
            ),
        ]
        actual_violations = rules_engine.find_violations(
            self.billing_acct_abcd, log_sinks)
        self.assertEqual(set(), actual_violations)
    def test_org_with_no_violations(self):
        """Tests that no violations are produced for a correct organization."""
        rules_engine = self.get_engine_with_valid_rules()
        # Org needs an Audit Log sink, but to any destination.
        log_sinks = [
            LogSink(
                sink_id='audit_logs_to_pubsub',
                destination=('pubsub.googleapis.com/projects/proj-3/topics/'
                             'org-audit-logs'),
                sink_filter='logName:"logs/cloudaudit.googleapis.com"',
                include_children=True,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.org_234,
                raw_json='__SINK_1__'
            )
        ]
        actual_violations = rules_engine.find_violations(
            self.org_234, log_sinks)
        self.assertEqual(set(), actual_violations)
    def test_project_missing_required_sinks(self):
        """Tests violations are produced for project missing required sinks."""
        rules_engine = self.get_engine_with_valid_rules()
        # proj-2 needs an Audit Log sink, by org-level rules, and a pubsub
        # sink, by folder-level rules.
        log_sinks = [
            LogSink(
                sink_id='non_audit_logs_to_bq',
                destination=('bigquery.googleapis.com/projects/my-audit-logs/'
                             'datasets/proj_2_logs'),
                sink_filter='logName:"logs/non-cloudaudit.googleapis.com"',
                include_children=False,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.proj_2,
                raw_json='__SINK_1__'
            ),
            LogSink(
                sink_id='compute_logs_saver',
                destination=('bigquery.googleapis.com/projects/proj_2/'
                             'datasets/compute_logs'),
                sink_filter='resource.type="gce_instance"',
                include_children=False,
                writer_identity=('serviceAccount:p12345-67890@'
                                 'gcp-sa-logging.iam.gserviceaccount.com'),
                parent=self.proj_2,
                raw_json='__SINK_2__'
            )
        ]
        actual_violations = rules_engine.find_violations(
            self.proj_2, log_sinks)
        # For "missing required sink" violations, sink_destination and
        # sink_filter echo the rule's patterns (regex-escaped), not any sink.
        expected_violations = set([
            lsre.Rule.RuleViolation(
                resource_name='proj-2',
                resource_type='project',
                resource_id='proj-2',
                full_name='organization/234/folder/56/project/proj-2/',
                rule_name='Require Audit Log sinks in all projects.',
                rule_index=0,
                violation_type='LOG_SINK_VIOLATION',
                sink_destination=('^bigquery\\.googleapis\\.com\\/projects\\/'
                                  'my\\-audit\\-logs\\/datasets\\/.+$'),
                sink_filter=('^logName\\:\\"logs\\/'
                             'cloudaudit\\.googleapis\\.com\\"$'),
                sink_include_children='*',
                resource_data=''
            ),
            lsre.Rule.RuleViolation(
                resource_name='proj-2',
                resource_type='project',
                resource_id='proj-2',
                full_name='organization/234/folder/56/project/proj-2/',
                rule_name='Require a PubSub sink in folder-56 projects.',
                rule_index=3,
                violation_type='LOG_SINK_VIOLATION',
                sink_destination='^pubsub\\.googleapis\\.com\\/.+$',
                sink_filter='^$',
                sink_include_children='*',
                resource_data=''
            )
        ])
        self.assertEqual(expected_violations, actual_violations)
    def test_project_whitelist_violation(self):
        """Tests violations are produced for non-whitelisted sinks."""
        rules_engine = self.get_engine_with_valid_rules()
        # proj-3 can only have BigQuery sinks.
        log_sinks = [
            LogSink(
                sink_id='audit_logs_to_bq',
                destination=('bigquery.googleapis.com/projects/my-audit-logs/'
                             'datasets/proj_1_logs'),
                sink_filter='logName:"logs/cloudaudit.googleapis.com"',
                include_children=False,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.proj_3,
                raw_json='__SINK_1__'
            ),
            LogSink(
                sink_id='audit_logs_to_pubsub',
                destination=('pubsub.googleapis.com/projects/proj-3/topics/'
                             'proj-audit-logs'),
                sink_filter='logName:"logs/cloudaudit.googleapis.com"',
                include_children=True,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.proj_3,
                raw_json='__SINK_2__'
            )
        ]
        actual_violations = rules_engine.find_violations(
            self.proj_3, log_sinks)
        # Whitelist violations report the offending sink itself, with its
        # literal (unescaped) destination and filter.
        expected_violations = set([
            lsre.Rule.RuleViolation(
                resource_name='projects/proj-3/sinks/audit_logs_to_pubsub',
                resource_type='sink',
                resource_id='audit_logs_to_pubsub',
                full_name='organization/234/project/proj-3/audit_logs_to_pubsub/',
                rule_name='Only allow BigQuery sinks in Proj-1 and Proj-3.',
                rule_index=4,
                violation_type='LOG_SINK_VIOLATION',
                sink_destination=('pubsub.googleapis.com/projects/proj-3/'
                                  'topics/proj-audit-logs'),
                sink_filter='logName:"logs/cloudaudit.googleapis.com"',
                sink_include_children=True,
                resource_data='__SINK_2__'
            )
        ])
        self.assertEqual(expected_violations, actual_violations)
    def test_folder_blacklist_violation(self):
        """Tests violations are produced for blacklisted sinks."""
        rules_engine = self.get_engine_with_valid_rules()
        # Rules disallow any folder-level LogSinks.
        log_sinks = [
            LogSink(
                sink_id='audit_logs_to_bq',
                destination=('bigquery.googleapis.com/projects/my-audit-logs/'
                             'datasets/folder_logs'),
                sink_filter='logName:"logs/cloudaudit.googleapis.com"',
                include_children=False,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.folder_56,
                raw_json='__SINK_1__'
            )
        ]
        actual_violations = rules_engine.find_violations(
            self.folder_56, log_sinks)
        expected_violations = set([
            lsre.Rule.RuleViolation(
                resource_name='folders/56/sinks/audit_logs_to_bq',
                resource_type='sink',
                resource_id='audit_logs_to_bq',
                full_name='organization/234/folder/56/audit_logs_to_bq/',
                rule_name='Disallow folder sinks.',
                rule_index=2,
                violation_type='LOG_SINK_VIOLATION',
                sink_destination=('bigquery.googleapis.com/projects/'
                                  'my-audit-logs/datasets/folder_logs'),
                sink_filter='logName:"logs/cloudaudit.googleapis.com"',
                sink_include_children=False,
                resource_data='__SINK_1__')
        ])
        self.assertEqual(expected_violations, actual_violations)
    def test_billing_account_with_whitelist_violations(self):
        """Tests violations are produced for billing account sinks."""
        rules_engine = self.get_engine_with_valid_rules()
        log_sinks = [
            LogSink(
                sink_id='billing_logs',
                destination=('bigquery.googleapis.com/projects/my-audit-logs/'
                             'datasets/wrong_dataset'),
                sink_filter='',
                include_children=False,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.billing_acct_abcd,
                raw_json='__SINK_1__'
            ),
        ]
        actual_violations = rules_engine.find_violations(
            self.billing_acct_abcd, log_sinks)
        expected_violations = set([
            lsre.Rule.RuleViolation(
                resource_type='sink',
                resource_id='billing_logs',
                resource_name='billingAccounts/ABCD-1234/sinks/billing_logs',
                full_name='organization/234/billingAccount/ABCD-1234/billing_logs/',
                rule_name=('Only allow Billing Account sinks to audit logs '
                           'project.'),
                rule_index=6,
                violation_type='LOG_SINK_VIOLATION',
                sink_destination=('bigquery.googleapis.com/projects/'
                                  'my-audit-logs/datasets/wrong_dataset'),
                sink_filter='',
                sink_include_children=False,
                resource_data='__SINK_1__')
        ])
        self.assertEqual(expected_violations, actual_violations)
    def test_org_missing_required_sinks(self):
        """Tests violations are produced for an org missing required sinks."""
        rules_engine = self.get_engine_with_valid_rules()
        # Org needs an Audit Log sink, including children.
        log_sinks = [
            LogSink(
                sink_id='sink_not_including_children',
                destination=('pubsub.googleapis.com/projects/proj-3/topics/'
                             'org-audit-logs'),
                sink_filter='logName:"logs/cloudaudit.googleapis.com"',
                include_children=False,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.org_234,
                raw_json='__SINK_1__'
            ),
            LogSink(
                sink_id='sink_with_wrong_filter',
                destination=('pubsub.googleapis.com/projects/proj-3/topics/'
                             'org-more-logs'),
                sink_filter='logName:"logs/otherapi.googleapis.com"',
                include_children=True,
                writer_identity='serviceAccount:<EMAIL>',
                parent=self.org_234,
                raw_json='__SINK_2__'
            )
        ]
        actual_violations = rules_engine.find_violations(
            self.org_234, log_sinks)
        # Neither sink satisfies the rule (one lacks include_children, the
        # other has the wrong filter), so the org itself is in violation.
        expected_violations = set([
            lsre.Rule.RuleViolation(
                resource_name='234',
                resource_type='organization',
                resource_id='234',
                full_name='organization/234/',
                rule_name='Require an Org Level audit log sink.',
                rule_index=1,
                violation_type='LOG_SINK_VIOLATION',
                sink_destination='^.*$',
                sink_filter=('^logName\\:\\"logs\\/'
                             'cloudaudit\\.googleapis\\.com\\"$'),
                sink_include_children=True,
                resource_data=''
            )
        ])
        self.assertEqual(expected_violations, actual_violations)
    def test_add_invalid_rules(self):
        """Tests that adding invalid rules raises exceptions."""
        rule_book = self.lsre.LogSinkRuleBook(global_configs=None)
        valid_resource = {
            'type': 'organization',
            'applies_to': 'children',
            'resource_ids': ['1234']
        }
        valid_sink_spec = {
            'destination': 'bigquery.*',
            'filter': '',
            'include_children': '*'
        }
        rule_book.add_rule(
            {
                'name': 'Valid rule',
                'resource': [valid_resource],
                'sink': valid_sink_spec,
                'mode': 'whitelist'
            }, 0)
        # Each entry below violates exactly one schema requirement,
        # described by its 'name' field.
        bad_rules = [
            {},
            {
                'name': 'Mising Resource',
                'mode': 'whitelist',
                'sink': valid_sink_spec,
            }, {
                'name': 'Mising sink',
                'resource': [valid_resource],
                'mode': 'whitelist',
            }, {
                'name': 'Bad mode',
                'resource': [valid_resource],
                'sink': valid_sink_spec,
                'mode': 'other',
            }, {
                'name': 'Bad resource type',
                'resource': [{
                    'type': 'bucket',
                    'applies_to': 'self',
                    'resource_ids': ['bucket-1']
                }],
                'sink': valid_sink_spec,
                'mode': 'whitelist'
            }, {
                'name': 'Bad applies to type',
                'resource': [{
                    'type': 'folder',
                    'applies_to': 'self_and_children',
                    'resource_ids': ['56']
                }],
                'sink': valid_sink_spec,
                'mode': 'whitelist'
            }, {
                'name': 'Bad applies to type',
                'resource': [{
                    'type': 'billing_account',
                    'applies_to': 'children',
                    'resource_ids': ['ABCD-1234']
                }],
                'sink': valid_sink_spec,
                'mode': 'whitelist'
            }, {
                'name': 'Empty resource_ids',
                'resource': [{
                    'type': 'project',
                    'applies_to': 'self',
                    'resource_ids': []
                }],
                'sink': valid_sink_spec,
                'mode': 'whitelist'
            }, {
                'name': 'Missing filter',
                'resource': [valid_resource],
                'sink': {
                    'destination': 'bigquery.*',
                    'include_children': '*'
                },
                'mode': 'whitelist'
            }, {
                'name': 'Bad include_children',
                'resource': [valid_resource],
                'sink': {
                    'destination': 'bigquery.*',
                    'filter': '*',
                    'include_children': 'Yes'
                },
                'mode': 'whitelist'
            }
        ]
        for rule in bad_rules:
            with self.assertRaises(InvalidRulesSchemaError):
                rule_book.add_rule(rule, 1)
# Allow running this test module directly, outside a test runner.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
1745317 | <reponame>xingjianleng/cogent3
#!/usr/bin/env python
"""Parser for PSL format (default output by blat).
Compatible with blat v.34
"""
from cogent3.util.table import Table
# Standard cogent3 module metadata; <NAME>/<EMAIL> are redaction
# placeholders from the source dump.
__author__ = "<NAME>, <NAME>"
__copyright__ = "Copyright 2007-2022, The Cogent Project"
__credits__ = ["<NAME>", "<NAME>", "<NAME>", "<NAME>"]
__license__ = "BSD-3"
__version__ = "2022.4.20a1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
def make_header(lines):
    """Collapse multiple PSL header rows into a single list of column names.

    Rows shorter than the widest row are padded in place with empty
    strings so they zip column-by-column.  A top cell ending in "-" is a
    hyphenated word continued on the bottom row and is joined without a
    space; otherwise the two cells are joined with a single space.
    """
    widest = max(len(row) for row in lines)
    # Pad every short row in place so zip(*lines) sees equal-length rows.
    for row in lines:
        row.extend([""] * (widest - len(row)))
    header = []
    for top, bottom in zip(*lines):
        top = top.strip()
        if top.endswith("-"):
            merged = top + bottom
        else:
            merged = " ".join([top, bottom.strip()])
        header.append(merged.strip())
    return header
def MinimalPslParser(data):
    """Yield the PSL version line, then the header, then each data row.

    Parameters
    ----------
    data
        Either a path to a PSL file or an iterable of lines (e.g. an
        open file).  A path is opened here and closed after parsing.

    Yields
    ------
    The ``psLayout version`` line (str) first, then the combined column
    header (list of str), then one list of tab-separated fields per
    alignment row.
    """
    if isinstance(data, str):
        # treat a plain string as a file path
        data = open(data)
    psl_version = None
    header = None
    rows = []
    for record in data:
        if psl_version is None:
            # the first record must identify the PSL layout version
            assert "psLayout version" in record
            psl_version = record.strip()
            yield psl_version
            continue
        if not record.strip():
            continue
        if header is None and record[0] == "-":
            # the dashed ruler line terminates the (possibly multi-row)
            # column-header block accumulated in `rows`
            header = make_header(rows)
            yield header
            rows = []
            continue
        rows += [record.rstrip().split("\t")]
        if header is not None:
            # past the header, every record is a single data row
            yield rows[0]
            rows = []
    try:
        data.close()
    except AttributeError:
        # data was an iterable of lines rather than an open file
        pass
def PslToTable(data):
    """Convert PSL format to a cogent3 Table.

    The version line becomes the table title and the combined header
    supplies the column names; remaining parser output becomes rows.
    """
    parser = MinimalPslParser(data)
    version = next(parser)
    header = next(parser)
    rows = list(parser)
    return Table(header=header, data=rows, title=version)
| StarcoderdataPython |
1767112 | <reponame>Dylan0888/csws-week3
# Fourth powers of 1 through 12, printed as a list, followed by a small
# interactive multiply-by-ten exercise.
fours = [i ** 4 for i in range(1, 13)]
print(fours)
print("These are the numbers 1 - 12 to the power of 4")

# Read a number from the user and scale it by ten.
number = int(input(" pick a number to multiply by 10: "))
result = number * 10
print(result)
| StarcoderdataPython |
4841352 | <reponame>musen-rse/examples_python
from abc import ABC, abstractmethod
from typing import List
from core.charts_abc import Chart
class Sensor(ABC):
    """Abstract sensor that pushes readings to attached charts.

    Concrete subclasses supply ``measure`` and ``name``; any number of
    chart objects can subscribe and will be redrawn via ``draw_all``
    (a simple observer pattern).
    """

    def __init__(self) -> None:
        # Charts currently subscribed to this sensor.
        self.charts: List[Chart] = []

    def add_chart(self, chart: Chart) -> None:
        """Subscribe *chart* to future readings."""
        self.charts.append(chart)

    def remove_chart(self, chart: Chart) -> None:
        """Unsubscribe *chart*; raises ValueError if it is not attached."""
        self.charts.remove(chart)

    def draw_all(self, value: float) -> None:
        """Push *value* to every subscribed chart, in attachment order."""
        for subscriber in self.charts:
            subscriber.draw(value)

    @abstractmethod
    def measure(self) -> float:
        """Return the current sensor reading."""
        ...

    @property
    @abstractmethod
    def name(self) -> str:
        """Human-readable identifier for this sensor."""
        ...
| StarcoderdataPython |
134385 | import torch.nn as nn
from torch.nn.utils.rnn import pad_packed_sequence, pack_padded_sequence
import torch
from layers.attention import MultiHeadedAttention
from layers.rezero import RezeroConnection
class Encoder(nn.Module):
    """BiGRU encoder with rezero-gated multi-head self-attention on top.

    Encodes a packed source sequence with a (bi)directional GRU, applies
    self-attention over the padded memory, and projects the final GRU
    hidden state into a decoder initialization vector.
    """

    def __init__(self, src_embed_size, ans_embed_size, hidden_size, dropout, bidir, n_head):
        super().__init__()
        # NOTE(review): unused in forward(); presumably intended for pooling
        # answer embeddings (ans_embed is also unused) -- confirm before removing.
        self.ans_pooling = nn.MaxPool1d(4)
        # Halve the per-direction size so the concatenated bidirectional
        # output still has hidden_size features.
        gru_hidden_size = hidden_size // (2 if bidir else 1)
        # NOTE(review): dropout on a single-layer GRU has no effect (PyTorch
        # applies it only between stacked layers).
        self.bigru = nn.GRU(src_embed_size, gru_hidden_size, 1, batch_first=True, dropout=dropout, bidirectional=bidir)
        self.multi_atten = MultiHeadedAttention(n_head, hidden_size, hidden_size, hidden_size, hidden_size)
        self.rezero_connection = RezeroConnection()
        self.decoder_init_proj = nn.Linear(gru_hidden_size, hidden_size)

    def forward(self, src_embed: torch.Tensor, src_mask, src_len, ans_embed):
        """
        :param src_embed: (B, src_len, embed); batch sorted by decreasing length
        :param src_mask: (B, src_len)
        :param src_len: (B,) sequence lengths
        :param ans_embed: (B, ans_len, embed) -- currently unused
        :return: (attended memory (B, src_len, hidden),
                  decoder init hidden (B, hidden))
        """
        packed = pack_padded_sequence(src_embed, src_len, batch_first=True)
        packed_memory, last_hidden = self.bigru(packed)
        memory, _ = pad_packed_sequence(packed_memory, batch_first=True)
        # Residual (rezero-weighted) self-attention over the GRU memory.
        atten_mem = self.rezero_connection(memory, lambda x: self.multi_atten(x, x, x, src_mask))
        # last_hidden[1] is the backward direction's final hidden state;
        # NOTE(review): assumes bidir=True -- index 1 does not exist otherwise.
        dec_init_hidden = torch.tanh(self.decoder_init_proj(last_hidden[1]))
        return atten_mem, dec_init_hidden
1766949 | from .lims_autosamplerMethod_postgresql_models import *
from SBaaS_base.sbaas_base_query_update import sbaas_base_query_update
from SBaaS_base.sbaas_base_query_drop import sbaas_base_query_drop
from SBaaS_base.sbaas_base_query_initialize import sbaas_base_query_initialize
from SBaaS_base.sbaas_base_query_insert import sbaas_base_query_insert
from SBaaS_base.sbaas_base_query_select import sbaas_base_query_select
from SBaaS_base.sbaas_base_query_delete import sbaas_base_query_delete
from SBaaS_base.sbaas_template_query import sbaas_template_query
class lims_autosamplerMethod_query(sbaas_template_query):
    """Table lifecycle queries for the autosampler method schema.

    Provides create/drop/reset helpers for the autosampler_parameters,
    autosampler_information and autosampler_method models.  ``self.engine``
    and ``self.session`` come from the sbaas_template_query base class.
    NOTE(review): SQLAlchemyError is expected to arrive via the models
    star-import at the top of this module -- confirm.
    """

    def initialize_supportedTables(self):
        """Register the tables supported by this query class."""
        tables_supported = {
            'autosampler_parameters': autosampler_parameters,
            'autosampler_information': autosampler_information,
            'autosampler_method': autosampler_method,
        }
        self.set_supportedTables(tables_supported)

    # table initializations:
    def drop_lims_autosamplerMethod(self):
        """Drop the autosampler tables if they exist (checkfirst=True)."""
        try:
            autosampler_parameters.__table__.drop(self.engine, True)
            autosampler_information.__table__.drop(self.engine, True)
            autosampler_method.__table__.drop(self.engine, True)
        except SQLAlchemyError as e:
            # best-effort: report and continue, matching the sibling helpers
            print(e)

    def reset_lims_autosamplerMethod(self):
        """Delete all rows from the autosampler tables and commit."""
        try:
            self.session.query(autosampler_parameters).delete(synchronize_session=False)
            self.session.query(autosampler_information).delete(synchronize_session=False)
            self.session.query(autosampler_method).delete(synchronize_session=False)
            self.session.commit()
        except SQLAlchemyError as e:
            print(e)

    def initialize_lims_autosamplerMethod(self):
        """Create the autosampler tables if missing (checkfirst=True)."""
        try:
            autosampler_parameters.__table__.create(self.engine, True)
            autosampler_information.__table__.create(self.engine, True)
            autosampler_method.__table__.create(self.engine, True)
        except SQLAlchemyError as e:
            print(e)
4819259 | """ query widgets
"""
# Copyright (c) 2020 ipyradiant contributors.
# Distributed under the terms of the Modified BSD License.
# Public API of this subpackage: re-export QueryWidget only.
__all__ = ["QueryWidget"]
from .query_widget import QueryWidget
| StarcoderdataPython |
1631874 | <gh_stars>1-10
from django.apps import AppConfig
class LatestTweetsConfig(AppConfig):
    """Django application configuration for the latest_tweets app."""
    # Dotted module path and registry label for the app.
    name = "latest_tweets"
    label = "latest_tweets"
    verbose_name = "Latest Tweets"
    # Implicit primary keys use 32-bit AutoField rather than BigAutoField.
    default_auto_field = "django.db.models.AutoField"
| StarcoderdataPython |
3399712 | <gh_stars>0
from .statsig_environment_tier import StatsigEnvironmentTier
import typing
class StatsigOptions:
    """An object of properties for initializing the sdk with additional parameters.

    :param api: base URL for the Statsig API; the public endpoint is used
        when the argument is omitted or explicitly None.
    :param tier: deployment environment tier, as a str or a
        StatsigEnvironmentTier enum member.
    :raises ValueError: if tier is neither a str nor a StatsigEnvironmentTier.
    """

    def __init__(self, api: str = "https://api.statsig.com/v1/", tier: 'typing.Any' = None):
        self._environment = None
        if tier is not None:
            if isinstance(tier, (str, StatsigEnvironmentTier)):
                # Normalize enum members to their string value.
                tier_str = tier.value if isinstance(tier, StatsigEnvironmentTier) else tier
                self.set_environment_parameter("tier", tier_str)
            else:
                raise ValueError(
                    'StatsigOptions.tier must be a str or StatsigEnvironmentTier')
        if api is None:
            # Guard against callers passing api=None explicitly.
            api = "https://api.statsig.com/v1/"
        self.api = api

    def set_environment_parameter(self, key: str, value: str):
        """Set one key in the environment dict, creating the dict lazily."""
        if self._environment is None:
            self._environment = {}
        self._environment[key] = value

    def _get_evironment(self):
        # NOTE(review): name has a typo ("evironment") but is kept for
        # backward compatibility with existing internal callers.
        return self._environment
134880 | <filename>eulxml/xmlmap/teimap.py
# file eulxml/xmlmap/teimap.py
#
# Copyright 2010,2011 Emory University Libraries
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from eulxml import xmlmap
# TODO: generic/base tei xml object with common attributes?
# Canonical TEI P5 namespace URI, shared by every mapping below.
TEI_NAMESPACE = 'http://www.tei-c.org/ns/1.0'
class _TeiBase(xmlmap.XmlObject):
    '''Common TEI namespace declarations, for use by all TEI XmlObject instances.'''
    ROOT_NS = TEI_NAMESPACE
    ROOT_NAME = 'tei'
    # 'tei' prefix used by the XPath expressions throughout this module.
    ROOT_NAMESPACES = {
        'tei' : ROOT_NS,
    }
class TeiLine(_TeiBase):
    """A single line of text or verse (tei:l)."""

    # @rend carries rendering hints such as "indent" or "indentN".
    rend = xmlmap.StringField("@rend")

    def indent(self):
        """Return the indentation level encoded in @rend.

        ``rend="indent"`` maps to 0 and ``rend="indentN"`` to ``int(N)``;
        handle the default indent in css.  Lines whose @rend does not
        request an indent (including a missing @rend, which previously
        raised AttributeError) return None.
        """
        if self.rend and self.rend.startswith("indent"):
            indentation = self.rend[len("indent"):]
            return int(indentation) if indentation else 0
        return None
class TeiLineGroup(_TeiBase):
    """A group of lines (tei:lg), e.g. a stanza; groups may nest."""
    head = xmlmap.StringField('tei:head')
    # nested line groups map back onto this same class
    linegroup = xmlmap.NodeListField('tei:lg', 'self')
    line = xmlmap.NodeListField('tei:l', TeiLine)
class TeiQuote(_TeiBase):
    """A quotation (tei:q/tei:quote) holding lines or line groups."""
    line = xmlmap.NodeListField('tei:l', TeiLine)
    linegroup = xmlmap.NodeListField('tei:lg', TeiLineGroup)
class TeiEpigraph(_TeiBase):
    """An epigraph (tei:epigraph): quoted material plus its citation."""
    # quotes may appear directly or wrapped inside a tei:cit element
    quote = xmlmap.NodeListField('tei:q|tei:quote|tei:cit/tei:q|tei:cit/tei:quote', TeiQuote)
    bibl = xmlmap.StringField('tei:bibl')
class TeiDiv(_TeiBase):
    """A text division (tei:div); divisions nest recursively via ``div``."""
    id = xmlmap.StringField('@xml:id')
    type = xmlmap.StringField('@type')
    author = xmlmap.StringField('tei:docAuthor/tei:name/tei:choice/tei:sic')
    docauthor = xmlmap.StringField('tei:docAuthor')
    title = xmlmap.StringField('tei:head[1]') # easy access to FIRST head
    title_list = xmlmap.StringListField('tei:head') # access to all heads when there are multiple
    text = xmlmap.StringField('.') # short-hand mapping for full text of a div (e.g., for short divs)
    linegroup = xmlmap.NodeListField('tei:lg', TeiLineGroup)
    # nested sub-divisions map back onto this same class
    div = xmlmap.NodeListField('tei:div', 'self')
    byline = xmlmap.StringField('tei:byline')
    epigraph = xmlmap.NodeListField('tei:epigraph', TeiEpigraph)
    p = xmlmap.StringListField('tei:p')
    q = xmlmap.StringListField('tei:q')
    quote = xmlmap.StringListField('tei:quote')
    # divs of any text embedded inside this div's floatingText elements
    floatingText = xmlmap.NodeListField('tei:floatingText/tei:body/tei:div', 'self')
class TeiFloatingText(_TeiBase):
    """An embedded text (tei:floatingText): its heading, line groups, and lines."""
    head = xmlmap.StringField("./tei:body/tei:head")
    line_group = xmlmap.NodeListField('.//tei:lg', TeiLineGroup)
    line = xmlmap.NodeListField('.//tei:l', TeiLine)
# note: not currently mapped to any of the existing tei objects... where to add?
class TeiFigure(_TeiBase):
    """A figure (tei:figure): heading, description, and graphic URL."""
    #entity = xmlmap.StringField("@entity") #not used in P5
    # TODO: ana should be a more generic attribute, common to many elements...
    ana = xmlmap.StringField("@ana") # FIXME: how to split on spaces? should be a list...
    head = xmlmap.StringField("tei:head")
    description = xmlmap.StringField("tei:figDesc")
    entity = xmlmap.StringField("tei:graphic/@url") #graphic replaces entity in p5.
    floatingText = xmlmap.NodeListField('tei:floatingText', TeiFloatingText)
# currently not mapped... should it be mapped by default? at what level?
class TeiInterp(_TeiBase):
    """A single interpretation (tei:interp): an id/value annotation pair."""
    id = xmlmap.StringField("@xml:id")
    value = xmlmap.StringField("@value")
class TeiSection(_TeiBase):
    """A top-level TEI section: front, body, or back."""
    # top-level sections -- front/body/back
    div = xmlmap.NodeListField('tei:div', TeiDiv)
    # every figure anywhere beneath this section
    all_figures = xmlmap.NodeListField('.//tei:figure', TeiFigure)
class TeiInterpGroup(_TeiBase):
    """A group of related interpretations (tei:interpGrp)."""
    type = xmlmap.StringField("@type")
    interp = xmlmap.NodeListField("tei:interp", TeiInterp)
class TeiName(_TeiBase):
    """A name (tei:name) with its original and regularized forms."""
    # NOTE(review): maps ``type`` to the @person attribute -- possibly meant
    # to be @type; confirm against the documents this is used with.
    type = xmlmap.StringField('@person')
    # regularized value for a name
    reg = xmlmap.StringField('tei:choice/tei:reg')
    # name as displayed in the text
    value = xmlmap.StringField('tei:choice/tei:sic')
class TeiHeader(_TeiBase):
    '''xmlmap object for a TEI (Text Encoding Initiative) header'''
    # bibliographic metadata from the file description (tei:fileDesc)
    title = xmlmap.StringField('tei:fileDesc/tei:titleStmt/tei:title')
    author_list = xmlmap.NodeListField('tei:fileDesc/tei:titleStmt/tei:author/tei:name',
                                       TeiName)
    editor_list = xmlmap.NodeListField('tei:fileDesc/tei:titleStmt/tei:editor/tei:name',
                                       TeiName)
    publisher = xmlmap.StringField('tei:fileDesc/tei:publicationStmt/tei:publisher')
    publication_date = xmlmap.StringField('tei:fileDesc/tei:publicationStmt/tei:date')
    availability = xmlmap.StringField('tei:fileDesc/tei:publicationStmt/tei:availability')
    source_description = xmlmap.StringField('tei:fileDesc/tei:sourceDesc')
    series_statement = xmlmap.StringField('tei:fileDesc/tei:seriesStmt')
class Tei(_TeiBase):
    """xmlmap object for a TEI (Text Encoding Initiative) XML document """
    id = xmlmap.StringField('@xml:id')
    # convenience shortcuts into the header's title statement
    title = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:title')
    author = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:author/tei:name/tei:choice/tei:sic')
    editor = xmlmap.StringField('tei:teiHeader/tei:fileDesc/tei:titleStmt/tei:editor/tei:name/tei:choice/tei:sic')
    header = xmlmap.NodeField('tei:teiHeader', TeiHeader)
    # the three top-level sections of the text
    front = xmlmap.NodeField('tei:text/tei:front', TeiSection)
    body = xmlmap.NodeField('tei:text/tei:body', TeiSection)
    back = xmlmap.NodeField('tei:text/tei:back', TeiSection)
| StarcoderdataPython |
186848 | # -*- coding: utf-8 -*-
# IPC: A python library for interprocess communication via standard streams.
#
# $Id$
#
# License: MIT
# Copyright 2015-2017 <NAME> (https://github.com/UncleRus)
# Copyright 2017 <NAME> (https://github.com/oleg-golovanov)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
# THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
# OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
# ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
# OTHER DEALINGS IN THE SOFTWARE.
import re
import os
from setuptools import setup
DIR = os.path.dirname(__file__)
with open(os.path.join(DIR, 'ipc.py')) as f:
version = re.search(r'__version__\s+=\s+[\'\"]+(.*)[\'\"]+', f.read()).group(1)
setup(
name='ipc',
version=version,
py_modules=['ipc'],
data_files=[('', ['LICENSE', 'README.md'])],
description='Interprocess communication via standard streams.',
zip_safe=False,
platforms='any',
long_description=open(os.path.join(DIR, 'README.md')).read(),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Developers',
'Operating System :: OS Independent',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3.0',
'Programming Language :: Python :: 3.1',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: Implementation :: CPython',
'Topic :: Software Development :: Libraries :: Python Modules'
]
)
| StarcoderdataPython |
3352946 | import json
import subprocess
from pathlib import Path
import luigi
from luigi.util import inherits, requires
from luigi.contrib.sqla import SQLAlchemyTarget
import pipeline.models.db_manager
from ..tools import tools
from .targets import TargetList
from .helpers import meets_requirements, is_inscope
from ..models.target_model import Target
@inherits(TargetList)
class AmassScan(luigi.Task):
""" Run ``amass`` scan to perform subdomain enumeration of given domain(s).
Note:
Expects **TARGET_FILE.domains** file to be a text file with one top-level domain per line.
Install:
.. code-block:: console
sudo apt-get install -y -q amass
Basic Example:
.. code-block:: console
amass enum -ip -brute -active -min-for-recursive 3 -df tesla -json amass.tesla.json
Luigi Example:
.. code-block:: console
PYTHONPATH=$(pwd) luigi --local-scheduler --module recon.amass AmassScan --target-file tesla
Args:
exempt_list: Path to a file providing blacklisted subdomains, one per line.
db_location: specifies the path to the database used for storing results *Required by upstream Task*
target_file: specifies the file on disk containing a list of ips or domains *Required by upstream Task*
results_dir: specifes the directory on disk to which all Task results are written *Required by upstream Task*
"""
exempt_list = luigi.Parameter(default="")
requirements = ["go", "amass"]
exception = True
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.db_mgr = pipeline.models.db_manager.DBManager(db_location=self.db_location)
self.results_subfolder = (Path(self.results_dir) / "amass-results").expanduser().resolve()
def requires(self):
""" AmassScan depends on TargetList to run.
TargetList expects target_file as a parameter.
Returns:
luigi.ExternalTask - TargetList
"""
meets_requirements(self.requirements, self.exception)
args = {"target_file": self.target_file, "results_dir": self.results_dir, "db_location": self.db_location}
return TargetList(**args)
def output(self):
""" Returns the target output for this task.
Naming convention for the output file is amass.json.
Returns:
luigi.local_target.LocalTarget
"""
results_subfolder = Path(self.results_dir) / "amass-results"
new_path = results_subfolder / "amass.json"
return luigi.LocalTarget(new_path.expanduser().resolve())
def run(self):
""" Defines the options/arguments sent to amass after processing.
Returns:
list: list of options/arguments, beginning with the name of the executable to run
"""
self.results_subfolder.mkdir(parents=True, exist_ok=True)
hostnames = self.db_mgr.get_all_hostnames()
if hostnames:
# TargetList generated some domains for us to scan with amass
amass_input_file = self.results_subfolder / "input-from-targetlist"
with open(amass_input_file, "w") as f:
for hostname in hostnames:
f.write(f"{hostname}\n")
else:
return subprocess.run(f"touch {self.output().path}".split())
command = [
tools.get("amass").get("path"),
"enum",
"-active",
"-ip",
"-brute",
"-min-for-recursive",
"3",
"-df",
str(amass_input_file),
"-json",
self.output().path,
]
if self.exempt_list:
command.append("-blf") # Path to a file providing blacklisted subdomains
command.append(self.exempt_list)
subprocess.run(command)
amass_input_file.unlink()
@requires(AmassScan)
class ParseAmassOutput(luigi.Task):
    """ Read amass JSON results and create categorized entries into ip|subdomain files.

    Args:
        db_location: specifies the path to the database used for storing results *Required by upstream Task*
        target_file: specifies the file on disk containing a list of ips or domains *Required by upstream Task*
        scope_file: specifies the scope in JSON files taken by BurpSuite
        exempt_list: Path to a file providing blacklisted subdomains, one per line. *Optional by upstream Task*
        results_dir: specifes the directory on disk to which all Task results are written *Required by upstream Task*
    """

    scope_file = luigi.Parameter(default="")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared database handle; targets parsed from amass output land here.
        self.db_mgr = pipeline.models.db_manager.DBManager(db_location=self.db_location)
        self.results_subfolder = (Path(self.results_dir) / "amass-results").expanduser().resolve()

    def output(self):
        """ Returns the target output files for this task.

        Returns:
            luigi.contrib.sqla.SQLAlchemyTarget
        """
        return SQLAlchemyTarget(
            connection_string=self.db_mgr.connection_string, target_table="target", update_id=self.task_id
        )

    def run(self):
        """ Parse the json file produced by AmassScan and categorize the results into ip|subdomain files.

        An example (prettified) entry from the json file is shown below

            {
                "Timestamp": "2019-09-22T19:20:13-05:00",
                "name": "beta-partners.tesla.com",
                "domain": "tesla.com",
                "addresses": [
                    {
                        "ip": "192.168.127.12",
                        "cidr": "172.16.31.10/24",
                        "asn": 394161,
                        "desc": "TESLA - Tesla"
                    }
                ],
                "tag": "ext",
                "source": "Previous Enum"
            }
        """
        self.results_subfolder.mkdir(parents=True, exist_ok=True)

        # Upstream wrote an empty file when it had no hostnames to scan;
        # nothing to parse, but still mark this task complete.
        if Path(self.input().path).stat().st_size == 0:
            self.output().touch()
            return

        # amass emits one JSON object per line (JSON-lines, not a JSON array).
        amass_json = self.input().open()
        with amass_json as amass_json_file:
            for line in amass_json_file:
                entry = json.loads(line)

                # test domain and enter into database only if within scope
                if is_inscope(entry.get("name"), self.scope_file):
                    tgt = self.db_mgr.get_or_create(Target, hostname=entry.get("name"), is_web=True)

                    for address in entry.get("addresses"):
                        ipaddr = address.get("ip")
                        tgt = self.db_mgr.add_ipv4_or_v6_address_to_target(tgt, ipaddr)

                    self.db_mgr.add(tgt)

        self.output().touch()
        self.db_mgr.close()
| StarcoderdataPython |
142923 | import unittest
from datetime import date
from pyramid import testing
from whoahqa.utils import format_date_for_locale
class TestLocaleDate(unittest.TestCase):
    """Checks locale-aware date formatting via format_date_for_locale."""

    def test_returns_date_string_as_per_request_locale(self):
        # A DummyRequest carries whatever locale pyramid defaults to;
        # the expected value here assumes an English month abbreviation.
        dummy_request = testing.DummyRequest()
        result = format_date_for_locale(date(2014, 3, 13), "MMM Y", dummy_request)
        self.assertEqual(result, "Mar 2014")
| StarcoderdataPython |
3242751 | <gh_stars>1-10
#!/usr/bin/python
# Filename: verilog_port_analysis.py
class VerilogPort:
    """A single port parsed from a Verilog module.

    Most attributes (width, direction, style, ...) are filled in by the
    caller (VerilogModule) after construction.  Defaults are provided here
    so that ``print`` does not raise on a partially initialised instance.
    """

    def __init__(self):
        self.name = ''
        self.ins_name = ''
        self.style = ''       # 'Scalar', 'AXIS' or 'BRAM'
        self.axis_last = False
        # Previously these were only assigned by the parser, which made
        # print() raise AttributeError on a freshly created port.
        self.width = 0
        self.direction = ''   # 'in', 'out' or 'inout'

    def print(self):
        """Print a one-line summary: style:name,width,direction."""
        print(self.style+':'+self.name+','+str(self.width)+','+self.direction)
class VerilogModule:
    """Parses a Verilog source file and classifies its ports.

    Ports are grouped into AXI-Stream bundles, BRAM interfaces and plain
    scalars; ports whose names contain one of *ex_prefixs* are additionally
    recorded as external ports.

    NOTE(review): indentation of this block was reconstructed from context
    (the published copy lost its whitespace); verify nesting against the
    original file.
    """

    def __init__(self, filename, ex_prefixs):
        self.params = {}            # parameter/localparam name -> value (as text)
        self.ports_native = []      # every input/output as parsed, unclassified
        self.ports = []             # classified ports (AXIS / BRAM / Scalar)
        self.external_ports = []    # ports matching one of the external prefixes
        self.external_prefixs = ex_prefixs
        # NOTE(review): this file handle is never closed.
        f = open(filename, 'r')
        step = 'start'              # appears unused
        incomments = False          # inside a /* ... */ block comment
        line_cnt = 0
        while True:
            line = f.readline()
            line_cnt = line_cnt + 1
            if (len(line) == 0):  # EOF
                break
            line = line.expandtabs()
            line = line.strip()
            # jump over comments
            if (incomments):
                # NOTE(review): str.find() returns -1 (truthy) when '*/' is
                # absent, so this branch also runs -- and clears incomments --
                # for lines that do not actually close the comment.
                if (line.find('*/')):
                    strs = line.partition('*/')
                    line = strs[2]
                    incomments = False
                else:
                    line = ''
            if (line.find('//') >= 0):
                strs = line.partition('//')
                line = strs[0]
            if (line.find('/*') >= 0):
                strs = line.partition('/*')
                line = strs[0]
                incomments = True
            # strip punctuation so whitespace tokenising works below
            line = line.replace('`', '')
            line = line.replace(',', '')
            line = line.replace(';', '')
            line = line.replace('\n', '')
            strs = line.split()
            if (len(strs) < 2):
                continue
            # find a parameter
            if ((strs[0] == 'parameter') or (strs[0] == 'localparam')):
                if (len(strs) < 4):
                    raise RuntimeError(r'Line ' + str(line_cnt) + ' format error.')
                if (strs[2] != '='):
                    raise RuntimeError(r'Line ' + str(line_cnt) + ' format error.')
                param_name = strs[1]
                param_value = strs[3]
                self.params[param_name] = param_value
            # find module name
            if (strs[0] == 'module'):
                self.name = strs[1]
            # find a port
            if ((strs[0] == 'input') or (strs[0] == 'output')):
                if (len(strs) < 2):
                    raise RuntimeError(r'Line ' + str(line_cnt) + ' format error.')
                port = VerilogPort()
                if (strs[0] == 'input'):
                    port.direction = 'in'
                else:
                    port.direction = 'out'
                str1 = strs[1].replace(' ', '')
                if (str1[0] == '['):  # it's a bus
                    if (len(strs) < 2):
                        raise RuntimeError(r'Line ' + str(line_cnt) + ' format error.')
                    str1 = str1.replace('[', '')
                    str1 = str1.replace(']', '')
                    strs1 = str1.split(':')
                    msb = int(strs1[0])  # self.getvalue(strs1[0])
                    lsb = int(strs1[1])  # self.getvalue(strs1[2])
                    port.width = abs(msb - lsb) + 1
                    port.name = strs[2]
                else:
                    port.width = 1
                    port.name = str1
                port.New = True  # "New" == not yet classified
                self.ports_native.append(port)
        # search exteranl ports
        for port in self.ports_native:
            if (port.New):
                for prefix in self.external_prefixs:
                    if (prefix in port.name):
                        ex_port = VerilogPort()
                        ex_port.name = port.name
                        ex_port.style = 'Scalar'
                        ex_port.direction = port.direction
                        ex_port.width = port.width
                        self.external_ports.append(ex_port)
                        print('Find external port {0}'.format(ex_port.name))
                        port.New = False
                        break
        # NOTE(review): 'TLAST' lacks the leading underscore the other AXIS
        # suffixes have -- possibly intentional, possibly a typo.
        axis_suffixs = ('_TDATA', '_TREADY', '_TVALID', 'TLAST')
        bram_suffixs = ('_ADDR_A', '_DOUT_A', '_WEN_A', '_EN_A', '_DIN_A', '_CLK_A', '_RST_A')
        for port in self.ports_native:
            if (port.New):
                axis_name = ''
                bram_name = ''
                suffix = axis_suffixs[0]
                if (port.name.upper().endswith(suffix)):
                    axis_name = port.name[:-len(suffix)]
                suffix = bram_suffixs[0]
                if (port.name.upper().endswith(suffix)):
                    bram_name = port.name[:-len(suffix)]
                if (bram_name != ''):
                    # collect every sibling port belonging to this BRAM bundle
                    bram_port_exists = [False]*len(bram_suffixs)
                    for port1 in self.ports_native:
                        if (port1.New):
                            bram_port_index = 0
                            for suffix in bram_suffixs:
                                if (port1.name.startswith(bram_name) and \
                                    port1.name.upper().endswith(suffix) and \
                                    len(port1.name) == (len(bram_name) + len(suffix))):
                                    bram_suffix = suffix
                                    break
                                bram_port_index = bram_port_index + 1
                            if (bram_port_index < len(bram_suffixs)):
                                bram_port_exists[bram_port_index] = True
                                port1.New = False
                                if (bram_suffix.upper() == '_DIN_A'):
                                    bram_in_width = port1.width
                                if (bram_suffix.upper() == '_DOUT_A'):
                                    bram_out_width = port1.width
                                if (bram_suffix.upper() == '_ADDR_A'):
                                    bram_addr_width = port1.width
                                if (bram_suffix.upper() == '_WEN_A'):
                                    bram_wena_width = port1.width
                    # A usable bundle needs ADDR_A plus either (DOUT_A and
                    # WEN_A) or DIN_A.
                    if (bram_port_exists[0] and \
                        ((bram_port_exists[1] and bram_port_exists[2]) or \
                        bram_port_exists[4])):
                        bram_port = VerilogPort()
                        bram_port.name = bram_name
                        bram_port.addr_width = bram_addr_width
                        bram_port.wena_width = bram_wena_width
                        bram_port.style = 'BRAM'
                        if (bram_port_exists[1] and bram_port_exists[4]):
                            bram_port.direction = 'inout'
                        elif (bram_port_exists[1]):
                            bram_port.direction = 'out'
                        elif (bram_port_exists[4]):
                            bram_port.direction = 'in'
                        else:
                            raise RuntimeError(r'BRAM port ' + bram_name + ' direction unknown.')
                        bram_port.has_en = bram_port_exists[3]
                        bram_port.has_clk = bram_port_exists[5]
                        bram_port.has_rst = bram_port_exists[6]
                        if (bram_port.direction == 'inout'):
                            if (bram_in_width != bram_out_width):
                                raise RuntimeError(r'BRAM port ' + bram_name + ' got different in/out width.')
                        if (bram_port.direction == 'in' or bram_port.direction == 'inout'):
                            bram_port.width = bram_in_width
                        else:
                            bram_port.width = bram_out_width
                        self.ports.append(bram_port)
                        print('Find BRAM port {0}'.format(bram_name))
                    else:
                        # NOTE(review): message says AXIS but this is the
                        # BRAM branch, and axis_name is usually '' here.
                        raise RuntimeError(r'AXIS ' + axis_name + ' incomplete.')
                if (axis_name != ''):
                    # seach for a complete AXIS port group
                    axis_port_exists = [False, False, False, False]
                    for port1 in self.ports_native:
                        if (port1.New):
                            axis_port_index = 0
                            for suffix in axis_suffixs:
                                if (port1.name.startswith(axis_name) and \
                                    port1.name.upper().endswith(suffix) and \
                                    len(port1.name) == (len(axis_name) + len(suffix))):
                                    break
                                axis_port_index = axis_port_index + 1
                            if (axis_port_index < 4):
                                axis_port_exists[axis_port_index] = True
                                port1.New = False  # mark the port as Old
                                if (axis_port_index == 0):
                                    # TDATA carries the bundle's direction and width
                                    axis_direction = port1.direction
                                    axis_width = port1.width
                    # if tdata/tready/tvalid exist
                    if (axis_port_exists[0:3] == [True, True, True]):
                        axis_port = VerilogPort()
                        axis_port.name = axis_name
                        axis_port.style = 'AXIS'
                        axis_port.direction = axis_direction
                        axis_port.axis_last = axis_port_exists[3]
                        axis_port.width = axis_width
                        self.ports.append(axis_port)
                        print('Find AXIS port {0}'.format(axis_name))
                    else:
                        raise RuntimeError(r'AXIS ' + axis_name + ' incomplete.')
        # anything still unclassified becomes a plain scalar port
        for port in self.ports_native:
            if (port.New):
                scalar_port = VerilogPort()
                scalar_port.name = port.name
                scalar_port.style = 'Scalar'
                scalar_port.direction = port.direction
                scalar_port.width = port.width
                self.ports.append(scalar_port)
                print('Find SCALAR port {0}'.format(scalar_port.name))
        # self.ports = sorted(self.ports, key = lambda port:port.name)
        # self.ports = sorted(self.ports, key = lambda port:port.style)
        for port in self.ports:
            port.comma = ','
            port.module = self
        # NOTE(review): IndexError if the module declared no ports at all.
        self.ports[-1].comma = ');'
        for port in self.external_ports:
            port.comma = ','
            port.module = self

    def print(self):
        """Print a one-line summary of every classified port."""
        for port in self.ports:
            port.print()
#md = VerilogModule(r'HDL/MergeSort.v');
#End of verilog_port_analysis.py
| StarcoderdataPython |
3380606 | #!/usr/bin/env python
"""
Template for making scripts to run from the command line
Copyright (C) CSIRO 2020
"""
import pylab
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
import os
import sys
import logging
__author__ = "<NAME> <<EMAIL>>"
def _main():
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
parser = ArgumentParser(description='Script description', formatter_class=ArgumentDefaultsHelpFormatter)
parser.add_argument('-v', '--verbose', action='store_true', help='Be verbose')
parser.add_argument(dest='files', nargs='+')
parser.set_defaults(verbose=False)
values = parser.parse_args()
if values.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
# Entry point when executed as a script (no-op on import).
if __name__ == '__main__':
    _main()
| StarcoderdataPython |
1642532 | <gh_stars>0
#!/usr/bin/env python3
#----------------------------------------------------------------------------
# Copyright (c) 2018 FIRST. All Rights Reserved.
# Open Source Software - may be modified and shared by FRC teams. The code
# must be accompanied by the FIRST BSD license file in the root directory of
# the project.
#----------------------------------------------------------------------------
import json
import time
import sys
import numpy as np
import cv2
import glob
from PIL import Image
from networktables import *
import ntcore
import os
import threading
from http.server import BaseHTTPRequestHandler,HTTPServer
from socketserver import ThreadingMixIn
from io import StringIO ## for Python 3
import time
import threading
class CamHandler(BaseHTTPRequestHandler):
    """Serves the latest captured frame (module-global ``img``).

    ``*.mjpg`` requests get an endless multipart MJPEG stream;
    ``*.html`` requests get a minimal viewer page embedding that stream.
    """

    def do_GET(self):
        global img
        if self.path.endswith('.mjpg'):
            self.send_response(200)
            self.send_header('Content-type', 'multipart/x-mixed-replace; boundary=--jpgboundary')
            self.end_headers()
            while True:
                try:
                    if img is not None:
                        _, jpg = cv2.imencode('.jpg', img)
                        self.wfile.write(b'--jpgboundary')
                        self.send_header('Content-type', 'image/jpeg')
                        self.send_header('Content-length', str(jpg.size))
                        self.end_headers()
                        # tobytes() replaces the deprecated/removed ndarray.tostring()
                        self.wfile.write(jpg.tobytes())
                except KeyboardInterrupt:
                    break
            return
        if self.path.endswith('.html'):
            self.send_response(200)
            self.send_header('Content-type', 'text/html')
            self.end_headers()
            # HTTP bodies must be bytes in Python 3; the previous str
            # writes raised TypeError at runtime.
            self.wfile.write(b'<html><head></head><body>')
            self.wfile.write(b'<img src="http://10.17.36.10:5805/cam.mjpg"/>')
            self.wfile.write(b'</body></html>')
            return
# ThreadingMixIn is listed first so its process_request override wins in
# the MRO, giving each HTTP request its own thread.
class ThreadedHTTPServer(ThreadingMixIn, HTTPServer):
    """Handle requests in a separate thread."""
##############################################################################
# Camera Calibrator
##############################################################################
class CameraParamGetter():
    """Computes camera intrinsics from chessboard calibration images.

    NOTE(review): indentation reconstructed from context; verify nesting
    against the original file.
    """

    def calibrateToChess(self):
        """Run OpenCV chessboard calibration over every *.jpg in the CWD.

        Expects an 8x6 inner-corner chessboard.  Writes an annotated
        "<name> chess board.jpg" per detected board and prints the RMS
        error, camera matrix and distortion coefficients.
        """
        # termination criteria
        criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 30, 0.001)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        objp = np.zeros((6*8, 3), np.float32)
        objp[:, :2] = np.mgrid[0:8, 0:6].T.reshape(-1, 2)
        # Arrays to store object points and image points from all the images.
        objpoints = []  # 3d point in real world space
        imgpoints = []  # 2d points in image plane.
        images = glob.glob('*.jpg')
        # First image fixes the resolution passed to calibrateCamera below;
        # IndexError here means no *.jpg files were found.
        graysize = cv2.cvtColor(cv2.imread(images[0]), cv2.COLOR_BGR2GRAY)
        for fname in images:
            print("Starting a new Image:"+fname)
            img = cv2.imread(fname)
            gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Find the chess board corners
            ret, corners = cv2.findChessboardCorners(gray, (8, 6), None)
            # If found, add object points, image points (after refining them)
            if ret == True:
                objpoints.append(objp)
                corners2 = cv2.cornerSubPix(gray, corners, (11, 11), (-1, -1), criteria)
                imgpoints.append(corners2)
                # Draw and display the corners
                img = cv2.drawChessboardCorners(img, (8, 6), corners2, ret)
                cv2.imwrite(fname[:-4]+" chess board.jpg", img)
            print("completed another image")
        ret, mtx, dist, _, _ = cv2.calibrateCamera(objpoints, imgpoints, graysize.shape[::-1], None, None)
        print(ret)
        print(mtx)
        print(dist)
if __name__ == "__main__":
    # NOTE(review): `global` at module scope is a no-op; `img` is the frame
    # shared with CamHandler's MJPEG stream.
    global img
    img = None
    print("Casserole Vision Processing starting")
    print("OpenCV Version: {}".format(cv2.__version__))
    print("numpy Version: {}".format(np.__version__))
    # cap = cv2.VideoCapture('0')
    PATH = os.getcwd()
    # Calibration samples are expected in ./ChessSamples.
    os.chdir(PATH + "//ChessSamples")
    calibrator = CameraParamGetter()
    calibrator.calibrateToChess()
    print("It calibrated all the images. but you are still probably dumb")
    # start NetworkTables
    ntinst = NetworkTablesInstance.getDefault()
    print("Setting up NetworkTables client for team {}".format(1736))
    ntinst.startClientTeam(1736)
    # Open the default camera at 1080p with reduced saturation.
    capture = cv2.VideoCapture(0)
    capture.set(cv2.CAP_PROP_FRAME_WIDTH, 1920)
    capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 1080)
    capture.set(cv2.CAP_PROP_SATURATION, 0.2)
| StarcoderdataPython |
1737725 | <reponame>BarracudaPff/code-golf-data-pythpn
# NOTE(review): the try body was reduced to `pass` in this published copy --
# it presumably imported SimpleWebSocketServer (WebSocket,
# SimpleSSLWebSocketServer), which the classes below depend on.
try:
    pass
except ImportError:
    sys.exit("install SimpleWebSocketServer")

# Work queue feeding (websocket, request_id) pairs to WsClientThread.
request_queue = queue.Queue()
class ElectrumWebSocket(WebSocket):
    """Websocket endpoint: clients send "id:<request_id>" to watch a payment."""

    def handleMessage(self):
        # Messages must be of the form "id:<request_id>".
        assert self.data[0:3] == "id:"
        util.print_error("message received", self.data)
        request_queue.put((self, self.data[3:]))

    def handleConnected(self):
        util.print_error("connected", self.address)

    def handleClose(self):
        util.print_error("closed", self.address)
class WsClientThread(util.DaemonThread):
    """Watches payment-request addresses and notifies websockets when paid.

    A helper thread consumes (websocket, request_id) pairs from the global
    request_queue and subscribes to the corresponding scripthash; the main
    run() loop processes subscription/balance responses and pushes "paid"
    to clients once the received balance reaches the requested amount.
    """

    def __init__(self, config, network):
        util.DaemonThread.__init__(self)
        self.network = network
        self.config = config
        self.response_queue = queue.Queue()
        # address -> list of (websocket, expected amount)
        self.subscriptions = defaultdict(list)

    def make_request(self, request_id):
        """Load a stored payment request and return (address, amount)."""
        rdir = self.config.get("requests_dir")
        # Requests are sharded by the first two characters of their id.
        n = os.path.join(rdir, "req", request_id[0], request_id[1], request_id, request_id + ".json")
        with open(n) as f:
            s = f.read()
        d = json.loads(s)
        addr = d.get("address")
        amount = d.get("amount")
        return addr, amount

    def reading_thread(self):
        """Consume websocket subscriptions and register them with the network."""
        while self.is_running():
            try:
                ws, request_id = request_queue.get()
            except queue.Empty:
                continue
            try:
                addr, amount = self.make_request(request_id)
            # NOTE(review): bare except silently drops malformed/missing
            # requests -- consider narrowing to (OSError, ValueError, KeyError).
            except:
                continue
            l = self.subscriptions.get(addr, [])
            l.append((ws, amount))
            self.subscriptions[addr] = l
            h = self.network.addr_to_scripthash(addr)
            self.network.send([("blockchain.scripthash.subscribe", [h])], self.response_queue.put)

    def run(self):
        """Process network responses; notify clients whose requests are paid."""
        threading.Thread(target=self.reading_thread).start()
        while self.is_running():
            try:
                r = self.response_queue.get(timeout=0.1)
            except queue.Empty:
                continue
            util.print_error("response", r)
            method = r.get("method")
            params = r.get("params")
            result = r.get("result")
            if result is None:
                continue
            if method == "blockchain.scripthash.subscribe":
                # Status change: ask for the current balance of that scripthash.
                self.network.send([("blockchain.scripthash.get_balance", params)], self.response_queue.put)
            elif method == "blockchain.scripthash.get_balance":
                h = params[0]
                addr = self.network.h2addr.get(h, None)
                if addr is None:
                    util.print_error("can't find address for scripthash: %s" % h)
                l = self.subscriptions.get(addr, [])
                for ws, amount in l:
                    if not ws.closed:
                        # result holds confirmed/unconfirmed balances; the
                        # request is paid once their sum reaches the amount.
                        if sum(result.values()) >= amount:
                            ws.sendMessage("paid")
class WebSocketServer(threading.Thread):
    """Daemon thread hosting the SSL websocket server for payment requests."""

    def __init__(self, config, ns):
        threading.Thread.__init__(self)
        self.config = config
        self.net_server = ns
        # Daemon thread: does not block interpreter shutdown.
        self.daemon = True

    def run(self):
        # The client thread does the actual subscription bookkeeping.
        t = WsClientThread(self.config, self.net_server)
        t.start()
        host = self.config.get("websocket_server")
        port = self.config.get("websocket_port", 9999)
        certfile = self.config.get("ssl_chain")
        keyfile = self.config.get("ssl_privkey")
        self.server = SimpleSSLWebSocketServer(host, port, ElectrumWebSocket, certfile, keyfile)
        self.server.serveforever()
4831201 | {
"includes": [
"../common.gypi"
],
"targets": [
{
"target_name": "libgdal_ogr_idrisi_frmt",
"type": "static_library",
"sources": [
"../gdal/ogr/ogrsf_frmts/idrisi/ogridrisidatasource.cpp",
"../gdal/ogr/ogrsf_frmts/idrisi/ogridrisilayer.cpp",
"../gdal/ogr/ogrsf_frmts/idrisi/ogridrisidriver.cpp"
],
"include_dirs": [
"../gdal/ogr/ogrsf_frmts/idrisi",
"../gdal/frmts/idrisi"
]
}
]
}
| StarcoderdataPython |
4829611 | """Tasks related to the Tamr auxiliary service DF-connect"""
from . import client
from . import jdbc_info
__all__ = ["client", "jdbc_info"]
| StarcoderdataPython |
60435 | from flask import Flask
from flask import request
from flask import Response
from handling import Handler
import json
app = Flask(__name__)

# Elasticsearch-backed contact store configuration.
INDEX_NAME = 'contacts'
PORT = 9200
# wipe_index=True presumably recreates the index on every startup --
# confirm against Handler before relying on persistence.
handler = Handler(INDEX_NAME, port = PORT, wipe_index = True)
@app.route('/contact', methods=['GET', 'POST'])
def contact_without_name():
    """Create a contact (POST) or list contacts matching query args (GET)."""
    if request.method == 'POST':
        if handler.create_contact(request.json):
            return Response('{"result": "created"}',
                            status=201,
                            mimetype='application/json')
        return Response('{"result": "failure"}',
                        status=400,
                        mimetype='application/json')

    listing = handler.list_contacts(request.args)
    if listing:
        return Response('{"data":' + json.dumps(listing) + '}',
                        status=200,
                        mimetype='application/json')
    return Response('{"result": "failure"}',
                    status=400,
                    mimetype='application/json')
@app.route('/contact/<name>', methods=['GET', 'PUT', 'DELETE'])
def contact_with_name(name):
    """Fetch (GET), update (PUT) or delete (DELETE) a single contact."""
    def _json_response(body, status):
        return Response(body, status=status, mimetype='application/json')

    if request.method == 'GET':
        found = handler.list_a_contact(name)
        if found:
            return _json_response('{"data":' + json.dumps(found) + '}', 200)
        return _json_response('{"result": "failure"}', 400)

    if request.method == 'PUT':
        if handler.update_contact(request.json):
            return _json_response('{"result": "updated"}', 200)
        return _json_response('{"result": "failure"}', 400)

    # DELETE
    if handler.delete_contact(name):
        return _json_response('{"result": "deleted"}', 200)
    return _json_response('{"result": "failure"}', 400)
3320750 | from . import _simplecoremidi as cfuncs
class MIDIInput(object):
    """Wraps a CoreMIDI input endpoint looked up by name."""

    def __init__(self, input_name=None):
        # No name means no specific endpoint; the C layer receives None.
        self._input = cfuncs.find_input(input_name) if input_name else None

    def recv(self):
        return cfuncs.recv_midi_from_input(self._input)

    @staticmethod
    def enumerate():
        return cfuncs.enumerate_inputs()
class MIDIOutput(object):
    """Wraps a CoreMIDI output endpoint looked up by name."""

    def __init__(self, output_name=None):
        self._output = cfuncs.find_output(output_name) if output_name else None

    def send(self, midi_data):
        # Payload must be a tuple or a list of MIDI bytes.
        assert isinstance(midi_data, (tuple, list))
        return cfuncs.send_midi_to_output(self._output, midi_data)

    @staticmethod
    def enumerate():
        return cfuncs.enumerate_outputs()
class MIDISource(object):
    """A virtual MIDI source this process publishes to other applications."""

    def __init__(self, source_name=None):
        self._source = cfuncs.create_source(source_name or "unnamed source")

    def send(self, midi_data):
        # Payload must be a tuple or a list of MIDI bytes.
        assert isinstance(midi_data, (tuple, list))
        return cfuncs.send_midi(self._source, midi_data)
class MIDIDestination(object):
    """A virtual MIDI destination other applications can send to."""

    def __init__(self, dest_name=None):
        self._dest = cfuncs.create_destination(dest_name or "unnamed destination")

    def recv(self):
        return cfuncs.recv_midi(self._dest)
# Lazily created process-wide endpoints backing the module-level
# send_midi()/recv_midi() convenience functions.
_global_midi_source = None


def _get_global_midi_source():
    """Create (once) and return the shared MIDI source."""
    global _global_midi_source
    if _global_midi_source is None:
        _global_midi_source = MIDISource("simple core midi source")
    return _global_midi_source


_global_midi_dest = None


def _get_global_midi_dest():
    """Create (once) and return the shared MIDI destination."""
    global _global_midi_dest
    if _global_midi_dest is None:
        _global_midi_dest = MIDIDestination("simple core midi destination")
    return _global_midi_dest


def send_midi(midi_data):
    """Send *midi_data* through the shared module-level source."""
    return _get_global_midi_source().send(midi_data)


def recv_midi():
    """Receive pending MIDI data from the shared module-level destination."""
    return _get_global_midi_dest().recv()
| StarcoderdataPython |
1771374 | <reponame>azatoth/telepresence
"""
Test environment variable being set.
This module will indicate success it will exit with code 113.
"""
import os
import sys
from traceback import print_exception
def handle_error(type, value, traceback):
    """sys.excepthook replacement: print the traceback, then exit with code 3."""
    print_exception(type, value, traceback, file=sys.stderr)
    raise SystemExit(3)
def check_custom_env():
    """Assert that the expected custom environment variables were forwarded."""
    expected = {
        "MYENV": "hello",
        "EXAMPLE_ENVFROM": "foobar",
        "EX_MULTI_LINE": (
            "first line (no newline before, newline after)\n"
            "second line (newline before and after)\n"
        ),
    }
    for key, value in expected.items():
        assert os.environ[key] == value
def main():
    """Install the failure hook, validate the environment, then exit 113."""
    # make sure exceptions cause exit with code 3:
    sys.excepthook = handle_error
    check_custom_env()
    # Exit with the code that indicates success (see module docstring):
    sys.exit(113)
# Entry point when executed as a script.
if __name__ == '__main__':
    main()
| StarcoderdataPython |
1765026 | <filename>run.py<gh_stars>1-10
#!/usr/bin/python
"""
Top level script. Calls other functions that generate datasets that this script then creates in HDX.
"""
import logging
from os.path import expanduser, join
from hdx.facades.simple import facade
from hdx.api.configuration import Configuration
from hdx.utilities.downloader import Download
from hdx.utilities.path import progress_storing_folder, wheretostart_tempdir_batch
from faostat import download_indicatorsets, generate_dataset_and_showcase, get_countries
logger = logging.getLogger(__name__)
lookup = "hdx-scraper-faostat"
def main():
    """Generate dataset and create it in HDX"""
    filelist_url = Configuration.read()["filelist_url"]
    countrygroup_url = Configuration.read()["countrygroup_url"]
    indicatorsetnames = Configuration.read()["indicatorsetnames"]
    showcase_base_url = Configuration.read()["showcase_base_url"]
    with Download() as downloader:
        # wheretostart_tempdir_batch allows resuming an interrupted run.
        with wheretostart_tempdir_batch(lookup) as info:
            folder = info["folder"]
            batch = info["batch"]
            indicatorsets = download_indicatorsets(
                filelist_url, indicatorsetnames, downloader, folder
            )
            logger.info(
                f"Number of indicator types to upload: {len(indicatorsetnames)}"
            )
            countries, countrymapping = get_countries(countrygroup_url, downloader)
            logger.info(f"Number of countries to upload: {len(countries)}")
            # One dataset + showcase per (country, indicator set) pair;
            # progress is persisted per ISO3 code.
            for info, country in progress_storing_folder(info, countries, "iso3"):
                for indicatorsetname in indicatorsets:
                    (
                        dataset,
                        showcase,
                        bites_disabled,
                        qc_indicators,
                    ) = generate_dataset_and_showcase(
                        indicatorsetname,
                        indicatorsets,
                        country,
                        countrymapping,
                        showcase_base_url,
                        filelist_url,
                        downloader,
                        info["folder"],
                    )
                    # generate_dataset_and_showcase may return no dataset
                    # (nothing to upload for this combination).
                    if dataset:
                        dataset.update_from_yaml()
                        dataset.generate_resource_view(
                            -1, bites_disabled=bites_disabled, indicators=qc_indicators
                        )
                        dataset.create_in_hdx(
                            remove_additional_resources=True,
                            hxl_update=False,
                            updated_by_script="HDX Scraper: FAOStat",
                            batch=batch,
                        )
                        showcase.create_in_hdx()
                        showcase.add_dataset(dataset)
if __name__ == "__main__":
    # facade() wires up HDX configuration and user-agent handling,
    # then invokes main().
    facade(
        main,
        user_agent_config_yaml=join(expanduser("~"), ".useragents.yml"),
        user_agent_lookup=lookup,
        project_config_yaml=join("config", "project_configuration.yml"),
    )
| StarcoderdataPython |
1740879 | # encoding=utf-8
import os
from flask import Flask
from flask import render_template
from flask import jsonify
from flask import request
import re
from Subject import Subject
from constants import SubjectType, Gender, VisitorPurpose
from flask.ext.sqlalchemy import SQLAlchemy
from flask_script import Shell, Manager
basedir = os.path.abspath(os.path.dirname(__file__))

app = Flask(__name__)
# SQLite database file lives next to this module.
app.config['SQLALCHEMY_DATABASE_URI'] = \
    'sqlite:///' + os.path.join(basedir, 'data.sqlite')
app.config['SQLALCHEMY_COMMIT_ON_TEARDOWN'] = True
db = SQLAlchemy(app)
manager = Manager(app)


def make_shell_context():
    # Objects pre-imported into `shell` sessions started via flask_script.
    return dict(app=app, db=db, User=User, Role=Role)


manager.add_command("shell", Shell(make_context=make_shell_context))
class Role(db.Model):
    """A user role; one role groups many users.

    The large block of commented-out columns that used to follow (subject
    type, visitor info, attendance relationships, ...) belonged to a
    different model and has been removed as dead code.
    """

    __tablename__ = 'roles'

    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64), unique=True)

    # One-to-many: Role.users yields the users holding this role
    # (User gains a `role` backref).
    users = db.relationship('User', backref='role', lazy='dynamic')

    def __repr__(self):
        return '<Role %r>' % self.name
class User(db.Model):
    """An account identified by a unique username; belongs to one Role.

    Commented-out password/permission columns that used to follow were
    dead code and have been removed.
    """

    __tablename__ = 'users'

    id = db.Column(db.Integer, primary_key=True)
    username = db.Column(db.String(64), nullable=False, index=True, unique=True)
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))

    def __repr__(self):
        return '<User %r>' % self.username
def success_result(data={}, page={}):
    """Build the standard success envelope: {"code": 0, "data": ..., "page": ...}.

    Note: the mutable default arguments are only read, never mutated, so
    the shared-default pitfall does not bite here.
    """
    return jsonify({'code': 0, 'data': data, 'page': page})
# NOTE(review): this rebinds `app`, discarding the instance configured with
# SQLALCHEMY_DATABASE_URI above -- all routes below attach to this new,
# unconfigured app.  Looks unintentional; confirm before removing.
app = Flask(__name__)
def get(self, url, **kwargs):
    """Sends a GET request. Returns :class:`Response` object.

    :param url: URL for the new :class:`Request` object.
    :param \*\*kwargs: Optional arguments that ``request`` takes.
    """
    # NOTE(review): defined at module level yet takes `self` -- appears to
    # be copied from requests.Session.get and never bound to a class here;
    # likely dead code.
    kwargs.setdefault('allow_redirects', True)
    return self.request('GET', url, **kwargs)
def json(self):
    """If the mimetype is `application/json` this will contain the
    parsed JSON data. Otherwise this will be `None`.

    The :meth:`get_json` method should be used instead.
    """
    # NOTE(review): module-level function taking `self` -- appears to be
    # copied from a Flask/Werkzeug Response property and is likely dead
    # code.  It also shadows any `json` module name at module scope.
    # XXX: deprecate property
    return self.get_json()
def ProcessMail(inputMail):
    """Validate an email address and print the registration outcome.

    Returns True when *inputMail* matches the expected address pattern
    (letters/digits local part that may contain one dot, multi-label
    domain with a TLD of at least two letters).
    """
    pattern = (r"^[a-zA-Z](([a-zA-Z0-9]*\.[a-zA-Z0-9]*)|[a-zA-Z0-9]*)"
               r"[a-zA-Z]@([a-z0-9A-Z]+\.)+[a-zA-Z]{2,}$")
    isMatch = bool(re.match(pattern, inputMail, re.VERBOSE))
    if isMatch:
        print ("邮箱注册成功。")
    else:
        print ("邮箱注册失败。")
    return isMatch
@app.route('/', methods=['GET', 'POST'])
def home():
    # Landing page; template rendering only, no form handling.
    return render_template('home.html')
@app.route('/auth/login', methods=['POST'])
def signin():
    """Stub login endpoint: checks hard-coded credentials and returns a
    canned profile payload on success."""
    username = request.form['username']
    password = request.form['password']
    # Canned response body.  The <EMAIL>/<PASSWORD> placeholders are
    # redaction artifacts from the published source, not literal values
    # to ship -- restore real test fixtures before use.
    tasks = {
        "avatar": "",
        "verify": "false",
        "password_reseted": "false",
        "company_id": 1,
        "id": 2,
        "company": {
            "door_weekdays": [1, 2, 3, 4, 5, 6],
            "id": 1,
            "door_range": [[8, 35], [21, 55]],
            "organization": "旷视科技",
            "data_version": 1474959615,
            "attendance_weekdays": [1, 2, 3, 4, 5],
            "attendance_on": "true",
            "feature_version": 3,
            "logo": "https://o7rv4xhdy.qnssl.com/@/static/upload/logo/2016-08-23/5135a156badc8d2a11dafe38cb6b22c7095538b0.jpg",
            "scenario": "企业办公",
            "remark": "",
            "deployment": 1,
            "create_time": 0,
            "name": "megvii旷视-Megvii",
            "consigner": "葛聪颖"
        },
        "permission": [
        ],
        "role_id": 2,
        "username": "<EMAIL>",
        "organization_id": "null"
    }
    if username == 'admin<EMAIL>' and password == '<PASSWORD>':
        return success_result(data=tasks, page={})
    return render_template('form.html', message='Bad username or password', username=username)
@app.route('/auth/login', methods=['GET'])
def signin_form():
    # GET on the login URL renders the login form.
    return render_template('form.html')
@app.route('/mobile-admin/subjects/list', methods=['GET'])
def subjects():
    """Stub subject-list endpoint returning canned pagination data."""
    print '------>', request
    print request.args
    newTasks = Subject.get_json()
    params = request.args
    # NOTE(review): looking up the empty-string key always yields the
    # default; these were presumably meant to be params.get('category', ...)
    # and params.get('order', ...).
    category = params.get('', 'employee')
    order = params.get('', 'name')
    # Hard-coded pagination placeholders.
    count = 6
    total = 6
    current = 1
    size = 6
    if category == 'employee' and order == 'name':
        return success_result(
            data=newTasks, page={"size": size, "current": current, "count": count, "total": total})
    return render_template('form.html', message='Bad username or password')
if __name__ == '__main__':
    app.run(host='169.254.215.161')
    # NOTE(review): everything below only executes after the dev server
    # stops; database initialisation presumably belongs before app.run().
    manager.run()
    db.init_app(app)
    db.create_all()
    db.session.commit()
    print app.config['SQLALCHEMY_DATABASE_URI']
    # def _make_context():
    #     return dict(db=db)
    # manage.add_command("shell", Shell(make_context=_make_context))
| StarcoderdataPython |
1706649 |
def getRuleName(rule):
    """Return the "<adjective> <color>" name from a three-word bag phrase
    such as "shiny gold bags"."""
    adjective, color, _bags = rule.strip().split(' ')
    return '{} {}'.format(adjective, color)
def generateNode(rule):
    """Parse one rule line into (parent_name, child_names).

    Each child name is repeated once per contained bag, e.g.
    "2 muted yellow bags" contributes ["muted yellow", "muted yellow"].
    A parent containing "no other bags." yields an empty child list.
    """
    parent_part, child_part = (s.strip() for s in rule.split('contain'))
    # Parent phrase is "<adjective> <color> bags".
    adjective, color, _bags = parent_part.strip().split(' ')
    parent = '{} {}'.format(adjective, color)

    children = []
    for clause in child_part.split(', '):
        if clause == 'no other bags.':
            continue
        count, adjective, color, _bags = clause.strip().split(' ')
        children.extend(['{} {}'.format(adjective, color)] * int(count))
    return parent, children
def readFile(path='./Day7/input.txt'):
    """Parse the rules file into {parent_name: [child_name, ...]}.

    Each line is one rule; see generateNode for the per-line format.
    """
    nodes = {}
    # `with` guarantees the handle is closed (the original leaked it).
    with open(path) as f:
        for l in f:
            parent, children = generateNode(l)
            nodes[parent] = children
    return nodes
def findBags(id='shiny gold'):
    """Count bag colors that can (transitively) contain a bag of color *id*."""
    nodes = readFile()
    positive = set()
    visited = set()
    for node in nodes:
        dfs(node, nodes, id, positive, visited)
    return len(positive) - 1  # the target bag itself doesn't count
def dfs(node, nodes, id, positive, visited):
    """Return True if *node* is *id* or any of its descendants is.

    Nodes proven to reach *id* accumulate in *positive*; *visited*
    prevents re-exploring shared subtrees.
    """
    if node in positive:
        return True
    if node in visited:
        return False
    visited.add(node)

    if node == id:
        positive.add(node)
        return True

    for child in nodes[node]:
        if dfs(child, nodes, id, positive, visited):
            positive.add(node)
            return True
    return False
def countTree(id='shiny gold'):
    """Count how many bags a bag of color *id* must contain (excluding itself)."""
    nodes = readFile()
    return getNodeCount(id, nodes, {}) - 1
def getNodeCount(id, nodes, visited):
    """Return 1 + the total number of bags inside *id*, memoised in *visited*."""
    if id not in visited:
        visited[id] = 1 + sum(getNodeCount(child, nodes, visited)
                              for child in nodes[id])
    return visited[id]
if __name__ == "__main__":
    # print(findBags())  # part 1
    print(countTree())  # part 2
| StarcoderdataPython |
3205819 | <reponame>Schevo/schevo
"""Field metadata changing tests."""
# Copyright (c) 2001-2009 ElevenCraft Inc.
# See LICENSE for details.
from schevo.test import CreatesSchema
from schevo.field import not_expensive, not_fget, not_hidden
class BaseFieldMetadataChanged(CreatesSchema):
    """Shared fixture: a schema whose `bar` field mutates its own metadata.

    `_setup` clears `required`; the change handler `h_bar` flips
    `readonly` when the field value changes.  The `db` name is injected
    by the schevo test harness.

    NOTE(review): the indentation inside the schema string below was
    reconstructed (the published copy lost its whitespace); verify it
    against the original before trusting these tests.
    """

    body = '''

    class Foo(E.Entity):

        bar = f.string()

        class _Update(T.Update):

            def h_bar(self):
                self.f.bar.readonly = True

            def _setup(self):
                self.f.bar.required = False
    '''

    def test_metadata_not_changed_initially(self):
        # A fresh transaction starts with pristine metadata.
        tx = db.Foo.t.create()
        assert tx.f.bar.metadata_changed == False
        tx.bar = 'baz'
        foo = db.execute(tx)
        tx = foo.t.update()
        # _setup ran, but setup-time changes don't count as "changed".
        assert tx.f.bar.required == False
        assert tx.f.bar.metadata_changed == False

    def test_metadata_changed_during_change_handler(self):
        tx = db.Foo.t.create(bar='baz')
        foo = db.execute(tx)
        tx = foo.t.update()
        assert tx.f.bar.metadata_changed == False
        # Direct metadata assignment flags the change...
        tx.f.bar.readonly = False
        assert tx.f.bar.metadata_changed == True
        tx.f.bar.reset_metadata_changed()
        assert tx.f.bar.metadata_changed == False
        # ...and so does metadata mutated inside the h_bar change handler.
        tx.bar = 'frob'
        assert tx.f.bar.readonly == True
        assert tx.f.bar.metadata_changed == True
        tx.f.bar.reset_metadata_changed()
        assert tx.f.bar.metadata_changed == False
# class TestFieldMetadataChanged1(BaseFieldMetadataChanged):
# include = True
# format = 1
class TestFieldMetadataChanged2(BaseFieldMetadataChanged):
    """Run the base metadata tests against schema format 2."""
    include = True
    format = 2
| StarcoderdataPython |
4823480 | from numpy import random
map_x_size = 10  # number of tiles on the one-dimensional game map
map_list = [0 for i in range(map_x_size)]  # 0 = empty; 'C'/'M' mark character/monster
class Character:
    """Player character: combat stats, levelling, and derived DPS/EXP."""

    def __init__(self, level, HP, atk, speed, attack_speed, critical_hit_rate, evasion_rate, exp):
        print("Character Production")
        self.result = 0  # NOTE(review): written but never read in this file
        self.level = level
        self.HP = HP
        self.atk = atk
        self.speed = speed
        self.attack_speed = attack_speed
        self.critical_hit_rate = critical_hit_rate  # percent chance, 0-100
        self.evasion_rate = evasion_rate  # percent chance, 0-100
        self.exp = exp

    def print_info(self):
        # Dump all stats to stdout.
        print("--------------------")
        print("level: ", self.level)
        print("HP: ", self.HP)
        print("ATK: ", self.atk)
        print("Speed: ", self.speed)
        print("Attak_Speed: ", self.attack_speed)
        print("Critical_hit_rate: ", self.critical_hit_rate)
        print("Evasion_rate: ", self.evasion_rate)
        print("EXP: ", self.exp)
        print("--------------------")

    # Stat increases on character level-up (translated from Korean comment).
    def level_up(self):
        print("level up!!")
        self.level += 1
        self.HP += 10
        self.atk += 1
        # Every 10th level also boosts speed-related stats.
        if self.level % 10 == 0:
            self.speed += 1
            self.attack_speed += 1
            self.evasion_rate += 1

    def charactor_dps(self):
        # (sic) name kept; damage per attack round = attack * attack speed.
        dps = self.atk * self.attack_speed
        return dps

    def charactor_exp(self):
        return self.exp

    def exp_up(self):
        self.exp += 10
class Monster:
    """Enemy with combat stats; its EXP is granted to the hero on death."""

    def __init__(self, HP, atk, attack_speed, evasion_rate, exp):
        self.result = 0  # NOTE(review): written but never read in this file
        self.HP = HP
        self.atk = atk
        self.attack_speed = attack_speed
        self.evasion_rate = evasion_rate  # percent chance, 0-100
        self.exp = exp  # EXP awarded to the hero when this monster dies

    def print_info(self):
        # Announce the encounter and dump all stats to stdout.
        print("\n--------------------\nMonster apperance!!")
        print("--------------------")
        print("Monster")
        print("HP: ", self.HP)
        print("ATK: ", self.atk)
        print("Attak_Speed: ", self.attack_speed)
        print("Evasion_rate: ", self.evasion_rate)
        print("EXP: ", self.exp)
        print("--------------------")

    def monster_exp(self):
        return self.exp

    def monster_dps(self):
        # Damage per attack round = attack * attack speed.
        dps = self.atk * self.attack_speed
        return dps
# Cumulative EXP required per level (translated from Korean comment).
def level_per_exp():
    """Build {level: exp_needed} for levels 1-99.

    Levels 1 and 2 need 10 EXP; every multiple of 10 doubles the previous
    level's requirement; any other level is the sum of the previous two
    (Fibonacci-style growth).
    """
    exp_table = {1: 10, 2: 10}
    for level in range(3, 100):
        previous = exp_table[level - 1]
        if level % 10 == 0:
            exp_table[level] = previous * 2
        else:
            exp_table[level] = previous + exp_table[level - 2]
    return exp_table
# Monster-kill handling (translated from Korean comment).
def Monster_dead(hero, monster_exp):
    """Grant the slain monster's EXP to *hero* and print the updated stats."""
    print("Kill the monster!!")
    hero.exp += monster_exp
    hero.print_info()
# Draws the character walking across the map; hooks into monster_fiting
# (translated from Korean comment).
def map_schematic():
    # Place the monster on the second-to-last tile.
    map_list[-2] = 'M'
    for i in range(map_x_size):
        if map_list[i] == 'M':
            # NOTE(review): relies on module-level globals `map_list`,
            # `monster` and on monster_fiting(); only runs as a script.
            monster.print_info()
            monster_fiting()
        # Advance the character one tile and clear the previous one.
        map_list[i] = "C"
        map_list[i-1] = 0
        print(map_list)
    if map_list[-1] == "C":
        print("--------------------\nGame Over\n--------------------")
# Probability helper (translated from Korean comment).
def percentage_condition(percent):
    """Return True with probability *percent*/100, else False.

    Draws a single Bernoulli sample via numpy's binomial distribution.
    Fix: the original returned None (falsy) on failure via an implicit
    fall-through; an explicit bool is returned now, which is backward
    compatible for every truth-value use in this script.
    """
    true_condition = random.binomial(n=1, p=percent / 100, size=1)
    return bool(true_condition[0] == 1)
# Evasion handling (translated from Korean comment).
def evasion_condition(evasion_rate):
    """Print a miss message with probability *evasion_rate* percent.

    NOTE(review): never called in this script; attak_condition()
    re-implements the same check inline.
    """
    if percentage_condition(evasion_rate):
        print("--------------------\nMiss!\n--------------------")
# One attack round: monster may evade, hero may land a critical hit
# (translated from Korean comment).
def attak_condition():
    # (sic) name kept; uses module-level globals `hero` and `monster`.
    if percentage_condition(monster.evasion_rate):
        print("--------------------\nMiss!\n--------------------")
    else:
        if percentage_condition(hero.critical_hit_rate):
            print("--------------------\nCritical Hitting!\n--------------------")
            monster.HP -= hero.charactor_dps()*2  # critical hit = double damage
            print("Monster HP: ", monster.HP)
        else:
            print("--------------------\nHitting!\n--------------------")
            monster.HP -= hero.charactor_dps()
            print("Monster HP: ", monster.HP)
# Fight loop on encountering a monster; hands off to Monster_dead
# (translated from Korean comment).
def monster_fiting():
    # (sic) name kept; uses module-level globals `hero`, `monster`, `level_exp`.
    while True:
        attak_condition()
        if monster.HP <= 0:
            Monster_dead(hero, monster.exp)
            # Level up once if the EXP threshold for the current level is met.
            if hero.exp >= level_exp[hero.level]:
                hero.level_up()
                hero.print_info()
            break
def time_calculator():
    """Print a rough total time: walk across the map plus time-to-kill.

    NOTE(review): called after the fight, so monster.HP has already been
    reduced to <= 0 -- the second term is non-positive; confirm intent.
    """
    time = map_x_size/hero.speed + monster.HP/hero.charactor_dps()
    print("Total Time: ", time)
if __name__ == "__main__":
    print("--------------------\nGame Start\n--------------------")
    # Per-level EXP table consulted by monster_fiting().
    level_exp = level_per_exp()
    # level, hp, atk, speed, attack_speed, criticl_hit_rate, evasion_rate, exp
    hero = Character(1, 100, 10, 1, 1, 50, 1, 0)
    hero.print_info()
    # hp, atk, attack_speed, evasion_rate, exp
    monster = Monster(300, 1, 1, 1, 10)
    map_schematic()
    time_calculator()
190694 | <gh_stars>0
'''class Pessoa(object):
def __init__(self, nome, idade, peso):
self.nome = nome
self.idade = idade
self.peso = peso
def andar(self):
print('anda')
pessoa1 = Pessoa("Juliana", 23, 75)
pessoa2 = Pessoa("Carlos", 39, 72)
print(pessoa1.nome)
pessoa1.andar()
def fatorial(n):
if n == 0:
return 0
if n == 1:
return 1
if n > 1:
return fatorial(n-1) * n
print(fatorial(5))''' | StarcoderdataPython |
1607286 | <reponame>anil-allipilli/Sponsor<filename>accounts/migrations/0005_sponser_mysponsees.py
# Generated by Django 3.0 on 2020-12-02 19:56
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add the ``Sponser.mysponsees`` many-to-many field
    pointing at ``accounts.Sponsee``."""
    dependencies = [
        ('accounts', '0004_auto_20201111_1945'),
    ]
    operations = [
        migrations.AddField(
            model_name='sponser',
            name='mysponsees',
            field=models.ManyToManyField(to='accounts.Sponsee'),
        ),
    ]
| StarcoderdataPython |
105725 | <gh_stars>0
#!/usr/bin/env python3
import sys
# For each line of the file named by argv[1], print (space-separated) the
# whitespace-delimited tokens that occur more than once on that line.
with open(sys.argv[1]) as file:
    for line in (line.rstrip() for line in file):
        values = line.split()
        history = set()  # tokens seen so far while scanning right-to-left
        duplicates = []  # repeated tokens; front-insertion undoes the reversal
        for i in reversed(values):
            if i not in history:
                history.add(i)
            elif i not in duplicates:
                duplicates.insert(0, i)
        print(' '.join(duplicates))
| StarcoderdataPython |
180858 | <reponame>wangrui1121/huaweicloud-sdk-python
# Copyright 2018 Huawei Technologies Co.,Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may not use
# this file except in compliance with the License. You may obtain a copy of the
# License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software distributed
# under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
# CONDITIONS OF ANY KIND, either express or implied. See the License for the
# specific language governing permissions and limitations under the License.
import testtools
from openstack.vpc.v1 import quota
IDENTIFIER = 'IDENTIFIER'
EXAMPLE = {"type": "vpc", "used": 4, "quota": 150, "min": 0}
class TestQuota(testtools.TestCase):
    """Unit tests for the static definition of the VPC v1 Quota resource."""

    def test_basic(self):
        # Resource metadata: REST keys/paths, service type, and which CRUD
        # operations the resource advertises (list-only).
        sot = quota.Quota()
        self.assertEqual('quota', sot.resource_key)
        self.assertEqual('quotas.resources', sot.resources_key)
        self.assertEqual('/quotas', sot.base_path)
        self.assertEqual('vpc', sot.service.service_type)
        self.assertFalse(sot.allow_create)
        self.assertFalse(sot.allow_get)
        self.assertFalse(sot.allow_update)
        self.assertFalse(sot.allow_delete)
        self.assertTrue(sot.allow_list)
        self.assertDictEqual(
            {'limit': 'limit', 'marker': 'marker', 'type': 'type'},
            sot._query_mapping._mapping)

    def test_make_it(self):
        # Constructing from EXAMPLE maps every key onto an attribute.
        sot = quota.Quota(**EXAMPLE)
        self.assertEqual(EXAMPLE['type'], sot.type)
        self.assertEqual(EXAMPLE['used'], sot.used)
        self.assertEqual(EXAMPLE['quota'], sot.quota)
        self.assertEqual(EXAMPLE['min'], sot.min)
| StarcoderdataPython |
56556 | """
Intialize the Pygate application
"""
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
app.config.from_object("config")  # settings loaded from config.py
db = SQLAlchemy(app)
# NOTE(review): imported at the bottom, presumably so routes/models can
# import ``app``/``db`` from this module without a circular import.
from pygate import routes, models
| StarcoderdataPython |
125243 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Auto-generated: create proxy model ``OrderItemProxy`` over
    ``products.Product`` (no new table; alternate admin labels only)."""
    dependencies = [
        ('products', '0002_product_value'),
        ('orders', '0003_auto_20141225_2344'),
    ]
    operations = [
        migrations.CreateModel(
            name='OrderItemProxy',
            fields=[
            ],
            options={
                'verbose_name': 'Order Item',
                'proxy': True,
                'verbose_name_plural': 'Order Items',
            },
            bases=('products.product',),
        ),
    ]
| StarcoderdataPython |
4827933 | from Crypto.Cipher import AES
import binascii
keystring = '00000000000000000000000000000000'  # 16 zero bytes -> AES-128 key
iostoken = '107005ed3f3845aea7696d838df8389385b88224e4696b2e78be02cfc83d4d770143db63ee66b0cdff9f69917680151e'
key = bytes.fromhex(keystring)
cipher = AES.new(key, AES.MODE_ECB)
# Decrypt only the first 64 hex chars (32 bytes = two AES blocks) in ECB mode.
token = cipher.decrypt(bytes.fromhex(iostoken[:64]))
print(token)
1790100 | """Module that contains the enumerates for the image controls.
Enumerates:
Rule.
"""
from enum import Enum
class Control(Enum):
    """Enumerate of controls"""
    # Each member's value is its own name, used verbatim as a control key.
    pixel_size = 'pixel_size'
    bands_len = 'bands_len'
    dig_level = 'dig_level'
    rad_balance = 'rad_balance'
    srid = 'srid'
    nodata = 'nodata'
    aall = 'aall'
| StarcoderdataPython |
3304896 | <gh_stars>1-10
# @l2g 1898 python3
# [1898] Maximum Number of Removable Characters
# Difficulty: Medium
# https://leetcode.com/problems/maximum-number-of-removable-characters
#
# You are given two strings s and p where p is a subsequence of s.
# You are also given a distinct 0-indexed integer array removable containing a subset of indices of s (s is also 0-indexed).
# You want to choose an integer k (0 <= k <= removable.length) such that,
# after removing k characters from s using the first k indices in removable,
# p is still a subsequence of s.More formally,
# you will mark the character at s[removable[i]] for each 0 <= i < k,
# then remove all marked characters and check if p is still a subsequence.
# Return the maximum k you can choose such that p is still a subsequence of s after the removals.
# A subsequence of a string is a new string generated from the original string with some characters (can be none) deleted without changing the relative order of the remaining characters.
#
# Example 1:
#
# Input: s = "abcacb", p = "ab", removable = [3,1,0]
# Output: 2
# Explanation: After removing the characters at indices 3 and 1, "abcacb" becomes "accb".
# "ab" is a subsequence of "accb".
# If we remove the characters at indices 3,1,and 0,"abcacb" becomes "ccb",
# and "ab" is no longer a subsequence.
# Hence, the maximum k is 2.
#
# Example 2:
#
# Input: s = "abcbddddd", p = "abcd", removable = [3,2,1,4,5,6]
# Output: 1
# Explanation: After removing the character at index 3, "abcbddddd" becomes "abcddddd".
# "abcd" is a subsequence of "abcddddd".
#
# Example 3:
#
# Input: s = "abcab", p = "abc", removable = [0,1,2,3,4]
# Output: 0
# Explanation: If you remove the first index in the array removable, "abc" is no longer a subsequence.
#
#
# Constraints:
#
# 1 <= p.length <= s.length <= 10^5
# 0 <= removable.length < s.length
# 0 <= removable[i] < s.length
# p is a subsequence of s.
# s and p both consist of lowercase English letters.
# The elements in removable are distinct.
#
#
from typing import List
class Solution:
    def maximumRemovals(self, s: str, p: str, removable: List[int]) -> int:
        """Binary-search the largest k such that *p* remains a subsequence
        of *s* after deleting the first k indices listed in *removable*."""

        def still_subsequence(k: int) -> bool:
            # Does p survive removal of the first k removable indices?
            removed = set(removable[:k])
            j = 0  # next character of p to match
            for pos, ch in enumerate(s):
                if pos in removed:
                    continue
                if ch == p[j]:
                    j += 1
                    if j == len(p):
                        return True
            return False

        # Search the answer on [0, len(removable)] with an upper-mid probe:
        # feasibility is monotone (more removals can only hurt).
        lo, hi = 0, len(removable)
        while lo < hi:
            mid = (lo + hi + 1) // 2
            if still_subsequence(mid):
                lo = mid
            else:
                hi = mid - 1
        return lo
if __name__ == "__main__":
    # Run this problem's dedicated test module directly through pytest.
    import os
    import pytest
    pytest.main([os.path.join("tests", "test_1898.py")])
| StarcoderdataPython |
1633831 | import SimpleITK as sitk
import numpy as np
import os
import paths
import csv
import math
from scipy.io import loadmat
from skimage.measure import regionprops, marching_cubes_classic, mesh_surface_area
def divide_hcp(connectivity_matrix, hcp_connectivity):
    """Element-wise divide *connectivity_matrix* by *hcp_connectivity*.

    Entries where the HCP count is zero are left at 0 instead of producing
    a division-by-zero. Improvement: the original O(n^2) Python double loop
    is replaced with a single vectorized ``np.divide`` call, which also
    generalizes the function to arrays of any dimensionality.
    """
    assert connectivity_matrix.shape == hcp_connectivity.shape
    output_matrix = np.zeros(connectivity_matrix.shape)
    # where= skips zero denominators; those slots keep the 0 from `out`.
    np.divide(connectivity_matrix, hcp_connectivity,
              out=output_matrix, where=hcp_connectivity != 0)
    return output_matrix
def get_hcp_connectivity_matrice(hcp_connectivity_matrices_path = paths.hcp_connectivity_matrices_path):
    '''Get the pass-type and end-type connectivity matrices from HCP1021 subjects.

    Loads the two DSI-Studio .mat exports from disk; each stores its matrix
    under the 'connectivity' key. Returned as (pass_matrix, end_matrix).
    '''
    end_matrix_path = os.path.join(hcp_connectivity_matrices_path, 'HCP1021.1mm.fib.gz.aal.count.end.connectivity.mat')
    pass_matrix_path = os.path.join(hcp_connectivity_matrices_path, 'HCP1021.1mm.fib.gz.aal.count.pass.connectivity.mat')
    end_obj = loadmat(end_matrix_path)
    end_matrix = end_obj['connectivity']
    pass_obj = loadmat(pass_matrix_path)
    pass_matrix = pass_obj['connectivity']
    return pass_matrix, end_matrix
def ReadImage(path):
    ''' This code returns the numpy nd array for a MR image at path'''
    # Loaded via SimpleITK, then converted to a float32 numpy array.
    return sitk.GetArrayFromImage(sitk.ReadImage(path)).astype(np.float32)
def find_list(subject_id, list):
    """Return the first entry of *list* whose text contains *subject_id*.

    Used to look up a subject's stroke-lesion path by its ID substring.
    Raises IndexError when nothing matches.
    """
    matches = [candidate for candidate in list if subject_id in candidate]
    return matches[0]
def find_3d_surface(mask, voxel_spacing=(1.0,1.0,1.0)):
    ''' find the surface for a 3D object '''
    # Triangulate the binary mask with classic marching cubes, then sum the
    # areas of the resulting mesh triangles.
    verts, faces = marching_cubes_classic(volume=mask, spacing=voxel_spacing)
    return mesh_surface_area(verts, faces)
def find_3d_roundness(mask):
    ''' find the roundess of a 3D object '''
    mask_region_props = regionprops(mask.astype(int))
    mask_area = mask_region_props[0].area  # voxel count of the object
    # Diameter of the sphere with the same volume: (6V/pi)^(1/3).
    mask_equivDiameter = (6.0*mask_area/math.pi)**(1.0/3.0)
    mask_major_axis_length = mask_region_props[0].major_axis_length
    # Roundness: equivalent-diameter^2 over major-axis-length^2.
    return mask_equivDiameter**2/mask_major_axis_length**2
def reshape_by_padding_upper_coords(image, new_shape, pad_value=None):
    """Embed *image* at the origin of a larger array filled with *pad_value*.

    The output shape is the element-wise maximum of ``image.shape`` and
    *new_shape*, so the image is never cropped. When *pad_value* is omitted
    it defaults to the corner voxel; only 2-D/3-D inputs are then accepted.
    """
    old_shape = tuple(list(image.shape))
    target_shape = tuple(np.max(np.concatenate((old_shape, new_shape)).reshape((2, len(old_shape))), axis=0))
    if pad_value is None:
        if len(old_shape) == 2:
            pad_value = image[0, 0]
        elif len(old_shape) == 3:
            pad_value = image[0, 0, 0]
        else:
            raise ValueError("Image must be either 2 or 3 dimensional")
    padded = np.ones(list(target_shape), dtype=image.dtype) * pad_value
    # Copy the original into the lower corner (indices starting at 0).
    if len(old_shape) in (2, 3):
        corner = tuple(slice(0, int(extent)) for extent in old_shape)
        padded[corner] = image
    return padded
# ======================= Tools for connectivity matrix ============================================= #
def threshold_connectivity_matrix(connectivity_matrix, threshold=0.01):
    """Return a copy with entries at or below threshold*max zeroed out
    (removes noise from the connectivity matrix)."""
    cutoff = threshold * np.amax(connectivity_matrix)
    result = np.copy(connectivity_matrix)
    result[result <= cutoff] = 0.0
    return result
def weight_conversion(W):
    """Return (normalized, binary) versions of connectivity matrix *W*.

    The normalized copy divides by the largest absolute entry; the binary
    copy replaces every non-zero entry with 1 (dtype preserved).
    """
    binary = np.copy(W)
    binary[binary != 0] = 1
    normalized = W / np.amax(np.absolute(W))
    return normalized, binary
def get_lesion_weights(stroke_mni_path):
    ''' get the weight vector(workshop paper)'''
    # AAL atlas padded onto the 182x218x182 MNI grid used by the lesions.
    aal_path = os.path.join(paths.dsi_studio_path, 'atlas', 'aal.nii.gz')
    aal_nda = ReadImage(aal_path)
    aal_182_218_182 = reshape_by_padding_upper_coords(aal_nda, (182,218,182), 0)
    stroke_mni_nda = ReadImage(stroke_mni_path)
    weights = np.zeros(int(np.amax(aal_182_218_182)), dtype=float)
    for bp_number in range(int(np.amax(aal_182_218_182))):
        # Weight for region bp_number+1: fraction of that AAL parcel's
        # voxels overlapped by the stroke lesion.
        mask = np.zeros(aal_182_218_182.shape, aal_182_218_182.dtype)
        mask[aal_182_218_182==(bp_number+1)]=1
        bp_size = float(np.count_nonzero(mask))
        stroke_in_bp = np.multiply(mask, stroke_mni_nda)
        stroke_in_bp_size = float(np.count_nonzero(stroke_in_bp))
        weights[bp_number] = stroke_in_bp_size/bp_size
        #weights[bp_number] = stroke_in_bp_size
    return weights
def get_modified_lesion_weights(stroke_mni_path):
    ''' get the modified weight vector'''
    # Like get_lesion_weights, but each region's weight is the fraction of
    # the *lesion* (not of the parcel) that falls inside that AAL region.
    aal_path = os.path.join(paths.dsi_studio_path, 'atlas', 'aal.nii.gz')
    aal_nda = ReadImage(aal_path)
    aal_182_218_182 = reshape_by_padding_upper_coords(aal_nda, (182,218,182), 0)
    stroke_mni_nda = ReadImage(stroke_mni_path)
    stroke_volume = float(np.count_nonzero(stroke_mni_nda))
    weights = np.zeros(int(np.amax(aal_182_218_182)), dtype=float)
    for bp_number in range(int(np.amax(aal_182_218_182))):
        mask = np.zeros(aal_182_218_182.shape, aal_182_218_182.dtype)
        mask[aal_182_218_182==(bp_number+1)]=1
        #bp_size = float(np.count_nonzero(mask))
        stroke_in_bp = np.multiply(mask, stroke_mni_nda)
        stroke_volume_in_bp = float(np.count_nonzero(stroke_in_bp))
        #weights[bp_number] = 1.0 + stroke_volume_in_bp/stroke_volume
        weights[bp_number] = stroke_volume_in_bp/stroke_volume
    #remaining_volume = stroke_volume - np.sum(weights)
    #print(remaining_volume)
    return weights
def get_train_dataset():
    '''Give you the training dataset.

    Returns {subject_name: {'mRS', 'TICI', 'TSS', 'TTT', 'ID', 'tracts'}}
    built from the ISLES2017 CSV, keeping only subjects with a ground-truth
    lesion file and an mRS follow-up of 88/90/96/97 days.
    '''
    # Ground-truth (.OT.) lesion files under the ISLES2017 training tree.
    gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
    gt_subject_paths.sort()
    # The CSV file for train dataset
    train_mRS_file = "ISLES2017_Training.csv"
    train_mRS_path = os.path.join(paths.isles2017_dir, train_mRS_file)
    assert(os.path.isfile(train_mRS_path))
    # Read CSV file for Train dataset
    train_dataset = {}
    with open(train_mRS_path, 'rt') as csv_file:
        csv_reader = csv.reader(csv_file)
        for line in csv_reader:
            if line[2] == '90' or line[2] == '88' or line[2] == '96' or line[2] == '97': # 90 days
                subject_name = line[0]
                gt_file = [file for file in gt_subject_paths if '/'+subject_name+'/' in file]
                if gt_file:
                    train_dataset[subject_name]={}
                    train_dataset[subject_name]['mRS'] = line[1]
                    train_dataset[line[0]]['TICI'] = line[3]
                    train_dataset[line[0]]['TSS'] = line[4]
                    train_dataset[line[0]]['TTT'] = line[5]
                    # Six-char subject ID parsed from the lesion filename.
                    train_dataset[line[0]]['ID'] = gt_file[0][-10:-4]
                    train_dataset[line[0]]['tracts'] = line[6]
    return train_dataset
# Get the mRS for training subject from training_1 to training_48
def extract_gt_mRS():
    '''extract the mRS for training subjects from training_1 to training_48'''
    # One mRS outcome score per retained subject (40 in total).
    mRS_gt = np.zeros((40, ))
    train_dataset = get_train_dataset()
    for idx, subject_name in enumerate(train_dataset.keys()):
        mRS_gt[idx] = train_dataset[subject_name]['mRS']
    return mRS_gt
def extract_tract_features():
    ''' extract number of tracts'''
    # (40, 1) array of tract counts plus the matching feature-name list.
    train_dataset = get_train_dataset()
    tracts = np.zeros((40, 1))
    for idx, subject_name in enumerate(train_dataset.keys()):
        tracts[idx] = train_dataset[subject_name]['tracts']
    return tracts, ['tracts']
# Extract the volume of stroke in MNI152 space
def extract_volumetric_features():
    """Return ((40, 1) lesion volumes in voxels, feature-name list)."""
    # The ground truth lesions in MNI space
    volumetric_list = ["volume"]
    stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
    stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
    stroke_mni_paths.sort()
    assert(len(stroke_mni_paths) == 43)
    # Volumetric Features
    volumetric_features = np.zeros((40,1))
    train_dataset = get_train_dataset()
    for idx, subject_name in enumerate(train_dataset.keys()):
        subject_id = train_dataset[subject_name]['ID']
        stroke_mni_path = find_list(subject_id, stroke_mni_paths)
        #volumetric features: lesion volume = non-zero voxel count
        stroke_mni_nda = ReadImage(stroke_mni_path)
        volumetric_features[idx] = np.count_nonzero(stroke_mni_nda)
    return volumetric_features, volumetric_list
def extract_spatial_features():
    """Return ((40, 3) lesion centroids in (z, y, x), feature-name list)."""
    # The ground truth lesions in MNI space
    stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
    stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
    stroke_mni_paths.sort()
    assert(len(stroke_mni_paths) == 43)
    spatial_list = ["centroid_z", "centroid_y", "centroid_x"]
    # Volumetric Features
    spatial_features = np.zeros((40,3))
    train_dataset = get_train_dataset()
    for idx, subject_name in enumerate(train_dataset.keys()):
        subject_id = train_dataset[subject_name]['ID']
        stroke_mni_path = find_list(subject_id, stroke_mni_paths)
        stroke_mni_nda = ReadImage(stroke_mni_path)
        # Centroid of the (single) labelled lesion region.
        stroke_regions = regionprops(stroke_mni_nda.astype(int))
        stroke_centroid = stroke_regions[0].centroid
        spatial_features[idx, :] = stroke_centroid
    return spatial_features, spatial_list
def extract_morphological_features():
    """Return ((40, 6) lesion shape descriptors, feature-name list).

    Columns: major axis, minor axis, their ratio, mesh surface area,
    solidity, and roundness of each lesion in MNI space.
    """
    # The ground truth lesions in MNI space
    stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
    stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
    stroke_mni_paths.sort()
    assert(len(stroke_mni_paths) == 43)
    morphological_list = ["major", "minor", "major/minor", "surface", "solidity", "roundness"]
    # Volumetric Features
    morphological_features = np.zeros((40,6), dtype=np.float32)
    train_dataset = get_train_dataset()
    for idx, subject_name in enumerate(train_dataset.keys()):
        subject_id = train_dataset[subject_name]['ID']
        stroke_mni_path = find_list(subject_id, stroke_mni_paths)
        stroke_mni_nda = ReadImage(stroke_mni_path)
        stroke_regions = regionprops(stroke_mni_nda.astype(int))
        stroke_major_axis_length = stroke_regions[0].major_axis_length
        stroke_minor_axis_length = stroke_regions[0].minor_axis_length
        stroke_surface = find_3d_surface(stroke_mni_nda.astype(int))
        stroke_roundness = find_3d_roundness(stroke_mni_nda.astype(int))
        morphological_features[idx, :] = stroke_major_axis_length, stroke_minor_axis_length, stroke_major_axis_length/stroke_minor_axis_length, stroke_surface, stroke_regions[0].solidity, stroke_roundness
    return morphological_features, morphological_list
def extract_tractographic_features(weight_type, aal_regions=116):
    """Return six (40, aal_regions) weighted connectivity histograms.

    For each subject, column sums of the pass-type and end-type
    connectivity matrices (raw, normalized, binarized) are multiplied by a
    lesion weight vector selected by *weight_type* ('ori', 'mod', 'one'),
    followed by the feature-name list.
    """
    # The ground truth lesion in subject space
    gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
    # New connectivity matrices location
    connectivity_train_dir = os.path.join(paths.dsi_studio_path, 'connectivity', 'gt_stroke')
    # pass type locations
    connectivity_pass_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'pass' in name and name.endswith('.mat')]
    connectivity_pass_files.sort()
    # end type locations
    connectivity_end_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'end' in name and name.endswith('.mat')]
    connectivity_end_files.sort()
    # The ground truth lesions in MNI space
    stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
    stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
    stroke_mni_paths.sort()
    tractographic_list = ["tract_aal_"+str(i) for i in range(1, aal_regions+1)]
    assert(len(connectivity_pass_files) == len(connectivity_end_files) == len(stroke_mni_paths) == 43)
    train_dataset = get_train_dataset()
    # Tractographic Features
    W_dsi_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
    W_nrm_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
    W_bin_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
    W_dsi_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
    W_nrm_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
    W_bin_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
    for idx, subject_name in enumerate(train_dataset.keys()):
        subject_id = train_dataset[subject_name]['ID']
        # Threshold of 0 keeps all entries; conversion yields the
        # normalized and binarized variants.
        connectivity_pass_file = find_list(subject_id, connectivity_pass_files)
        connectivity_pass_obj = loadmat(connectivity_pass_file)
        thresholded_connectivity_pass = threshold_connectivity_matrix(connectivity_pass_obj['connectivity'], 0)
        W_nrm_pass, W_bin_pass = weight_conversion(thresholded_connectivity_pass)
        connectivity_end_file = find_list(subject_id, connectivity_end_files)
        connectivity_end_obj = loadmat(connectivity_end_file)
        thresholded_connectivity_end = threshold_connectivity_matrix(connectivity_end_obj['connectivity'], 0)
        W_nrm_end, W_bin_end = weight_conversion(thresholded_connectivity_end)
        stroke_mni_path = find_list(subject_id, stroke_mni_paths)
        # =================================== Weight Vector ========================================== #
        # Get the lesion weights
        if 'ori' in weight_type:
            lesion_weights = get_lesion_weights(stroke_mni_path)
        # Get the modified lesion weights
        if 'mod' in weight_type:
            lesion_weights = get_modified_lesion_weights(stroke_mni_path)
        # No weight
        if 'one' in weight_type:
            lesion_weights = np.ones((1,aal_regions), dtype=np.float32)
        # weighted connectivity histogram
        W_dsi_pass_histogram_features[idx, :] = np.multiply(np.sum(thresholded_connectivity_pass, axis=0), lesion_weights)
        W_nrm_pass_histogram_features[idx, :] = np.multiply(np.sum(W_nrm_pass, axis=0), lesion_weights)
        W_bin_pass_histogram_features[idx, :] = np.multiply(np.sum(W_bin_pass, axis=0), lesion_weights)
        W_dsi_end_histogram_features[idx, :] = np.multiply(np.sum(thresholded_connectivity_end, axis=0), lesion_weights)
        W_nrm_end_histogram_features[idx, :] = np.multiply(np.sum(W_nrm_end, axis=0), lesion_weights)
        W_bin_end_histogram_features[idx, :] = np.multiply(np.sum(W_bin_end, axis=0), lesion_weights)
    return W_dsi_pass_histogram_features, W_nrm_pass_histogram_features, W_bin_pass_histogram_features, W_dsi_end_histogram_features, W_nrm_end_histogram_features, W_bin_end_histogram_features, tractographic_list
def extract_volumetric_spatial_features(atlas_name):
    '''extract volumetric spatial features.

    Returns a (40, n_regions+1) array of lesion voxel counts per atlas
    parcel; column 0 holds the lesion voxels that fall outside every
    parcel. Also returns the matching feature-name list.
    '''
    stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
    stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
    stroke_mni_paths.sort()
    train_dataset = get_train_dataset()
    atlas_path = os.path.join(paths.dsi_studio_path, 'atlas', atlas_name+'.nii.gz')
    atlas_nda = ReadImage(atlas_path)
    # AAL needs padding to the 182x218x182 MNI grid used by the lesions.
    if atlas_name == 'aal':
        atlas_nda = reshape_by_padding_upper_coords(atlas_nda, (182,218,182), 0)
    volumetric_spatial_features = np.zeros((40, int(np.amax(atlas_nda))+1), dtype=float)
    for idx, subject_name in enumerate(train_dataset.keys()):
        subject_id = train_dataset[subject_name]['ID']
        stroke_mni_path = find_list(subject_id, stroke_mni_paths)
        stroke_mni_nda = ReadImage(stroke_mni_path)
        whole_stroke_volume = float(np.count_nonzero(stroke_mni_nda))
        for bp_number in range(1, int(np.amax(atlas_nda)+1)):
            mask = np.zeros(atlas_nda.shape, atlas_nda.dtype)
            mask[atlas_nda==(bp_number)]=1
            stroke_in_bp = np.multiply(mask, stroke_mni_nda)
            stroke_in_bp_volume = np.count_nonzero(stroke_in_bp)
            volumetric_spatial_features[idx, bp_number] = stroke_in_bp_volume
        # Column 0: lesion volume not covered by any atlas parcel.
        total_stroke_volume_bp = np.sum(volumetric_spatial_features[idx, :])
        volumetric_spatial_features[idx, 0] = whole_stroke_volume - total_stroke_volume_bp
    volumetric_spatial_list =['volume_'+atlas_name+'_'+str(i) for i in range(0, int(np.amax(atlas_nda)+1))]
    return volumetric_spatial_features, volumetric_spatial_list
def extract_modified_volumetric_spatial_features(atlas_name):
    '''extract volumetric spatial features considering the total volume of the stroke lesion.

    Like extract_volumetric_spatial_features, but each entry is the
    fraction of the whole lesion inside a parcel, and there is no
    outside-atlas column.
    '''
    stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
    stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
    stroke_mni_paths.sort()
    train_dataset = get_train_dataset()
    atlas_path = os.path.join(paths.dsi_studio_path, 'atlas', atlas_name+'.nii.gz')
    atlas_nda = ReadImage(atlas_path)
    if atlas_name == 'aal':
        atlas_nda = reshape_by_padding_upper_coords(atlas_nda, (182,218,182), 0)
    modified_volumetric_spatial_features = np.zeros((40, int(np.amax(atlas_nda))), dtype=float)
    for idx, subject_name in enumerate(train_dataset.keys()):
        subject_id = train_dataset[subject_name]['ID']
        stroke_mni_path = find_list(subject_id, stroke_mni_paths)
        stroke_mni_nda = ReadImage(stroke_mni_path)
        whole_stroke_volume = float(np.count_nonzero(stroke_mni_nda))
        for bp_number in range(1, int(np.amax(atlas_nda))+1):
            mask = np.zeros(atlas_nda.shape, atlas_nda.dtype)
            mask[atlas_nda==(bp_number)]=1
            stroke_in_bp = np.multiply(mask, stroke_mni_nda)
            stroke_in_bp_volume = float(np.count_nonzero(stroke_in_bp))
            modified_volumetric_spatial_features[idx, bp_number-1] = stroke_in_bp_volume / whole_stroke_volume
    volumetric_spatial_list =['volume_'+atlas_name+'_'+str(i) for i in range(1, int(np.amax(atlas_nda))+1)]
    assert((len(volumetric_spatial_list))==modified_volumetric_spatial_features.shape[1])
    return modified_volumetric_spatial_features, volumetric_spatial_list
def extract_new_tractographic_features(weight_type, aal_regions=116):
    """Return (pass, end) weighted histograms normalized by the HCP1021
    column sums, plus the feature-name list.

    Per-subject column sums of the pass/end connectivity matrices are
    divided element-wise by the corresponding HCP1021 column sums, then
    multiplied by the lesion weight vector chosen via *weight_type*.
    """
    # The ground truth lesion in subject space
    gt_subject_paths = [os.path.join(root, name) for root, dirs, files in os.walk(paths.isles2017_training_dir) for name in files if '.OT.' in name and '__MACOSX' not in root and name.endswith('.nii')]
    # New connectivity matrices location
    connectivity_train_dir = os.path.join(paths.dsi_studio_path, 'connectivity', 'gt_stroke')
    # pass type locations
    connectivity_pass_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'pass' in name and name.endswith('.mat')]
    connectivity_pass_files.sort()
    # end type locations
    connectivity_end_files = [os.path.join(root, name) for root, dirs, files in os.walk(connectivity_train_dir) for name in files if 'count' in name and 'ncount' not in name and 'connectivity' in name and 'end' in name and name.endswith('.mat')]
    connectivity_end_files.sort()
    # The ground truth lesions in MNI space
    stroke_mni_dir = os.path.join(paths.dsi_studio_path, 'gt_stroke')
    stroke_mni_paths = [os.path.join(root, name) for root, dirs, files in os.walk(stroke_mni_dir) for name in files if name.endswith('nii.gz')]
    stroke_mni_paths.sort()
    tractographic_list = ["tract_aal_"+str(i) for i in range(1, aal_regions+1)]
    assert(len(connectivity_pass_files) == len(connectivity_end_files) == len(stroke_mni_paths) == 43)
    train_dataset = get_train_dataset()
    # Tractographic Features
    W_pass_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
    W_end_histogram_features = np.zeros((40, aal_regions), dtype=np.float32)
    for idx, subject_name in enumerate(train_dataset.keys()):
        # NOTE(review): reloaded from disk on every iteration; could be
        # hoisted out of the loop since the HCP matrices never change.
        HCP_pass, HCP_end = get_hcp_connectivity_matrice()
        subject_id = train_dataset[subject_name]['ID']
        connectivity_pass_file = find_list(subject_id, connectivity_pass_files)
        connectivity_pass_obj = loadmat(connectivity_pass_file)
        connectivity_pass_matrix = connectivity_pass_obj['connectivity']
        #normalized_pass_matrix = divide_hcp(connectivity_pass_matrix, HCP_pass)
        connectivity_end_file = find_list(subject_id, connectivity_end_files)
        connectivity_end_obj = loadmat(connectivity_end_file)
        connectivity_end_matrix = connectivity_end_obj['connectivity']
        #normalized_end_matrix = divide_hcp(connectivity_pass_matrix, HCP_end)
        stroke_mni_path = find_list(subject_id, stroke_mni_paths)
        # =================================== Weight Vector ========================================== #
        # Get the lesion weights
        if 'ori' in weight_type:
            lesion_weights = get_lesion_weights(stroke_mni_path)
        # Get the modified lesion weights
        if 'mod' in weight_type:
            lesion_weights = get_modified_lesion_weights(stroke_mni_path)
        # No weight
        if 'one' in weight_type:
            lesion_weights = np.ones((1,aal_regions), dtype=np.float32)
        # Column sums normalized by the HCP template's column sums.
        normalized_pass_matrix = np.divide(np.sum(connectivity_pass_matrix, axis=0), np.sum(HCP_pass, axis=0))
        normalized_end_matrix = np.divide(np.sum(connectivity_end_matrix, axis=0), np.sum(HCP_end, axis=0))
        # weighted connectivity histogram
        W_pass_histogram_features[idx, :] = np.multiply(normalized_pass_matrix, lesion_weights)
        W_end_histogram_features[idx, :] = np.multiply(normalized_end_matrix, lesion_weights)
    return W_pass_histogram_features, W_end_histogram_features, tractographic_list
| StarcoderdataPython |
1681516 | <gh_stars>1000+
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""
Test MindData Profiling Analyzer Support
"""
import csv
import json
import os
import numpy as np
import mindspore.common.dtype as mstype
import mindspore.dataset as ds
import mindspore.dataset.transforms.c_transforms as C
from mindspore.profiler.parser.minddata_analyzer import MinddataProfilingAnalyzer
class TestMinddataProfilingAnalyzer():
    """
    Test the MinddataProfilingAnalyzer class.

    Each test builds a small MindData pipeline, iterates it with profiling
    enabled (via environment variables set in ``setup_method``), then runs
    MinddataProfilingAnalyzer over the generated profiling files and checks
    the summary output (returned dict, JSON file, CSV file).
    """
    def setup_class(self):
        """
        Run once for the class
        """
        # Define filenames and path used for the MinddataProfilingAnalyzer tests. Use device_id=7.
        self._PIPELINE_FILE = "./pipeline_profiling_7.json"
        self._CPU_UTIL_FILE = "./minddata_cpu_utilization_7.json"
        self._DATASET_ITERATOR_FILE = "./dataset_iterator_profiling_7.txt"
        self._SUMMARY_JSON_FILE = "./minddata_pipeline_summary_7.json"
        self._SUMMARY_CSV_FILE = "./minddata_pipeline_summary_7.csv"
        self._ANALYZE_FILE_PATH = "./"
        # This is the set of keys for success case
        self._EXPECTED_SUMMARY_KEYS_SUCCESS = \
            ['avg_cpu_pct', 'avg_cpu_pct_per_worker', 'children_ids', 'num_workers', 'op_ids', 'op_names',
             'parent_id', 'per_batch_time', 'per_pipeline_time', 'per_push_queue_time', 'pipeline_ops',
             'queue_average_size', 'queue_empty_freq_pct', 'queue_utilization_pct']
    def setup_method(self):
        """
        Run before each test function.
        """
        # Confirm MindData Profiling files do not yet exist
        assert os.path.exists(self._PIPELINE_FILE) is False
        assert os.path.exists(self._CPU_UTIL_FILE) is False
        assert os.path.exists(self._DATASET_ITERATOR_FILE) is False
        # Confirm MindData Profiling analyze summary files do not yet exist
        assert os.path.exists(self._SUMMARY_JSON_FILE) is False
        assert os.path.exists(self._SUMMARY_CSV_FILE) is False
        # Set the MindData Profiling environment variables.
        # These make dataset iteration below emit the profiling files with
        # device_id/RANK_ID 7 into the current directory.
        os.environ['PROFILING_MODE'] = 'true'
        os.environ['MINDDATA_PROFILING_DIR'] = '.'
        os.environ['RANK_ID'] = '7'
    def teardown_method(self):
        """
        Run after each test function.
        """
        # Delete MindData profiling files generated from the test.
        os.remove(self._PIPELINE_FILE)
        os.remove(self._CPU_UTIL_FILE)
        os.remove(self._DATASET_ITERATOR_FILE)
        # Delete MindData profiling analyze summary files generated from the test.
        os.remove(self._SUMMARY_JSON_FILE)
        os.remove(self._SUMMARY_CSV_FILE)
        # Disable MindData Profiling environment variables
        del os.environ['PROFILING_MODE']
        del os.environ['MINDDATA_PROFILING_DIR']
        del os.environ['RANK_ID']
    def get_csv_result(self, file_pathname):
        """
        Get result from the CSV file.
        Args:
            file_pathname (str): The CSV file pathname.
        Returns:
            list[list], the parsed CSV information.
        """
        result = []
        with open(file_pathname, 'r') as csvfile:
            csv_reader = csv.reader(csvfile)
            for row in csv_reader:
                result.append(row)
        return result
    def verify_md_summary(self, md_summary_dict, EXPECTED_SUMMARY_KEYS):
        """
        Verify the content of the 3 variations of the MindData Profiling analyze summary output.

        Checks that the expected keys are present in (1) the returned dict,
        (2) the summary JSON file, and (3) the first column of the summary
        CSV file, and that all three key sets are identical.
        """
        # Confirm MindData Profiling analyze summary files are created
        assert os.path.exists(self._SUMMARY_JSON_FILE) is True
        assert os.path.exists(self._SUMMARY_CSV_FILE) is True
        # Build a list of the sorted returned keys
        summary_returned_keys = list(md_summary_dict.keys())
        summary_returned_keys.sort()
        # 1. Confirm expected keys are in returned keys
        for k in EXPECTED_SUMMARY_KEYS:
            assert k in summary_returned_keys
        # Read summary JSON file
        with open(self._SUMMARY_JSON_FILE) as f:
            summary_json_data = json.load(f)
        # Build a list of the sorted JSON keys
        summary_json_keys = list(summary_json_data.keys())
        summary_json_keys.sort()
        # 2a. Confirm expected keys are in JSON file keys
        for k in EXPECTED_SUMMARY_KEYS:
            assert k in summary_json_keys
        # 2b. Confirm returned dictionary keys are identical to JSON file keys
        np.testing.assert_array_equal(summary_returned_keys, summary_json_keys)
        # Read summary CSV file
        summary_csv_data = self.get_csv_result(self._SUMMARY_CSV_FILE)
        # Build a list of the sorted CSV keys from the first column in the CSV file
        summary_csv_keys = []
        for x in summary_csv_data:
            summary_csv_keys.append(x[0])
        summary_csv_keys.sort()
        # 3a. Confirm expected keys are in the first column of the CSV file
        for k in EXPECTED_SUMMARY_KEYS:
            assert k in summary_csv_keys
        # 3b. Confirm returned dictionary keys are identical to CSV file first column keys
        np.testing.assert_array_equal(summary_returned_keys, summary_csv_keys)
    def mysource(self):
        """Source for data values"""
        # 8000 rows of single-element arrays: 0, 1, ..., 7999
        for i in range(8000):
            yield (np.array([i]),)
    def test_analyze_basic(self):
        """
        Test MindData profiling analyze summary files exist with basic pipeline.
        Also test basic content (subset of keys and values) from the returned summary result.
        """
        # Create this basic and common linear pipeline
        # Generator -> Map -> Batch -> Repeat -> EpochCtrl
        data1 = ds.GeneratorDataset(self.mysource, ["col1"])
        type_cast_op = C.TypeCast(mstype.int32)
        data1 = data1.map(operations=type_cast_op, input_columns="col1")
        data1 = data1.batch(16)
        data1 = data1.repeat(2)
        num_iter = 0
        # Note: If create_tuple_iterator() is called with num_epochs>1, then EpochCtrlOp is added to the pipeline
        for _ in data1.create_dict_iterator(num_epochs=2):
            num_iter = num_iter + 1
        # Confirm number of rows returned: 8000 rows / batch 16 * repeat 2
        assert num_iter == 1000
        # Confirm MindData Profiling files are created
        assert os.path.exists(self._PIPELINE_FILE) is True
        assert os.path.exists(self._CPU_UTIL_FILE) is True
        assert os.path.exists(self._DATASET_ITERATOR_FILE) is True
        # Call MindData Analyzer for generated MindData profiling files to generate MindData pipeline summary result
        md_analyzer = MinddataProfilingAnalyzer(self._ANALYZE_FILE_PATH, 7, self._ANALYZE_FILE_PATH)
        md_summary_dict = md_analyzer.analyze()
        # Verify MindData Profiling Analyze Summary output
        # Note: MindData Analyzer returns the result in 3 formats:
        # 1. returned dictionary
        # 2. JSON file
        # 3. CSV file
        self.verify_md_summary(md_summary_dict, self._EXPECTED_SUMMARY_KEYS_SUCCESS)
        # 4. Verify non-variant values or number of values in the tested pipeline for certain keys
        # of the returned dictionary
        # Note: Values of num_workers are not tested since default may change in the future
        # Note: Values related to queue metrics are not tested since they may vary on different execution environments
        assert md_summary_dict["pipeline_ops"] == ["EpochCtrl(id=0)", "Repeat(id=1)", "Batch(id=2)", "Map(id=3)",
                                                   "Generator(id=4)"]
        assert md_summary_dict["op_names"] == ["EpochCtrl", "Repeat", "Batch", "Map", "Generator"]
        assert md_summary_dict["op_ids"] == [0, 1, 2, 3, 4]
        assert len(md_summary_dict["num_workers"]) == 5
        assert len(md_summary_dict["queue_average_size"]) == 5
        assert len(md_summary_dict["queue_utilization_pct"]) == 5
        assert len(md_summary_dict["queue_empty_freq_pct"]) == 5
        assert md_summary_dict["children_ids"] == [[1], [2], [3], [4], []]
        assert md_summary_dict["parent_id"] == [-1, 0, 1, 2, 3]
        assert len(md_summary_dict["avg_cpu_pct"]) == 5
    def test_analyze_sequential_pipelines_invalid(self):
        """
        Test invalid scenario in which MinddataProfilingAnalyzer is called for two sequential pipelines.
        """
        # Create the pipeline
        # Generator -> Map -> Batch -> EpochCtrl
        data1 = ds.GeneratorDataset(self.mysource, ["col1"])
        type_cast_op = C.TypeCast(mstype.int32)
        data1 = data1.map(operations=type_cast_op, input_columns="col1")
        data1 = data1.batch(64)
        # Phase 1 - For the pipeline, call create_tuple_iterator with num_epochs>1
        # Note: This pipeline has 4 ops: Generator -> Map -> Batch -> EpochCtrl
        num_iter = 0
        # Note: If create_tuple_iterator() is called with num_epochs>1, then EpochCtrlOp is added to the pipeline
        for _ in data1.create_dict_iterator(num_epochs=2):
            num_iter = num_iter + 1
        # Confirm number of rows returned: 8000 rows / batch 64
        assert num_iter == 125
        # Confirm MindData Profiling files are created
        assert os.path.exists(self._PIPELINE_FILE) is True
        assert os.path.exists(self._CPU_UTIL_FILE) is True
        assert os.path.exists(self._DATASET_ITERATOR_FILE) is True
        # Phase 2 - For the pipeline, call create_tuple_iterator with num_epochs=1
        # Note: This pipeline has 3 ops: Generator -> Map -> Batch
        num_iter = 0
        # Note: If create_tuple_iterator() is called with num_epochs=1, then EpochCtrlOp is NOT added to the pipeline
        for _ in data1.create_dict_iterator(num_epochs=1):
            num_iter = num_iter + 1
        # Confirm number of rows returned
        assert num_iter == 125
        # Confirm MindData Profiling files are created
        # Note: There is an MD bug in which which the pipeline file is not recreated;
        # it still has 4 ops instead of 3 ops
        assert os.path.exists(self._PIPELINE_FILE) is True
        assert os.path.exists(self._CPU_UTIL_FILE) is True
        assert os.path.exists(self._DATASET_ITERATOR_FILE) is True
        # Call MindData Analyzer for generated MindData profiling files to generate MindData pipeline summary result
        md_analyzer = MinddataProfilingAnalyzer(self._ANALYZE_FILE_PATH, 7, self._ANALYZE_FILE_PATH)
        md_summary_dict = md_analyzer.analyze()
        # Verify MindData Profiling Analyze Summary output
        self.verify_md_summary(md_summary_dict, self._EXPECTED_SUMMARY_KEYS_SUCCESS)
        # Confirm pipeline data contains info for 3 ops
        assert md_summary_dict["pipeline_ops"] == ["Batch(id=0)", "Map(id=1)", "Generator(id=2)"]
        # Verify CPU util data contains info for 3 ops
        assert len(md_summary_dict["avg_cpu_pct"]) == 3
| StarcoderdataPython |
145779 | import structlog
log = structlog.getLogger(__name__)
class AuthManager(object):
    '''Manager responsible for authentication.

    A thin layer over an Auth API instance ``api`` that performs the
    basic auth operations; extra behaviour (such as logging account
    creation) is added on top.

    :param `opentaxii.auth.api.OpenTAXIIAuthAPI` api:
        instance of Auth API class
    '''
    def __init__(self, api):
        self.api = api

    def authenticate(self, username, password):
        '''Authenticate a user.

        :param str username: username
        :param str password: password

        :return: auth token
        :rtype: string
        '''
        token = self.api.authenticate(username, password)
        return token

    def get_account(self, token):
        '''Get account for auth token.

        :param str token: auth token

        :return: an account entity
        :rtype: `opentaxii.entities.Account`
        '''
        account = self.api.get_account(token)
        return account

    def create_account(self, username, password):
        '''Create an account.

        NOTE: Additional method that is only used in the helper scripts
        shipped with OpenTAXII.
        '''
        new_account = self.api.create_account(username, password)
        log.info("account.created", username=new_account.username)
        return new_account
| StarcoderdataPython |
4832637 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
from collections import defaultdict
import sys
import pytest
from tvm import tir
from tvm.script import tir as T
from tvm.tir.schedule.testing import verify_trace_roundtrip
# pylint: disable=no-member,invalid-name,unused-variable
@T.prim_func
def elementwise(a: T.handle, b: T.handle) -> None:
    # B = A * 2 over a (128, 257, 1470) buffer. The three extents are a
    # power of two, a prime, and a composite, matching the three
    # sample_perfect_tile tests below which tile one axis each.
    A = T.match_buffer(a, (128, 257, 1470))
    B = T.match_buffer(b, (128, 257, 1470))
    for i, j, k in T.grid(128, 257, 1470):
        with T.block("B"):
            vi, vj, vk = T.axis.remap("SSS", [i, j, k])
            B[vi, vj, vk] = A[vi, vj, vk] * 2.0
@T.prim_func
def tiled_conv2d_with_padding(
    inputs: T.Buffer[(1, 224, 224, 3), "float32"],
    weight: T.Buffer[(7, 7, 3, 64), "float32"],
    conv2d_nhwc: T.Buffer[(1, 112, 112, 64), "float32"],
) -> None:
    # Zero-pad the 224x224 NHWC input by 3 on each spatial side
    # (230 = 224 + 2 * 3); out-of-range reads yield 0.
    PadInput = T.alloc_buffer([1, 230, 230, 3], dtype="float32")
    for i0, i1, i2, i3 in T.grid(1, 230, 230, 3):
        with T.block("PadInput"):
            i0_1, i1_1, i2_1, i3_1 = T.axis.remap("SSSS", [i0, i1, i2, i3])
            T.reads(inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1])
            T.writes(PadInput[i0_1, i1_1, i2_1, i3_1])
            PadInput[i0_1, i1_1, i2_1, i3_1] = T.if_then_else(
                3 <= i1_1 and i1_1 < 227 and 3 <= i2_1 and i2_1 < 227,
                inputs[i0_1, i1_1 - 3, i2_1 - 3, i3_1],
                T.float32(0),
                dtype="float32",
            )
    # Pre-tiled 7x7 stride-2 conv loop nest; this fixed structure gives
    # sample_compute_location a known set of candidate loops to choose from.
    for (
        i0_0,
        i1_0,
        i2_0,
        i3_0,
        i0_1_1,
        i1_1_1,
        i2_1_1,
        i3_1_1,
        i4_0,
        i5_0,
        i6_0,
        i0_2,
        i1_2,
        i2_2,
        i3_2,
        i4_1,
        i5_1,
        i6_1,
        i0_3,
        i1_3,
        i2_3,
        i3_3,
    ) in T.grid(1, 1, 4, 1, 1, 2, 4, 1, 7, 7, 1, 1, 1, 1, 1, 1, 1, 3, 1, 56, 7, 64):
        with T.block("conv2d_nhwc"):
            n = T.axis.spatial(1, 0)
            # Output spatial coords reassembled from the split loop vars.
            h = T.axis.spatial(112, i1_1_1 * 56 + i1_3)
            w = T.axis.spatial(112, i2_0 * 28 + i2_1_1 * 7 + i2_3)
            co, rh, rw, rc = T.axis.remap("SRRR", [i3_3, i4_0, i5_0, i6_1])
            T.reads(
                conv2d_nhwc[n, h, w, co],
                PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc],
                weight[rh, rw, rc, co],
            )
            T.writes(conv2d_nhwc[n, h, w, co])
            with T.init():
                conv2d_nhwc[n, h, w, co] = T.float32(0)
            conv2d_nhwc[n, h, w, co] = (
                conv2d_nhwc[n, h, w, co]
                + PadInput[n, h * 2 + rh, w * 2 + rw, co // 64 * 3 + rc] * weight[rh, rw, rc, co]
            )
# pylint: enable=no-member,invalid-name,unused-variable
def test_sample_categorical():
    """Test sample categorical sampling function"""
    n_samples = 1000
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [5, 2, 7, 1]
    probs = [0.15, 0.55, 0.05, 0.25]
    # Tally how often each candidate value is drawn.
    counter = defaultdict(int)
    for _ in range(n_samples):
        counter[sch.get(sch.sample_categorical(candidates, probs))] += 1
    # Each empirical frequency must be within +/-7% of its probability.
    for cand, prob in zip(candidates, probs):
        assert (prob - 0.07) * n_samples <= counter[cand] <= (prob + 0.07) * n_samples
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_categorical_copy():
    """Check the random variable sampling results after schedule copy"""
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [1, 2, 3, 4]
    probs = [0.1, 0.2, 0.3, 0.4]
    # Record each sampled random variable with its decision in the original.
    rv_decisions = []
    for _ in range(100):
        rv = sch.sample_categorical(candidates, probs)  # pylint: disable=invalid-name
        rv_decisions.append((rv, sch.get(rv)))
    # A copied schedule must resolve every rv to the same decision.
    sch_copy = sch.copy()
    for rv, decision in rv_decisions:  # pylint: disable=invalid-name
        assert int(decision) == int(sch_copy.get(rv))
def test_sample_categorical_serialize():
    """Check the random variable sampling results after schedule serialization"""
    sch = tir.Schedule(elementwise, seed=42, debug_mask="all")
    candidates = [5, 6, 7, 8]
    probs = [0.23, 0.19, 0.37, 0.21]
    # Draw 100 samples and remember the concrete decisions.
    decisions = [sch.get(sch.sample_categorical(candidates, probs)) for _ in range(100)]
    # Round-trip the trace; the deserialized schedule must replay the
    # identical decisions instruction by instruction.
    new_sch = verify_trace_roundtrip(sch, mod=elementwise)
    for idx, inst in enumerate(new_sch.trace.insts):
        assert decisions[idx] == candidates[new_sch.trace.decisions[inst].value]
def test_sample_perfect_tile_power_of_two():
    """Perfect 4-way tiling of a power-of-two extent (128) must multiply back."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    loop_i, _, _ = sch.get_loops(sch.get_block("B"))
    values = [sch.get(rv) for rv in sch.sample_perfect_tile(loop_i, n=4)]
    product = 1
    for value in values:
        product *= value
    assert product == 128
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_prime():
    """Perfect 4-way tiling of a prime extent (257) must multiply back."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    _, loop_j, _ = sch.get_loops(sch.get_block("B"))
    values = [sch.get(rv) for rv in sch.sample_perfect_tile(loop_j, n=4)]
    product = 1
    for value in values:
        product *= value
    assert product == 257
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_perfect_tile_composite():
    """Perfect 4-way tiling of a composite extent (1470) must multiply back."""
    sch = tir.Schedule(elementwise, debug_mask="all")
    _, _, loop_k = sch.get_loops(sch.get_block("B"))
    values = [sch.get(rv) for rv in sch.sample_perfect_tile(loop_k, n=4)]
    product = 1
    for value in values:
        product *= value
    assert product == 1470
    verify_trace_roundtrip(sch, mod=elementwise)
def test_sample_compute_location():
    """Check that sample_compute_location draws (roughly) uniformly.

    Samples a compute location for the ``PadInput`` block ``n`` times and
    asserts each of the 8 candidate locations is hit close to 1/8 of the
    time (+/- 3 percentage points).
    """
    n = 100
    sch = tir.Schedule(tiled_conv2d_with_padding, seed=42, debug_mask="all")
    pad_input = sch.get_block("PadInput")
    decision_dict = {}
    for _ in range(n):
        _ = sch.sample_compute_location(pad_input)  # pylint: disable=invalid-name
        # The decision of the just-issued instruction identifies the
        # sampled location.
        decision = sch.trace.decisions[sch.trace.insts[-1]]
        # Idiomatic counting: dict.get avoids the double key lookup of
        # `d[k] = d[k] + 1 if k in d else 1`.
        decision_dict[decision] = decision_dict.get(decision, 0) + 1
    n_candidates = 8
    expected_rate = 1.0 / n_candidates
    # Only the counts matter here, so iterate values() rather than items().
    for cnt in decision_dict.values():
        assert (expected_rate - 0.03) * n <= cnt <= (expected_rate + 0.03) * n
if __name__ == "__main__":
    # Forward any CLI arguments to pytest so individual tests can be
    # selected when the file is run as a script; exit with pytest's status.
    sys.exit(pytest.main([__file__] + sys.argv[1:]))
| StarcoderdataPython |
3257796 | from pyopencga.rest_clients._parent_rest_clients import _ParentRestClient
class Tool(_ParentRestClient):
    """
    Client for the Analysis - Tool webservices.
    """
    def __init__(self, configuration, session_id=None, login_handler=None, *args, **kwargs):
        # All endpoints of this client live under the 'analysis/tool' category.
        super(Tool, self).__init__(configuration, 'analysis/tool', session_id,
                                   login_handler, *args, **kwargs)

    def execute(self, data, **options):
        """
        Execute an analysis using an internal or external tool.

        URL: /{apiVersion}/analysis/tool/execute
        """
        return self._post('execute', data=data, **options)
1615806 | from contextlib import contextmanager
from copy import deepcopy
from functools import partial
import sys
import warnings
import numpy as np
from numpy.testing import assert_equal
import pytest
from numpy.testing import assert_allclose
from expyfun import ExperimentController, visual, _experiment_controller
from expyfun._experiment_controller import _get_dev_db
from expyfun._utils import (_TempDir, fake_button_press, _check_skip_backend,
fake_mouse_click, requires_opengl21,
_wait_secs as wait_secs, known_config_types,
_new_pyglet)
from expyfun._sound_controllers._sound_controller import _SOUND_CARD_KEYS
from expyfun.stimuli import get_tdt_rates
std_args = ['test']  # experiment name
# Shared ExperimentController keyword arguments used throughout this module.
# output_dir=None avoids writing files unless a test overrides it; the tiny
# 8x8 window keeps window creation cheap in headless test runs.
std_kwargs = dict(output_dir=None, full_screen=False, window_size=(8, 8),
                  participant='foo', session='01', stim_db=0.0, noise_db=0.0,
                  verbose=True, version='dev')
def dummy_print(string):
    """Echo ``string`` to stdout; used as a trivial flip callback in tests."""
    print(string)
@pytest.mark.parametrize('ws', [(2, 1), (1, 1)])
def test_unit_conversions(hide_window, ws):
    """Test unit conversions."""
    kwargs = deepcopy(std_kwargs)
    kwargs.update(stim_fs=44100, window_size=ws)
    with ExperimentController(*std_args, **kwargs) as ec:
        units = ('norm', 'pix', 'deg', 'cm')
        verts = np.random.rand(2, 4)
        # every fro->to->fro round trip must be the identity
        for to in units:
            for fro in units:
                round_trip = ec._convert_units(
                    ec._convert_units(verts, fro, to), to, fro)
                assert_allclose(verts, round_trip)
        # one degree must span the same number of pixels on both axes;
        # compare against the zero position to remove the offset
        ones = np.ones((2, 1))
        zeros = np.zeros((2, 1))
        delta = (ec._convert_units(ones, 'deg', 'pix') -
                 ec._convert_units(zeros, 'deg', 'pix'))
        assert_allclose(delta[0], delta[1])
        pytest.raises(ValueError, ec._convert_units, zeros, 'deg', 'nothing')
        pytest.raises(RuntimeError, ec._convert_units, zeros[0], 'deg', 'pix')
def test_validate_audio(hide_window):
    """Test that validate_audio can pass through samples."""
    with ExperimentController(*std_args, suppress_resamp=True,
                              **std_kwargs) as ec:
        # 40 dB below the device reference level -> 0.01 RMS, unity scaler
        ec.set_stim_db(_get_dev_db(ec.audio_type) - 40)
        assert ec._stim_scaler == 1.
        # mono / row-vector / stereo inputs all become (1000, 2) float32 copies
        for shape in ((1000,), (1, 1000), (2, 1000)):
            zeros_in = np.zeros(shape)
            zeros_out = ec._validate_audio(zeros_in)
            assert zeros_out.shape == (1000, 2)
            assert zeros_out.dtype == np.float32
            assert zeros_out is not zeros_in
        # float32 stereo input is only transposed: a view, never a copy
        for order in 'CF':
            stereo_in = np.zeros((2, 1000), dtype=np.float32, order=order)
            stereo_out = ec._validate_audio(stereo_in)
            assert stereo_out.shape == stereo_in.shape[::-1]
            assert stereo_out.dtype == np.float32
            assert stereo_out.base is stereo_in
def test_data_line(hide_window):
    """Test writing of data lines.

    Writes a few data lines with tab/backslash content and custom
    timestamps, then parses the tab-separated output file and checks the
    header, escaping, timestamps, and values.
    """
    entries = [['foo'],
               ['bar', 'bar\tbar'],
               ['bar2', r'bar\tbar'],
               ['fb', None, -0.5]]
    # this is what should be written to the file for each one
    goal_vals = ['None', 'bar\\tbar', 'bar\\\\tbar', 'None']
    assert_equal(len(entries), len(goal_vals))
    temp_dir = _TempDir()
    with std_kwargs_changed(output_dir=temp_dir):
        with ExperimentController(*std_args, stim_fs=44100,
                                  **std_kwargs) as ec:
            for ent in entries:
                ec.write_data_line(*ent)
            fname = ec._data_file.name
    with open(fname) as fid:
        lines = fid.readlines()
    # check the header
    assert_equal(len(lines), len(entries) + 4)  # header, colnames, flip, stop
    assert_equal(lines[0][0], '#')  # first line is a comment
    for x in ['timestamp', 'event', 'value']:  # second line is col header
        assert (x in lines[1])
    assert ('flip' in lines[2])  # ec.__init__ ends with a flip
    assert ('stop' in lines[-1])  # last line is stop (from __exit__)
    outs = lines[1].strip().split('\t')
    assert (all(l1 == l2 for l1, l2 in zip(outs, ['timestamp',
                                                  'event', 'value'])))
    # check the entries
    ts = []
    for line, ent, gv in zip(lines[3:], entries, goal_vals):
        outs = line.strip().split('\t')
        assert_equal(len(outs), 3)
        # check timestamping: explicit stamps are echoed verbatim, the
        # rest are collected for the monotonicity check below
        if len(ent) == 3 and ent[2] is not None:
            assert_equal(outs[0], str(ent[2]))
        else:
            ts.append(float(outs[0]))
        # check events
        assert_equal(outs[1], ent[0])
        # check values
        assert_equal(outs[2], gv)
    # make sure we got monotonically increasing timestamps
    ts = np.array(ts)
    assert (np.all(ts[1:] >= ts[:-1]))
@contextmanager
def std_kwargs_changed(**kwargs):
    """Temporarily override entries of ``std_kwargs``; restore them on exit."""
    saved = dict()
    for key in kwargs:
        # record the current value, then install the override
        saved[key] = std_kwargs[key]
        std_kwargs[key] = kwargs[key]
    try:
        yield
    finally:
        std_kwargs.update(saved)
def test_degenerate():
    """Test degenerate EC conditions.

    Each call checks that ExperimentController rejects one specific bad
    argument (type or value) with the expected exception.
    """
    # audio_controller must be a str or dict with a known TYPE
    pytest.raises(TypeError, ExperimentController, *std_args,
                  audio_controller=1, stim_fs=44100, **std_kwargs)
    pytest.raises(ValueError, ExperimentController, *std_args,
                  audio_controller='foo', stim_fs=44100, **std_kwargs)
    pytest.raises(ValueError, ExperimentController, *std_args,
                  audio_controller=dict(TYPE='foo'), stim_fs=44100,
                  **std_kwargs)
    # monitor, etc.
    pytest.raises(TypeError, ExperimentController, *std_args,
                  monitor='foo', **std_kwargs)
    pytest.raises(KeyError, ExperimentController, *std_args,
                  monitor=dict(), **std_kwargs)
    pytest.raises(ValueError, ExperimentController, *std_args,
                  response_device='foo', **std_kwargs)
    # window_size must be length-2, not a scalar
    with std_kwargs_changed(window_size=10.):
        pytest.raises(ValueError, ExperimentController, *std_args,
                      **std_kwargs)
    # incompatible controller combinations
    pytest.raises(ValueError, ExperimentController, *std_args,
                  audio_controller='sound_card', response_device='tdt',
                  **std_kwargs)
    pytest.raises(ValueError, ExperimentController, *std_args,
                  audio_controller='pyglet', response_device='keyboard',
                  trigger_controller='sound_card', **std_kwargs)
    # test type checking for 'session'
    with std_kwargs_changed(session=1):
        pytest.raises(TypeError, ExperimentController, *std_args,
                      audio_controller='sound_card', stim_fs=44100,
                      **std_kwargs)
    # test value checking for trigger controller
    pytest.raises(ValueError, ExperimentController, *std_args,
                  audio_controller='sound_card', trigger_controller='foo',
                  stim_fs=44100, **std_kwargs)
    # test value checking for RMS checker
    pytest.raises(ValueError, ExperimentController, *std_args,
                  audio_controller='sound_card', check_rms=True, stim_fs=44100,
                  **std_kwargs)
@pytest.mark.timeout(20)
def test_ec(ac, hide_window, monkeypatch):
    """Test EC methods.

    Runs for each audio controller backend ``ac``; exercises prompting,
    key-press collection, buffer loading and RMS checking, trigger
    stamping, the identify_trial/start_stimulus/trial_ok sequence, noise
    playback, mouse/cursor helpers and screenshots.
    """
    if ac == 'tdt':
        rd, tc, fs = 'tdt', 'tdt', get_tdt_rates()['25k']
        # bad TDT_MODEL must be rejected
        pytest.raises(ValueError, ExperimentController, *std_args,
                      audio_controller=dict(TYPE=ac, TDT_MODEL='foo'),
                      **std_kwargs)
    else:
        _check_skip_backend(ac)
        rd, tc, fs = 'keyboard', 'dummy', 44100
    # stim_fs mismatch should only warn about dummy mode on TDT
    for suppress in (True, False):
        with warnings.catch_warnings(record=True) as w:
            warnings.simplefilter('always')
            with ExperimentController(
                    *std_args, audio_controller=ac, response_device=rd,
                    trigger_controller=tc, stim_fs=100.,
                    suppress_resamp=suppress, **std_kwargs) as ec:
                pass
        w = [ww for ww in w if 'TDT is in dummy mode' in str(ww.message)]
        assert len(w) == (1 if ac == 'tdt' else 0)
    SAFE_DELAY = 0.2
    with ExperimentController(
            *std_args, audio_controller=ac, response_device=rd,
            trigger_controller=tc, stim_fs=fs, **std_kwargs) as ec:
        assert (ec.participant == std_kwargs['participant'])
        assert (ec.session == std_kwargs['session'])
        assert (ec.exp_name == std_args[0])
        stamp = ec.current_time
        ec.write_data_line('hello')
        ec.wait_until(stamp + 0.02)
        # prompting with various key restrictions
        ec.screen_prompt('test', 0.01, 0, None)
        ec.screen_prompt('test', 0.01, 0, ['1'])
        ec.screen_prompt(['test', 'ing'], 0.01, 0, ['1'])
        ec.screen_prompt('test', 1e-3, click=True)
        pytest.raises(ValueError, ec.screen_prompt, 'foo', np.inf, 0, [])
        pytest.raises(TypeError, ec.screen_prompt, 3, 0.01, 0, None)
        # no presses have been made yet
        assert_equal(ec.wait_one_press(0.01), (None, None))
        assert (ec.wait_one_press(0.01, timestamp=False) is None)
        assert_equal(ec.wait_for_presses(0.01), [])
        assert_equal(ec.wait_for_presses(0.01, timestamp=False), [])
        pytest.raises(ValueError, ec.get_presses)
        ec.listen_presses()
        assert_equal(ec.get_presses(), [])
        assert_equal(ec.get_presses(kind='presses'), [])
        pytest.raises(ValueError, ec.get_presses, kind='foo')
        if rd == 'tdt':
            # TDT does not have key release events, so should raise an
            # exception if asked for them:
            pytest.raises(RuntimeError, ec.get_presses, kind='releases')
            pytest.raises(RuntimeError, ec.get_presses, kind='both')
        else:
            assert_equal(ec.get_presses(kind='both'), [])
            assert_equal(ec.get_presses(kind='releases'), [])
        ec.set_noise_db(0)
        ec.set_stim_db(20)
        # test buffer data handling
        ec.set_rms_checking(None)
        ec.load_buffer([0, 0, 0, 0, 0, 0])
        ec.load_buffer([])
        pytest.raises(ValueError, ec.load_buffer, [0, 2, 0, 0, 0, 0])
        ec.load_buffer(np.zeros((100,)))
        with pytest.raises(ValueError, match='100 did not match .* count 2'):
            ec.load_buffer(np.zeros((100, 1)))
        with pytest.raises(ValueError, match='100 did not match .* count 2'):
            ec.load_buffer(np.zeros((100, 2)))
        ec.load_buffer(np.zeros((1, 100)))
        ec.load_buffer(np.zeros((2, 100)))
        data = np.zeros(int(5e6), np.float32)  # too long for TDT
        if fs == get_tdt_rates()['25k']:
            pytest.raises(RuntimeError, ec.load_buffer, data)
        else:
            ec.load_buffer(data)
        ec.load_buffer(np.zeros(2))
        del data
        # trigger values must be legal (positive ints, int4-representable)
        pytest.raises(ValueError, ec.stamp_triggers, 'foo')
        pytest.raises(ValueError, ec.stamp_triggers, 0)
        pytest.raises(ValueError, ec.stamp_triggers, 3)
        pytest.raises(ValueError, ec.stamp_triggers, 1, check='foo')
        print(ec._tc)  # test __repr__
        if tc == 'dummy':
            assert_equal(ec._tc._trigger_list, [])
        ec.stamp_triggers(3, check='int4')
        ec.stamp_triggers(2)
        ec.stamp_triggers([2, 4, 8])
        if tc == 'dummy':
            assert_equal(ec._tc._trigger_list, [3, 2, 2, 4, 8])
            ec._tc._trigger_list = list()
        pytest.raises(ValueError, ec.load_buffer, np.zeros((100, 3)))
        pytest.raises(ValueError, ec.load_buffer, np.zeros((3, 100)))
        pytest.raises(ValueError, ec.load_buffer, np.zeros((1, 1, 1)))
        # test RMS checking
        pytest.raises(ValueError, ec.set_rms_checking, 'foo')
        # click: RMS 0.0135, should pass 'fullfile' and fail 'windowed'
        click = np.zeros((int(ec.fs / 4),))  # 250 ms
        click[len(click) // 2] = 1.
        click[len(click) // 2 + 1] = -1.
        # noise: RMS 0.03, should fail both 'fullfile' and 'windowed'
        noise = np.random.normal(scale=0.03, size=(int(ec.fs / 4),))
        ec.set_rms_checking(None)
        ec.load_buffer(click)  # should go unchecked
        ec.load_buffer(noise)  # should go unchecked
        ec.set_rms_checking('wholefile')
        ec.load_buffer(click)  # should pass
        with pytest.warns(UserWarning, match='exceeds stated'):
            ec.load_buffer(noise)
        ec.wait_secs(SAFE_DELAY)
        ec.set_rms_checking('windowed')
        with pytest.warns(UserWarning, match='exceeds stated'):
            ec.load_buffer(click)
        ec.wait_secs(SAFE_DELAY)
        with pytest.warns(UserWarning, match='exceeds stated'):
            ec.load_buffer(noise)
        if ac != 'tdt':  # too many samples there
            monkeypatch.setattr(_experiment_controller, '_SLOW_LIMIT', 1)
            with pytest.warns(UserWarning, match='samples is slow'):
                ec.load_buffer(np.zeros(2, dtype=np.float32))
            monkeypatch.setattr(_experiment_controller, '_SLOW_LIMIT', 1e7)
        ec.stop()
        ec.set_visible()
        ec.set_visible(False)
        ec.call_on_every_flip(partial(dummy_print, 'called start stimuli'))
        ec.wait_secs(SAFE_DELAY)
        # Note: we put some wait_secs in here because otherwise the delay in
        # play start (e.g. for trigdel and onsetdel) can
        # mess things up! So we probably eventually should add
        # some safeguard against stopping too quickly after starting...
        #
        # First: identify_trial
        #
        noise = np.random.normal(scale=0.01, size=(int(ec.fs),))
        ec.load_buffer(noise)
        pytest.raises(RuntimeError, ec.start_stimulus)  # order violation
        assert (ec._playing is False)
        if tc == 'dummy':
            assert_equal(ec._tc._trigger_list, [])
        ec.start_stimulus(start_of_trial=False)  # should work
        if tc == 'dummy':
            assert_equal(ec._tc._trigger_list, [1])
        ec.wait_secs(SAFE_DELAY)
        assert (ec._playing is True)
        pytest.raises(RuntimeError, ec.trial_ok)  # order violation
        ec.stop()
        assert (ec._playing is False)
        # only binary for TTL
        pytest.raises(KeyError, ec.identify_trial, ec_id='foo')  # need ttl_id
        pytest.raises(TypeError, ec.identify_trial, ec_id='foo', ttl_id='bar')
        pytest.raises(ValueError, ec.identify_trial, ec_id='foo', ttl_id=[2])
        assert (ec._playing is False)
        if tc == 'dummy':
            ec._tc._trigger_list = list()
        ec.identify_trial(ec_id='foo', ttl_id=[0, 1])
        assert (ec._playing is False)
        #
        # Second: start_stimuli
        #
        pytest.raises(RuntimeError, ec.identify_trial, ec_id='foo', ttl_id=[0])
        assert (ec._playing is False)
        pytest.raises(RuntimeError, ec.trial_ok)  # order violation
        assert (ec._playing is False)
        ec.start_stimulus(flip=False, when=-1)
        if tc == 'dummy':
            assert_equal(ec._tc._trigger_list, [4, 8, 1])
        if ac != 'tdt':
            # dummy TDT version won't do this check properly, as
            # ec._ac._playing -> GetTagVal('playing') always gives False
            pytest.raises(RuntimeError, ec.play)  # already played, must stop
        ec.wait_secs(SAFE_DELAY)
        ec.stop()
        assert (ec._playing is False)
        #
        # Third: trial_ok
        #
        pytest.raises(RuntimeError, ec.start_stimulus)  # order violation
        pytest.raises(RuntimeError, ec.identify_trial)  # order violation
        ec.trial_ok()
        # double-check
        pytest.raises(RuntimeError, ec.start_stimulus)  # order violation
        ec.start_stimulus(start_of_trial=False)  # should work
        pytest.raises(RuntimeError, ec.trial_ok)  # order violation
        ec.wait_secs(SAFE_DELAY)
        ec.stop()
        assert (ec._playing is False)
        ec.flip(-np.inf)
        assert (ec._playing is False)
        ec.estimate_screen_fs()
        assert (ec._playing is False)
        ec.play()
        ec.wait_secs(SAFE_DELAY)
        assert (ec._playing is True)
        ec.call_on_every_flip(None)
        # something funny with the ring buffer in testing on OSX
        if sys.platform != 'darwin':
            ec.call_on_next_flip(ec.start_noise())
        ec.flip()
        ec.wait_secs(SAFE_DELAY)
        ec.stop_noise()
        ec.stop()
        assert (ec._playing is False)
        ec.stop_noise()
        ec.wait_secs(SAFE_DELAY)
        ec.start_stimulus(start_of_trial=False)
        ec.stop()
        ec.start_stimulus(start_of_trial=False)
        # mouse / cursor helpers
        ec.get_mouse_position()
        ec.listen_clicks()
        ec.get_clicks()
        ec.toggle_cursor(False)
        ec.toggle_cursor(True, True)
        ec.move_mouse_to((0, 0))  # center of the window
        ec.wait_secs(0.001)
        # property smoke tests
        print(ec.id_types)
        print(ec.stim_db)
        print(ec.noise_db)
        print(ec.on_next_flip_functions)
        print(ec.on_every_flip_functions)
        print(ec.window)
        # we need to monkey-patch for old Pyglet
        try:
            from PIL import Image
            Image.fromstring
        except AttributeError:
            Image.fromstring = None
        data = ec.screenshot()
        # HiDPI
        sizes = [tuple(std_kwargs['window_size']),
                 tuple(np.array(std_kwargs['window_size']) * 2)]
        assert data.shape[:2] in sizes
        print(ec.fs)  # test fs support
        wait_secs(0.01)
        test_pix = (11.3, 0.5, 110003)
        print(test_pix)
        # test __repr__
        assert all([x in repr(ec) for x in ['foo', '"test"', '01']])
        ec.refocus()  # smoke test for refocusing
    del ec
@pytest.mark.parametrize('screen_num', (None, 0))
@pytest.mark.parametrize('monitor', (
    None,
    dict(SCREEN_WIDTH=10, SCREEN_DISTANCE=10, SCREEN_SIZE_PIX=(1000, 1000)),
))
def test_screen_monitor(screen_num, monitor, hide_window):
    """Test screen and monitor option support."""
    # valid combinations construct and tear down cleanly
    with ExperimentController(*std_args, screen_num=screen_num,
                              monitor=monitor, **std_kwargs):
        pass
    # full_screen with the tiny test window size must be rejected
    fs_kwargs = deepcopy(std_kwargs)
    fs_kwargs['full_screen'] = True
    with pytest.raises(RuntimeError, match='resolution set incorrectly'):
        ExperimentController(*std_args, **fs_kwargs)
    # monitor must be a dict containing the required keys
    with pytest.raises(TypeError, match='must be a dict'):
        ExperimentController(*std_args, monitor=1, **std_kwargs)
    with pytest.raises(KeyError, match='is missing required keys'):
        ExperimentController(*std_args, monitor={}, **std_kwargs)
def test_tdtpy_failure(hide_window):
    """Test that failed TDTpy import raises ImportError."""
    # Only meaningful when TDTpy is absent; skip if it imports fine.
    try:
        from tdt.util import connect_rpcox  # noqa, analysis:ignore
    except ImportError:
        pass
    else:
        pytest.skip('Cannot test TDT import failure')
    tdt_ctrl = dict(TYPE='tdt', TDT_MODEL='RP2')
    with pytest.raises(ImportError, match='No module named'):
        ExperimentController(
            *std_args, audio_controller=tdt_ctrl, response_device='keyboard',
            trigger_controller='tdt', stim_fs=100.,
            suppress_resamp=True, **std_kwargs)
@pytest.mark.timeout(10)
def test_button_presses_and_window_size(hide_window):
    """Test EC window_size=None and button press capture.

    Drives the controller with ``fake_button_press`` (delayed, simulated
    keypresses) and checks every press-retrieval API: ``get_presses`` with
    and without timestamps/kinds, ``wait_one_press``, ``wait_for_presses``,
    and ``text_input``.
    """
    with ExperimentController(*std_args, audio_controller='sound_card',
                              response_device='keyboard', window_size=None,
                              output_dir=None, full_screen=False, session='01',
                              participant='foo', trigger_controller='dummy',
                              force_quit='escape', version='dev') as ec:
        # No presses recorded before any are faked.
        ec.listen_presses()
        ec.get_presses()
        assert_equal(ec.get_presses(), [])
        # A press faked 0.5 s in the future is picked up by screen_prompt.
        fake_button_press(ec, '1', 0.5)
        assert_equal(ec.screen_prompt('press 1', live_keys=['1'],
                                      max_wait=1.5), '1')
        # listen_presses() clears previously recorded presses.
        ec.listen_presses()
        assert_equal(ec.get_presses(), [])
        fake_button_press(ec, '1')
        # timestamp=False -> 1-tuples of just the key name.
        assert_equal(ec.get_presses(timestamp=False), [('1',)])
        ec.listen_presses()
        fake_button_press(ec, '1')
        # timestamp=True -> (key, time) pairs, time relative to relative_to.
        presses = ec.get_presses(timestamp=True, relative_to=0.2)
        assert_equal(len(presses), 1)
        assert_equal(len(presses[0]), 2)
        assert_equal(presses[0][0], '1')
        assert (isinstance(presses[0][1], float))
        ec.listen_presses()
        fake_button_press(ec, '1')
        # return_kinds=True appends the event kind ('press') to each tuple.
        presses = ec.get_presses(timestamp=True, relative_to=0.1,
                                 return_kinds=True)
        assert_equal(len(presses), 1)
        assert_equal(len(presses[0]), 3)
        assert_equal(presses[0][::2], ('1', 'press'))
        assert (isinstance(presses[0][1], float))
        ec.listen_presses()
        fake_button_press(ec, '1')
        presses = ec.get_presses(timestamp=False, return_kinds=True)
        assert_equal(presses, [('1', 'press')])
        ec.listen_presses()
        ec.screen_text('press 1 again')
        ec.flip()
        # wait_one_press accepts a delayed press; note live_keys=[1] (int)
        # is accepted here, while ['1'] (str) is used elsewhere.
        fake_button_press(ec, '1', 0.3)
        assert_equal(ec.wait_one_press(1.5, live_keys=[1])[0], '1')
        ec.screen_text('press 1 one last time')
        ec.flip()
        fake_button_press(ec, '1', 0.3)
        out = ec.wait_for_presses(1.5, live_keys=['1'], timestamp=False)
        assert_equal(out[0], '1')
        # text_input defaults to all-caps echo, terminated by 'return'.
        fake_button_press(ec, 'a', 0.3)
        fake_button_press(ec, 'return', 0.5)
        assert ec.text_input() == 'A'
        # Type "a ", backspace the space, then a comma key, then return;
        # the stripped lowercase result should be 'a'.
        fake_button_press(ec, 'a', 0.3)
        fake_button_press(ec, 'space', 0.35)
        fake_button_press(ec, 'backspace', 0.4)
        fake_button_press(ec, 'comma', 0.45)
        fake_button_press(ec, 'return', 0.5)
        # XXX this fails on OSX travis for some reason
        new_pyglet = _new_pyglet()
        bad = sys.platform == 'darwin'
        bad |= sys.platform == 'win32' and new_pyglet
        if not bad:
            assert ec.text_input(all_caps=False).strip() == 'a'
@pytest.mark.timeout(10)
@requires_opengl21
def test_mouse_clicks(hide_window):
    """Test EC mouse click support.

    Fakes delayed mouse clicks and checks ``wait_for_click_on`` (click inside
    a shape), ``wait_one_click`` (filtered by button), and ``wait_for_clicks``
    (collect all clicks within a window).
    """
    with ExperimentController(*std_args, participant='foo', session='01',
                              output_dir=None, version='dev') as ec:
        rect = visual.Rectangle(ec, [0, 0, 2, 2])
        # Click at (1, 2) lands inside the rectangle -> returned as
        # (button, x, y) when timestamp is suppressed.
        fake_mouse_click(ec, [1, 2], delay=0.3)
        assert_equal(ec.wait_for_click_on(rect, 1.5, timestamp=False)[0],
                     ('left', 1, 2))
        # Passing a tuple of shapes is not supported.
        pytest.raises(TypeError, ec.wait_for_click_on, (rect, rect), 1.5)
        # Filter on the middle button; timestamped result appends the time.
        fake_mouse_click(ec, [2, 1], 'middle', delay=0.3)
        out = ec.wait_one_click(1.5, 0., ['middle'], timestamp=True)
        assert (out[3] < 1.5)
        assert_equal(out[:3], ('middle', 2, 1))
        # Two clicks within the window are both collected (order not assumed).
        fake_mouse_click(ec, [3, 2], 'left', delay=0.3)
        fake_mouse_click(ec, [4, 5], 'right', delay=0.3)
        out = ec.wait_for_clicks(1.5, timestamp=False)
        assert_equal(len(out), 2)
        assert (any(o == ('left', 3, 2) for o in out))
        assert (any(o == ('right', 4, 5) for o in out))
        # No clicks faked -> an empty result after the short wait.
        out = ec.wait_for_clicks(0.1)
        assert_equal(len(out), 0)
@requires_opengl21
@pytest.mark.timeout(30)
def test_background_color(hide_window):
    """Test setting background color.

    Verifies via screenshots that ``set_background_color`` fills the whole
    window, and that drawn foreground shapes composite on top of it.
    """
    with ExperimentController(*std_args, participant='foo', session='01',
                              output_dir=None, version='dev') as ec:
        print((ec.window.width, ec.window.height))
        # Solid red background: every pixel (alpha channel dropped) is red.
        ec.set_background_color('red')
        ss = ec.screenshot()[:, :, :3]
        red_mask = (ss == [255, 0, 0]).all(axis=-1)
        assert (red_mask.all())
        # Solid white background.
        ec.set_background_color('white')
        ss = ec.screenshot()[:, :, :3]
        white_mask = (ss == [255] * 3).all(axis=-1)
        assert (white_mask.all())
        ec.flip()
        # Gray background with a black rectangle drawn on top: the image
        # must contain gray pixels, black pixels, and nothing else.
        ec.set_background_color('0.5')
        visual.Rectangle(ec, [0, 0, 1, 1], fill_color='black').draw()
        ss = ec.screenshot()[:, :, :3]
        # 0.5 gray may quantize to 127 or 128 (presumably backend-dependent
        # rounding — confirm), so both values are accepted.
        gray_mask = ((ss == [127] * 3).all(axis=-1) |
                     (ss == [128] * 3).all(axis=-1))
        assert (gray_mask.any())
        black_mask = (ss == [0] * 3).all(axis=-1)
        assert (black_mask.any())
        assert (np.logical_or(gray_mask, black_mask).all())
def test_tdt_delay(hide_window):
    """Test the tdt_delay parameter.

    Valid integer delays must be stored in the controller's used params;
    invalid values (non-numeric string, infinity, array, negative) must
    raise the corresponding exception at construction time.
    """
    # Valid: zero delay is stored verbatim.
    with ExperimentController(*std_args,
                              audio_controller=dict(TYPE='tdt', TDT_DELAY=0),
                              **std_kwargs) as ec:
        assert_equal(ec._ac._used_params['TDT_DELAY'], 0)
    # Valid: positive integer delay.
    with ExperimentController(*std_args,
                              audio_controller=dict(TYPE='tdt', TDT_DELAY=1),
                              **std_kwargs) as ec:
        assert_equal(ec._ac._used_params['TDT_DELAY'], 1)
    # Non-numeric string -> ValueError.
    pytest.raises(ValueError, ExperimentController, *std_args,
                  audio_controller=dict(TYPE='tdt', TDT_DELAY='foo'),
                  **std_kwargs)
    # Infinity cannot be converted to an integer delay -> OverflowError.
    pytest.raises(OverflowError, ExperimentController, *std_args,
                  audio_controller=dict(TYPE='tdt', TDT_DELAY=np.inf),
                  **std_kwargs)
    # Arrays are not scalar delays -> TypeError.
    pytest.raises(TypeError, ExperimentController, *std_args,
                  audio_controller=dict(TYPE='tdt', TDT_DELAY=np.ones(2)),
                  **std_kwargs)
    # Negative delays are rejected -> ValueError.
    pytest.raises(ValueError, ExperimentController, *std_args,
                  audio_controller=dict(TYPE='tdt', TDT_DELAY=-1),
                  **std_kwargs)
def test_sound_card_triggering(hide_window):
    """Test using the sound card as a trigger controller.

    Exercises SOUND_CARD_TRIGGER_CHANNELS validation and the
    SOUND_CARD_DRIFT_TRIGGER option, including the warning paths for
    drift triggers that overlap onset triggers, fall outside the stimulus
    window, or overlap each other.
    """
    audio_controller = dict(TYPE='sound_card', SOUND_CARD_TRIGGER_CHANNELS='0')
    # Zero trigger channels is invalid when the sound card stamps triggers.
    with pytest.raises(ValueError, match='SOUND_CARD_TRIGGER_CHANNELS is zer'):
        ExperimentController(*std_args,
                             audio_controller=audio_controller,
                             trigger_controller='sound_card',
                             suppress_resamp=True,
                             **std_kwargs)
    audio_controller.update(SOUND_CARD_TRIGGER_CHANNELS='1')
    # Use 1 trigger ch and 1 output ch because this should work on all systems
    with ExperimentController(*std_args,
                              audio_controller=audio_controller,
                              trigger_controller='sound_card',
                              n_channels=1,
                              suppress_resamp=True,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        ec.load_buffer([1e-2])
        ec.start_stimulus()
        ec.stop()
    # Test the drift triggers
    # A drift trigger at t=0.001 collides with the onset trigger -> warning.
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=0.001)
    with ExperimentController(*std_args,
                              audio_controller=audio_controller,
                              trigger_controller='sound_card',
                              n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        with pytest.warns(UserWarning, match='Drift triggers overlap with '
                          'onset triggers.'):
            ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
    # t=1.1 s lies beyond the 1 s stimulus -> "outside stimulus window"
    # warning; negative times and 'end' are valid relative specifications.
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[1.1, 0.3, -0.3,
                                                      'end'])
    with ExperimentController(*std_args,
                              audio_controller=audio_controller,
                              trigger_controller='sound_card',
                              n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        with pytest.warns(UserWarning, match='Drift trigger at 1.1 seconds '
                          'occurs outside stimulus window, not stamping '
                          'trigger.'):
            ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
    # Two drift triggers 1 ms apart overlap each other -> warning.
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[0.5, 0.501])
    with ExperimentController(*std_args,
                              audio_controller=audio_controller,
                              trigger_controller='sound_card',
                              n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        with pytest.warns(UserWarning, match='Some 2-triggers overlap.*'):
            ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
    # An empty drift-trigger list is valid and produces no warnings.
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[])
    with ExperimentController(*std_args,
                              audio_controller=audio_controller,
                              trigger_controller='sound_card',
                              n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
    # Well-separated drift triggers (including a negative/relative one)
    # load cleanly.
    audio_controller.update(SOUND_CARD_DRIFT_TRIGGER=[0.2, 0.5, -0.3])
    with ExperimentController(*std_args,
                              audio_controller=audio_controller,
                              trigger_controller='sound_card',
                              n_channels=1,
                              **std_kwargs) as ec:
        ec.identify_trial(ttl_id=[1, 0], ec_id='')
        ec.load_buffer(np.zeros(ec.stim_fs))
        ec.start_stimulus()
        ec.stop()
class _FakeJoystick(object):
device = 'FakeJoystick'
on_joybutton_press = lambda self, joystick, button: None # noqa
open = lambda self, window, exclusive: None # noqa
x = 0.125
def test_joystick(hide_window, monkeypatch):
    """Test joystick support."""
    import pyglet
    fake = _FakeJoystick()
    # Make pyglet report our fake device so no real hardware is needed.
    monkeypatch.setattr(pyglet.input, 'get_joysticks', lambda: [fake])
    with ExperimentController(*std_args, joystick=True, **std_kwargs) as ec:
        ec.listen_joystick_button_presses()
        # Invoke the press handler (installed on the fake by the EC) directly.
        fake.on_joybutton_press(fake, 1)
        presses = ec.get_joystick_button_presses()
        assert len(presses) == 1
        # Button numbers are reported as strings.
        assert presses[0][0] == '1'
        # Axis values come straight from the device attributes.
        assert ec.get_joystick_value('x') == 0.125
def test_sound_card_params():
    """Every sound-card parameter (except TYPE) must be a known config key."""
    checkable = (k for k in _SOUND_CARD_KEYS if k != 'TYPE')
    for k in checkable:
        assert k in known_config_types, k
| StarcoderdataPython |
47980 | <gh_stars>10-100
from setuptools import setup, find_packages
from dnevnik import __version__
# The PyPI long description comes straight from the repository README.
with open("README.md", "r", encoding="utf-8") as fh:
    long_description = fh.read()
# Package metadata; version is sourced from dnevnik.__version__ so it is
# defined in exactly one place.
setup(
    name='dnevnik-mos-ru',
    version=__version__,
    description="This package is kind of wrapper for dnevnik.mos.ru API service",
    long_description_content_type="text/markdown",
    long_description=long_description,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    url='https://github.com/IvanProgramming/dnevnik_mos_ru',
    project_urls={
        "Bug Tracker": "https://github.com/IvanProgramming/dnevnik_mos_ru/issues",
    },
    author='Ivan',
    packages=find_packages(),
    # Runtime dependencies; NOTE(review): 'Inject' capitalization — confirm
    # it matches the intended PyPI distribution name.
    install_requires=[
        'requests',
        'selenium',
        'bs4',
        'lxml',
        'pydantic',
        'Inject'
    ]
)
| StarcoderdataPython |
148623 | import sublime
import sublime_plugin
class NodejsAutocompleteCommand(sublime_plugin.TextCommand):
    """Sublime Text command that inserts a ``.doSth()`` snippet at the cursor."""

    def run(self, edit):
        selection = self.view.sel()
        if not selection:
            # No cursor / selection regions -> nothing to insert into.
            return
        insert_at = selection[0].begin()
        self.view.insert(edit, insert_at, ".doSth()")
| StarcoderdataPython |
139616 | <reponame>Ayushk4/tsat
import time
import logging
import os
class Speedometer(object):
    """Per-batch training callback that logs throughput and an ETA.

    Every ``frequent`` batches it reports samples/sec, the average time
    spent in each pipeline stage over the window (data load/transfer,
    forward, backward, optimizer, metric), an estimated time remaining,
    and (optionally) the current training metrics.

    Fixes a broken message in the metric-less report branch, where the
    'samples/s' unit was appended after the ETA instead of after the
    speed value (inconsistent with the with-metric branch).

    Parameters
    ----------
    batch_size : int
        Samples per batch; converts batches/sec into samples/sec.
    frequent : int
        Reporting period, in batches.
    batches_per_epoch : int or None
        Batches per epoch; required whenever a report is emitted (the ETA
        computation uses it), so pass it for real training runs.
    epochs : int or None
        Total number of epochs; also required for the ETA computation.
    """

    def __init__(self, batch_size, frequent=50,
                 batches_per_epoch=None, epochs=None):
        self.batch_size = batch_size
        self.frequent = frequent
        self.batches_per_epoch = batches_per_epoch
        self.epochs = epochs
        # Locally-tracked epoch index; incremented whenever a new epoch is
        # detected (first call, or the batch counter wrapping around).
        self.epoch = -1
        self.init = False
        self.tic = 0
        self.last_count = 0
        self._reset_window()

    def _reset_window(self):
        """Zero the per-window stage-time accumulators."""
        self.data_in_time = 0.0
        self.data_transfer_time = 0.0
        self.forward_time = 0.0
        self.backward_time = 0.0
        self.optimizer_time = 0.0
        self.metric_time = 0.0

    def _append_metrics(self, s, eval_metric):
        """Append 'Train-name=value,' pairs from *eval_metric* to *s*."""
        s += "\tTrain-"
        name, value = eval_metric.get()
        for n, v in zip(name, value):
            s += "%s=%f,\t" % (n, v)
        return s

    def _emit(self, s, rank):
        """Prefix the rank (if any), then log and print the line."""
        if rank is not None:
            s = 'Rank[%3d]' % rank + s
        logging.info(s)
        print(s)

    def __call__(self, param):
        """Callback to show speed; *param* is a batch-end status object."""
        count = param.nbatch
        if self.last_count > count:
            # Batch counter went backwards: a new epoch started.
            self.init = False
        self.last_count = count
        # Accumulate stage timings for the current reporting window.
        self.data_in_time += param.data_in_time
        self.data_transfer_time += param.data_transfer_time
        self.forward_time += param.forward_time
        self.backward_time += param.backward_time
        self.optimizer_time += param.optimizer_time
        self.metric_time += param.metric_time
        if not self.init:
            # First batch of an epoch: emit a placeholder line (no speed or
            # ETA can be computed yet) and start the window timer.
            self.init = True
            self.epoch += 1
            s = "Epoch[%d] Batch [%d]\tSpeed: - samples/sec ETA: - d - h - m" \
                % (param.epoch, 0)
            if param.eval_metric is not None:
                s = self._append_metrics(s, param.eval_metric)
            self._emit(s, param.rank)
            self.tic = time.time()
            return
        if count % self.frequent != 0:
            return
        # One full window elapsed: report averages and reset the window.
        speed = self.frequent * self.batch_size / (time.time() - self.tic)
        data_in_time = self.data_in_time / self.frequent
        data_transfer_time = self.data_transfer_time / self.frequent
        forward_time = self.forward_time / self.frequent
        backward_time = self.backward_time / self.frequent
        optimizer_time = self.optimizer_time / self.frequent
        metric_time = self.metric_time / self.frequent
        # Remaining samples (rest of this epoch plus all later epochs)
        # divided by current throughput, folded into days/hours/minutes.
        eta = ((self.epochs - self.epoch - 1) * self.batches_per_epoch
               + self.batches_per_epoch - param.nbatch) * self.batch_size / speed
        eta = int(eta / 60.0)
        eta_m = eta % 60
        eta_h = int((eta - eta_m) / 60) % 24
        eta_d = int((eta - eta_m - eta_h * 60) / (24 * 60))
        s = "Epoch[%d] Batch [%d]\t" % (param.epoch, count)
        s += "Speed: %.2f samples/s ETA: %d d %2d h %2d m\t" \
             "Data: %.3f Tran: %.3f F: %.3f B: %.3f O: %.3f M: %.3f" \
             % (speed, eta_d, eta_h, eta_m, data_in_time, data_transfer_time,
                forward_time, backward_time, optimizer_time, metric_time)
        if param.eval_metric is not None:
            s = self._append_metrics(s, param.eval_metric)
        self._emit(s, param.rank)
        self.tic = time.time()
        self._reset_window()
| StarcoderdataPython |
189140 | <reponame>capalmer1013/musical-compass
import os
import matplotlib.pyplot as plt
from flask import Flask, session, request, redirect, send_file
from flask_session import Session
import pyoauth2
from . import helpers
# Flask app configured from the package config module; server-side sessions
# (flask_session) keep the OAuth client object out of the client cookie.
app = Flask(__name__)
app.config.from_object("musical_compass.config")
Session(app)
api_url = 'https://api.spotify.com/v1/'
# 'user-top-read' scope is required to read the user's top listening data.
scope = 'user-top-read'
# OAuth2 client; credentials come from environment variables, never code.
spotify_client = pyoauth2.Client(
    os.environ['SPOTIFY_CLIENT_ID'],
    os.environ['SPOTIFY_CLIENT_SECRET'],
    site=api_url,
    authorize_url='https://accounts.spotify.com/authorize',
    token_url='https://accounts.spotify.com/api/token'
)
@app.route('/')
def index():
    """Landing page doubling as the Spotify OAuth callback.

    Three states:
    * Spotify redirected back with a ``code`` query param -> exchange it for
      a token, store the authorized client in the session, go to results.
    * No authorized client in the session -> show the authorize link.
    * Already authorized -> greet the user and link to the results page.
    """
    if request.args.get("code"):
        # OAuth callback: trade the authorization code for an access token.
        session['authorized_client'] = spotify_client.auth_code.get_token(
            request.args.get("code"),
            redirect_uri=os.environ['SPOTIFY_REDIRECT_URI']
        )
        return redirect('/results')
    if not session.get('authorized_client'):
        auth_url = spotify_client.auth_code.authorize_url(redirect_uri=os.environ['SPOTIFY_REDIRECT_URI'], scope=scope)
        return f'<h2><a href="{auth_url}">Get Musical Compass Results</a></h2>'
    profile = session['authorized_client'].get('me/').parsed
    return '<h2>Hi, {} ' \
           '<div><a href="/results">Get Musical Compass Results</a></div>'.format(profile['display_name'])
@app.route('/sign_out')
def sign_out():
    """Drop the server-side session (including the OAuth client) and go home."""
    session.clear()
    return redirect('/')
@app.route('/results')
def results():
    """Render the user's "musical compass" as a PNG image.

    Computes two audio-feature coordinates from the user's listening data
    (acousticness on x, valence — relabelled 'Happiness' — on y), plots the
    point on a political-compass-style quadrant chart, saves the figure per
    user, and serves the file.
    """
    if not session.get('authorized_client'):
        # Not authorized yet -> bounce to the landing page / OAuth flow.
        return redirect('/')
    else:
        x_axis_key = 'acousticness'
        y_axis_key = 'valence'
        try:
            (x_axis_value, y_axis_value) = helpers.get_compass_values(x_axis_key, y_axis_key)
        except helpers.NoListeningDataException as e:
            # No top tracks available -> show the helper's message as-is.
            return str(e)
        # Plot it
        fig, ax = plt.subplots(figsize=(15, 12))
        ax.set_xlim(-1, 1)
        ax.set_ylim(-1, 1)
        # Dashed axes split the chart into the four quadrants.
        ax.axvline(0, color = 'black', linestyle='dashed', lw=2)
        ax.axhline(0, color = 'black', linestyle='dashed', lw=2)
        x_axis_title = x_axis_key.title()
        y_axis_title = 'Happiness'
        plot_title = 'Musical Compass\n{}: {}, {}: {}'.format(
            x_axis_title,
            round(x_axis_value, 2),
            y_axis_title,
            round(y_axis_value, 2)
        )
        ax.set_title(plot_title)
        ax.set_xlabel(x_axis_title)
        ax.set_ylabel(y_axis_title)
        # Quadrant fills use the classic political-compass palette.
        ax.fill_between([-1, 0],0,-1,alpha=1, color='#c8e4bc') # LibLeft
        ax.fill_between([0, 1], -1, 0, alpha=1, color='#f5f5a7') # LibRight
        ax.fill_between([-1, 0], 0, 1, alpha=1, color='#f9baba') # AuthLeft
        ax.fill_between([0, 1], 0, 1, alpha=1, color='#92d9f8') # AuthRight
        ax.fill_between([-.3, .3], -.3, .3, alpha=1, color='#808080') # Grill
        # The user's point, drawn last so it sits on top of the fills.
        plt.plot(x_axis_value, y_axis_value, 'ro')
        # plt.show()
        profile = session['authorized_client'].get('me/').parsed
        # One file per Spotify user id; repeat visits overwrite it.
        filename = 'generated_compasses/musical_compass-{}.png'.format(profile['id'])
        plt.savefig('musical_compass/{}'.format(filename))
        return send_file(filename, mimetype='image/png')
| StarcoderdataPython |
3350696 | #!/usr/bin/env python
import pickle
import os
import argparse
import numpy as np
import pandas as pd
# load packages required for analysis
import statsmodels.api as sm
import statsmodels as sm
import matplotlib
import matplotlib.pyplot as plt
import seaborn as sns
from scipy import stats
from trasig.utils import str2bool
if __name__ == '__main__':
    # Post-processing for TraSig: convert permutation counts into
    # (FDR-adjusted) p-values, summarize significant ligand-receptor pairs
    # per cluster pair, and save a heatmap plus CSV outputs.
    # parse command-line arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-i', '--input', required=True, default='../input/',
                        help="string, folder to find TraSig's inputs")
    parser.add_argument('-o', '--output', required=True, default='../output/',
                        help="string, folder to find TraSig's outputs")
    parser.add_argument('-d', '--project', required=True, help="string, project name")
    parser.add_argument('-g', '--preprocess', required=True, help="string, preprocessing steps applied to the "
                                                                  "data / project, default None", default="None")
    parser.add_argument('-b', '--modelName', required=True, help="string, name of the trajectory model")
    parser.add_argument('-t', '--listType', required=False,
                        default='ligand_receptor', help="string, optional, "
                                                        "interaction list type, default ligand_receptor")
    parser.add_argument('-e', '--otherIdentifier', required=False,
                        default="None", help="string, optional, other identifier for the output, default None")
    parser.add_argument('-l', '--nLap', required=False, default=20, help="integer, optional, "
                                                                         "sliding window size, default 20")
    parser.add_argument('-m', '--metric', required=False, default='dot', help="string, optional, "
                                                                              "scoring metric, default dot")
    parser.add_argument('-z', '--nan2zero', required=False, type=str2bool,
                        default=True, help="boolean, optional, if treat nan as zero, default True")
    parser.add_argument('-n', '--numPerms', required=False,
                        default=10000, help="integer, optional, number of permutations, default 10000")
    parser.add_argument('-s', '--startingTreatment', required=False,
                        default="smallerWindow", help="string, optional, way to treat values at the beginning of an "
                                                      "edge with sliding window size smaller than nLap, "
                                                      "None/parent/discard/smallerWindow, default smallerWindow, "
                                                      "need to provide an extra input 'path_info.pickle' "
                                                      "for 'parent' option")
    args = parser.parse_args()
    print(args)
    # set parameters for data
    input_path = args.input
    output_path = args.output
    project = args.project
    preprocess = args.preprocess
    model_name = args.modelName
    list_type = args.listType
    others = args.otherIdentifier
    # "_<preprocess>" / "" fragments are spliced into every file name below.
    if preprocess != "None":
        _preprocess = f"_{preprocess}"
    else:
        _preprocess = ""
    if others == "None":
        others = ""
    # set parameters for calculating metrics
    n_lap = int(args.nLap)
    metrics = [args.metric]
    nan2zero = args.nan2zero
    num_perms = int(args.numPerms)
    startingTreatment = args.startingTreatment
    if startingTreatment != "None":
        _startingTreatment = f"_{startingTreatment}"
    else:
        _startingTreatment = ""
    ### load inputs
    # File-name suffixes must match exactly what the scoring run produced.
    suffix = f"{project}_{list_type}{_preprocess}_{model_name}"
    suffix = f"{suffix}{_startingTreatment}_nlap_{n_lap}{others}"
    child_suffix = f"{suffix}_{metrics[0]}_{int(np.log10(num_perms))}"
    # get interaction file (list of (ligand, receptor/target))
    filename = f"{list_type}_{project}{_preprocess}.pickle"
    with open(os.path.join(input_path, filename), 'rb') as handle:
        interaction_list = pickle.load(handle)
    # load expression data
    filename = f"{project}{_preprocess}_lr.txt"
    print("Load: ", filename)
    data_file = os.path.join(input_path, filename)
    df = pd.read_csv(data_file, index_col=0)
    cell_exps = df.values
    gene_names = list(df.columns.values)  # assume unique
    # (optional) load corresponding between sampling time and path
    filename = f"sampling_time_per_path_{project}{_preprocess}_{model_name}.pickle"
    with open(os.path.join(input_path, filename), 'rb') as handle:
        time2path = pickle.load(handle)
    # Invert time->paths into a path->time lookup.
    path2time = dict()
    for k, ps in time2path.items():
        for p in ps:
            path2time[p] = k
    # load path & time assignment
    # original assignment
    hid_var_file = f"{project}{_preprocess}_{model_name}_it2_hid_var.pickle"
    with open(os.path.join(input_path, hid_var_file), 'rb') as handle:
        hid_var = pickle.load(handle, encoding="latin1")
    unique_paths = np.unique(hid_var["cell_path"])
    all_times = [round(i, 2) for i in np.arange(0, 1.01, 0.01)]  # all possible labels for cell time
    cell_paths_o = hid_var["cell_path"]
    cell_times_o = hid_var["cell_time"]
    ### load outputs
    # load the scores on the original data
    _n = 0
    _columns = dict.fromkeys(metrics)
    for m in metrics:
        _columns[m] = []
    _columns.update({'pair': [], 'gene_pair_id': []})
    # load results
    filename = f"{suffix}_metrics_{_n}.pickle"
    data_file = os.path.join(output_path, filename)
    with open(data_file, 'rb') as handle:
        results = pickle.load(handle)
    # Flatten {cluster-pair -> per-interaction scores} into long format.
    for pair, mets in results.items():
        for m in metrics:
            _columns[m] += list(mets[m])
            _columns['pair'] += list(np.repeat(pair, len(mets[m])))
            _columns['gene_pair_id'] += list(range(len(mets[m])))
    df = pd.DataFrame(_columns)
    # NOTE(review): relies on `pair`/`m` leaking from the loop above; every
    # cluster pair is assumed to have the same number of interactions.
    num_pairs = len(results[pair][m])
    # load permutation results
    filename = f"{suffix}_permutation_results.pickle"
    data_file = os.path.join(output_path, filename)
    with open(data_file, 'rb') as handle:
        pair2counts = pickle.load(handle)
    # turn to p-values
    # (count + 1) / (num_perms + 1): add-one estimator so p is never zero.
    for pair, _ in pair2counts.items():
        for m in metrics:
            pair2counts[pair][m] = (pair2counts[pair][m] + 1) / (num_perms + 1)
    # add to the dataframe
    _columns = dict.fromkeys(metrics)
    for m in metrics:
        _columns[m] = []
    for pair, counts in pair2counts.items():
        for m in metrics:
            _columns[m] += list(counts[m])
    for m in metrics:
        df[f"{m}_p"] = _columns[m]
    # add ligand target info
    df['ligand'] = [interaction_list[int(i)][0] for i in df['gene_pair_id']]
    df['target'] = [interaction_list[int(i)][1] for i in df['gene_pair_id']]
    ligand_list = np.unique(df['ligand'])
    # add more info about cell clusters
    # 'pair' is encoded as "<sender>_<receiver>" cluster ids.
    df['sender'] = [i.split('_')[0] for i in df['pair']]
    df['receiver'] = [i.split('_')[1] for i in df['pair']]
    df['sender'] = df['sender'].astype('int')
    df['receiver'] = df['receiver'].astype('int')
    df['time-sender'] = [path2time[i] for i in df['sender']]
    df['time-receiver'] = [path2time[i] for i in df['receiver']]
    ## label clusters using the true labels of the majority of cells (for plotting)
    # build path2label
    unique_days = np.unique(hid_var['cell_labels'])
    cell_paths = np.unique(hid_var["cell_path"])
    _dict = dict.fromkeys(range(len(cell_paths)))
    for i, cur_path in enumerate(cell_paths):
        # print("------current path", cur_path)
        # get data corresponding to a path
        condition = hid_var["cell_path"] == cur_path
        cur_labels = hid_var['cell_labels'][condition]
        # Labels may be bytes (pickled from Python 2); decode when needed.
        try:
            cur_labels = [i.decode('UTF-8') for i in cur_labels]
        except AttributeError:
            pass
        # get the sampling time for the majority cells
        mode, count = stats.mode(cur_labels)
        major_percent = round(float(count[0]) / len(cur_labels), 2)
        # print(mode[0], major_percent)
        cur_label = mode[0]
        # add more labels if cells of the major cell type make less than 90% of the whole population
        if major_percent < 0.9:
            cur_label += '(' + str(major_percent) + ')'
            labels, counts = np.unique(cur_labels, return_counts=True)
            sorted_counts, idxs = np.unique(counts, return_index=True)
            # print(zip(sorted_counts, labels[idxs]))
            count = 0
            while major_percent < 0.9:
                # add more labels until major_percent >= 0.9
                add_counts = sorted_counts[::-1][1 + count]
                _add_percent = round(add_counts / len(cur_labels), 2)
                major_percent += _add_percent
                # print(major_percent)
                cur_label += '\n '
                cur_label += labels[idxs][::-1][1 + count]
                cur_label += '(' + str(round(_add_percent, 2)) + ')'
                count += 1
        _dict[cur_path] = cur_label
    path2label = _dict
    ## Adjust p-values for multiple comparisons
    # Benjamini-Hochberg FDR applied within each cluster pair separately.
    _p = df['dot_p'].values.copy()
    for pair in results.keys():
        condition = np.where(df['pair'] == pair)[0]
        adjusted = sm.stats.multitest.fdrcorrection(df['dot_p'].values[condition])
        _p[condition] = adjusted[1]
    df['dot_p_adjusted'] = _p
    ### Infer interactions among cell clusters (edges)
    # reset output path to save analysis results
    output_path = f"{output_path}/analysis"
    if not os.path.exists(output_path):
        os.makedirs(output_path)
    print(f"Analysis outputs to be saved at {output_path}")
    # One row per cluster pair, annotated with sender/receiver and times.
    df_pool = pd.DataFrame(list(set(df['pair'])))
    df_pool.columns = ['pair']
    df_pool['sender'] = [i.split('_')[0] for i in df_pool['pair']]
    df_pool['receiver'] = [i.split('_')[1] for i in df_pool['pair']]
    df_pool['sender'] = df_pool['sender'].astype('int')
    df_pool['receiver'] = df_pool['receiver'].astype('int')
    df_pool['time-sender'] = [path2time[i] for i in df_pool['sender']]
    df_pool['time-receiver'] = [path2time[i] for i in df_pool['receiver']]
    # df_pool = df_pool[df_pool['time-sender'] == df_pool['time-receiver']] # if only keep pairs sampled at the same time
    ## Calculate summary score over all ligand-receptor pairs
    # Summary score = number of interactions significant at the cutoff.
    cutoff = 0.05
    name_p = 'dot_p_adjusted'
    _counts = []
    for p in df_pool['pair']:
        condition = df['pair'] == p
        _counts.append((df[condition][name_p] < cutoff).sum())
    df_pool['counts'] = _counts
    # subset only contains significant pairs
    condition = df[name_p] < cutoff
    df_sig = df[condition].copy()
    df_sig.reset_index(inplace=True)
    # order clusters (edges / paths) by sampling time
    path_order_time = []
    for k, v in time2path.items():
        path_order_time = path_order_time + v
    df_pool['sender'] = pd.Categorical(df_pool['sender'], path_order_time)
    df_pool['receiver'] = pd.Categorical(df_pool['receiver'], path_order_time)
    df_pool.sort_values(['sender', 'receiver'], inplace=True)
    _vmin = min(df_pool['counts'])
    _vmax = max(df_pool['counts'])
    method = "TraSig"
    metric = 'counts'
    _center_value = _vmin
    # plot only pairs sampled at the same time
    df_plot = df_pool[df_pool['time-sender'] == df_pool['time-receiver']].pivot(index='sender', columns='receiver', values=metric)
    # sort by column names
    df_plot = df_plot.sort_index(axis=1)
    # sns.set_style("white")
    plt.figure(figsize=(5, 5))
    sns.set_context("paper", font_scale=2)
    ax = sns.heatmap(df_plot.values, xticklabels=True, yticklabels=True,
                     vmin=_vmin, vmax=_vmax, center=_center_value, cmap="RdBu_r")
    plt.xticks(rotation=90)
    plt.ylabel("Sender")
    plt.xlabel('Receiver')
    ax.set_xticklabels(df_plot.index.values)
    ax.set_yticklabels(df_plot.index.values)
    # Title names the upstream trajectory method parsed from model_name.
    if 'tf' in model_name:
        _traj = 'CSHMM'
        plt.title(f"{method} using \n output from {_traj}")
    else:
        _traj = model_name.split('_')[1].capitalize()
        plt.title(f"{method} using \n output from {_traj}")
    filename = f"{child_suffix}_summary_scores.png"
    plt.savefig(os.path.join(output_path, filename), bbox_inches="tight", dpi=300, format="png")
    filename = f"{child_suffix}_summary_scores.eps"
    plt.savefig(os.path.join(output_path, filename), bbox_inches="tight", dpi=300, format="eps")
    # save summary score
    df_pool['sender-label'] = df_pool['sender'].replace(path2label)
    df_pool['receiver-label'] = df_pool['receiver'].replace(path2label)
    cols_order = ['sender', 'sender-label', 'receiver', 'receiver-label', 'counts']
    df_out = df_pool[cols_order].copy()
    filename = f"{child_suffix}_summary_score.csv"
    df_out.to_csv(os.path.join(output_path, filename), index=False)
    ## Save significant ligand-receptor pairs
    df_sig['sender-label'] = df_sig['sender'].replace(path2label)
    df_sig['receiver-label'] = df_sig['receiver'].replace(path2label)
    # sort ligand-receptors in each cluster pair by their scores
    _dfs = []
    pairs_ts = np.unique(df_sig['pair'])
    for pair in pairs_ts:
        condition = df_sig['pair'] == pair
        _dfs.append(df_sig[condition].sort_values('dot', ascending=False))
    df_sorted = pd.concat(_dfs)
    cols_order = ['pair', 'time-sender', 'sender', 'sender-label', 'time-receiver', 'receiver', 'receiver-label',
                  'ligand', 'target', 'dot', 'dot_p', 'dot_p_adjusted']
    df_sorted = df_sorted[cols_order]
    df_sorted.columns = ['interaction pair', 'sender sampling time', 'sender', 'sender-label', 'receiver sampling time',
                         'receiver', 'receiver-label', 'ligand', 'target', 'score', 'score p-value',
                         'score p-value adjusted']
    filename = f"{child_suffix}_significant_pairs.csv"
df_sorted.to_csv(os.path.join(output_path, filename), index=False) | StarcoderdataPython |
3353883 | # =============================================================================
# Fog Clustering Unit Tests Utilities
# =============================================================================
class Clusters(object):
    """Order-insensitive wrapper around a collection of clusters.

    Each cluster is canonicalised to a tuple sorted by the string form of
    its members, and the clusters themselves are stored in a set, so two
    Clusters compare equal iff they contain the same groupings regardless
    of ordering (of clusters or of members within a cluster).
    """

    def __init__(self, clusters):
        canonical = (tuple(sorted(group, key=str)) for group in clusters)
        self.groups = set(canonical)

    def __eq__(self, other):
        return self.groups == other.groups

    def __iter__(self):
        return iter(self.groups)

    def __len__(self):
        return len(self.groups)

    def __repr__(self):
        return 'Clusters(%s)' % repr(self.groups)
| StarcoderdataPython |
1747014 | <reponame>UriyaBA/Minesweeper
import json
import pygame
from drawable import Drawable
class Tile(Drawable):
    """One square of the minefield.

    Knows its grid position, surrounding-mine count, reveal/flag state, and
    how to draw itself onto the pygame screen.  NOTE: the class body performs
    file I/O (json + sprite loading) at import time.
    """
    # Background colors (RGB).  Unrevealed and revealed tiles share the same
    # gray; a revealed mine gets a red tint.
    COLOR_UNREVEALED = (193, 192, 193)
    COLOR_REVEALED = (193, 192, 193)
    COLOR_REVEALED_MINE = (255, 192, 193)
    # Sentinel danger level marking a mine (a tile can border at most 8).
    MINE_DANGER = 9
    # Load indicative 'danger level' colors from external json file
    with open('json/colors_danger.json', 'r') as f:
        data = json.load(f)
    COLORS_DANGER = data['colors']
    SPRITE_FLAG = pygame.image.load("sprites/flag.png")
    SPRITE_MINE = pygame.image.load("sprites/mine.png")
    def __init__(self, surrounding_mines_num, row, col, minefield, screen):
        # Danger count; MINE_DANGER (9) means this tile IS a mine.
        self.surrounding_mines_num = surrounding_mines_num
        self.row = row
        self.col = col
        self.minefield = minefield
        self.screen = screen
        self.valid_neighbors = []
        self.flagged = False
        self.revealed = False
        # Pixel geometry derived from grid position and the board tile size.
        self.w = minefield.TILE_SIZE
        self.h = minefield.TILE_SIZE
        self.x = self.w * self.col
        self.y = self.h * self.row
    def draw_sprite(self, spr):
        # Blit a sprite at the tile's top-left pixel corner.
        self.screen.blit(spr, (self.x, self.y))
    def draw_danger(self):
        # Mines carry no numeric label.
        if(self.surrounding_mines_num == Tile.MINE_DANGER):
            return
        pygame.font.init()
        myfont = pygame.font.SysFont('Futura', 57)
        textsurface = myfont.render(
            str(self.surrounding_mines_num), False, Tile.COLORS_DANGER[self.surrounding_mines_num])
        # Fixed offsets roughly center the glyph — presumably tuned for the
        # default TILE_SIZE; confirm if the tile size changes.
        self.screen.blit(textsurface, (self.x + 15, self.y + 7))
    def draw(self):
        # Assign background color
        # Branch order matters: unrevealed first, then revealed variants.
        if(not self.revealed):
            color = Tile.COLOR_UNREVEALED
        elif(self.surrounding_mines_num == 0):
            color = Tile.COLORS_DANGER[0]
        elif(self.surrounding_mines_num != Tile.MINE_DANGER):
            color = Tile.COLOR_REVEALED
        elif(self.flagged):
            # Revealed, flagged mine: keep the neutral background.
            color = Tile.COLOR_UNREVEALED
        else:
            # Revealed, unflagged mine: red tint.
            color = Tile.COLOR_REVEALED_MINE
        pygame.draw.rect(self.screen, color, pygame.Rect(
            self.x, self.y, self.w, self.h))
        # Overlay the danger number, then the flag or mine sprite.
        if(self.revealed and self.surrounding_mines_num != 0):
            self.draw_danger()
        if(self.flagged):
            self.draw_sprite(Tile.SPRITE_FLAG)
        elif(self.surrounding_mines_num == Tile.MINE_DANGER and self.revealed):
            self.draw_sprite(Tile.SPRITE_MINE)
| StarcoderdataPython |
87803 | from importlib import import_module
from os import environ
import typing as t
from . import global_settings
from ..errors.server import SettingsFileNotFoundError, ImproperlyConfigured
from ..errors.misc import DataTypeMismatchError
__all__ = ("get_settings_module", "settings")
# Env var naming the user's settings module as a dotted import path.
ENVIRONMENT_VARIABLE = "NAVYCUT_SETTINGS_MODULE"
# Sentinel meaning "settings not yet wrapped/configured".
empty = object()
class LazySettings:
    """Proxy over the user's settings module.

    Resolves the module named by NAVYCUT_SETTINGS_MODULE at construction
    time and caches attribute lookups on the instance so repeated access
    bypasses ``__getattr__``.
    """
    def __init__(self) -> None:
        self.settings_modules = environ.get(ENVIRONMENT_VARIABLE, None)
        if self.settings_modules is None:
            raise SettingsFileNotFoundError
        self._wrapped = Settings(self.settings_modules)
    def __getattr__(self, name):
        # Called only on a cache miss; delegate to the wrapped Settings.
        val = getattr(self._wrapped, name)
        if name == 'SECRET_KEY' and not val:
            raise ImproperlyConfigured("The SECRET_KEY setting must not be empty.")
        # Cache on the instance so future lookups skip __getattr__.
        self.__dict__[name] = val
        return val
    @property
    def configured(self):
        """Return True if the settings have already been configured."""
        # NOTE(review): _wrapped is assigned unconditionally in __init__, so
        # this is always True for a constructed instance — confirm intended.
        return self._wrapped is not empty
class Settings:
    """Concrete settings holder: framework defaults overlaid by the user module.

    Copies every upper-case name from ``global_settings``, then imports the
    module at dotted path *settings_module* and overlays its upper-case
    names, validating that list-typed settings really are lists/tuples.
    """
    def __init__(self, settings_module):
        # Start from the framework-wide defaults (upper-case names only).
        for setting in dir(global_settings):
            if setting.isupper():
                setattr(self, setting, getattr(global_settings, setting))
        self.SETTINGS_MODULE = settings_module
        try:
            mod = import_module(self.SETTINGS_MODULE)
        # NOTE(review): bare except hides the real import error (including
        # SyntaxError in the settings file) — consider `except Exception`.
        except:
            raise SettingsFileNotFoundError(self.SETTINGS_MODULE, None)
        # Settings that must be sequences, not scalars.
        tuple_settings = (
            'ALLOWED_HOSTS',
            "INSTALLED_APPS",
        )
        for setting in dir(mod):
            if setting.isupper():
                setting_value = getattr(mod, setting)
                if (setting in tuple_settings and
                        not isinstance(setting_value, (list, tuple))):
                    raise DataTypeMismatchError(setting_value, "settings file", "list or tuple")
                setattr(self, setting, setting_value)
        # Record where these settings came from.
        setattr(self, "SETTINGS_FILE_NAME", self.SETTINGS_MODULE)
1609809 | <reponame>autodidacticon/quickstart-amazon-eks
import logging
from crhelper import CfnResource
from time import sleep
import json
import boto3
from semantic_version import Version
from random import choice
# Trust policy for the execution role handed to registered resource types:
# assumable by CloudFormation's resource-provider service and by Lambda.
execution_trust_policy = {
    'Version': '2012-10-17',
    'Statement': [
        {
            'Effect': 'Allow',
            'Principal': {
                'Service': ['resources.cloudformation.amazonaws.com', 'lambda.amazonaws.com']
            },
            'Action': 'sts:AssumeRole'
        }
    ]
}
# Trust policy for the CloudWatch-logging role assumed by CloudFormation.
log_trust_policy = {
    'Version': '2012-10-17',
    'Statement': [
        {
            'Effect': 'Allow',
            'Principal': {
                'Service': ['cloudformation.amazonaws.com', 'resources.cloudformation.amazonaws.com']
            },
            'Action': 'sts:AssumeRole'
        }
    ]
}
# Permissions the logging role needs to ship registry logs and metrics.
log_policy = {
    'Version': '2012-10-17',
    'Statement': [
        {
            'Effect': 'Allow',
            'Action': ['logs:CreateLogGroup', 'logs:CreateLogStream', 'logs:DescribeLogGroups',
                       'logs:DescribeLogStreams', 'logs:PutLogEvents', 'cloudwatch:ListMetrics',
                       'cloudwatch:PutMetricData'],
            'Resource': '*'
        }
    ]
}
logger = logging.getLogger(__name__)
# crhelper wiring: the @helper.create/@helper.update/@helper.delete decorators
# below register the custom-resource lifecycle handlers.
helper = CfnResource(json_logging=True, log_level='DEBUG')
cfn = boto3.client('cloudformation')
ssm = boto3.client('ssm')
iam = boto3.client("iam")
sts = boto3.client("sts")
# Resolve account id and partition (e.g. "aws", "aws-cn") once at import time.
identity = sts.get_caller_identity()
account_id = identity['Account']
partition = identity['Arn'].split(':')[1]
def put_role(role_name, policy, trust_policy):
    """Idempotently create (or update) an IAM role plus a same-named managed
    policy, attach the policy to the role, and return the role ARN.

    Retries the whole sequence up to 5 times with random 1-9s jitter on any
    failure (IAM is eventually consistent).
    """
    retries = 5
    while True:
        try:
            try:
                response = iam.create_role(Path='/', RoleName=role_name, AssumeRolePolicyDocument=json.dumps(trust_policy))
                role_arn = response['Role']['Arn']
            except iam.exceptions.EntityAlreadyExistsException:
                # Role already exists: reconstruct its ARN instead of fetching it.
                role_arn = f"arn:{partition}:iam::{account_id}:role/{role_name}"
            try:
                response = iam.create_policy(Path='/', PolicyName=role_name, PolicyDocument=json.dumps(policy))
                arn = response['Policy']['Arn']
            except iam.exceptions.EntityAlreadyExistsException:
                # Policy exists: publish the supplied document as a new default
                # version.  IAM allows at most 5 versions per policy, so prune first.
                arn = f"arn:{partition}:iam::{account_id}:policy/{role_name}"
                versions = iam.list_policy_versions(PolicyArn=arn)['Versions']
                if len(versions) >= 5:
                    # Assumes list_policy_versions returns newest first, so the
                    # last non-default entry is the oldest — TODO confirm.
                    oldest = [v for v in versions if not v['IsDefaultVersion']][-1]['VersionId']
                    iam.delete_policy_version(PolicyArn=arn, VersionId=oldest)
                while True:
                    try:
                        iam.create_policy_version(PolicyArn=arn, PolicyDocument=json.dumps(policy), SetAsDefault=True)
                        break
                    except Exception as e:
                        # Version quota race: prune one more and retry.
                        if 'you must delete an existing version' in str(e):
                            versions = iam.list_policy_versions(PolicyArn=arn)['Versions']
                            oldest = [v for v in versions if not v['IsDefaultVersion']][-1]['VersionId']
                            iam.delete_policy_version(PolicyArn=arn, VersionId=oldest)
                            continue
                        raise
            iam.attach_role_policy(RoleName=role_name, PolicyArn=arn)
            return role_arn
        except Exception as e:
            print(e)
            retries -= 1
            if retries < 1:
                raise
            # Random jitter to avoid thundering-herd retries.
            sleep(choice(range(1,10)))
def get_current_version(type_name):
    """Return the last version recorded in SSM for *type_name*.

    Falls back to ``0.0.0`` when the parameter has never been written.
    """
    param_name = f"/cfn-registry/{type_name}/version"
    try:
        recorded = ssm.get_parameter(Name=param_name)['Parameter']['Value']
    except ssm.exceptions.ParameterNotFound:
        return Version('0.0.0')
    return Version(recorded)
def set_version(type_name, type_version):
    """Record *type_version* for *type_name* in SSM (overwriting any value)."""
    param_name = f"/cfn-registry/{type_name}/version"
    ssm.put_parameter(
        Name=param_name,
        Value=type_version,
        Type='String',
        Overwrite=True,
    )
def stabilize(token):
    """Poll a CloudFormation type registration until it leaves IN_PROGRESS.

    Returns the registered TypeVersionArn on success, or ``None`` when the
    failure is only the "wait for the previous deployment" throttle message.
    Any other failure raises.
    """
    while True:
        progress = cfn.describe_type_registration(RegistrationToken=token)
        if progress['ProgressStatus'] != "IN_PROGRESS":
            break
        sleep(5)
    if progress['ProgressStatus'] == 'FAILED':
        description = progress['Description']
        if 'to finish before submitting another deployment request for ' in description:
            return None
        raise Exception(description)
    return progress['TypeVersionArn']
@helper.create
@helper.update
def register(event, _):
    """Create/Update handler: register the resource type described in the
    event with the CloudFormation registry and return its version ARN.

    Skips re-registration when an equal-or-newer version is already recorded
    in SSM and the type still exists in the registry.
    """
    # NOTE(review): logs the normal-flow event at ERROR level — looks like it
    # should be info/debug; confirm before changing log filters around it.
    logger.error(f"event: {json.dumps(event)}")
    type_name = event['ResourceProperties']['TypeName'].replace("::", "-").lower()
    version = Version(event['ResourceProperties'].get('Version', '0.0.0'))
    if version != Version('0.0.0') and version <= get_current_version(type_name):
        print("version already registered is greater than this version, leaving as is.")
        # Even if SSM says we are current, re-register when the type itself
        # has disappeared from the registry.
        if not cfn.list_type_versions(Type='RESOURCE', TypeName=event['ResourceProperties']['TypeName'])['TypeVersionSummaries']:
            print("resource missing, re-registering...")
        else:
            try:
                arn = cfn.describe_type(Type='RESOURCE', TypeName=event['ResourceProperties']['TypeName'])['Arn']
                return arn
            except cfn.exceptions.TypeNotFoundException:
                print("resource missing, re-registering...")
    # Roles for the handler execution and for registry logging.
    execution_role_arn = put_role(type_name, event['ResourceProperties']['IamPolicy'], execution_trust_policy)
    log_role_arn = put_role('CloudFormationRegistryResourceLogRole', log_policy, log_trust_policy)
    kwargs = {
        "Type": 'RESOURCE',
        "TypeName": event['ResourceProperties']['TypeName'],
        "SchemaHandlerPackage": event['ResourceProperties']['SchemaHandlerPackage'],
        "LoggingConfig": {
            "LogRoleArn": log_role_arn,
            "LogGroupName": f"/cloudformation/registry/{type_name}"
        },
        "ExecutionRoleArn": execution_role_arn
    }
    retries = 3
    while True:
        try:
            try:
                response = cfn.register_type(**kwargs)
            except cfn.exceptions.CFNRegistryException as e:
                # Version quota hit: prune the oldest version and retry
                # immediately (does not consume a retry).
                if "Maximum number of versions exceeded" not in str(e):
                    raise
                delete_oldest(event['ResourceProperties']['TypeName'])
                continue
            # May be None when another deployment is already in flight.
            version_arn = stabilize(response['RegistrationToken'])
            break
        except Exception as e:
            if not retries:
                raise
            retries -= 1
            logger.error(e, exc_info=True)
            sleep(60)
    if version_arn:
        cfn.set_type_default_version(Arn=version_arn)
    set_version(type_name, event['ResourceProperties'].get('Version', '0.0.0'))
    return version_arn
def delete_oldest(name):
    """Deregister one old version of resource type *name* to free quota.

    Assumes list_type_versions returns oldest first — TODO confirm.  If the
    first entry is the default version (cannot be removed), removes the next
    one instead.  A vanished type is treated as already cleaned up.
    """
    versions = cfn.list_type_versions(Type='RESOURCE', TypeName=name)['TypeVersionSummaries']
    if len(versions) < 2:
        # Never delete the only remaining version.
        return
    try:
        try:
            cfn.deregister_type(Arn=versions[0]['Arn'])
        except cfn.exceptions.CFNRegistryException as e:
            if "is the default version" not in str(e):
                raise
            # versions[0] is the default; drop the next candidate instead.
            cfn.deregister_type(Arn=versions[1]['Arn'])
    except cfn.exceptions.TypeNotFoundException:
        print("version already deleted...")
@helper.delete
def delete(event, _):
    """Delete handler: intentionally a no-op so the registered type is retained."""
    # We don't know whether other stacks are using the resource type, so we retain the resource after delete.
    return
def lambda_handler(event, context):
    """Lambda entry point: delegate to crhelper, which dispatches to the
    register/delete handlers and signals CloudFormation."""
    helper(event, context)
| StarcoderdataPython |
86148 | from django.test import TestCase
from explorer.actions import generate_report_action
from explorer.tests.factories import SimpleQueryFactory
from explorer import app_settings
from explorer.utils import passes_blacklist, schema_info, param, swap_params, extract_params, shared_dict_update, EXPLORER_PARAM_TOKEN, execute_query
class TestSqlBlacklist(TestCase):
    """Tests for the SQL-keyword blacklist enforced by django-sql-explorer."""
    def setUp(self):
        # Remember the configured blacklist so every test can restore it.
        self.orig = app_settings.EXPLORER_SQL_BLACKLIST
    def tearDown(self):
        app_settings.EXPLORER_SQL_BLACKLIST = self.orig
    def test_overriding_blacklist(self):
        """With an empty blacklist, a query aliased "DELETE" runs and exports."""
        app_settings.EXPLORER_SQL_BLACKLIST = []
        r = SimpleQueryFactory(sql="SELECT 1+1 AS \"DELETE\";")
        fn = generate_report_action()
        result = fn(None, None, [r, ])
        self.assertEqual(result.content, 'DELETE\r\n2\r\n')
    def test_default_blacklist_prevents_deletes(self):
        """The default blacklist blocks the same query (content is '0')."""
        r = SimpleQueryFactory(sql="SELECT 1+1 AS \"DELETE\";")
        fn = generate_report_action()
        result = fn(None, None, [r, ])
        self.assertEqual(result.content, '0')
    def test_queries_modifying_functions_are_ok(self):
        # "drop view" is not on the blacklist.
        sql = "SELECT 1+1 AS TWO; drop view foo;"
        self.assertTrue(passes_blacklist(sql))
    def test_queries_deleting_stuff_are_not_ok(self):
        # "delete" anywhere in the statement fails the blacklist.
        sql = "'distraction'; delete from table; SELECT 1+1 AS TWO; drop view foo;"
        self.assertFalse(passes_blacklist(sql))
    def test_queries_dropping_views_is_ok_and_not_case_sensitive(self):
        sql = "SELECT 1+1 AS TWO; drop ViEw foo;"
        self.assertTrue(passes_blacklist(sql))
class TestSchemaInfo(TestCase):
    """Tests for explorer.utils.schema_info model/table introspection."""
    def test_schema_info_returns_valid_data(self):
        res = schema_info()
        # schema_info() yields tuples whose second element is the table name.
        tables = [a[1] for a in res]
        self.assertIn('explorer_query', tables)
    def test_app_exclusion_list(self):
        app_settings.EXPLORER_SCHEMA_EXCLUDE_APPS = ('explorer',)
        res = schema_info()
        # NOTE(review): restores to ('',) rather than the original value, so a
        # modified setting leaks to later tests — confirm this is intended.
        app_settings.EXPLORER_SCHEMA_EXCLUDE_APPS = ('',)
        tables = [a[1] for a in res]
        self.assertNotIn('explorer_query', tables)
class TestParams(TestCase):
    """Exercises the $$token$$ parameter helpers in explorer.utils."""

    def test_swappable_params_are_built_correctly(self):
        self.assertEqual(EXPLORER_PARAM_TOKEN + 'foo' + EXPLORER_PARAM_TOKEN,
                         param('foo'))

    def test_params_get_swapped(self):
        swapped = swap_params('please swap $$this$$ and $$that$$',
                              {'this': 'here', 'that': 'there'})
        self.assertEqual(swapped, 'please swap here and there')

    def test_empty_params_does_nothing(self):
        # A None parameter map must leave the SQL untouched.
        sql = 'please swap $$this$$ and $$that$$'
        self.assertEqual(swap_params(sql, None), sql)

    def test_non_string_param_gets_swapper(self):
        # Non-string values are stringified on substitution.
        self.assertEqual(swap_params('please swap $$this$$', {'this': 1}),
                         'please swap 1')

    def test_extracting_params(self):
        # Extracted parameters come back with empty-string defaults.
        self.assertEqual(extract_params('please swap $$this$$'), {'this': ''})

    def test_shared_dict_update(self):
        # Only keys already present in the target are copied from the source.
        result = shared_dict_update({'bar': None}, {'foo': 1, 'bar': 2})
        self.assertEqual({'bar': 2}, result)
3292138 | # project/db_migrate.py
from views import db
from _config import DATABASE_PATH
import sqlite3
#from datetime import datetime
# with sqlite3.connect(DATABASE_PATH) as connection:
# c = connection.cursor()
# c.execute("""ALTER TABLE tasks RENAME TO old_tasks""")
# db.create_all()
# c.execute("""SELECT name, due_date, priority,
# status FROM old_tasks ORDER BY task_id ASC""")
# data = [(row[0], row[1], row[2], row[3],
# datetime.now(), 1) for row in c.fetchall()]
# c.executemany("""INSERT INTO tasks (name, due_date, priority, status,
# posted_date, user_id) VALUES (?, ?, ?, ?, ?, ?)""", data)
# c.execute("DROP TABLE old_tasks")
# One-shot migration: rebuild the ``users`` table with a new ``role`` column,
# defaulting every existing account to the 'user' role.  Destructive: the
# renamed original table is dropped at the end.
with sqlite3.connect(DATABASE_PATH) as connection:
    c = connection.cursor()
    # Park the existing table so create_all() can build the new schema.
    c.execute("""ALTER TABLE users RENAME TO old_users""")
    db.create_all()
    c.execute("""SELECT name, email, password
                  FROM old_users
                  ORDER BY id ASC""")
    # Append the default role to each migrated row.
    data = [(row[0], row[1], row[2],
             'user') for row in c.fetchall()]
    c.executemany("""INSERT INTO users (name, email, password,
                  role) VALUES (?, ?, ?, ?)""", data)
    c.execute("DROP TABLE old_users")
| StarcoderdataPython |
3270431 | import boto3
# Long-poll the gymchecker SQS queue for a single message and print it.
client = boto3.client('sqs')
response = client.receive_message(
    QueueUrl='https://sqs.eu-west-1.amazonaws.com/164968468391/gymchecker',
    AttributeNames=[
        'All'
    ],
    MessageAttributeNames=[
        'All',
    ],
    MaxNumberOfMessages=1,
    VisibilityTimeout=123,  # seconds the received message stays hidden from other consumers
    WaitTimeSeconds=20  # long polling: wait up to 20s for a message to arrive
)
print (response)
1713377 | import sys
class OmasError():
    """Collects errors reported during a compilation/interpretation pass.

    Fixed: ``count`` and ``errorList`` were *class* attributes shared by every
    instance (the classic mutable-class-attribute bug), so errors reported on
    one instance leaked into all others.  They are now per-instance state.
    """

    def __init__(self):
        # Per-instance error state (previously shared class-level state).
        self.count = 0
        self.errorList = []

    def add(self, typeError, message, line):
        """Record one error and return its formatted log line."""
        self.count += 1
        logString = typeError + " " + message + " " + "on line" + " " + str(line)
        self.errorList.append({"logString": logString, "line": line})
        return logString

    def show(self):
        """Return the summary line with the total error count."""
        logString = "# Se encontraron " + str(self.count) + " errores."
        return logString

    def reset(self):
        """Discard all recorded errors."""
        self.errorList = []
        self.count = 0

    def getTotalErrors(self):
        """Return how many errors have been recorded."""
        return self.count

    def haveErrors(self):
        """Return True if at least one error has been recorded."""
        return self.count > 0

    def getListErrors(self):
        """Return the list of recorded error dicts."""
        return self.errorList
| StarcoderdataPython |
81286 | import json
import os.path
class Scheme(dict):
    """Represents all of the data associated with a given scheme. In addition
    to storing whether or not a scheme is roman, :class:`Scheme` partitions
    a scheme's characters into important functional groups.

    :class:`Scheme` is just a subclass of :class:`dict`.

    :param data: a :class:`dict` of initial values. The particular characters
        present here are also assumed to be the _preferred_ transliterations
        when transliterating to this scheme.
    :param is_roman: `True` if the scheme is a romanization and `False`
        otherwise.
    :param name: scheme identifier as understood by
        :mod:`indic_transliteration.sanscript` (used when transliterating).
    """
    def __init__(self, data=None, is_roman=True, name=None):
        super(Scheme, self).__init__(data or {})
        self.is_roman = is_roman
        self.name = name
    def fix_lazy_anusvaara_except_padaantas(self, data_in, omit_sam, omit_yrl):
        """Apply the anusvaara fix to every word while leaving each word's
        final (padaanta) character untouched."""
        lines = data_in.split("\n")
        lines_out = []
        for line in lines:
            words = line.split()
            ## We don't want ग्रामं गच्छ to turn into ग्रामङ् गच्छ or ग्रामम् गच्छ
            # Fix everything but the last character, then re-append it.
            words = [self.fix_lazy_anusvaara(word[:-1], omit_sam, omit_yrl) + word[-1] for word in words]
            lines_out.append(" ".join(words))
        return "\n".join(lines_out)
    def fix_lazy_anusvaara(self, data_in, omit_sam=False, omit_yrl=False, ignore_padaanta=False):
        """Normalise "lazy" anusvaara usage in *data_in* by round-tripping
        through Devanagari, where the actual fix is implemented.

        NOTE(review): when *self* is the Devanagari scheme itself this
        delegates to ``SCHEMES[DEVANAGARI].fix_lazy_anusvaara`` — presumably
        that scheme object overrides this method; confirm to rule out
        unbounded recursion.
        """
        from indic_transliteration import sanscript
        if ignore_padaanta:
            return self.fix_lazy_anusvaara_except_padaantas(data_in=data_in, omit_sam=omit_sam, omit_yrl=omit_yrl)
        data_out = sanscript.transliterate(data=data_in, _from=self.name, _to=sanscript.DEVANAGARI)
        data_out = sanscript.SCHEMES[sanscript.DEVANAGARI].fix_lazy_anusvaara(data_in=data_out, omit_sam=omit_sam, omit_yrl=omit_yrl)
        return sanscript.transliterate(data=data_out, _from=sanscript.DEVANAGARI, _to=self.name)
    def from_devanagari(self, data):
        """Transliterate *data* from Devanagari into this scheme."""
        from indic_transliteration import sanscript
        return sanscript.transliterate(data=data, _from=sanscript.DEVANAGARI, _to=self.name)
def load_scheme(file_path, cls, **kwargs):
    """Deserialize a transliteration-scheme JSON file.

    Any JSON object carrying a "vowels" key is treated as the scheme itself
    and turned into a *cls* instance; all other objects pass through as plain
    dicts.  The scheme is flagged roman when "roman" occurs in *file_path*,
    and named after the file's basename without its ".json" suffix.
    """
    import codecs
    roman = "roman" in file_path
    scheme_name = os.path.basename(file_path).replace(".json", "")
    def _build(mapping):
        # json.load calls this for every object, innermost first; only the
        # scheme-level object has a "vowels" key.
        if "vowels" in mapping:
            return cls(data=mapping, name=scheme_name, is_roman=roman, **kwargs)
        return mapping
    with codecs.open(file_path, "r", 'utf-8') as handle:
        return json.load(handle, object_hook=_build)
3206537 |
from .custom_logger import BackgroundCustomLogger
from . import api
__version__ = '1.3.2'
# Module-level singleton logger shared by the whole package.
logger = BackgroundCustomLogger()
# Public API of the package.
__all__ = [
    BackgroundCustomLogger.__name__,
    'logger',
    'api'
]
| StarcoderdataPython |
1680748 | # -*- coding: utf-8 -*-
'''
salt.serializers.msgpack
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Implements MsgPack serializer.
'''
# Import Python libs
from __future__ import absolute_import, print_function, unicode_literals
import copy
import logging
# Import Salt Libs
import salt.utils.msgpack
from salt.serializers import DeserializationError, SerializationError
# Import 3rd-party libs
from salt.ext import six
log = logging.getLogger(__name__)
# True when salt.utils.msgpack could import the msgpack library.
available = salt.utils.msgpack.HAS_MSGPACK
# One of three implementations is selected at import time:
#   1) msgpack missing  -> stubs that raise RuntimeError on first use;
#   2) msgpack >= 0.2.0 -> thin wrappers over salt.utils.msgpack;
#   3) older msgpack    -> containers are manually converted before dumping.
if not available:
    def _fail():
        # Deferred failure: importing this module never raises, using it does.
        raise RuntimeError('msgpack is not available')
    def _serialize(obj, **options):
        _fail()
    def _deserialize(stream_or_string, **options):
        _fail()
elif salt.utils.msgpack.version >= (0, 2, 0):
    def _serialize(obj, **options):
        try:
            return salt.utils.msgpack.dumps(obj, **options)
        except Exception as error:  # pylint: disable=broad-except
            # Any failure is normalised to the serializer-layer exception.
            raise SerializationError(error)
    def _deserialize(stream_or_string, **options):
        try:
            options.setdefault('use_list', True)
            # NOTE(review): the 'encoding' kwarg was removed in msgpack >= 1.0;
            # confirm the supported msgpack versions still accept it.
            options.setdefault('encoding', 'utf-8')
            return salt.utils.msgpack.loads(stream_or_string, **options)
        except Exception as error:  # pylint: disable=broad-except
            raise DeserializationError(error)
else:  # msgpack.version < 0.2.0
    def _encoder(obj):
        '''
        Since OrderedDict is identified as a dictionary, we can't make use of
        msgpack custom types, we will need to convert by hand.
        This means iterating through all elements of dictionaries, lists and
        tuples.
        '''
        if isinstance(obj, dict):
            data = [(key, _encoder(value)) for key, value in six.iteritems(obj)]
            return dict(data)
        elif isinstance(obj, (list, tuple)):
            return [_encoder(value) for value in obj]
        return copy.copy(obj)
    def _decoder(obj):
        # Identity hook: old msgpack needs no post-processing on load.
        return obj
    def _serialize(obj, **options):
        try:
            obj = _encoder(obj)
            return salt.utils.msgpack.dumps(obj, **options)
        except Exception as error:  # pylint: disable=broad-except
            raise SerializationError(error)
    def _deserialize(stream_or_string, **options):
        options.setdefault('use_list', True)
        try:
            obj = salt.utils.msgpack.loads(stream_or_string)
            return _decoder(obj)
        except Exception as error:  # pylint: disable=broad-except
            raise DeserializationError(error)
# Public API: bind the selected implementation and attach docstrings.
serialize = _serialize
deserialize = _deserialize
serialize.__doc__ = '''
Serialize Python data to MsgPack.
:param obj: the data structure to serialize
:param options: options given to lower msgpack module.
'''
deserialize.__doc__ = '''
Deserialize any string of stream like object into a Python data structure.
:param stream_or_string: stream or string to deserialize.
:param options: options given to lower msgpack module.
'''
1734573 | <filename>dist/ba_root/mods/chatHandle/ChatCommands/commands/Cheats.py
from .Handlers import handlemsg, handlemsg_all, clientid_to_myself
import ba, _ba
# Primary chat-command names dispatched by ExcelCommand below.
Commands = ['kill', 'heal', 'curse', 'sleep', 'superpunch', 'gloves', 'shield', 'freeze', 'unfreeze', 'godmode']
# Short alternative spellings; each maps to one of the commands above
# (see the if/elif chain in ExcelCommand for the exact pairing).
CommandAliases = ['die', 'heath', 'cur', 'sp', 'punch', 'protect', 'ice', 'thaw', 'gm']
def ExcelCommand(command, arguments, clientid, accountid):
    """
    Checks The Command And Run Function

    Parameters:
        command   : str  — command name or alias
        arguments : str  — raw argument tokens
        clientid  : int
        accountid : int  (unused here; kept for the dispatcher contract)

    Returns:
        None
    """
    # Alias -> handler table; unknown commands fall through silently,
    # exactly like the original if/elif chain.
    dispatch = {
        'kill': kill, 'die': kill,
        'heal': heal, 'heath': heal,
        'curse': curse, 'cur': curse,
        'sleep': sleep,
        'sp': super_punch, 'superpunch': super_punch,
        'gloves': gloves, 'punch': gloves,
        'shield': shield, 'protect': shield,
        'freeze': freeze, 'ice': freeze,
        'unfreeze': un_freeze, 'thaw': un_freeze,
        'gm': god_mode, 'godmode': god_mode,
    }
    handler = dispatch.get(command)
    if handler is not None:
        handler(arguments, clientid)
def kill(arguments, clientid):
    """Kill the sender (no args), everyone ('all'), or a player by index."""
    message = ba.DieMessage()
    if arguments in ([], ['']):
        handlemsg(clientid_to_myself(clientid), message)
    elif arguments[0] == 'all':
        handlemsg_all(message)
    else:
        try:
            handlemsg(int(arguments[0]), message)
        except:
            # Invalid index strings are silently ignored (matches siblings).
            return
def heal(arguments, clientid):
    """Give a health powerup to the sender, everyone, or a player index."""
    powerup = ba.PowerupMessage(poweruptype='health')
    if arguments in ([], ['']):
        handlemsg(clientid_to_myself(clientid), powerup)
    elif arguments[0] == 'all':
        handlemsg_all(powerup)
    else:
        try:
            handlemsg(int(arguments[0]), powerup)
        except:
            # Bad index strings are ignored, matching the other handlers.
            return
def curse(arguments, clientid):
    """Apply the curse powerup to the sender, everyone, or a player index."""
    powerup = ba.PowerupMessage(poweruptype='curse')
    if arguments in ([], ['']):
        handlemsg(clientid_to_myself(clientid), powerup)
    elif arguments[0] == 'all':
        handlemsg_all(powerup)
    else:
        try:
            handlemsg(int(arguments[0]), powerup)
        except:
            # Bad index strings are ignored, matching the other handlers.
            return
def sleep(arguments, clientid):
    """Knock out the sender, everyone, or a player index for 8000 ms."""
    activity = _ba.get_foreground_host_activity()
    if arguments in ([], ['']):
        target = clientid_to_myself(clientid)
        activity.players[target].actor.node.handlemessage('knockout', 8000)
    elif arguments[0] == 'all':
        for player in activity.players:
            player.actor.node.handlemessage('knockout', 8000)
    else:
        try:
            activity.players[int(arguments[0])].actor.node.handlemessage('knockout', 8000)
        except:
            # Invalid index strings are silently ignored (matches siblings).
            return
def super_punch(arguments, clientid):
    """Toggle super punch (scale 15, no cooldown) for the sender, everyone,
    or a player index; toggling off restores scale 1.2 / cooldown 400."""
    activity = _ba.get_foreground_host_activity()
    if arguments == [] or arguments == ['']:
        myself = clientid_to_myself(clientid)
        # Scale != 15 means super punch is currently off: turn it on.
        if activity.players[myself].actor._punch_power_scale != 15:
            activity.players[myself].actor._punch_power_scale = 15
            activity.players[myself].actor._punch_cooldown = 0
        else:
            activity.players[myself].actor._punch_power_scale = 1.2
            activity.players[myself].actor._punch_cooldown = 400
    elif arguments[0] == 'all':
        activity = _ba.get_foreground_host_activity()
        # Each player is toggled individually relative to their own state.
        for i in activity.players:
            if i.actor._punch_power_scale != 15:
                i.actor._punch_power_scale = 15
                i.actor._punch_cooldown = 0
            else:
                i.actor._punch_power_scale = 1.2
                i.actor._punch_cooldown = 400
    else:
        try:
            activity = _ba.get_foreground_host_activity()
            req_player = int(arguments[0])
            if activity.players[req_player].actor._punch_power_scale != 15:
                activity.players[req_player].actor._punch_power_scale = 15
                activity.players[req_player].actor._punch_cooldown = 0
            else:
                activity.players[req_player].actor._punch_power_scale = 1.2
                activity.players[req_player].actor._punch_cooldown = 400
        except:
            # Invalid index strings are silently ignored (matches siblings).
            return
def gloves(arguments, clientid):
    """Give boxing gloves (punch powerup) to the sender, everyone, or an index."""
    powerup = ba.PowerupMessage(poweruptype='punch')
    if arguments in ([], ['']):
        handlemsg(clientid_to_myself(clientid), powerup)
    elif arguments[0] == 'all':
        handlemsg_all(powerup)
    else:
        try:
            handlemsg(int(arguments[0]), powerup)
        except:
            # Bad index strings are ignored, matching the other handlers.
            return
def shield(arguments, clientid):
    """Give a shield powerup to the sender, everyone, or a player index."""
    powerup = ba.PowerupMessage(poweruptype='shield')
    if arguments in ([], ['']):
        handlemsg(clientid_to_myself(clientid), powerup)
    elif arguments[0] == 'all':
        handlemsg_all(powerup)
    else:
        try:
            handlemsg(int(arguments[0]), powerup)
        except:
            # Bad index strings are ignored, matching the other handlers.
            return
def freeze(arguments, clientid):
    """Freeze the sender, everyone, or a player by index."""
    message = ba.FreezeMessage()
    if arguments in ([], ['']):
        handlemsg(clientid_to_myself(clientid), message)
    elif arguments[0] == 'all':
        handlemsg_all(message)
    else:
        try:
            handlemsg(int(arguments[0]), message)
        except:
            # Invalid index strings are silently ignored (matches siblings).
            return
def un_freeze(arguments, clientid):
    """Thaw the sender, everyone, or a player by index."""
    message = ba.ThawMessage()
    if arguments in ([], ['']):
        handlemsg(clientid_to_myself(clientid), message)
    elif arguments[0] == 'all':
        handlemsg_all(message)
    else:
        try:
            handlemsg(int(arguments[0]), message)
        except:
            # Invalid index strings are silently ignored (matches siblings).
            return
def god_mode(arguments, clientid):
    """Toggle god mode (7x punch, hockey physics, invincibility) for the
    sender (no args), everyone ('all'), or a player index.

    Fixed: unlike every sibling handler, the by-index branch had no
    try/except, so a non-numeric or out-of-range argument raised an
    uncaught exception.  It now swallows bad input the same way.
    """
    def _toggle(actor):
        # Punch scale 7 marks god mode as currently on; toggle accordingly.
        if actor._punch_power_scale != 7:
            actor._punch_power_scale = 7
            actor.node.hockey = True
            actor.node.invincible = True
        else:
            actor._punch_power_scale = 1.2
            actor.node.hockey = False
            actor.node.invincible = False

    activity = _ba.get_foreground_host_activity()
    if arguments in ([], ['']):
        _toggle(activity.players[clientid_to_myself(clientid)].actor)
    elif arguments[0] == 'all':
        for player in activity.players:
            _toggle(player.actor)
    else:
        try:
            _toggle(activity.players[int(arguments[0])].actor)
        except:
            # Invalid index strings are silently ignored (matches siblings).
            return
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.