id stringlengths 1 8 | text stringlengths 6 1.05M | dataset_id stringclasses 1
value |
|---|---|---|
8193844 | <reponame>s2t2/tweet-analyzer-py
#
# A NEAR REPLICA OF BOTCODE VERSION 2 (SEE THE "START" DIR)
#
import math
from collections import defaultdict
from operator import itemgetter
import time
from datetime import datetime
import numpy as np
import networkx as nx
##########################################################################
####################### BUILD RETWEET NX-(SUB)GRAPH FROM DICTIONARY ######
##########################################################################
'''
Takes as input a csv file of retweet relationships and builds
a NetworkX object, in order to apply prebuilt mincut algorithms
'''
def buildRTGraph(graph, subNodes, lowerBound=0):
    '''
    Build the directed retweet graph as a NetworkX DiGraph.

    INPUTS:
    ## graph (dict)
        mapping from a retweeting user's ID to the list of user IDs they
        retweeted (one list entry per retweet, so repeats encode counts)
    ## subNodes (list of ints)
        a list of user IDs if you want to only consider a subgraph of the RT graph
    ## lowerBound (int)
        only keep an edge when the retweet count from User1 to User2 is
        >= lowerBound (sparsifies the graph)

    RETURNS: nx.DiGraph whose edge weights are retweet counts.
    '''
    G = nx.DiGraph()
    # np.intersect1d already returns sorted unique values, so the extra
    # np.unique pass of the original implementation was redundant.
    firstInter = list(np.intersect1d(subNodes, list(graph.keys())))
    for count, node in enumerate(firstInter, start=1):
        print("at user n" + str(count) + " on " + str(len(graph)))
        # Count how many times `node` retweeted each distinct user.
        unique2, counts = np.unique(graph[node], return_counts=True)
        res = dict(zip(unique2, counts))
        inter = np.intersect1d(unique2, subNodes)
        for i in inter:
            w = res[i]
            if i != node and w >= lowerBound:
                # add_edge creates missing endpoint nodes implicitly,
                # so the explicit add_node calls were unnecessary.
                G.add_edge(node, i, weight=w)
    return G
############################################################################
####################### BUILD/CUT ENERGY GRAPH #############################
############################################################################
'''
Takes as input the RT graph and builds the energy graph.
Then cuts the energy graph to classify
'''
def computeH(G, piBot, edgelist_data, graph_out, graph_in):
    H = nx.DiGraph()
    '''
    INPUTS:
    ## G (ntwkX graph)
        the Retweet Graph from buildRTGraph
    ## piBot (dict of floats)
        a dictionary with priors on bot probabilities. Keys are user ids, values are prior bot scores.
    ## edgelist_data (list of tuples)
        information about edges to build energy graph.
        This list comes in part from the getLinkDataRestrained method
    ## graph_out (dict of ints)
        a graph that stores out degrees of accounts in retweet graph
    ## graph_in (dict of ints)
        a graph that stores in degrees of accounts in retweet graph

    RETURNS: (H, Bots, user_data) where H is the capacity graph, Bots is
    the list of nodes on the bot side of the minimum cut, and user_data
    is the per-user bookkeeping dict.
    '''
    # Per-node unary potentials: phi_0 is the cost of labelling the node
    # human, phi_1 the cost of labelling it bot (negative log of the prior;
    # the 1e-20 term guards against log(0)).
    user_data = {i: {
        'user_id': i,
        'out': graph_out[i],
        'in': graph_in[i],
        'old_prob': piBot[i],
        'phi_0': max(0, -np.log(float(10**(-20) + (1 - piBot[i])))),
        'phi_1': max(0, -np.log(float(10**(-20) + piBot[i]))),
        'prob': 0,
        'clustering': 0
    } for i in G.nodes()}
    # Node 1 acts as the source (human side) and node 0 as the sink (bot
    # side); create every edge up front with zero capacity.
    set_1 = [(el[0], el[1]) for el in edgelist_data]
    set_2 = [(el[1], el[0]) for el in edgelist_data]
    set_3 = [(el, 0) for el in user_data]
    set_4 = [(1, el) for el in user_data]
    H.add_edges_from(set_1 + set_2 + set_3 + set_4, capacity=0)
    for i in edgelist_data:
        # Pairwise potentials for the four label combinations of the pair.
        val_00 = i[2][0]
        val_01 = i[2][1]
        val_10 = i[2][2]
        val_11 = i[2][3]
        # edges between nodes
        H[i[0]][i[1]]['capacity'] += 0.5 * (val_01 + val_10 - val_00 - val_11)
        H[i[1]][i[0]]['capacity'] += 0.5 * (val_01 + val_10 - val_00 - val_11)
        # edges to sink (bot energy)
        H[i[0]][0]['capacity'] += 0.5 * val_11 + 0.25 * (val_10 - val_01)
        H[i[1]][0]['capacity'] += 0.5 * val_11 + 0.25 * (val_01 - val_10)
        # edges from source (human energy)
        H[1][i[0]]['capacity'] += 0.5 * val_00 + 0.25 * (val_01 - val_10)
        H[1][i[1]]['capacity'] += 0.5 * val_00 + 0.25 * (val_10 - val_01)
        # minimum_cut requires non-negative capacities; warn and stop early
        # if any capacity goes negative.
        if(H[1][i[0]]['capacity'] < 0):
            print("Neg capacity")
            break
        if(H[i[1]][0]['capacity'] < 0):
            print("Neg capacity")
            break
        if(H[1][i[1]]['capacity'] < 0):
            print("Neg capacity")
            break
        if(H[i[0]][0]['capacity'] < 0):
            print("Neg capacity")
            break
    # Add the unary potentials onto the source/sink edges.
    for i in user_data.keys():
        H[1][i]['capacity'] += user_data[i]['phi_0']
        if(H[1][i]['capacity'] < 0):
            print("Neg capacity")
            break
        H[i][0]['capacity'] += user_data[i]['phi_1']
        if(H[i][0]['capacity'] < 0):
            print("Neg capacity")
            break
    cut_value, mc = nx.minimum_cut(H, 1, 0)
    # mc = [nodes dont cut source edge (bots), nodes dont cut sink edge
    # (humans)]
    Bots = list(mc[0])
    if 0 in Bots: # wrong cut set because nodes have sink edge (humans)
        print("Double check")
        Bots = list(mc[1])
    # Drop the artificial source node from the result.
    Bots.remove(1)
    return H, Bots, user_data
def compute_bot_probabilities(rt_graph, energy_graph, bot_names):
    """
    Compute a posterior bot probability for every node of the retweet graph
    from the capacities of the solved energy graph.

    ## rt_graph (ntwkX graph): the retweet graph
    ## energy_graph (ntwkX graph): the capacity graph H from computeH
    ## bot_names (list): nodes classified as bots by the minimum cut

    RETURNS: dict mapping node -> bot probability in [0, 1].
    """
    #print("Calculate bot probability for each labeled node in retweet graph")
    #start_time = time.time()
    PiBotFinal = {}
    for counter, node in enumerate(rt_graph.nodes()):
        if counter % 1000 == 0:
            # Progress logging every 1000 nodes.
            print(datetime.now().strftime("%Y-%m-%d %H:%M:%S"), "|", "NODE:", counter)
        # Neighbors in H, excluding the artificial source (1) and sink (0).
        neighbors = list(np.unique([i for i in nx.all_neighbors(energy_graph, node) if i not in [0, 1]]))
        ebots = list(np.unique(np.intersect1d(neighbors, bot_names)))
        ehumans = list(set(neighbors) - set(ebots))
        # Pairwise contribution: capacities toward human neighbors minus
        # capacities toward bot neighbors.
        psi_l = sum([energy_graph[node][j]['capacity'] for j in ehumans]) - \
            sum([energy_graph[node][i]['capacity'] for i in ebots])
        # probability to be in 1 = notPL
        psi_l_bis = psi_l + energy_graph[node][0]['capacity'] - energy_graph[1][node]['capacity']
        if (psi_l_bis) > 12:
            # exp(psi_l_bis) would make the sigmoid effectively zero anyway.
            PiBotFinal[node] = 0
        else:
            # Probability in the target (0) class
            PiBotFinal[node] = 1.0 / (1 + np.exp(psi_l_bis))
    #print("--- %s seconds ---" % (time.time() - start_time))
    return PiBotFinal
###############################################################################
####################### COMPUTE EDGES INFORMATION #############################
###############################################################################
'''
Takes as input the RT graph and retrieves information on edges
to further build H.
'''
def getLinkDataRestrained(G, weight_attr="weight"):
    '''
    Collect, for every directed edge (i, j) of the retweet graph, the data
    needed to build the energy graph H.

    INPUTS:
    ## G (ntwkX graph)
        the Retweet Graph from buildRTGraph

    RETURNS: list of [i, j, True, has_reverse_edge, w(i->j), w(j->i) or 0].
    '''
    # Map each directed edge to its weight so reverse lookups are O(1).
    weights = dict(((u, v), attrs[weight_attr]) for u, v, attrs in G.edges(data=True))
    link_data = []
    for (source, target), weight in weights.items():
        has_reverse = (target, source) in weights
        reverse_weight = weights.get((target, source), 0)
        link_data.append([source, target, True, has_reverse, weight, reverse_weight])
    return link_data
##########################################################################
####################### POTENTIAL FUNCTION ###############################
##########################################################################
'''
Compute joint energy potential between two users
'''
# INPUTS:
#   u1 (int): ID of user u1
#   u2 (int): ID of user u2
#   wlr (int): number of retweets from u1 to u2
#   in_graph (dict of ints): in degrees of accounts in the retweet graph
#   out_graph (dict of ints): out degrees of accounts in the retweet graph
#   alpha (list of floats): hyperparameters (mu, alpha_out, alpha_in)
#   lambda00 (float): ratio of psi_00 to psi_01
#   lambda11 (float): ratio of psi_11 to psi_01
#   epsilon (float): exponent such that lambda10 = lambda00 + lambda11 - 1 + epsilon
def psi(u1, u2, wlr, in_graph, out_graph, alpha, lambda00, lambda11, epsilon):
dout_u1 = out_graph[u1] # outdegree of u1 (number of retweets it did)
din_u2 = in_graph[u2] # indegree of u2 (number of retweets it received)
if dout_u1 == 0 or din_u2 == 0:
print("Relationship problem: " + str(u1) + " --> " + str(u2))
temp = alpha[1] / float(dout_u1) - 1 + alpha[2] / float(din_u2) - 1
if temp < 10:
psi_01 = wlr * alpha[0] / (1 + np.exp(temp))
else:
psi_01 = 0
lambda01 = 1
lambda10 = lambda00 + lambda11 - 1 + epsilon
psi_00 = lambda00 * psi_01
psi_01 = lambda01 * psi_01
psi_10 = lambda10 * psi_01
psi_11 = lambda11 * psi_01
return [psi_00, psi_01, psi_10, psi_11]
| StarcoderdataPython |
1731127 | <reponame>pbarton666/virtual_classroom<filename>dkr-py310/docker-student-portal-310/course_files/experimental/py_profile_1.py
# Prefer the C-accelerated profiler; fall back to the pure-Python one.
try:
    import cProfile as profiler
except:
    import profile as profiler
def fib(n):
    """Return the n-th Fibonacci number via naive double recursion.

    Intentionally exponential-time: this module exists to demonstrate the
    profiler, and the heavy call tree is what we want to measure.
    (Definition from http://en.literateprograms.org/Fibonacci_numbers_(Python).)
    """
    if n == 0:
        return 0
    elif n == 1:
        return 1
    return fib(n - 2) + fib(n - 1)
def fib_seq(n):
    """Return the list [fib(0), fib(1), ..., fib(n)]."""
    prefix = fib_seq(n - 1) if n > 0 else []
    return prefix + [fib(n)]
if __name__=='__main__':
    # Profile the computation of fib_seq(20) and print profiler statistics.
    print ('fib_seq')
    print ( '=' * 80)
    profiler.run('print (fib_seq(20)); print()')
| StarcoderdataPython |
9723909 | #!/usr/bin/env python3
# coding=utf-8
"""
Parser that uses the ELEXON API to return the following data types.
Production
Exchanges
Documentation:
https://www.elexon.co.uk/wp-content/uploads/2017/06/
bmrs_api_data_push_user_guide_v1.1.pdf
"""
import os
import arrow
import logging
import requests
import datetime as dt
import pandas as pd
from io import StringIO
from .lib.validation import validate
from .lib.utils import get_token
# Base URL; the report name is interpolated into the path.
ELEXON_ENDPOINT = 'https://api.bmreports.com/BMRS/{}/v1'
# Per-report CSV layout: expected column count (sanity-checked by the
# parsers) and the number of header rows to skip before data starts.
REPORT_META = {
    'B1620': {
        'expected_fields': 13,
        'skiprows': 5
    },
    'FUELINST': {
        'expected_fields': 22,
        'skiprows': 1
    },
    'INTERFUELHH': {
        'expected_fields': 11,
        'skiprows': 0
    }
}
# Map from ELEXON "Power System Resource Type" to electricitymap fuel keys.
# 'hydro' key is for hydro production
# 'hydro storage' key is for hydro storage
RESOURCE_TYPE_TO_FUEL = {
    'Biomass': 'biomass',
    'Fossil Gas': 'gas',
    'Fossil Hard coal': 'coal',
    'Fossil Oil': 'oil',
    'Hydro Pumped Storage': 'hydro storage',
    'Hydro Run-of-river and poundage': 'hydro',
    'Nuclear': 'nuclear',
    'Solar': 'solar',
    'Wind Onshore': 'wind',
    'Wind Offshore': 'wind',
    'Other': 'unknown'
}
# Exchange key -> column indices of the INTERFUELHH report carrying flows
# for that interconnector (positive values imply import to GB).
EXCHANGES = {
    'FR->GB': [3, 8, 9], # IFA, Eleclink, IFA2
    'GB->GB-NIR': [4],
    'GB->NL': [5],
    'GB->IE': [6],
    'BE->GB': [7],
    'GB->NO-NO2': [10], # North Sea Link
}
# When True, wind production comes from FUELINST instead of B1620
# (B1620 wind data has at times been poor quality).
FETCH_WIND_FROM_FUELINST = True
def query_ELEXON(report, session, params):
    """Send a GET request for the given ELEXON report.

    Note: mutates *params* by adding the 'APIKey' entry read from the
    ELEXON_TOKEN environment/config token.
    """
    params['APIKey'] = get_token('ELEXON_TOKEN')
    return session.get(ELEXON_ENDPOINT.format(report), params=params)
def query_exchange(session, target_datetime=None):
    """
    Fetch the INTERFUELHH (half-hourly interconnector flow) report as CSV
    text for the two-day window ending on target_datetime (default: today).
    """
    if target_datetime is None:
        target_datetime = dt.date.today()
    from_date = (target_datetime - dt.timedelta(days=1)).strftime('%Y-%m-%d')
    to_date = target_datetime.strftime('%Y-%m-%d')
    params = {
        'FromDate': from_date,
        'ToDate': to_date,
        'ServiceType': 'csv'
    }
    response = query_ELEXON('INTERFUELHH', session, params)
    return response.text
def query_production(session, target_datetime=None):
    """
    Fetch the B1620 (actual generation per type) report as CSV text for the
    settlement date containing target_datetime (default: now).
    """
    if target_datetime is None:
        target_datetime = dt.datetime.now()
    # we can only fetch one date at a time.
    # if target_datetime is first 30 minutes of the day fetch the day before.
    # otherwise fetch the day of target_datetime.
    if target_datetime.time() <= dt.time(0, 30):
        settlement_date = target_datetime.date() - dt.timedelta(1)
    else:
        settlement_date = target_datetime.date()
    params = {
        'SettlementDate': settlement_date.strftime('%Y-%m-%d'),
        'Period': '*',
        'ServiceType': 'csv'
    }
    response = query_ELEXON('B1620', session, params)
    return response.text
def parse_exchange(zone_key1, zone_key2, csv_text, target_datetime=None,
                   logger=logging.getLogger(__name__)):
    """
    Parse an INTERFUELHH CSV report into a list of exchange data points for
    the pair (zone_key1, zone_key2). Returns None when csv_text is empty.
    Raises ValueError if the report layout does not match REPORT_META.
    """
    if not csv_text:
        return None
    report = REPORT_META['INTERFUELHH']
    # Exchange keys are stored with zones sorted, e.g. 'FR->GB'.
    sorted_zone_keys = sorted([zone_key1, zone_key2])
    exchange = '->'.join(sorted_zone_keys)
    data_points = list()
    lines = csv_text.split('\n')
    # check field count in report is as expected
    field_count = len(lines[1].split(','))
    if field_count != report['expected_fields']:
        raise ValueError(
            'Expected {} fields in INTERFUELHH report, got {}'.format(
                report['expected_fields'], field_count))
    # Skip the header row and the trailing footer line.
    for line in lines[1:-1]:
        fields = line.split(',')
        # settlement date / period combinations are always local time
        date = dt.datetime.strptime(fields[1], '%Y%m%d').date()
        settlement_period = int(fields[2])
        datetime = datetime_from_date_sp(date, settlement_period)
        data = {
            'sortedZoneKeys': exchange,
            'datetime': datetime,
            'source': 'bmreports.com'
        }
        # positive value implies import to GB
        multiplier = -1 if 'GB' in sorted_zone_keys[0] else 1
        net_flow = 0.0  # init
        for column_index in EXCHANGES[exchange]:
            # read out all columns providing values for this exchange
            if fields[column_index] == "":
                continue  # no value provided for this exchange
            net_flow += float(fields[column_index]) * multiplier
        data['netFlow'] = net_flow
        data_points.append(data)
    return data_points
def parse_production(csv_text, target_datetime=None,
                     logger=logging.getLogger(__name__)):
    """
    Parse a B1620 CSV report into GB production data points, one per unique
    settlement period. Returns None when csv_text is empty. Raises
    ValueError if the report layout does not match REPORT_META.
    """
    if not csv_text:
        return None
    report = REPORT_META['B1620']
    # create DataFrame from slice of CSV rows
    df = pd.read_csv(StringIO(csv_text), skiprows=report['skiprows'] - 1)
    # check field count in report is as expected
    field_count = len(df.columns)
    if field_count != report['expected_fields']:
        raise ValueError(
            'Expected {} fields in B1620 report, got {}'.format(
                report['expected_fields'], len(df.columns)))
    # filter out undesired columns; keeps Settlement Date / Settlement
    # Period / resource type / Quantity and drops the trailing footer row.
    df = df.iloc[:-1, [7, 8, 9, 4]]
    df['Settlement Date'] = df['Settlement Date'].apply(
        lambda x: dt.datetime.strptime(x, '%Y-%m-%d'))
    df['Settlement Period'] = df['Settlement Period'].astype(int)
    df['datetime'] = df.apply(lambda x: datetime_from_date_sp(
        x['Settlement Date'], x['Settlement Period']), axis=1)
    # map from report fuel names to electricitymap fuel names
    fuel_column = 'Power System Resource Type'
    df[fuel_column] = df[fuel_column].apply(lambda x: RESOURCE_TYPE_TO_FUEL[x])
    # loop through unique datetimes and create each data point
    data_points = list()
    for time in pd.unique(df['datetime']):
        time_df = df[df['datetime'] == time]
        data_point = {
            'zoneKey': 'GB',
            'datetime': time.to_pydatetime(),
            'source': 'bmreports.com',
            'production': dict(),
            'storage': dict()
        }
        for row in time_df.iterrows():
            fields = row[1].to_dict()
            fuel = fields[fuel_column]
            quantity = fields['Quantity']
            # check if storage value and if so correct key
            if 'storage' in fuel:
                fuel_key = fuel.replace('storage', '').strip()
                # ELEXON storage is negative when storing and positive when
                # discharging (the opposite to electricitymap)
                data_point['storage'][fuel_key] = quantity * -1
            else:
                # if/else structure allows summation of multiple quantities
                # e.g. 'Wind Onshore' and 'Wind Offshore' both have the
                # key 'wind' here.
                if fuel in data_point['production'].keys():
                    data_point['production'][fuel] += quantity
                else:
                    data_point['production'][fuel] = quantity
        data_points.append(data_point)
    return data_points
def datetime_from_date_sp(date, sp):
    """Convert a settlement date plus settlement period (1-based half-hour
    index) into a timezone-aware datetime in Europe/London local time."""
    datetime = arrow.get(date).shift(minutes=30 * (sp - 1))
    return datetime.replace(tzinfo='Europe/London').datetime
def _fetch_wind(target_datetime=None):
    """
    Fetch wind generation from the FUELINST report and return a DataFrame
    with 'datetime' and 'Wind' columns, keeping only the most recently
    published value for each settlement period. Used to replace the B1620
    wind values (see FETCH_WIND_FROM_FUELINST).
    """
    if target_datetime is None:
        target_datetime = dt.datetime.now()
    # line up with B1620 (main production report) search range
    d = target_datetime.date()
    start = d - dt.timedelta(hours=24)
    end = dt.datetime.combine(d + dt.timedelta(days=1), dt.time(0))
    session = requests.session()
    params = {
        'FromDateTime': start.strftime('%Y-%m-%d %H:%M:%S'),
        'ToDateTime': end.strftime('%Y-%m-%d %H:%M:%S'),
        'ServiceType': 'csv'
    }
    response = query_ELEXON('FUELINST', session, params)
    csv_text = response.text
    report = REPORT_META['FUELINST']
    df = pd.read_csv(StringIO(csv_text), skiprows=report['skiprows'],
                     skipfooter=1, header=None)
    field_count = len(df.columns)
    if field_count != report['expected_fields']:
        raise ValueError(
            'Expected {} fields in FUELINST report, got {}'.format(
                report['expected_fields'], len(df.columns)))
    # Keep settlement date, period, publication timestamp and wind output
    # (column names assigned on the next line).
    df = df.iloc[:, [1, 2, 3, 8]]
    df.columns = ['Settlement Date', 'Settlement Period', 'published', 'Wind']
    df['Settlement Date'] = df['Settlement Date'].apply(
        lambda x: dt.datetime.strptime(str(x), '%Y%m%d'))
    df['Settlement Period'] = df['Settlement Period'].astype(int)
    df['datetime'] = df.apply(lambda x: datetime_from_date_sp(
        x['Settlement Date'], x['Settlement Period']), axis=1)
    df['published'] = df['published'].apply(
        lambda x: dt.datetime.strptime(str(x), '%Y%m%d%H%M%S'))
    # get the most recently published value for each datetime
    idx = df.groupby('datetime')['published'].transform(max) == df['published']
    df = df[idx]
    return df[['datetime', 'Wind']]
def fetch_exchange(zone_key1, zone_key2, session=None, target_datetime=None,
                   logger=logging.getLogger(__name__)):
    """Entry point: fetch and parse interconnector flows between two zones."""
    session = session or requests.session()
    response = query_exchange(session, target_datetime)
    data = parse_exchange(zone_key1, zone_key2, response, target_datetime,
                          logger)
    return data
def fetch_production(zone_key='GB', session=None, target_datetime=None,
                     logger=logging.getLogger(__name__)):
    """
    Entry point: fetch, parse and validate GB production data, optionally
    overriding wind values with the FUELINST report.
    """
    session = session or requests.session()
    response = query_production(session, target_datetime)
    data = parse_production(response, target_datetime, logger)
    # At times B1620 has had poor quality data for wind so fetch from FUELINST
    if FETCH_WIND_FROM_FUELINST:
        wind = _fetch_wind(target_datetime)
        for entry in data:
            datetime = entry['datetime']
            wind_row = wind[wind['datetime'] == datetime]
            if len(wind_row):
                entry['production']['wind'] = wind_row.iloc[0]['Wind']
            else:
                entry['production']['wind'] = None
    # Drop data points whose key fuels are missing or outside sane ranges.
    required = ['coal', 'gas', 'nuclear']
    expected_range = {
        'coal': (0, 10000),
        'gas': (100, 30000),
        'nuclear': (100, 20000)
    }
    data = [x for x in data
            if validate(
                x, logger, required=required, expected_range=expected_range)]
    return data
if __name__ == '__main__':
    """Main method, never used by the Electricity Map backend, but handy
    for testing."""
    # Smoke tests against the live ELEXON API (requires ELEXON_TOKEN).
    print('fetch_production() ->')
    print(fetch_production())
    print('fetch_exchange(FR, GB) ->')
    print(fetch_exchange('FR', 'GB'))
    print('fetch_exchange(GB, IE) ->')
    print(fetch_exchange('GB', 'IE'))
    print('fetch_exchange(GB, NL) ->')
    print(fetch_exchange('GB', 'NL'))
| StarcoderdataPython |
11209019 | # -*- coding:utf-8 -*-
from __future__ import unicode_literals
import unittest
from statik.common import ContentLoadable
from statik.markdown_config import MarkdownConfig
# Markdown fixture with YAML front matter; both the title and the body
# deliberately contain non-ASCII curly quotes to exercise unicode handling.
TEST_MARKDOWN_CONTENT = """---
title: This is a “title” with some non-standard characters
---
This is the “Markdown” body with some other non-standard characters.
"""
class TestNonAsciiChars(unittest.TestCase):
    """Regression test: non-ASCII (curly-quote) characters survive parsing
    of both the front-matter variables and the rendered markdown body."""
    def test_parsing(self):
        parsed = ContentLoadable(
            from_string=TEST_MARKDOWN_CONTENT,
            file_type="markdown",
            name="test",
            markdown_config=MarkdownConfig()
        )
        # Front-matter variable parsed with quotes preserved.
        self.assertEqual(
            "This is a “title” with some non-standard characters",
            parsed.vars['title'],
        )
        # Rendered HTML body preserves the quotes as well.
        self.assertEqual(
            "<p>This is the “Markdown” body with some other non-standard characters.</p>",
            parsed.content
        )
if __name__ == "__main__":
    # Allow running this test module directly.
    unittest.main()
| StarcoderdataPython |
9632372 | """
settings.py
Configuration for Flask app
Important: Place your keys in the secret_keys.py module,
which should be kept out of version control.
"""
from google.appengine.api import app_identity
import os
from secret_keys import *
DEBUG_MODE = False
# Auto-set debug mode based on App Engine dev environ
if 'SERVER_SOFTWARE' in os.environ and os.environ['SERVER_SOFTWARE'].startswith('Dev'):
    DEBUG_MODE = True
DEBUG = DEBUG_MODE
# Select the Fusion Table used for area data, per deployment target.
if DEBUG:
    FT_TABLE = 'imazon_testing'
    FT_TABLE_ID = '2676501'
else:
    app_id = app_identity.get_application_id()
    if app_id == 'imazon-sad-tool':
        FT_TABLE = 'areas'
        FT_TABLE_ID = '1089491'
    elif app_id == 'imazon-prototype':
        FT_TABLE = 'areas_testing'
        FT_TABLE_ID = '1869271'
    elif app_id == 'sad-training':
        FT_TABLE = 'areas_training'
        FT_TABLE_ID = '1898803'
    # NOTE(review): an unrecognized app id leaves FT_TABLE/FT_TABLE_ID
    # undefined — confirm whether a fallback is intended.
# Set secret keys for CSRF protection
SECRET_KEY = CSRF_SECRET_KEY
CSRF_SESSION_KEY = SESSION_KEY
CSRF_ENABLED = True
| StarcoderdataPython |
3484820 | <reponame>RnoldR/multi_gpu<gh_stars>1-10
"""
This module prepares midi file data and feeds it to the neural
network for training
"""
import sys
import json
import yaml
import time
import h5py
import random
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import keras
from keras.layers import Dense
from keras.layers import Dropout
from keras.layers import CuDNNLSTM, GRU, CuDNNGRU, Input
from keras import regularizers
from keras.utils import multi_gpu_model
from keras.models import Model
import tensorflow as tf
LayerType = CuDNNGRU
def ohe(matrix, n):
    """
    One-hot encode an integer matrix.

    Args:
        matrix: 2-D integer array whose entries are class indices in [0, n).
        n: number of categories (depth of the one-hot axis).

    Returns:
        Integer array of shape (rows, cols, n) with exactly one 1 per
        (row, col) position.
    """
    # np.int was removed in NumPy 1.24; the builtin int yields the same dtype.
    cube = np.zeros((matrix.shape[0], matrix.shape[1], n), dtype=int)
    # Vectorized scatter replaces the original O(rows * cols) Python loop.
    rows, cols = np.indices(matrix.shape)
    cube[rows, cols, matrix] = 1
    return cube
class SequenceTrainer():
    """
    Grid-searches RNN sequence models (CuDNN GRU/LSTM) over model types,
    dropout rates, batch sizes and GPU counts, recording accuracy and
    wall-clock time per run into results.csv.
    """
    def get_par (self, pars, keys, default):
        """Walk nested dict `pars` by the path `keys`; return `default`
        when any key is missing."""
        try:
            p = pars
            for key in keys:
                p = p[key]
            return p
        except:
            return default
    def prepare_data(self, data, split):
        """
        The original input consists of a list of 5 matrices of sequence
        data (X) and a list of 5 matrices as target (Y)
        X[i].shape = (n, seq length, # of categories (usually 128))
        Y[i].shape = (n, # of categories)
        Args:
            data (list): list of X/Y_train, X/Y_val and X/Y_test
            split (list): List containing training fraction and validation fraction
        Returns:
            Four arrays: X_train, X_val, Y_train, Y_val
        """
        # Create one hot encoded vectors
        train_data = ohe(data[0], 128)
        val_data = ohe(data[1], 128)
        # Be sure that Y follows an X sequence: the last timestep of each
        # sequence becomes the prediction target.
        X_train = train_data[:, :-1, :]
        Y_train = train_data[:, -1:, :]
        X_val = val_data[:, :-1, :]
        Y_val = val_data[:, -1:, :]
        # Remove 2nd index from Y, which is one
        Y_train = Y_train.reshape((Y_train.shape[0], Y_train.shape[2]))
        Y_val = Y_val.reshape((Y_val.shape[0], Y_val.shape[2]))
        return X_train, X_val, Y_train, Y_val
    def single_input_model(self, X, Y, layers, dropout):
        """ Create a simple input/output network
        This model can be trained to associate one voice with one target.
        Args:
            X (list of arrays): contains input data
            Y (list of arrays): targets
            layers (list): list of two lists of layers to be created. The first
                list contains the sizes of RNN layers to be created;
                the second list the sizes of Dense layers
            dropout (float): dropout value; if > 0 a dropout layer is added
                to each RNN or Dense layer
        Returns:
            The model
        """
        rnn_layers = layers[0]
        dense_layers = layers[1]
        # In this test using the kernel regularizer = weight decay
        l2k = self.l2k # Weights regularizer
        l2a = self.l2a # activity regularizer
        l2r = self.l2r # recurrent regularizer
        print ('*** l2k =', l2k, 'l2a =', l2a, 'l2r =', l2r)
        input_layer = Input(shape=(X.shape[1], X.shape[2]), name='Input_Layer')
        # Stack recurrent layers: all but the last return full sequences so
        # the next RNN layer receives 3-D input.
        if len(rnn_layers) == 1:
            model = LayerType(rnn_layers[0],
                              kernel_regularizer=regularizers.l2(l2k),
                              recurrent_regularizer=regularizers.l2(l2r),
                              activity_regularizer=regularizers.l2(l2a),
                              name='RNN_1')(input_layer)
        else:
            model = LayerType(rnn_layers[0], return_sequences=True,
                              kernel_regularizer=regularizers.l2(l2k),
                              recurrent_regularizer=regularizers.l2(l2r),
                              activity_regularizer=regularizers.l2(l2a),
                              name='RNN_1')(input_layer)
            for layer in range(1, len(rnn_layers) - 1):
                model = LayerType(rnn_layers[layer],
                                  return_sequences=True,
                                  kernel_regularizer=regularizers.l2(l2k),
                                  recurrent_regularizer=regularizers.l2(l2r),
                                  activity_regularizer=regularizers.l2(l2a),
                                  name='RNN_' + str(layer+1))(model)
                if dropout > 0:
                    model= Dropout(dropout)(model)
            name = 'RNN_{:d}'.format(len(rnn_layers))
            model = LayerType(rnn_layers[-1],
                              kernel_regularizer=regularizers.l2(l2k),
                              recurrent_regularizer=regularizers.l2(l2r),
                              activity_regularizer=regularizers.l2(l2a),
                              name=name)(model)
            if dropout > 0:
                model= Dropout(dropout)(model)
        # Fully connected head on top of the recurrent stack.
        for i, layer in enumerate(dense_layers):
            model = Dense(layer, activation='relu',
                          kernel_regularizer=regularizers.l2(l2k),
                          activity_regularizer=regularizers.l2(l2a),
                          name='Dense_'+str(i))(model)
            #model = BatchNormalization()(model)
            if dropout > 0:
                model = Dropout(dropout)(model)
        model = Dense(Y.shape[1], activation='softmax', name='Dense_softmax')(model)
        main_model = Model(inputs=input_layer, outputs=[model])
        return main_model
    def setup_model(self, model_def, X, Y, dropout, gpu):
        """ Sets up a Neural Network to generate music
        Args:
            model_def (dict): model definition with 'model' (type name) and
                'layers' (RNN and Dense layer sizes)
            X (array): input sequences
            Y (array): input targets
            dropout (float): dropout fraction
            gpu (int): when > 1, a multi gpu model will be built
        Returns:
            the created model
        """
        model_type = model_def['model']
        layers = model_def['layers']
        if gpu > 1:
            # Build the template model on CPU, then replicate across GPUs.
            with tf.device("/cpu:0"):
                model = self.single_input_model(X, Y, layers, dropout)
            model = multi_gpu_model(model, gpus=gpu)
            print('Running a multi GPU model on a', model_type, 'model')
        else:
            with tf.device("/gpu:0"):
                model = self.single_input_model(X, Y, layers, dropout)
            print('Running a single GPU model on a', model_type, 'model')
        model.compile(optimizer=keras.optimizers.Adam (),
                      loss='categorical_crossentropy',
                      metrics=['accuracy'])
        return model
    def train(self, hp, model_def, data, dropout, batch_size,
              epochs, gpu):
        """Train one model configuration and return its Keras History."""
        hp = dict(hp)
        split = 0.8
        hp['batch_sizes'] = [batch_size]
        hp['dropouts'] = [dropout]
        X_train, X_val, Y_train, Y_val = self.prepare_data(data, split)
        print('X shape', X_train.shape)
        print('Number of training sequences:', len(X_train))
        print('Number of validation sequences:', len(X_val))
        print('Length of sequences is', X_train.shape[1])
        model = self.setup_model(model_def, X_train, Y_train, dropout, gpu)
        model.summary()
        print('\nStarted training the model')
        print('Batch size:', batch_size)
        print('GPU\'s:', gpu)
        print('Dropout:', dropout)
        history = model.fit(X_train, Y_train,
                            verbose=1,
                            epochs=epochs,
                            batch_size=batch_size,
                            validation_data=(X_val, Y_val))
        return history
    def train_sequence(self, hyper_pars, notes_file):
        """Run the full hyperparameter grid defined in `hyper_pars` and
        write one results.csv row per run."""
        self.hyper_pars = hyper_pars
        model_types = self.get_par(hyper_pars, ['models'], None)
        batch_sizes = self.get_par(hyper_pars, ['batch_sizes'], [128])
        dropouts = self.get_par(hyper_pars, ['dropouts'], [0.3])
        epochs = self.get_par(hyper_pars, ['epochs'], 100)
        gpus = self.get_par(hyper_pars, ['gpus'], [1])
        print('Tensorflow version:', tf.__version__)
        print('Keras version:', keras.__version__)
        #data = read_sequences(notes_file)
        #self.stf(data)
        #sys.exit()
        # NOTE(review): np.int was removed in NumPy >= 1.24; these calls need
        # int or np.int64 on modern NumPy.
        train_data = np.genfromtxt('train.csv', delimiter=',', dtype=np.int)
        val_data = np.genfromtxt('val.csv', delimiter=',', dtype=np.int)
        n_runs = len(model_types) * len(dropouts) * len(batch_sizes) * \
            len(gpus)
        columns = ['Epochs', 'Model type', 'Dropouts', 'Batch size', 'GPU\'s',
                   'Acc', 'Val. Acc', 'Time']
        run_no = 0
        df = pd.DataFrame(np.zeros((n_runs, len(columns))), columns=columns)
        for gpu in gpus:
            for index in model_types:
                model_def = hyper_pars[index]
                for dropout in dropouts:
                    for batch_size in batch_sizes:
                        print('==>', index, '=', str(model_def))
                        # Fixed regularization strengths consumed by
                        # single_input_model via self.
                        self.l2r = 1e-6
                        self.l2k = 1e-6
                        self.l2a = 0.0
                        df.iloc[run_no]['Epochs'] = epochs
                        df.iloc[run_no]['Model type'] = len(model_def)
                        df.iloc[run_no]['Dropouts'] = dropout
                        df.iloc[run_no]['Batch size'] = batch_size
                        df.iloc[run_no]['GPU\'s'] = gpu
                        model_time = time.time()
                        history = self.train(hyper_pars, model_def,
                                             (train_data, val_data),
                                             dropout, batch_size, epochs, gpu)
                        model_time = time.time() - model_time
                        print('CPU time: {:.0f}'.format(model_time))
                        hist = history.history
                        df.iloc[run_no]['Acc'] = hist['acc'][-1]
                        df.iloc[run_no]['Val. Acc'] = hist['val_acc'][-1]
                        df.iloc[run_no]['Time'] = int(model_time)
                        # Persist intermediate results after every run.
                        df.to_csv('results.csv')
                        print(df)
                        run_no += 1
                    # for
                # for
            # for
        # for
        return
## Class: music_trainer ###
def main(argv):
    """Entry point: seed RNGs, load hyperparameters and run the grid search.

    Args:
        argv: command-line arguments (currently unused).
    """
    # System wide constants for MusicData
    seed = 42
    np.random.seed(seed)
    random.seed(seed)
    notes_file = 'notes.h5'
    config_file = 'config.yaml'
    # Read hyperparameters. safe_load avoids arbitrary object construction
    # and, unlike the bare yaml.load, still works on PyYAML >= 6 where the
    # Loader argument became mandatory.
    with open(config_file) as yaml_data:
        hyper_pars = yaml.safe_load(yaml_data)
    # Initialize CPU time measurement
    seconds = time.time()
    SequenceTrainer().train_sequence(hyper_pars, notes_file)
    seconds = int(time.time() - seconds + 0.5)
    print('\n*** Ready in', seconds, 'seconds.')
if __name__ == '__main__':
    # Show all dataframe columns in the results printout.
    pd.set_option('display.max_columns', None)
    main(sys.argv[1:])
| StarcoderdataPython |
11366939 | <filename>codenames/preprocessing/preprocessor.py
from typing import List, TypeVar, Tuple
from numpy import ndarray
T = TypeVar('T')


def flatten(nested_list: List[List[T]]) -> List[T]:
    """Concatenate the sublists of *nested_list* into a single flat list."""
    flat: List[T] = []
    for sublist in nested_list:
        flat.extend(sublist)
    return flat
class Preprocessor:
    """Base class for preprocessors that split an image into labelled cells."""

    def process(self, image: ndarray) -> List[Tuple[ndarray, str]]:
        """Split *image* into (cell, label) pairs. Subclasses must override."""
        raise NotImplementedError()

    def process_batch(self, images: List[ndarray]) -> List[Tuple[ndarray, str]]:
        """Process each image in order and concatenate all resulting pairs."""
        results: List[Tuple[ndarray, str]] = []
        for image in images:
            results.extend(self.process(image))
        return results
| StarcoderdataPython |
6463918 | <filename>webhook/admin.py
# Copyright 2004-present, Facebook. All Rights Reserved.
from django.contrib import admin
from .models import WebhookNotification
# Expose WebhookNotification in the Django admin with the default ModelAdmin.
admin.site.register(WebhookNotification)
| StarcoderdataPython |
6591045 | from enum import Enum
class Category(Enum):
    """Closed set of content categories."""
    # NOTE(review): the integer values look like stable identifiers that may
    # be persisted elsewhere — confirm before renumbering.
    GUIDE = 1
    CULTURE = 2
    EXERCISES = 3
199092 | <filename>botengine/QueryMessage.py
import json
import requests
class Message(object):
    """
    Message request class.

    Builds and sends simple text queries to a botengine agent over its
    HTTP query endpoint.
    """

    @property
    def query(self):
        """
        Query text to send to the agent.

        Defaults to None; the caller must set this field before sending
        the request, otherwise _prepare_json_body raises ValueError.
        """
        return self._query

    @query.setter
    def query(self, query):
        self._query = query

    @property
    def parameters(self):
        """
        Optional dict of extra query parameters (default: {}).
        """
        return self._parameters

    @parameters.setter
    def parameters(self, parameters):
        # isinstance (rather than the original strict type() check) also
        # accepts dict subclasses such as collections.OrderedDict.
        if not isinstance(parameters, dict):
            raise TypeError('parameters should be a dict')
        self._parameters = parameters

    @property
    def developer_access_token(self):
        """
        The developer access token used to authenticate with the
        botengine agent.
        """
        return self._developer_access_token

    @developer_access_token.setter
    def developer_access_token(self, developer_access_token):
        self._developer_access_token = developer_access_token

    def __init__(self, developer_access_token, base_url, session_id):
        self.query = None
        self.parameters = {}
        self.developer_access_token = developer_access_token
        self.base_url = base_url
        self.session_id = session_id

    def _prepare_headers(self):
        # Bearer-token authorization plus a JSON content type.
        Authorization = 'Bearer ' + self.developer_access_token
        return {'Content-Type': 'application/json; charset=utf-8',
                'Authorization': Authorization}

    def _prepare_json_body(self):
        # The 'parameters' entry is only included when non-empty.
        if self.query is None:
            raise ValueError("query cannot be None")
        if self.parameters:
            return {'sessionId': self.session_id, 'query': self.query,
                    'parameters': self.parameters}
        else:
            return {'sessionId': self.session_id, 'query': self.query}

    def getresponse(self):
        """
        Send all the data to agent and wait for response
        """
        response = requests.post(self.base_url, headers=self._prepare_headers(),
                                 json=self._prepare_json_body())
        return response
| StarcoderdataPython |
12802977 | <gh_stars>1000+
# Copyright 2019 The flink-ai-extended Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
from pyflink.java_gateway import get_gateway
class TFConfig(object):
    """Configuration holder for a TensorFlow-on-Flink job; converts itself
    into the corresponding Java TFConfig via the Py4J gateway."""
    def __init__(self, num_worker, num_ps, properties, python_file, func, env_path):
        """
        :param num_worker: the number of TF workers
        :param num_ps: the number of TF PS (parameter servers)
        :param properties: TF properties
        :param python_file: the python file, the entry python file
        :param func: the entry function name in the first python file
        :param env_path: the path of env
        """
        self._num_worker = num_worker
        self._num_ps = num_ps
        self._properties = properties
        self._python_file = python_file
        self._func = func
        self._env_path = env_path
    def java_config(self):
        """Build the Java-side TFConfig object through the JVM gateway."""
        return get_gateway().jvm.com.alibaba.flink.ml.tensorflow.client.TFConfig(self._num_worker,
                                                                                 self._num_ps,
                                                                                 self._properties,
                                                                                 self._python_file,
                                                                                 self._func,
                                                                                 self._env_path)
| StarcoderdataPython |
3527264 | <reponame>EhrmannGit/lingvodoc<gh_stars>1-10
from lingvodoc.scripts.dictionary_dialeqt_converter import convert_all
from lingvodoc.queue.celery import celery
@celery.task
def async_convert_dictionary_new(dictionary_client_id,
                                 dictionary_object_id,
                                 blob_client_id,
                                 blob_object_id,
                                 client_id,
                                 language_client_id,
                                 language_object_id,
                                 gist_client_id,
                                 gist_object_id,
                                 sqlalchemy_url,
                                 storage,
                                 locale_id,
                                 task_key,
                                 cache_kwargs):
    """
    Celery task: convert a Dialeqt dictionary blob asynchronously.

    All identifiers and configuration are forwarded to convert_all; note
    that convert_all's parameter order differs from this signature (it
    takes language ids before client_id).
    """
    convert_all(dictionary_client_id,
                dictionary_object_id,
                blob_client_id,
                blob_object_id,
                language_client_id,
                language_object_id,
                client_id,
                gist_client_id,
                gist_object_id,
                sqlalchemy_url,
                storage,
                locale_id,
                task_key,
                cache_kwargs
                )
    return
| StarcoderdataPython |
11353649 | <reponame>afaucon/pywindrvmap<filename>windrvmap/__init__.py
from .__info__ import __package_name__
from .__info__ import __description__
from .__info__ import __url__
from .__info__ import __version__
from .__info__ import __author__
from .__info__ import __author_email__
from .__info__ import __license__
from .__info__ import __copyright__
from .windrvmap import ALL, USED, AVAILABLE, PHYSICAL, SHORTCUT, NETWORK_SHORTCUT, LOCAL_SHORTCUT
from .windrvmap import Drives
from .windrvmap import Config | StarcoderdataPython |
3457284 | from App.Routes.auth import r_auth
from App.Routes.my_profile import my_profile
from App.Routes.main import r_main
from App.Routes.hosting_services import r_hostingservices
from App.Routes.server_managetment import r_servermanagment
def registerRoutes(app):
    """Attach every application blueprint to *app*.

    NOTE(review): the method name ``registerBlueprint`` (camelCase) is
    assumed to exist on the app object -- Flask's own method is
    ``register_blueprint``; confirm which API this app exposes.
    """
    blueprints = (r_auth, my_profile, r_main, r_hostingservices, r_servermanagment)
    for blueprint in blueprints:
        app.registerBlueprint(blueprint)
| StarcoderdataPython |
9619401 | <filename>tator/transcode/make_thumbnails.py<gh_stars>1-10
#!/usr/bin/env python
import argparse
import subprocess
import os
import json
import logging
import tempfile
from PIL import Image
from ..util import get_api
from ..util._upload_file import _upload_file
from .transcode import get_length_info
from ..openapi.tator_openapi.models import MessageResponse
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def parse_args():
    """Build the CLI parser for thumbnail generation and parse ``sys.argv``."""
    parser = argparse.ArgumentParser(description='Makes thumbnails for a video.')
    # (flags, keyword options) for every accepted argument.
    arg_specs = [
        (('--host',), dict(type=str, default='https://www.tatorapp.com', help='Host URL.')),
        (('--token',), dict(type=str, help='REST API token.')),
        (('--media',), dict(type=int, help='Unique integer identifying a media.')),
        (('input',), dict(type=str, help='Path to input file.')),
        (("-o", "--output"), dict(type=str, help='Path to output thumbnail.')),
        (("-g", "--gif"), dict(type=str, help='Path to output thumbnail gif.')),
    ]
    for flags, options in arg_specs:
        parser.add_argument(*flags, **options)
    return parser.parse_args()
def get_metadata(path):
    """Probe *path* with ffprobe and return ``(codec, fps, num_frames, width, height)``.

    Runs ffprobe on the first video stream and combines the raw stream
    fields with the frame/fps computation from ``get_length_info``.
    """
    cmd = [
        "ffprobe",
        "-v", "error",
        "-show_entries", "stream",
        "-print_format", "json",
        "-select_streams", "v",
        "{}".format(path)
    ]
    probe_output = subprocess.run(cmd, stdout=subprocess.PIPE, check=True).stdout
    logger.info("Got info = {}".format(probe_output))
    # ffprobe emits JSON; we only care about the first (selected) video stream.
    stream = json.loads(probe_output)["streams"][0]
    fps, num_frames = get_length_info(stream)
    return (stream["codec_name"], fps, num_frames,
            stream["width"], stream["height"])
def make_thumbnails(host, token, media_id, video_path, thumb_path, thumb_gif_path):
    """ Makes thumbnails and gets metadata for original file.

    Probes the video, renders a still thumbnail and an animated gif
    thumbnail with ffmpeg, uploads both to the Tator host and records their
    image definitions plus the probed video metadata on the media object.

    :param host: Tator host URL.
    :param token: REST API token.
    :param media_id: Unique integer identifying the media to update.
    :param video_path: Path to the source video file.
    :param thumb_path: Output path for the still thumbnail.
    :param thumb_gif_path: Output path for the animated gif thumbnail.
    """
    # Get metadata for original file.
    codec, fps, num_frames, width, height = get_metadata(video_path)
    # Create thumbnail.
    cmd = ["ffmpeg", "-y", "-i", video_path, "-vf", "scale=256:-1", "-vframes", "1", thumb_path]
    subprocess.run(cmd, check=True)
    with tempfile.TemporaryDirectory() as dirname:
        # Scale presentation timestamps so roughly 10 frames of output span
        # the whole clip at 3 fps.
        pts_scale = (fps / 3) * (10 / num_frames)
        # Create gif thumbnail.
        cmd1 = ["ffmpeg", "-y"]
        if num_frames > 10000:
            # For long videos decode only keyframes to keep this fast.
            cmd2 = ["-skip_frame", "nokey"]
        else:
            cmd2 = []
        cmd3 = ["-i", video_path, "-vf", f"scale=256:-1:flags=lanczos,setpts={pts_scale}*PTS",
                "-r", "3", os.path.join(dirname, "%09d.jpg")]
        cmd = cmd1 + cmd2 + cmd3
        subprocess.run(cmd, check=True)
        # Second pass: build an optimized palette and assemble the gif.
        cmd = [
            "ffmpeg", "-y", "-r", "3", "-i", os.path.join(dirname, '%09d.jpg'), "-vf",
            "split[s0][s1];[s0]palettegen[p];[s1][p]paletteuse",
            "-r", "3",
            thumb_gif_path
        ]
        subprocess.run(cmd, check=True)
    # Upload thumbnail and thumbnail gif.
    api = get_api(host, token)
    media_obj = api.get_media(media_id)
    # _upload_file is a generator; drain it to completion.
    for progress, thumbnail_info in _upload_file(api, media_obj.project, thumb_path,
                                                 media_id=media_id,
                                                 filename=os.path.basename(thumb_path)):
        pass
    for progress, thumbnail_gif_info in _upload_file(api, media_obj.project, thumb_gif_path,
                                                     media_id=media_id,
                                                     filename=os.path.basename(thumb_gif_path)):
        pass
    # Open images to get output resolution.
    thumb_image = Image.open(thumb_path)
    thumb_gif_image = Image.open(thumb_gif_path)
    # Create image definitions for thumbnails.
    thumb_def = {'path': thumbnail_info.key,
                 'size': os.stat(thumb_path).st_size,
                 'resolution': [thumb_image.height, thumb_image.width],
                 'mime': f'image/{thumb_image.format.lower()}'}
    thumb_gif_def = {'path': thumbnail_gif_info.key,
                     'size': os.stat(thumb_gif_path).st_size,
                     'resolution': [thumb_gif_image.height, thumb_gif_image.width],
                     'mime': f'image/{thumb_gif_image.format.lower()}'}
    response = api.create_image_file(media_id, role='thumbnail', image_definition=thumb_def)
    assert isinstance(response, MessageResponse)
    response = api.create_image_file(media_id, role='thumbnail_gif', image_definition=thumb_gif_def)
    assert isinstance(response, MessageResponse)
    # Update the media object.
    response = api.update_media(media_id, media_update={
        'num_frames': num_frames,
        'fps': fps,
        'codec': codec,
        'width': width,
        'height': height,
    })
    assert isinstance(response, MessageResponse)
    logger.info(f'Thumbnail upload done! {response.message}')
if __name__ == '__main__':
    # Script entry point: parse CLI arguments and run thumbnail generation.
    args = parse_args()
    make_thumbnails(args.host, args.token, args.media, args.input, args.output, args.gif)
| StarcoderdataPython |
11270414 | r"""
Affine factorization crystal of type `A`
"""
#*****************************************************************************
# Copyright (C) 2014 <NAME> <anne at math.ucdavis.edu>
#
# Distributed under the terms of the GNU General Public License (GPL)
# http://www.gnu.org/licenses/
#******************************************************************************
from sage.misc.lazy_attribute import lazy_attribute
from sage.structure.parent import Parent
from sage.structure.element_wrapper import ElementWrapper
from sage.structure.unique_representation import UniqueRepresentation
from sage.categories.classical_crystals import ClassicalCrystals
from sage.categories.crystals import CrystalMorphism
from sage.categories.enumerated_sets import EnumeratedSets
from sage.categories.homset import Hom
from sage.combinat.root_system.cartan_type import CartanType
from sage.combinat.root_system.weyl_group import WeylGroup
from sage.combinat.rsk import RSK
class AffineFactorizationCrystal(UniqueRepresentation, Parent):
r"""
The crystal on affine factorizations with a cut-point, as introduced
by [MS14]_.
INPUT:
- ``w`` -- an element in an (affine) Weyl group or a skew shape of `k`-bounded partitions (if `k` was specified)
- ``n`` -- the number of factors in the factorization
- ``x`` -- (default: ``None``) the cut point; if not specified it is determined as the minimal missing residue in ``w``
- ``k`` -- (default: ``None``) positive integer, specifies that ``w`` is `k`-bounded or a `k+1`-core when specified
EXAMPLES::
sage: W = WeylGroup(['A',3,1], prefix='s')
sage: w = W.from_reduced_word([2,3,2,1])
sage: B = crystals.AffineFactorization(w,3); B
Crystal on affine factorizations of type A2 associated to s2*s3*s2*s1
sage: B.list()
[(1, s2, s3*s2*s1),
(1, s3*s2, s3*s1),
(1, s3*s2*s1, s3),
(s3, s2, s3*s1),
(s3, s2*s1, s3),
(s3*s2, s1, s3),
(s3*s2*s1, 1, s3),
(s3*s2*s1, s3, 1),
(s3*s2, 1, s3*s1),
(s3*s2, s3, s1),
(s3*s2, s3*s1, 1),
(s2, 1, s3*s2*s1),
(s2, s3, s2*s1),
(s2, s3*s2, s1),
(s2, s3*s2*s1, 1)]
We can also access the crystal by specifying a skew shape in terms of `k`-bounded partitions::
sage: crystals.AffineFactorization([[3,1,1],[1]], 3, k=3)
Crystal on affine factorizations of type A2 associated to s2*s3*s2*s1
We can compute the highest weight elements::
sage: hw = [w for w in B if w.is_highest_weight()]
sage: hw
[(1, s2, s3*s2*s1)]
sage: hw[0].weight()
(3, 1, 0)
And show that this crystal is isomorphic to the tableau model of the same weight::
sage: C = crystals.Tableaux(['A',2],shape=[3,1])
sage: GC = C.digraph()
sage: GB = B.digraph()
sage: GC.is_isomorphic(GB, edge_labels=True)
True
The crystal operators themselves move elements between adjacent factors::
sage: b = hw[0];b
(1, s2, s3*s2*s1)
sage: b.f(1)
(1, s3*s2, s3*s1)
The cut point `x` is not supposed to occur in the reduced words for `w`::
sage: B = crystals.AffineFactorization([[3,2],[2]],4,x=0,k=3)
Traceback (most recent call last):
...
ValueError: x cannot be in reduced word of s0*s3*s2
REFERENCES:
.. [MS14] <NAME> and <NAME>.
*Crystal approach to affine Schubert calculus*.
Int. Math. Res. Not. (2015).
:doi:`10.1093/imrn/rnv194`, :arxiv:`1408.0320`.
"""
    @staticmethod
    def __classcall_private__(cls, w, n, x = None, k = None):
        r"""
        Classcall to mend the input.
        TESTS::
            sage: A = crystals.AffineFactorization([[3,1],[1]], 4, k=3); A
            Crystal on affine factorizations of type A3 associated to s3*s2*s1
            sage: AC = crystals.AffineFactorization([Core([4,1],4),Core([1],4)], 4, k=3)
            sage: AC is A
            True
        """
        # When ``k`` is given, ``w`` is a pair (outer, inner) of cores or
        # k-bounded partitions describing a skew shape; convert it to a
        # single affine Weyl group element before hitting the
        # UniqueRepresentation cache.
        if k is not None:
            from sage.combinat.core import Core
            from sage.combinat.partition import Partition
            W = WeylGroup(['A',k,1], prefix='s')
            if isinstance(w[0], Core):
                w = [w[0].to_bounded_partition(), w[1].to_bounded_partition()]
            else:
                w = [Partition(w[0]), Partition(w[1])]
            w0 = W.from_reduced_word(w[0].from_kbounded_to_reduced_word(k))
            w1 = W.from_reduced_word(w[1].from_kbounded_to_reduced_word(k))
            # The skew shape outer/inner corresponds to w0 * w1^{-1}.
            w = w0*(w1.inverse())
        return super(AffineFactorizationCrystal, cls).__classcall__(cls, w, n, x)
    def __init__(self, w, n, x = None):
        r"""
        EXAMPLES::
            sage: B = crystals.AffineFactorization([[3,2],[2]],4,x=0,k=3)
            Traceback (most recent call last):
            ...
            ValueError: x cannot be in reduced word of s0*s3*s2
            sage: B = crystals.AffineFactorization([[3,2],[2]],4,k=3)
            sage: B.x
            1
            sage: B.w
            s0*s3*s2
            sage: B.k
            3
            sage: B.n
            4
        TESTS::
            sage: W = WeylGroup(['A',3,1], prefix='s')
            sage: w = W.from_reduced_word([2,3,2,1])
            sage: B = crystals.AffineFactorization(w,3)
            sage: TestSuite(B).run()
        """
        Parent.__init__(self, category = ClassicalCrystals())
        # n: number of factors; k: rank, so residues live in Z/(k+1)Z.
        self.n = n
        self.k = w.parent().n-1
        self.w = w
        cartan_type = CartanType(['A',n-1])
        self._cartan_type = cartan_type
        from sage.combinat.sf.sf import SymmetricFunctions
        from sage.rings.all import QQ
        Sym = SymmetricFunctions(QQ)
        s = Sym.schur()
        # The Schur support of the Stanley symmetric function of w gives the
        # weights of the highest weight vectors.
        support = s(w.stanley_symmetric_function()).support()
        # Pad each partition to length n and reverse it (weakly increasing)
        # to obtain a factor-length profile for a factorization.
        support = [ [0]*(n-len(mu))+[mu[len(mu)-i-1] for i in range(len(mu))] for mu in support]
        generators = [tuple(p) for mu in support for p in affine_factorizations(w,n,mu)]
        #generators = [tuple(p) for p in affine_factorizations(w, n)]
        self.module_generators = [self(t) for t in generators]
        # Default the cut point x to the minimal residue missing from the
        # first generator's reduced words (0 if there are no generators).
        if x is None:
            if generators != []:
                x = min( set(range(self.k+1)).difference(set(
                    sum([i.reduced_word() for i in generators[0]],[]))))
            else:
                x = 0
        if x in set(w.reduced_word()):
            raise ValueError("x cannot be in reduced word of {}".format(w))
        self.x = x
    def _repr_(self):
        r"""
        EXAMPLES::
            sage: W = WeylGroup(['A',3,1], prefix='s')
            sage: w = W.from_reduced_word([3,2,1])
            sage: crystals.AffineFactorization(w,4)
            Crystal on affine factorizations of type A3 associated to s3*s2*s1
            sage: crystals.AffineFactorization([[3,1],[1]], 4, k=3)
            Crystal on affine factorizations of type A3 associated to s3*s2*s1
        """
        # Show the classical type A_{n-1} and the underlying Weyl element.
        return "Crystal on affine factorizations of type A{} associated to {}".format(self.n-1, self.w)
# temporary workaround while an_element is overriden by Parent
_an_element_ = EnumeratedSets.ParentMethods._an_element_
    @lazy_attribute
    def _tableaux_isomorphism(self):
        """
        Return the isomorphism from ``self`` to the tableaux model.
        EXAMPLES::
            sage: W = WeylGroup(['A',3,1], prefix='s')
            sage: w = W.from_reduced_word([3,2,1])
            sage: B = crystals.AffineFactorization(w,4)
            sage: B._tableaux_isomorphism
            ['A', 3] Crystal morphism:
              From: Crystal on affine factorizations of type A3 associated to s3*s2*s1
              To:   The crystal of tableaux of type ['A', 3] and shape(s) [[3]]
            sage: W = WeylGroup(['A',3,1], prefix='s')
            sage: w = W.from_reduced_word([2,1,3,2])
            sage: B = crystals.AffineFactorization(w,3)
            sage: B._tableaux_isomorphism
            ['A', 2] Crystal morphism:
              From: Crystal on affine factorizations of type A2 associated to s2*s3*s1*s2
              To:   The crystal of tableaux of type ['A', 2] and shape(s) [[2, 2]]
        """
        # Constructing the tableaux crystal
        from sage.combinat.crystals.tensor_product import CrystalOfTableaux
        def mg_to_shape(mg):
            # Turn a highest weight vector's weight into a partition by
            # dropping trailing zeros.
            l = list(mg.weight().to_vector())
            while l and l[-1] == 0:
                l.pop()
            return l
        sh = [mg_to_shape(mg) for mg in self.highest_weight_vectors()]
        C = CrystalOfTableaux(self.cartan_type(), shapes=sh)
        phi = FactorizationToTableaux(Hom(self, C, category=self.category()))
        # Register so coercions between the two models work transparently.
        phi.register_as_coercion()
        return phi
class Element(ElementWrapper):
        def e(self, i):
            r"""
            Return the action of `e_i` on ``self``.
            EXAMPLES::
                sage: B = crystals.AffineFactorization([[3,1],[1]], 4, k=3)
                sage: W = B.w.parent()
                sage: t = B((W.one(),W.one(),W.from_reduced_word([3]),W.from_reduced_word([2,1]))); t
                (1, 1, s3, s2*s1)
                sage: t.e(1)
                (1, 1, 1, s3*s2*s1)
            """
            if i not in self.index_set():
                raise ValueError("i must be in the index set")
            # b = [unbracketed left letters, surviving right letters] in the
            # x-shifted alphabet; e_i acts only if the left side is nonempty.
            b = self.bracketing(i)
            if not b[0]:
                return None
            W = self.parent().w.parent()
            x = self.parent().x
            k = self.parent().k
            n = self.parent().n
            # Remove the letter at residue min(b[0]) (shifted back by x) from
            # the (n-i-1)-th factor ...
            a = min(b[0])
            left = [j for j in (self.value[n-i-1]).reduced_word() if j != (a+x)%(k+1)]
            right = [(j-x)%(k+1) for j in (self.value[n-i]).reduced_word()]
            # ... and insert the largest available smaller letter (plus one)
            # into the (n-i)-th factor, kept sorted decreasingly.
            m = max([j for j in range(a) if (j+x)%(k+1) not in left])
            right += [m+1]
            right.sort(reverse=True)
            right = [(j+x)%(k+1) for j in right]
            # Rebuild the factorization with the two modified factors.
            t = [self.value[j] for j in range(n-i-1)] + [W.from_reduced_word(left)] + [W.from_reduced_word(right)] + [self.value[j] for j in range(n-i+1,n)]
            return self.parent()(tuple(t))
        def f(self, i):
            r"""
            Return the action of `f_i` on ``self``.
            EXAMPLES::
                sage: B = crystals.AffineFactorization([[3,1],[1]], 4, k=3)
                sage: W = B.w.parent()
                sage: t = B((W.one(),W.one(),W.from_reduced_word([3]),W.from_reduced_word([2,1]))); t
                (1, 1, s3, s2*s1)
                sage: t.f(2)
                (1, s3, 1, s2*s1)
                sage: t.f(1)
                (1, 1, s3*s2, s1)
            """
            if i not in self.index_set():
                raise ValueError("i must be in the index set")
            # Mirror image of ``e``: f_i acts only if unbracketed letters
            # survive on the right side.
            b = self.bracketing(i)
            if not b[1]:
                return None
            W = self.parent().w.parent()
            x = self.parent().x
            k = self.parent().k
            n = self.parent().n
            # Remove the letter at residue max(b[1]) (shifted back by x) from
            # the (n-i)-th factor ...
            a = max(b[1])
            right = [j for j in (self.value[n-i]).reduced_word() if j != (a+x)%(k+1)]
            left = [(j-x)%(k+1) for j in (self.value[n-i-1]).reduced_word()]
            # ... and insert the smallest available larger letter (minus one)
            # into the (n-i-1)-th factor, kept sorted decreasingly.
            m = min([j for j in range(a+1,k+2) if (j+x)%(k+1) not in right])
            left += [m-1]
            left.sort(reverse=True)
            left = [(j+x)%(k+1) for j in left]
            # Rebuild the factorization with the two modified factors.
            t = [self.value[j] for j in range(n-i-1)] + [W.from_reduced_word(left)] + [W.from_reduced_word(right)] + [self.value[j] for j in range(n-i+1,n)]
            return self.parent()(tuple(t))
        def bracketing(self, i):
            r"""
            Removes all bracketed letters between `i`-th and `i+1`-th entry.
            EXAMPLES::
                sage: B = crystals.AffineFactorization([[3,1],[1]], 3, k=3, x=4)
                sage: W = B.w.parent()
                sage: t = B((W.one(),W.from_reduced_word([3]),W.from_reduced_word([2,1]))); t
                (1, s3, s2*s1)
                sage: t.bracketing(1)
                [[3], [2, 1]]
            """
            n = self.parent().n
            x = self.parent().x
            k = self.parent().k
            # Factors are stored right-to-left: the i-th entry is value[n-i].
            right = (self.value[n-i]).reduced_word()
            left = (self.value[n-i-1]).reduced_word()
            # Shift residues by the cut point x so letters compare linearly.
            right_n = [(j-x)%(k+1) for j in right]
            left_n = [(j-x)%(k+1) for j in left]
            left_unbracketed = []
            # Pair each left letter (largest first) with the smallest
            # strictly larger right letter; unpaired letters survive.
            while left_n:
                m = max(left_n)
                left_n.remove(m)
                l = [j for j in right_n if j>m]
                if l:
                    right_n.remove(min(l))
                else:
                    left_unbracketed += [m]
            # Return [surviving left letters, surviving right letters].
            return [[j for j in left_unbracketed],[j for j in right_n]]
        def to_tableau(self):
            """
            Return the tableau representation of ``self``.
            Uses the recording tableau of a minor variation of
            Edelman-Greene insertion. See Theorem 4.11 in [MS14]_.
            EXAMPLES::
                sage: W = WeylGroup(['A',3,1], prefix='s')
                sage: w = W.from_reduced_word([2,1,3,2])
                sage: B = crystals.AffineFactorization(w,3)
                sage: for x in B:
                ....:     x
                ....:     x.to_tableau().pp()
                (1, s2*s1, s3*s2)
                1  1
                2  2
                (s2, s1, s3*s2)
                1  1
                2  3
                (s2, s3*s1, s2)
                1  2
                2  3
                (s2*s1, 1, s3*s2)
                1  1
                3  3
                (s2*s1, s3, s2)
                1  2
                3  3
                (s2*s1, s3*s2, 1)
                2  2
                3  3
            """
            # Delegate to the (cached) crystal isomorphism onto tableaux.
            return self.parent()._tableaux_isomorphism(self)
def affine_factorizations(w, l, weight=None):
    r"""
    Return all factorizations of ``w`` into ``l`` factors or of weight ``weight``.
    INPUT:
    - ``w`` -- an (affine) permutation or element of the (affine) Weyl group
    - ``l`` -- nonnegative integer
    - ``weight`` -- (default: None) tuple of nonnegative integers specifying the length of the factors
    EXAMPLES::
        sage: W = WeylGroup(['A',3,1], prefix='s')
        sage: w = W.from_reduced_word([3,2,3,1,0,1])
        sage: from sage.combinat.crystals.affine_factorization import affine_factorizations
        sage: affine_factorizations(w,4)
        [[s2, s3, s0, s2*s1*s0],
         [s2, s3, s2*s0, s1*s0],
         [s2, s3, s2*s1*s0, s1],
         [s2, s3*s2, s0, s1*s0],
         [s2, s3*s2, s1*s0, s1],
         [s2, s3*s2*s1, s0, s1],
         [s3*s2, s3, s0, s1*s0],
         [s3*s2, s3, s1*s0, s1],
         [s3*s2, s3*s1, s0, s1],
         [s3*s2*s1, s3, s0, s1]]
        sage: W = WeylGroup(['A',2], prefix='s')
        sage: w0 = W.long_element()
        sage: affine_factorizations(w0,3)
        [[1, s1, s2*s1],
         [1, s2*s1, s2],
         [s1, 1, s2*s1],
         [s1, s2, s1],
         [s1, s2*s1, 1],
         [s2, s1, s2],
         [s2*s1, 1, s2],
         [s2*s1, s2, 1]]
        sage: affine_factorizations(w0,3,(0,1,2))
        [[1, s1, s2*s1]]
        sage: affine_factorizations(w0,3,(1,1,1))
        [[s1, s2, s1], [s2, s1, s2]]
        sage: W = WeylGroup(['A',3], prefix='s')
        sage: w0 = W.long_element()
        sage: affine_factorizations(w0,6,(1,1,1,1,1,1))
        [[s1, s2, s1, s3, s2, s1],
         [s1, s2, s3, s1, s2, s1],
         [s1, s2, s3, s2, s1, s2],
         [s1, s3, s2, s1, s3, s2],
         [s1, s3, s2, s3, s1, s2],
         [s2, s1, s2, s3, s2, s1],
         [s2, s1, s3, s2, s1, s3],
         [s2, s1, s3, s2, s3, s1],
         [s2, s3, s1, s2, s1, s3],
         [s2, s3, s1, s2, s3, s1],
         [s2, s3, s2, s1, s2, s3],
         [s3, s1, s2, s1, s3, s2],
         [s3, s1, s2, s3, s1, s2],
         [s3, s2, s1, s2, s3, s2],
         [s3, s2, s1, s3, s2, s3],
         [s3, s2, s3, s1, s2, s3]]
        sage: affine_factorizations(w0,6,(0,0,0,1,2,3))
        [[1, 1, 1, s1, s2*s1, s3*s2*s1]]
    """
    # A weight profile must specify the length of every factor.
    if weight is not None and l != len(weight):
        return []
    # Base case: only the identity admits an empty factorization.
    # (Previously this check was duplicated in both the weighted and the
    # unweighted branch.)
    if l == 0:
        if w.is_one():
            return [[]]
        return []
    if weight is None:
        # Unrestricted: peel off an arbitrary left Pieri factor u = w*v^{-1}
        # and recurse on the remainder v with one factor fewer.
        return [[u] + p
                for (u, v) in w.left_pieri_factorizations()
                for p in affine_factorizations(v, l-1)]
    # Restricted: the first factor must have length exactly weight[0].
    return [[u] + p
            for (u, v) in w.left_pieri_factorizations(max_length=weight[0])
            if u.length() == weight[0]
            for p in affine_factorizations(v, l-1, weight[1:])]
#####################################################################
## Crystal isomorphisms
class FactorizationToTableaux(CrystalMorphism):
    def _call_(self, x):
        """
        Return the image of ``x`` under ``self``.
        TESTS::
            sage: W = WeylGroup(['A',3,1], prefix='s')
            sage: w = W.from_reduced_word([2,1,3,2])
            sage: B = crystals.AffineFactorization(w,3)
            sage: phi = B._tableaux_isomorphism
            sage: [phi(b) for b in B]
            [[[1, 1], [2, 2]],
             [[1, 1], [2, 3]],
             [[1, 2], [2, 3]],
             [[1, 1], [3, 3]],
             [[1, 2], [3, 3]],
             [[2, 2], [3, 3]]]
        """
        # Build the two RSK input words: for the r-th factor (counted from
        # the right, starting at 1) record r once per letter, paired with
        # the sorted letters of that factor's reduced word.
        factor_numbers = []
        letters = []
        for r, factor in enumerate(reversed(x.value), start=1):
            word = factor.reduced_word()
            factor_numbers.extend([r] * len(word))
            # Sorting normalizes commuting generators; the reduced word is
            # most likely in reverse order to begin with.
            letters.extend(sorted(reversed(word)))
        # The image is the Edelman-Greene recording tableau.
        return self.codomain()(RSK(factor_numbers, letters, insertion='EG')[1])

    def is_isomorphism(self):
        """
        Return ``True`` as this is an isomorphism.
        EXAMPLES::
            sage: W = WeylGroup(['A',3,1], prefix='s')
            sage: w = W.from_reduced_word([2,1,3,2])
            sage: B = crystals.AffineFactorization(w,3)
            sage: phi = B._tableaux_isomorphism
            sage: phi.is_isomorphism()
            True
        TESTS::
            sage: W = WeylGroup(['A',4,1], prefix='s')
            sage: w = W.from_reduced_word([2,1,3,2,4,3,2,1])
            sage: B = crystals.AffineFactorization(w, 4)
            sage: phi = B._tableaux_isomorphism
            sage: all(phi(b).e(i) == phi(b.e(i)) and phi(b).f(i) == phi(b.f(i))
            ....:     for b in B for i in B.index_set())
            True
            sage: set(phi(b) for b in B) == set(phi.codomain())
            True
        """
        return True

    # The morphism is bijective, hence also an embedding and surjective.
    is_embedding = is_isomorphism
    is_surjective = is_isomorphism
| StarcoderdataPython |
4805110 | from table import TableUtil
import json
class NormaliseKraken():
    """Normalises raw Kraken websocket messages into LOB events and trades.

    Handles the "book-1000" order book channel (snapshots and incremental
    updates) and the "trade" channel.  Channel payloads arrive as lists of
    the form ``[channel_id, payload, channel_name, pair, ..., recv_ts]``;
    dict messages (heartbeats, subscription statuses) are logged and ignored.
    """
    # Returned whenever a message produces no events.
    # NOTE(review): this dict (and its inner lists) is shared; callers must
    # not mutate the returned value.
    NO_EVENTS = {"lob_events": [], "market_orders": []}

    # Class-level defaults kept for backward compatibility with code that
    # reads these names off the class; per-instance copies are created in
    # __init__ so two normalisers do not share book state.
    ACTIVE_BID_LEVELS = set()
    ACTIVE_ASK_LEVELS = set()
    QUOTE_NO = 2
    EVENT_NO = 0
    ORDER_ID = 0

    def __init__(self):
        # Useful utility functions for quickly creating table entries
        self.util = TableUtil()
        # BUG FIX: the level sets used to be mutated as shared class
        # attributes, so two instances would corrupt each other's state.
        self.ACTIVE_BID_LEVELS = set()
        self.ACTIVE_ASK_LEVELS = set()
        self.QUOTE_NO = 2
        self.EVENT_NO = 0
        self.ORDER_ID = 0

    def normalise(self, data) -> dict:
        """Convert one raw Kraken message into normalised event lists.

        :param data: decoded websocket payload; dicts are system/status
            messages, lists are channel data (receive timestamp appended).
        :return: dict with "lob_events" and "market_orders" lists.
        """
        # This function currently only supports LOB events and trade data.
        lob_events = []
        market_orders = []
        # Kraken specific feed data parsing
        if isinstance(data, dict):
            # System events (heartbeat, subscriptionStatus, ...) carry no data.
            print(f"Received message {json.dumps(data)}")
            return self.NO_EVENTS
        recv_ts = data[-1]
        feed = data[2]  # data[-3] is the channel name
        if feed == "book-1000":
            data = data[1]  # Dictionary of orderbook snapshot/updates
            if "bs" in data.keys():  # Snapshot bids
                for bid in data["bs"]:
                    self._handle_lob_update("bs", lob_events, bid, 1, recv_ts)
            if "as" in data.keys():  # Snapshot asks
                for ask in data["as"]:
                    self._handle_lob_update("as", lob_events, ask, 2, recv_ts)
            if "a" in data.keys():  # Incremental ask updates
                for ask in data["a"]:
                    self._handle_lob_update("a", lob_events, ask, 2, recv_ts)
            if "b" in data.keys():  # Incremental bid updates
                for bid in data["b"]:
                    self._handle_lob_update("b", lob_events, bid, 1, recv_ts)
        elif feed == "trade":
            for trade in data[1]:
                self._handle_market_order(market_orders, trade)
        else:
            print(f"Received message {json.dumps(data)}")
            return self.NO_EVENTS
        self.EVENT_NO += 1
        # Creating final normalised data dictionary which will be returned
        # to the Normaliser
        normalised = {
            "lob_events": lob_events,
            "market_orders": market_orders
        }
        return normalised

    def _handle_lob_update(self, key, lob_events, event, side, recv_ts):
        """Append one normalised LOB event for a book entry.

        :param key: Kraken book key ("bs"/"as" snapshot, "b"/"a" update).
        :param event: ``[price, volume, timestamp, ...]`` level entry.
        :param side: 1 for bid, 2 for ask.
        """
        # Entries of length 4 carry a republish flag ("r") -- skip them.
        if len(event) == 4:
            return
        price = float(event[0])
        size = float(event[1])
        ts = int(float(event[2])*10**3)  # seconds -> milliseconds
        if key == "as" or key == "bs":
            # Snapshot entries always insert a new level (action 2).
            lob_action = 2
            if key == "as":
                self.ACTIVE_ASK_LEVELS.add(price)
            else:
                self.ACTIVE_BID_LEVELS.add(price)
        elif key == "a":
            # Size 0 removes the level (3); unseen price inserts (2);
            # otherwise it is an in-place update (4).
            if size == 0.0:
                lob_action = 3
                if price in self.ACTIVE_ASK_LEVELS:
                    self.ACTIVE_ASK_LEVELS.remove(price)
            elif price not in self.ACTIVE_ASK_LEVELS:
                lob_action = 2
                self.ACTIVE_ASK_LEVELS.add(price)
            else:
                lob_action = 4
        elif key == "b":
            if size == 0.0:
                lob_action = 3
                if price in self.ACTIVE_BID_LEVELS:
                    self.ACTIVE_BID_LEVELS.remove(price)
            elif price not in self.ACTIVE_BID_LEVELS:
                lob_action = 2
                self.ACTIVE_BID_LEVELS.add(price)
            else:
                lob_action = 4
        lob_events.append(self.util.create_lob_event(
            quote_no=self.QUOTE_NO,
            event_no=self.EVENT_NO,
            side=side,
            price=price,
            size=size,
            lob_action=lob_action,
            send_timestamp=ts,
            receive_timestamp=recv_ts,
            order_type=0,
        ))
        self.QUOTE_NO += 1

    def _handle_market_order(self, market_orders, trade):
        """Append one normalised market order for a trade entry.

        Kraken trade entries are ``[price, volume, time, side, orderType,
        misc]``.
        """
        market_orders.append(self.util.create_market_order(
            order_id=self.ORDER_ID,
            price=float(trade[0]),
            # BUG FIX: previously scaled by 10e3 (= 1e4); seconds ->
            # milliseconds is *10**3, matching _handle_lob_update.
            timestamp=int(float(trade[2])*10**3),
            side=1 if trade[3] == "b" else 2,
            # BUG FIX: previously read trade[2] (the timestamp) as the
            # size; the traded volume is trade[1].
            size=float(trade[1]),
            msg_original_type=trade[4]
        ))
        self.ORDER_ID += 1
4831541 | #!/usr/bin/env python3
from pathlib import Path
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import subprocess
results_dir = Path.cwd() / "example_output" / "data" / "out"
def get_airfoil() -> pd.DataFrame:
    """Load the airfoil outline from ``results_dir`` and close the polygon.

    Returns the geometry with the first point repeated at the end (index 0
    duplicated, as before) so the outline forms a closed path for
    plotting/filling.
    """
    foil_geom_path = results_dir / "airfoil.csv"
    df = pd.read_csv(foil_geom_path)
    # Close the path by re-appending the first row.
    # BUG FIX: DataFrame.append was deprecated in pandas 1.4 and removed in
    # pandas 2.0; pd.concat is the supported equivalent.  Using iloc also
    # works for any number of columns instead of assuming exactly two.
    first_row = df.iloc[[0]]
    result = pd.concat([df, first_row])
    return result
def get_net_force(i: int) -> np.ndarray:
    """Read the net force for frame *i* from its CSV and flip its sign.

    The multiplication by -1 compensates for the wrong frame of reference
    used in the Swift simulation code.
    """
    force_path = results_dir / f"net_force_{i:04d}.csv"
    frame = pd.read_csv(force_path)
    return frame.values[0] * -1.0
def generate_png(i: int, result_path: Path, airfoil: pd.DataFrame) -> None:
    """Render frame *i*: particle positions, net-force vector and airfoil.

    Writes ``frame_{i:04d}.png`` into ``results_dir``.

    :param i: 1-based frame index (also selects the matching net-force CSV).
    :param result_path: CSV of particle positions (columns X, Y, ...).
    :param airfoil: closed airfoil outline as returned by ``get_airfoil``.
    """
    # figsize is image dimensions in inches
    # dpi is dots/inc, defaulting to 100.
    w_fig_pix = 1280
    h_fix_pix = 720
    dpi = 150.0
    figsize = (w_fig_pix / dpi, h_fix_pix / dpi)
    f = plt.figure(figsize=figsize, dpi=dpi)
    # Fixed world-coordinate window so every frame shares the same view.
    plt.xlim(0, 128)
    plt.ylim(0, 72)
    # Draw the particles
    df = pd.read_csv(result_path)
    xvals = df["X"].values
    yvals = df["Y"].values
    # vxvals = df["VX"].values
    # vyvals = df["VY"].values
    fig = plt.scatter(xvals, yvals, c="#00aaff", s=0.5, marker=".")
    # Overlay the net force, anchored on the first point.
    # And scaled. A lot.
    force = get_net_force(i) * 5
    x0 = airfoil.X.values[0]
    y0 = airfoil.Y.values[0]
    xf = x0 + force[0]
    yf = y0 + force[1]
    plt.plot([x0, xf], [y0, yf], linewidth=0.5, color="#556688")
    # Overlay the airfoil:
    plt.plot(
        airfoil.X.values, airfoil.Y.values, color="black", linewidth=0.5
    )
    plt.fill(
        airfoil.X.values, airfoil.Y.values, color="#aabbdd"
    )
    plt.axis("off")
    # Ditch the margins
    plt.margins(0.0)
    # Ensure generated PNG names have consecutive indices, to satisfy
    # ffmpeg.
    out_path = results_dir / f"frame_{i:04d}.png"
    # Try again to get the saved image to have the desired size
    # (9.6, 5.4) @ 100 dpi does not result in 960x540 px
    plt.tight_layout(pad=0.0, h_pad=0.0, w_pad=0.0, rect=(0, 0, 1, 1))
    f.savefig(out_path, bbox_inches="tight", pad_inches=0)
    # Free figure memory between frames.
    plt.close("all")
def make_movie() -> None:
    """Encode the rendered PNG frames into an H.264 MP4 with ffmpeg.

    Runs ffmpeg inside ``results_dir`` over the consecutively numbered
    ``frame_%04d.png`` files and writes ``movie.mp4`` to the CWD.
    For H.264 settings see https://trac.ffmpeg.org/wiki/Encode/H.264;
    for the image-sequence input pattern see
    https://stackoverflow.com/a/13591474/2826337 and
    https://unix.stackexchange.com/a/86945.
    """
    movie_path = Path.cwd() / "movie.mp4"
    cmd = ["ffmpeg"]
    cmd += ["-r", "30"]                      # frame rate
    cmd += ["-i", "frame_%04d.png"]          # consecutive frame pattern
    cmd += ["-c:v", "libx264"]               # H.264 encoder
    cmd += ["-pix_fmt", "yuv420p"]           # for Quicktime compatibility
    cmd += ["-tune", "animation", "-preset", "slow"]
    cmd += ["-y", str(movie_path)]           # overwrite without prompting
    subprocess.check_call(cmd, cwd=results_dir)
def main():
    """Render one PNG per positions CSV, then assemble the movie."""
    plt.close("all")
    foil = get_airfoil()
    position_files = sorted(results_dir.glob("positions_*.csv"))
    for frame_no, csv_path in enumerate(position_files, start=1):
        generate_png(frame_no, csv_path, foil)
    make_movie()
# Script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
6411659 | <filename>tests/test_decorator.py
import unittest
import unishark
import time
from unishark.exception import MultipleErrors
class DecoratorTestCase(unittest.TestCase):
    # Tests for unishark's ``data_driven`` and ``multi_threading_data_driven``
    # decorators: both parameter-expansion styles, cross-multiplication of
    # stacked decorators, thread-pool sizing, and error aggregation.

    def test_data_driven_json_style(self):
        # "JSON style": one dict per invocation, passed as keyword params.
        @unishark.data_driven(*[{'a': 1, 'b': 2, 'sum': 3}, {'a': 3, 'b': 4, 'sum': 7}])
        def mock_test(count, option=1, **param):
            count.append(1)
            self.assertEqual(option, 1)
            self.assertEqual(param['a']+param['b'], param['sum'])
        cnt = []
        mock_test(cnt)
        self.assertEqual(sum(cnt), 2)

    def test_data_driven_args_style(self):
        # "Args style": parallel lists per keyword; extra entries beyond the
        # shortest list (the 0 in ``a``) are evidently ignored -- 2 runs.
        @unishark.data_driven(a=[1, 3, 0], b=[2, 4], sum=[3, 7])
        def mock_test(count, option=1, **param):
            count.append(1)
            self.assertEqual(option, 1)
            self.assertEqual(param['a']+param['b'], param['sum'])
        cnt = []
        mock_test(cnt)
        self.assertEqual(sum(cnt), 2)

    def test_data_driven_cross_multiply(self):
        # Stacked decorators cross-multiply: 3x3 runs over left/right pairs,
        # checked against a precomputed multiplication table.
        @unishark.data_driven(left=list(range(3)), i=list(range(3)))
        @unishark.data_driven(right=list(range(3)), j=list(range(3)))
        def mock_test(res, **param):
            n = param['left'] * param['right']
            i = param['i']
            j = param['j']
            self.assertEqual(n, res[i*3+j])
        mock_test([0, 0, 0, 0, 1, 2, 0, 2, 4])

    def test_data_driven_invalid_input_1(self):
        # Passing a list positionally (instead of unpacked dicts) must fail.
        @unishark.data_driven([{'a': 1}, {'a': 3}])
        def mock_test(**param):
            print(param['a'])
        with self.assertRaises(TypeError):
            mock_test()

    def test_data_driven_invalid_input_2(self):
        # Unordered collections (set) are not valid value sequences.
        @unishark.data_driven(a=set(range(3)))
        def mock_test(**param):
            print(param['a'])
        with self.assertRaises(TypeError):
            mock_test()

    def test_multi_treads_data_driven_json_style(self):
        # Threaded variant, JSON style; first positional arg is the thread
        # count.  (NOTE: "treads" typo is kept -- renaming would change the
        # discovered test name.)
        @unishark.multi_threading_data_driven(2, *[{'a': 1, 'b': 2, 'sum': 3}, {'a': 3, 'b': 4, 'sum': 7}])
        def mock_test(count, option=1, **param):
            count.append(1)
            self.assertEqual(option, 1)
            self.assertEqual(param['a']+param['b'], param['sum'])
        cnt = []
        mock_test(cnt)
        self.assertEqual(sum(cnt), 2)

    def test_multi_threads_data_driven_args_style(self):
        # Threaded variant, args style.
        @unishark.multi_threading_data_driven(2, a=[1, 3, 0], b=[2, 4], sum=[3, 7])
        def mock_test(count, option=1, **param):
            count.append(1)
            self.assertEqual(option, 1)
            self.assertEqual(param['a']+param['b'], param['sum'])
        cnt = []
        mock_test(cnt)
        self.assertEqual(sum(cnt), 2)

    def test_multi_threads_data_driven_time(self):
        # Ten 1-second sleeps on 10 threads should finish well under the
        # 10 s a serial run would need.
        @unishark.multi_threading_data_driven(10, time=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
        def mock_test(count, **param):
            count.append(1)
            time.sleep(param['time'])
        start = time.time()
        cnt = []
        mock_test(cnt)
        self.assertEqual(sum(cnt), 10)
        taken = time.time() - start
        self.assertLess(taken, 3)

    def test_multi_threads_data_driven_errors(self):
        # Failures from worker threads are collected into MultipleErrors;
        # two of the six inputs raise here.
        @unishark.multi_threading_data_driven(6, time=[1, 2, 1, 1, 1, 3])
        def mock_test(**param):
            if param['time'] == 1:
                time.sleep(param['time'])
            else:
                raise AssertionError('Error thrown in thread.')
        try:
            mock_test()
            raise AssertionError('No MultipleErrors caught.')
        except MultipleErrors as e:
            self.assertEqual(len(e), 2)

    def test_multi_threads_data_driven_cross_multiply(self):
        # Stacked threaded decorators: the 2x3 product yields exactly one
        # failing combination (time1*time2 != 1).
        @unishark.multi_threading_data_driven(2, time1=[1, 2])
        @unishark.multi_threading_data_driven(3, time2=[1, 1, 1])
        def mock_test(**param):
            t = param['time1'] * param['time2']
            if t == 1:
                time.sleep(t)
            else:
                raise AssertionError('Error thrown in thread.')
        try:
            mock_test()
            raise AssertionError('No MultipleErrors caught.')
        except MultipleErrors as e:
            self.assertEqual(len(e), 1)

    def test_multi_threads_data_driven_single_thread(self):
        # Degenerate pool of one thread still behaves like data_driven.
        @unishark.multi_threading_data_driven(1, a=[1, 3], b=[2, 4], sum=[3, 7, 0])
        def mock_test(count, option=1, **param):
            count.append(1)
            self.assertEqual(option, 1)
            self.assertEqual(param['a']+param['b'], param['sum'])
        cnt = []
        mock_test(cnt)
        self.assertEqual(sum(cnt), 2)

    def test_multi_threads_data_driven_no_threads(self):
        # Omitting the thread count makes the first dict positional -> the
        # decorator itself raises TypeError at decoration time.
        with self.assertRaises(TypeError):
            @unishark.multi_threading_data_driven(*[{'a': 1, 'b': 2, 'sum': 3}, {'a': 3, 'b': 4, 'sum': 7}])
            def mock_test(**param):
                print('%d + %d = %d' % (param['a'], param['b'], param['sum']))

    def test_multi_threads_data_driven_invalid_threads_1(self):
        # Zero threads is rejected.
        with self.assertRaises(ValueError):
            @unishark.multi_threading_data_driven(0, time=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
            def mock_test(**param):
                time.sleep(param['time'])

    def test_multi_threads_data_driven_invalid_threads_2(self):
        # Negative thread counts are rejected.
        with self.assertRaises(ValueError):
            @unishark.multi_threading_data_driven(-1, time=[1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
            def mock_test(**param):
                time.sleep(param['time'])
if __name__ == '__main__':
    # Run the decorator test suite directly with verbose output.
    unittest.main(verbosity=2)
11267460 | from colors import Colors
class ActionException(Exception):
    """Signals that a requested action cannot be carried out.

    Caught by the ``Action.perform`` implementations below, which turn it
    into a user-facing (message, color) tuple.
    """
    pass
class Action:
    """Base class for a player action targeting a tile.

    Subclasses implement :meth:`perform`, returning a ``(message, color)``
    tuple describing the outcome.
    """

    def __init__(self, player, tile):
        self.player = player
        self.tile = tile

    def perform(self):
        """Execute the action; must be overridden by subclasses."""
        # BUG FIX: ``raise NotImplemented`` raised a confusing TypeError at
        # call time, because NotImplemented is a constant, not an exception.
        raise NotImplementedError
class Move(Action):
    """Walk the player onto the target tile."""

    def perform(self):
        try:
            self.player.move(self.tile)
            item = getattr(self.tile, "item", None)
            if not item:
                return ("You walk", Colors.LIGHT_GRAY)
            return ("You walk. There is a %s on the floor." % item,
                    Colors.LIGHT_GRAY)
        except ActionException as ex:
            return ("You can't go that way: %s" % str(ex), Colors.DARK_RED)
class Wait(Action):
    """A no-op turn that reports a configurable message.

    NOTE: deliberately does not call ``Action.__init__``; waiting has no
    player/tile target.
    """

    def __init__(self, message="You wait", color=None):
        self.message = message
        if color:
            self.color = color
        else:
            self.color = Colors.LIGHT_GRAY

    def perform(self):
        return (self.message, self.color)
class Drop(Action):
    """Drop *item* from the player's inventory onto the tile."""

    def __init__(self, player, tile, item):
        super().__init__(player, tile)
        self.item = item

    def perform(self):
        try:
            dropped = self.player.drop(self.tile, self.item)
        except ActionException as ex:
            return ("You can't drop that: %s" % ex, Colors.DARK_RED)
        return ("You drop the %s" % dropped, Colors.LIGHT_GRAY)
class PickUp(Action):
    """Pick up whatever item lies on the tile."""

    def perform(self):
        try:
            item = self.player.pickup(self.tile)
        except ActionException as ex:
            return ("You can't pick that up: %s" % ex, Colors.DARK_RED)
        return ("You pick up the %s" % item, Colors.LIGHT_GRAY)
class Open(Action):
    """Open the target tile (e.g. a door or chest)."""

    def perform(self):
        try:
            self.player.open(self.tile)
            return ("You open the %s" % self.tile, Colors.LIGHT_GRAY)
        except ActionException as ex:
            return ("You can't open the %s: %s" % (self.tile, ex), Colors.DARK_RED)
        except Exception:
            # Anything else (e.g. a tile with no ``open``) means there is
            # simply nothing here that can be opened.
            return ("There's nothing to open", Colors.DARK_RED)
class Close(Action):
    """Close the target tile (e.g. an open door)."""

    def perform(self):
        try:
            self.player.close(self.tile)
            return ("You close the %s" % (self.tile), Colors.LIGHT_GRAY)
        except ActionException as ex:
            return ("You can't close the %s: %s" % (self.tile, ex), Colors.DARK_RED)
        except Exception:
            # Anything else means there is nothing closable here.
            return ("There's nothing to close", Colors.DARK_RED)
class Use(Action):
    """Apply *item* to the target tile."""

    def __init__(self, player, tile, item):
        super().__init__(player, tile)
        self.item = item

    def perform(self):
        try:
            result = self.player.use(self.tile, self.item)
            outcome = result or "nothing happens"
            return ("You use the %s on the %s: %s" % (self.item, self.tile, outcome), Colors.LIGHT_GRAY)
        except ActionException as ex:
            return ("You can't use the %s on a %s: %s" % (self.item, self.tile, ex), Colors.DARK_RED)
        except Exception:
            # Unexpected failures read as player confusion, not a crash.
            return ("You don't know what to do with that", Colors.DARK_RED)
| StarcoderdataPython |
48389 | """ Test for act helpers """
import pytest
import act.api
def test_add_uri_fqdn() -> None:  # type: ignore
    """uri_facts() should decompose an fqdn-based uri into four facts."""
    client = act.api.Act("", None, "error")
    uri = "http://www.mnemonic.no/home"
    facts = act.api.helpers.uri_facts(client, uri)
    assert len(facts) == 4
    expected = [
        client.fact("componentOf").source("fqdn", "www.mnemonic.no").destination("uri", uri),
        client.fact("componentOf").source("path", "/home").destination("uri", uri),
        client.fact("scheme", "http").source("uri", uri),
        client.fact("basename", "home").source("path", "/home"),
    ]
    for fact in expected:
        assert fact in facts
def test_uri_should_fail() -> None:  # type: ignore
    """uri_facts() must reject strings that are not complete URIs."""
    client = act.api.Act("", None, "error")
    for bad_uri in ("http://", "www.mnemonic.no", "127.0.0.1"):
        with pytest.raises(act.api.base.ValidationError):
            act.api.helpers.uri_facts(client, bad_uri)
def test_add_uri_ipv4() -> None:  # type: ignore
    """uri_facts() should decompose an ipv4 uri with port into five facts."""
    client = act.api.Act("", None, "error")
    uri = "http://127.0.0.1:8080/home"
    facts = act.api.helpers.uri_facts(client, uri)
    assert len(facts) == 5
    expected = [
        client.fact("componentOf").source("ipv4", "127.0.0.1").destination("uri", uri),
        client.fact("componentOf").source("path", "/home").destination("uri", uri),
        client.fact("scheme", "http").source("uri", uri),
        client.fact("basename", "home").source("path", "/home"),
        client.fact("port", "8080").source("uri", uri),
    ]
    for fact in expected:
        assert fact in facts
def test_add_uri_ipv6() -> None:  # type: ignore
    """uri_facts() should decompose a bare ipv6 uri into two facts."""
    client = act.api.Act("", None, "error")
    uri = "http://[2001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b]"
    facts = act.api.helpers.uri_facts(client, uri)
    assert len(facts) == 2
    expected = [
        client.fact("scheme", "http").source("uri", uri),
        client.fact("componentOf")
        .source("ipv6", "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
        .destination("uri", uri),
    ]
    for fact in expected:
        assert fact in facts
def test_add_uri_ipv6_with_port_path_query() -> None:  # type: ignore
    """uri_facts() should handle ipv6 plus port, path and query parts."""
    client = act.api.Act("", None, "error")
    uri = "http://[2001:fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b]:8080/path?q=a"
    facts = act.api.helpers.uri_facts(client, uri)
    assert len(facts) == 6
    expected = [
        client.fact("scheme", "http").source("uri", uri),
        client.fact("componentOf")
        .source("ipv6", "fd00:a516:7c1b:17cd:6d81:2137:bd2a:2c5b")
        .destination("uri", uri),
        client.fact("port", "8080").source("uri", uri),
        client.fact("componentOf").source("path", "/path").destination("uri", uri),
        client.fact("basename", "path").source("path", "/path"),
        client.fact("componentOf").source("query", "q=a").destination("uri", uri),
    ]
    for fact in expected:
        assert fact in facts
| StarcoderdataPython |
4819712 | <filename>motivation/calc_dedup.py
"""Takes in the paths of two directories and reads all dump files. Computes md5 hashes of the dumped pages and performs analysis"""
import os
import sys
import hashlib
def compute_hash(chunk):
    """Return the hex SHA-1 digest of *chunk* (bytes)."""
    return hashlib.sha1(chunk).hexdigest()
def get_statistics_subpage(hash_table):
    """Return the total number of chunk occurrences recorded in *hash_table*.

    *hash_table* maps a hash digest to the list of occurrences (page ids or
    stored windows) for that digest; the total is the sum of list lengths.
    """
    # sum() over the values replaces the original manual counter loop and
    # the dead commented-out print statements.
    return sum(len(occurrences) for occurrences in hash_table.values())
def get_common_chunks(table1, total1, table2, total2):
    """Return the fraction of each table's chunks whose hash occurs in both.

    Args:
        table1, table2: dicts mapping hash digest -> list of occurrences.
        total1, total2: total occurrence counts for each table (as produced
            by ``get_statistics_subpage``); must be non-zero.

    Returns:
        (percent1, percent2): share of table1's / table2's chunks whose
        hash also appears in the other table.
    """
    # The original also built tuples_count and common_pages1/2, but never
    # used them; that dead code (and the commented-out prints) is removed.
    common_hashes = set(table1) & set(table2)
    count1 = sum(len(table1[key]) for key in common_hashes)
    count2 = sum(len(table2[key]) for key in common_hashes)
    return count1 / total1, count2 / total2
def read_dumps(dir, num_hashes, chunk_size):
    """Hash every sliding ``chunk_size``-byte window of each ``pages*`` dump
    file under *dir*, stopping after ``num_hashes`` chunks per file.

    Returns a dict mapping hex digest -> list of page ids, one entry per
    occurrence of that hash.

    NOTE(review): ``page_id`` is initialised but never incremented, so every
    occurrence is recorded as page 0 — confirm whether that is intended.
    """
    table = {}
    page_id = 0
    for subdir, _, files in os.walk(dir):
        for file in files:
            zero_fp = 0  # NOTE(review): unused
            if file[:5] == "pages":  # only process memory-dump files
                filename = os.path.join(subdir, file)
                num_pages = 0  # NOTE(review): unused
                # Read the binary file
                fo = open(filename, "rb")
                mem = fo.read()
                if mem:
                    chunks_found = 0
                    # Slide the window one byte at a time over the dump.
                    for i in range(len(mem) - chunk_size + 1):
                        start = i
                        end = i + chunk_size
                        chunk = mem[start:end]
                        # Compute hash
                        hash = compute_hash(chunk)
                        # Insert into table
                        # if hash != "0b8bf9fc37ad802cefa6733ec62b09d5f43a1b75":
                        chunks_found += 1
                        if hash in table:
                            table[hash].append(page_id)
                        else:
                            table[hash] = [page_id]
                        if chunks_found == num_hashes:
                            break
    return table
def read_dumps_rabin(dir, num_hashes, chunk_size):
    """Index each ``pages*`` dump at fixed anchors placed every
    ``2 * chunk_size`` bytes.

    At each anchor the first ``chunk_size`` bytes are hashed, and the full
    ``period``-byte window is stored so later matches can be extended
    byte-by-byte (see ``get_redundancy_rabin``).

    Returns a dict mapping hex digest -> list of distinct period-byte windows.
    """
    table = {}
    period = 2 * chunk_size  # anchor spacing and stored-window length
    for subdir, _, files in os.walk(dir):
        for file in files:
            if file[:5] == "pages":  # only process memory-dump files
                filename = os.path.join(subdir, file)
                num_pages = 0  # NOTE(review): unused
                # Read the binary file
                fo = open(filename, "rb")
                mem = fo.read()
                if mem:
                    chunks_found = 0
                    start = 0
                    while start < len(mem) - period + 1:
                        chunk = mem[start:start + chunk_size]
                        match_chunk = mem[start:start + period]
                        # Compute hash
                        hash = compute_hash(chunk)
                        # Insert into table
                        # if hash != "0b8bf9fc37ad802cefa6733ec62b09d5f43a1b75":
                        chunks_found += 1
                        if hash in table:
                            # De-duplicate identical period-byte windows.
                            flag = False
                            for matching_chunk in table[hash]:
                                if matching_chunk == match_chunk:
                                    flag = True
                                    break
                            if not flag:
                                table[hash].append(match_chunk)
                        else:
                            table[hash] = [match_chunk]
                        if chunks_found == num_hashes:
                            break
                        start += period
    return table
def get_redundancy_rabin(dir, chunk_size, ref_table):
    """Fraction of bytes in *dir*'s dump file that duplicate *ref_table*.

    Anchored windows are looked up in ``ref_table`` (built by
    ``read_dumps_rabin``); a hit on the first ``chunk_size`` bytes is then
    extended byte-by-byte up to the full ``2 * chunk_size`` window, and the
    longest match contributes to the duplicate-byte count.

    NOTE(review): ``duplicate_bytes``/``total_bytes`` are reset for every
    file, but the ``return`` sits outside all loops, so the result reflects
    only the last file walked — and raises ``NameError`` if no ``pages*``
    file exists. Confirm whether the counters should accumulate instead.
    """
    period = 2 * chunk_size
    for subdir, _, files in os.walk(dir):
        for file in files:
            duplicate_bytes = 0
            total_bytes = 0
            if file[:5] == "pages":
                filename = os.path.join(subdir, file)
                # Read the binary file
                fo = open(filename, "rb")
                mem = fo.read()
                if mem:
                    total_bytes += len(mem)
                    start = 0
                    while start < len(mem) - period + 1:
                        chunk = mem[start:start + chunk_size]
                        match_chunk = mem[start:start + period]
                        # Compute hash
                        hash = compute_hash(chunk)
                        # Insert into table
                        # if hash != "0b8bf9fc37ad802cefa6733ec62b09d5f43a1b75":
                        if hash in ref_table:
                            # Among all stored windows with this hash, keep
                            # the longest byte-wise match.
                            max_length = 0
                            for matching_chunk in ref_table[hash]:
                                match_length = 0
                                if matching_chunk[:chunk_size] == chunk:
                                    # Check the remaining bytes
                                    match_length = chunk_size
                                    for i in range(period - chunk_size):
                                        if matching_chunk[chunk_size +
                                                          i] == match_chunk[
                                                              chunk_size + i]:
                                            match_length += 1
                                        else:
                                            break
                                if match_length > max_length:
                                    max_length = match_length
                            duplicate_bytes += max_length
                        start += period
    return float(duplicate_bytes) / total_bytes
def calc(name1, name2, chunk_size):
    """Cross-compare two dump directories using sliding-window chunking.

    Prints and returns the (percent1, percent2) overlap fractions.
    """
    table_a = read_dumps(name1, 5000000, chunk_size)
    table_b = read_dumps(name2, 5000000, chunk_size)
    total_a = get_statistics_subpage(table_a)
    total_b = get_statistics_subpage(table_b)
    p1, p2 = get_common_chunks(table_a, total_a, table_b, total_b)
    print(p1, p2)
    return p1, p2
def calc_rabin(name1, name2, chunk_size):
    """Cross-compare two dump directories using anchored-window chunking.

    Prints and returns the (p1, p2) redundancy fractions.
    """
    table_b = read_dumps_rabin(name2, 5000000, chunk_size)
    p1 = get_redundancy_rabin(name1, chunk_size, table_b)
    table_a = read_dumps_rabin(name1, 5000000, chunk_size)
    p2 = get_redundancy_rabin(name2, chunk_size, table_a)
    print(p1, p2)
    return p1, p2
| StarcoderdataPython |
6624899 | <reponame>timgates42/PokemonGo-Bot
from __future__ import print_function
import os
import sys
import importlib
import re
import requests
import zipfile
import shutil
class PluginLoader(object):
    """Loads plugin code from local paths, zip archives or GitHub references.

    Loaded locations are appended to ``sys.path`` so their modules become
    importable; the class-level ``folder_cache`` remembers what was added.
    """

    folder_cache = []

    def _get_correct_path(self, path):
        # A .zip archive is importable directly; for any other file we add
        # its containing directory instead.
        extension = os.path.splitext(path)[1]
        if extension == '.zip':
            return path
        return os.path.dirname(path)

    def load_plugin(self, plugin):
        """Install (if needed) and register *plugin* on ``sys.path``.

        *plugin* is either a local path or a ``user/repo#sha`` GitHub spec.
        """
        github_plugin = GithubPlugin(plugin)
        if github_plugin.is_valid_plugin():
            if not github_plugin.is_already_installed():
                github_plugin.install()
            correct_path = github_plugin.get_plugin_folder()
        else:
            correct_path = self._get_correct_path(plugin)

        if correct_path not in self.folder_cache:
            self.folder_cache.append(correct_path)
            sys.path.append(correct_path)

    def remove_path(self, path):
        """Undo ``load_plugin`` for a local *path*."""
        correct_path = self._get_correct_path(path)
        sys.path.remove(correct_path)
        self.folder_cache.remove(correct_path)

    def get_class(self, namespace_class):
        """Resolve a dotted ``module.Class`` name and return the attribute.

        Uses ``rsplit`` so dotted module paths (``pkg.mod.Class``) resolve
        correctly; the original two-part ``split('.')`` raised ValueError
        for anything nested.
        """
        namespace, class_name = namespace_class.rsplit('.', 1)
        my_module = importlib.import_module(namespace)
        return getattr(my_module, class_name)
class GithubPlugin(object):
    """Resolves, downloads and installs plugins hosted on GitHub.

    A plugin is referenced as ``user/repo#sha``; it is downloaded as the
    archive of that commit and unpacked under ``PLUGINS_FOLDER`` with a
    ``.sha`` marker file recording the installed revision.
    """

    PLUGINS_FOLDER = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'plugins')

    def __init__(self, plugin_name):
        self.plugin_name = plugin_name
        self.plugin_parts = self.get_github_parts()

    def is_valid_plugin(self):
        """True when the name parsed as a ``user/repo#sha`` reference."""
        return self.plugin_parts is not None

    def get_github_parts(self):
        """Parse ``user/repo#sha``; return a parts dict or None."""
        # Raw string: the original '\/' was an invalid escape sequence
        # in the string literal (DeprecationWarning); '/' needs no escape.
        groups = re.match(r'(.*)/(.*)#(.*)', self.plugin_name)
        if groups is None:
            return None
        return {
            'user': groups.group(1),
            'repo': groups.group(2),
            'sha': groups.group(3),
        }

    def get_installed_version(self):
        """Return the sha recorded by a previous install, or None."""
        if not self.is_already_installed():
            return None
        filename = os.path.join(self.get_plugin_folder(), '.sha')
        # (A stray debug print(filename) was removed here.)
        with open(filename) as file:
            return file.read().strip()

    def get_local_destination(self):
        """Path of the temporary zip archive for this plugin."""
        parts = self.plugin_parts
        if parts is None:
            raise Exception('Not a valid github plugin')
        file_name = '{}_{}_{}.zip'.format(parts['user'], parts['repo'], parts['sha'])
        return os.path.join(self.PLUGINS_FOLDER, file_name)

    def is_already_installed(self):
        """True when the plugin folder exists and its .sha marker matches."""
        file_path = self.get_plugin_folder()
        if not os.path.isdir(file_path):
            return False
        sha_file = os.path.join(file_path, '.sha')
        if not os.path.isfile(sha_file):
            return False
        with open(sha_file) as file:
            content = file.read().strip()
        return content == self.plugin_parts['sha']

    def get_plugin_folder(self):
        """Installation directory for this plugin (``user_repo``)."""
        folder_name = '{}_{}'.format(self.plugin_parts['user'], self.plugin_parts['repo'])
        return os.path.join(self.PLUGINS_FOLDER, folder_name)

    def get_github_download_url(self):
        """Archive URL of the referenced commit on GitHub."""
        parts = self.plugin_parts
        if parts is None:
            raise Exception('Not a valid github plugin')
        return 'https://github.com/{}/{}/archive/{}.zip'.format(
            parts['user'], parts['repo'], parts['sha'])

    def install(self):
        """Download the archive and unpack it into the plugin folder."""
        self.download()
        self.extract()

    def extract(self):
        """Unzip the archive, normalise the folder name, write the .sha
        marker and delete the archive."""
        dest = self.get_plugin_folder()
        with zipfile.ZipFile(self.get_local_destination(), "r") as z:
            z.extractall(dest)
        # GitHub archives unpack as '<repo>-<sha>'; rename to plain '<repo>'.
        github_folder = os.path.join(dest, '{}-{}'.format(self.plugin_parts['repo'], self.plugin_parts['sha']))
        new_folder = os.path.join(dest, '{}'.format(self.plugin_parts['repo']))
        shutil.move(github_folder, new_folder)
        with open(os.path.join(dest, '.sha'), 'w') as file:
            file.write(self.plugin_parts['sha'])
        os.remove(self.get_local_destination())

    def download(self):
        """Stream the GitHub archive to the local destination path."""
        url = self.get_github_download_url()
        dest = self.get_local_destination()
        r = requests.get(url, stream=True)
        r.raise_for_status()
        with open(dest, 'wb') as f:
            for chunk in r.iter_content(chunk_size=1024):
                if chunk:
                    f.write(chunk)
        r.close()
        return dest
| StarcoderdataPython |
# Detect frontal faces in a sample image with a Haar cascade and draw boxes.
# (Dataset residue fused onto the first/last lines made this file a syntax
# error; the residue is removed, the logic is unchanged.)
import cv2 as cv
import numpy as np

# Pre-trained frontal-face Haar cascade shipped in the Resources folder.
faceCascade = cv.CascadeClassifier("Resources/haarcascade_frontalface_default.xml")

img = cv.imread("Resources/lena.png")
imgGray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)

# scaleFactor=1.1, minNeighbors=4 detection parameters.
faces = faceCascade.detectMultiScale(imgGray, 1.1, 4)

for (x, y, w, h) in faces:
    cv.rectangle(img, (x, y), (x + w, y + h), (255, 0, 0), 2)

cv.imshow("image", img)
cv.waitKey(0)
3433813 | """Implementations of various mixture models."""
import abc
import numpy as np
import torch
from torch import nn
from torch import distributions
import torch.nn.functional as F
from pytorch_generative.models import base
class MixtureModel(base.GenerativeModel):
    """Base class inherited by all mixture models in pytorch-generative.

    Provides:
        * A generic `forward()` method which returns the log likelihood of the input
          under the distribution. The log likelihood of the component distributions
          must be defined by the subclasses via `_component_log_prob()`.
        * A generic `sample()` method which returns samples from the distribution.
          Samples from the component distribution must be defined by the subclasses via
          `_component_sample()`.
    """

    def __init__(self, n_components, n_features):
        """Initializes a new MixtureModel instance.

        Args:
            n_components: The number of component distributions.
            n_features: The number of features (i.e. dimensions) in each component.
        """
        super().__init__()
        self.n_components = n_components
        self.n_features = n_features
        # Unnormalized mixture weights; softmax-ed in forward()/sample().
        self.mixture_logits = nn.Parameter(torch.ones((n_components,)))

    @abc.abstractmethod
    def _component_log_prob(self):
        """Returns the log likelihood of the component distributions."""

    def __call__(self, *args, **kwargs):
        # Flatten each input to (batch, 1, n_features); the original shape
        # is remembered so sample() can restore it.
        x = args[0]
        self._original_shape = x.shape
        x = x.view(self._original_shape[0], 1, self.n_features)
        args = (x, *args[1:])
        return super().__call__(*args, **kwargs)

    def forward(self, x):
        # log p(x) = logsumexp_k [ log pi_k + log p_k(x) ]
        mixture_log_prob = torch.log_softmax(self.mixture_logits, dim=-1)
        log_prob = mixture_log_prob + self._component_log_prob(x)
        return torch.logsumexp(log_prob, dim=-1)

    @abc.abstractmethod
    def _component_sample(self, idxs):
        """Returns samples from the component distributions conditioned on idxs."""

    def sample(self, n_samples):
        # Ancestral sampling: pick components, then sample from each.
        with torch.no_grad():
            shape = (n_samples,)
            idxs = distributions.Categorical(logits=self.mixture_logits).sample(shape)
            sample = self._component_sample(idxs)
            return sample.view(n_samples, *self._original_shape[1:])
class GaussianMixtureModel(MixtureModel):
    """A categorical mixture of Gaussian distributions with diagonal covariance."""

    def __init__(self, n_components, n_features):
        super().__init__(n_components, n_features)
        # Near-zero means to start; scaled random init.
        self.mean = nn.Parameter(torch.randn(n_components, n_features) * 0.01)
        # NOTE: We initialize var = 1 <=> log(sqrt(var)) = 0.
        self.log_std = nn.Parameter(torch.zeros(n_components, n_features))

    def _component_log_prob(self, x):
        # Diagonal-Gaussian log density, summed over the feature axis.
        z = -self.log_std - 0.5 * torch.log(torch.tensor(2 * np.pi))
        # NOTE(review): __call__ already inserts a singleton axis at dim 1,
        # so this extra unsqueeze produces one more dimension than the
        # Bernoulli variant — confirm the intended output shape.
        log_prob = (
            z - 0.5 * ((x.unsqueeze(dim=1) - self.mean) / self.log_std.exp()) ** 2
        )
        return log_prob.sum(-1)

    def _component_sample(self, idxs):
        mean, std = self.mean[idxs], self.log_std[idxs].exp()
        return distributions.Normal(mean, std).sample()
class BernoulliMixtureModel(MixtureModel):
    """A categorical mixture of (factorized) Bernoulli distributions."""

    def __init__(self, n_components, n_features):
        super().__init__(n_components, n_features)
        self.logits = nn.Parameter(torch.rand(n_components, n_features))

    def _component_log_prob(self, x):
        logits, x = torch.broadcast_tensors(self.logits, x)
        # BCE-with-logits is exactly -log Bern(x | sigmoid(logits)).
        log_bern = -F.binary_cross_entropy_with_logits(logits, x, reduction="none")
        return log_bern.sum(-1)

    def _component_sample(self, idxs):
        return distributions.Bernoulli(logits=self.logits[idxs]).sample()
| StarcoderdataPython |
3228223 | <gh_stars>1-10
"""hearthstone_api.py file."""
from .hearthstone_game_data_api import HearthstoneGameDataApi
class HearthstoneApi:
    """Entry point grouping the Hearthstone API surfaces.

    Attributes:
        game_data: Client for the Hearthstone game-data endpoints.
    """

    def __init__(self, client_id, client_secret):
        """Create the wrapper from Blizzard OAuth client credentials."""
        self.game_data = HearthstoneGameDataApi(client_id, client_secret)
| StarcoderdataPython |
8122744 | '''Python Script to check image size and resize if any or both of the dimensions is bigger than 1080.
This job will replace the old image by the new resized image'''
# importing libraries
import os
from PIL import Image
def image_resize(image_file):
    """Downscale *image_file* in place so no side exceeds 1080 pixels.

    The aspect ratio is preserved: the larger dimension becomes exactly
    1080 and the other is scaled proportionally. Images already within the
    limit are left untouched (the file is not rewritten). The original had
    two duplicated branches for width- vs height-dominant images; they are
    consolidated here.
    """
    max_side = 1080
    img = Image.open(image_file)
    width, height = img.size
    if width <= max_side and height <= max_side:
        return  # nothing to do
    ratio = max_side / float(max(width, height))
    if width >= height:
        new_size = (max_side, int(height * ratio))
    else:
        new_size = (int(width * ratio), max_side)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow >= 10 in favour of
    # Image.LANCZOS — confirm the Pillow version pinned by this project.
    img = img.resize(new_size, Image.ANTIALIAS)
    img.save(image_file)
# get all the input images in the folder named input
# NOTE(review): assumes ./input contains only image files — any non-image
# entry will make Image.open raise inside image_resize.
InputImages = os.listdir('input')
InputPaths = [f'./input/{InputImage}' for InputImage in InputImages]
# apply function image_resize to all the images inside the input folder
for InputPath in InputPaths:
    image_resize(InputPath)
| StarcoderdataPython |
11238273 | <reponame>dutradda/sqldataclass
import itertools
import asynctest
import pytest
from dbdaora import GeoSpatialQuery
from dbdaora.exceptions import EntityNotFoundError
@pytest.mark.asyncio
async def test_should_get_from_memory(
    repository, serialized_fake_entity, fake_entity
):
    """An entity already present in the memory geo-index is returned directly."""
    # Seed the geo set exactly as the repository itself would store it.
    await repository.memory_data_source.geoadd(
        'fake:fake2:fake', *itertools.chain(*serialized_fake_entity)
    )
    entity = await repository.query(
        fake_id=fake_entity.fake_id,
        fake2_id=fake_entity.fake2_id,
        latitude=5,
        longitude=6,
        max_distance=1,
    ).entity
    assert entity == fake_entity
@pytest.mark.asyncio
async def test_should_raise_not_found_error(repository, fake_entity, mocker):
    """A query matching nothing raises EntityNotFoundError carrying the query."""
    fake_query = GeoSpatialQuery(
        repository,
        memory=True,
        fake_id=fake_entity.fake_id,
        fake2_id=fake_entity.fake2_id,
        latitude=1,
        longitude=1,
        max_distance=1,
    )
    with pytest.raises(EntityNotFoundError) as exc_info:
        await repository.query(
            fake_id=fake_entity.fake_id,
            fake2_id=fake_entity.fake2_id,
            latitude=1,
            longitude=1,
            max_distance=1,
        ).entity
    assert exc_info.value.args == (fake_query,)
@pytest.mark.asyncio
async def test_should_raise_not_found_error_when_already_raised_before(
    repository, mocker, fake_entity
):
    """When the memory key exists but the radius search is empty, the
    fallback is NOT consulted and nothing is re-written to memory."""
    expected_query = GeoSpatialQuery(
        repository,
        memory=True,
        fake_id=fake_entity.fake_id,
        fake2_id=fake_entity.fake2_id,
        latitude=1,
        longitude=1,
        max_distance=1,
    )
    repository.memory_data_source.georadius = asynctest.CoroutineMock(
        side_effect=[[]]
    )
    repository.memory_data_source.exists = asynctest.CoroutineMock(
        side_effect=[True]
    )
    repository.memory_data_source.geoadd = asynctest.CoroutineMock()
    with pytest.raises(EntityNotFoundError) as exc_info:
        await repository.query(
            fake_id=fake_entity.fake_id,
            fake2_id=fake_entity.fake2_id,
            latitude=1,
            longitude=1,
            max_distance=1,
        ).entity
    assert exc_info.value.args == (expected_query,)
    # The radius search ran exactly once with the expected geometry...
    assert repository.memory_data_source.georadius.call_args_list == [
        mocker.call(
            key='fake:fake2:fake',
            longitude=1,
            latitude=1,
            radius=1,
            unit='km',
            with_dist=True,
            with_coord=True,
            count=None,
        ),
    ]
    assert repository.memory_data_source.exists.call_args_list == [
        mocker.call('fake:fake2:fake')
    ]
    # ...and nothing was written back to the memory store.
    assert not repository.memory_data_source.geoadd.called
@pytest.mark.asyncio
async def test_should_set_already_not_found_error(
    repository, mocker, fake_entity
):
    """A miss in both memory and fallback still raises, and a not-found
    result is never written into the geo index."""
    expected_query = GeoSpatialQuery(
        repository,
        memory=True,
        fake_id=fake_entity.fake_id,
        fake2_id=fake_entity.fake2_id,
        latitude=1,
        longitude=1,
        max_distance=1,
    )
    repository.memory_data_source.georadius = asynctest.CoroutineMock(
        side_effect=[[]]
    )
    repository.memory_data_source.exists = asynctest.CoroutineMock(
        side_effect=[False]
    )
    repository.fallback_data_source.query = asynctest.CoroutineMock(
        return_value=[]
    )
    repository.memory_data_source.geoadd = asynctest.CoroutineMock()
    with pytest.raises(EntityNotFoundError) as exc_info:
        await repository.query(
            fake_id=fake_entity.fake_id,
            fake2_id=fake_entity.fake2_id,
            latitude=1,
            longitude=1,
            max_distance=1,
        ).entity
    assert exc_info.value.args == (expected_query,)
    assert repository.memory_data_source.georadius.call_args_list == [
        mocker.call(
            key='fake:fake2:fake',
            longitude=1,
            latitude=1,
            radius=1,
            unit='km',
            with_dist=True,
            with_coord=True,
            count=None,
        ),
    ]
    assert repository.memory_data_source.exists.call_args_list == [
        mocker.call('fake:fake2:fake')
    ]
    # The fallback WAS consulted this time (memory key did not exist)...
    assert repository.fallback_data_source.query.call_args_list == [
        mocker.call('fake:fake2:fake')
    ]
    # ...but the empty result is not cached into the geo index.
    assert not repository.memory_data_source.geoadd.called
@pytest.mark.asyncio
async def test_should_get_from_fallback(
    repository,
    fake_entity,
    fake_fallback_data_entity,
    fake_fallback_data_entity2,
):
    """On a memory miss the entity is rebuilt from fallback and cached."""
    await repository.memory_data_source.delete('fake:fake2:fake')
    repository.fallback_data_source.db[
        'fake:fake2:m1'
    ] = fake_fallback_data_entity
    repository.fallback_data_source.db[
        'fake:fake2:m2'
    ] = fake_fallback_data_entity2
    entity = await repository.query(
        fake_id=fake_entity.fake_id,
        fake2_id=fake_entity.fake2_id,
        latitude=5,
        longitude=6,
        max_distance=1,
    ).entity
    assert entity == fake_entity
    # The rebuilt entity must now be cached in memory.
    assert repository.memory_data_source.exists('fake:fake2:fake')
@pytest.mark.asyncio
async def test_should_set_memory_after_got_fallback(
    repository,
    fake_entity,
    mocker,
    fake_fallback_data_entity,
    fake_fallback_data_entity2,
):
    """After a fallback hit, each member is geoadd-ed back into memory."""
    repository.memory_data_source.georadius = asynctest.CoroutineMock(
        side_effect=[[], fake_entity.data]
    )
    repository.memory_data_source.exists = asynctest.CoroutineMock(
        side_effect=[False]
    )
    repository.fallback_data_source.db[
        'fake:fake2:m1'
    ] = fake_fallback_data_entity
    repository.fallback_data_source.db[
        'fake:fake2:m2'
    ] = fake_fallback_data_entity2
    repository.memory_data_source.geoadd = asynctest.CoroutineMock()
    entity = await repository.query(
        fake_id=fake_entity.fake_id,
        fake2_id=fake_entity.fake2_id,
        latitude=5,
        longitude=6,
        max_distance=1,
    ).entity
    assert repository.memory_data_source.georadius.called
    assert repository.memory_data_source.exists.called
    # Both fallback members were written back with their stored coordinates.
    assert repository.memory_data_source.geoadd.call_args_list == [
        mocker.call(
            'fake:fake2:fake',
            longitude=6.000002324581146,
            latitude=4.999999830436074,
            member=b'm1',
        ),
        mocker.call(
            'fake:fake2:fake',
            longitude=6.000002324581146,
            latitude=4.999999830436074,
            member=b'm2',
        ),
    ]
    assert entity == fake_entity
| StarcoderdataPython |
92572 | <filename>python/dxa/__init__.py
#
# DX Library
# packaging file
# __init__.py
#
import numpy as np
import pandas as pd
import datetime as dt
# frame
from get_year_deltas import get_year_deltas
from constant_short_rate import constant_short_rate
from market_environment import market_environment
from plot_option_stats import plot_option_stats
# simulation
from sn_random_numbers import sn_random_numbers
from simulation_class import simulation_class
from geometric_brownian_motion import geometric_brownian_motion
from jump_diffusion import jump_diffusion
from square_root_diffusion import square_root_diffusion
# valuation
from valuation_class import valuation_class
from valuation_mcs_european import valuation_mcs_european
from valuation_mcs_american import valuation_mcs_american
# portfolio
from derivatives_position import derivatives_position
from derivatives_portfolio import derivatives_portfolio
| StarcoderdataPython |
3440186 | <filename>general/messages.py
def help():
    """Return the formatted command-list text.

    NOTE: this module-level name shadows the builtin ``help``.
    """
    # The literal must stay exactly as-is; callers display it verbatim.
    return """\n**Commands:**\n
    files -> Displays files options menu\n
    help -> Displays commands list\n"""
def welcome():
    """Print the application greeting to stdout.

    (Dataset residue fused onto the print line made the original a syntax
    error; the residue is removed, the behavior is unchanged.)
    """
    msg = "Welcome to GWEN!"
    print(msg)
3422777 | <filename>simpleAPI/api/v1/serializers.py
from django.contrib.auth import get_user_model
from rest_framework import serializers
from companys.models import Company, News
from users.models import Profile
User = get_user_model()
class NewsSerializer(serializers.ModelSerializer):
    """Serializes every field of a News item."""
    class Meta:
        model = News
        fields = '__all__'
class CompanySerializer(serializers.ModelSerializer):
    """Company representation including its nested news items."""
    # Nested read of the related News objects; optional on write.
    company_news = NewsSerializer(many=True, required=False)
    class Meta:
        model = Company
        exclude = ['id']
class CompanySerializerNotAuth(serializers.ModelSerializer):
    """Company representation for unauthenticated clients: hides the news."""
    class Meta:
        model = Company
        exclude = ['id', 'company_news']
class ProfileSerializer(serializers.ModelSerializer):
    """Profile fields; the related company is rendered as its str() form."""
    company = serializers.StringRelatedField()
    class Meta:
        model = Profile
        exclude = ['user']
class UserSerializer(serializers.ModelSerializer):
    """User serializer with a writable nested profile.

    ``create`` builds the User first and then its related Profile;
    ``update`` applies both sets of fields in place.
    """

    profile = ProfileSerializer()

    class Meta:
        model = User
        fields = ['id', 'profile', 'username', 'first_name', 'last_name', 'date_joined']

    def create(self, validated_data):
        # Profile fields are split out and stored on the related model.
        profile_data = validated_data.pop('profile')
        user = User.objects.create(**validated_data)
        Profile.objects.create(user=user, **profile_data)
        return user

    def update(self, instance, validated_data):
        # Default to an empty dict so partial updates without a 'profile'
        # payload no longer raise KeyError (the original pop had no default).
        profile_data = validated_data.pop('profile', {})
        profile = instance.profile
        # * User Info
        instance.first_name = validated_data.get(
            'first_name', instance.first_name)
        instance.last_name = validated_data.get(
            'last_name', instance.last_name)
        # Persist the User changes: the original never called instance.save(),
        # so first/last name updates were silently dropped.
        instance.save()
        # * AccountProfile Info
        profile.company = profile_data.get('company', profile.company)
        profile.bio = profile_data.get('bio', profile.bio)
        profile.location = profile_data.get('location', profile.location)
        profile.birth_date = profile_data.get('birth_date', profile.birth_date)
        profile.role = profile_data.get('role', profile.role)
        profile.save()
        return instance
| StarcoderdataPython |
6498579 | <filename>ab_iface.py
"""
* Copyright © 2020 drewg3r
* https://github.com/drewg3r/DM-2
Interface for 'about' window.
"""
from PyQt5 import QtWidgets
import interface
from interface.about import Ui_Form
class MyFormAbout(QtWidgets.QMainWindow, interface.about.Ui_Form):
    """'About' window: wires the generated Ui_Form to a close button."""
    def __init__(self):
        super().__init__()
        self.setupUi(self)  # build the widgets generated by Qt Designer
        self.pushButton.clicked.connect(self.close_btn)  # close button slot
    def close_btn(self):
        """Close the window (slot for the push button)."""
        self.close()
| StarcoderdataPython |
3512567 | """ for bitFlyer
"""
import pybitflyer
import pandas as pd
from selenium.webdriver.chrome.options import Options
from bs4 import BeautifulSoup
from .handler import InvestmentTrustSiteHandler
class bitFlyerHandler(InvestmentTrustSiteHandler):
    """Handler that aggregates bitFlyer balances with scraped prices.

    Balances come from the authenticated REST API (pybitflyer); JPY prices
    are scraped from the bitFlyer trading home page with a headless browser.
    (Dataset residue fused onto the last line of the original made it a
    syntax error; the residue is removed, the logic is unchanged.)
    """

    __url_home = "https://bitflyer.com/ja-jp/ex/Home"

    def __init__(self, options: Options = None):
        # Force headless mode so the scrape can run without a display.
        options = options or Options()
        options.headless = True
        super().__init__(
            options=options
        )

    def update(self, api_key, api_secret):
        """Refresh ``self.df`` with per-currency amount, price and valuation.

        NOTE(review): the scrape assumes the #fundsInfo table lists one row
        per currency with a '価格' (price) column and that its first data
        row is JPY (price forced to 1) — confirm against the live page.
        """
        # Account balances via the authenticated API.
        api = pybitflyer.API(api_key=api_key, api_secret=api_secret)
        balances = api.getbalance()
        df1 = pd.DataFrame(balances)
        df1 = df1.rename(columns={"currency_code": "ticker"})
        df1 = df1[["ticker", "amount"]]
        # Scrape current prices from the funds table on the home page.
        self.browser.get(self.__url_home)
        html = self.browser.page_source
        soup = BeautifulSoup(html, 'html.parser')
        res = soup.find(id="fundsInfo")
        res = res.findChildren("table")[0]
        df2, = pd.read_html(str(res))
        df2 = df2.rename(columns={"Unnamed: 0": "ticker", "価格": "price"})
        df2 = df2.iloc[1:, :]
        df2 = df2[["ticker", "price"]].copy()
        df2.iloc[0, 1] = 1  # first data row is JPY itself: price 1
        # Join, coerce numerics and compute JPY valuations.
        df = df1.merge(df2)
        df = df.apply(pd.to_numeric, errors="ignore")
        df["valuation"] = df["amount"] * df["price"]
        self.df = df
1774313 | """Brachistochrone example."""
from math import pi
ocp = beluga.OCP('missle')
# Define independent variables
ocp.independent('t', 's')
# Define equations of motion
ocp.state('n', 'V*cos(psi)*cos(gam)', 'm') \
.state('e', 'V*sin(psi)*cos(gam)', 'm') \
.state('d', '-V*sin(gam)', 'm') \
.state('psi', 'g*tan(bank)/V', 'rad') \
# Define controls
ocp.control('gam','rad')
ocp.control('bank','rad')
# Define constants
ocp.constant('V',100,'m/s')
ocp.constant('g',-9.81,'m/s^2')
# ocp.constant('tfreal',50,'s')
# Define costs
ocp.path_cost('1','s')
# Define constraints
ocp.constraints() \
.initial('n-n_0','m') \
.initial('e-e_0','m') \
.initial('d-d_0','m') \
.terminal('n-n_f','m') \
.terminal('e-e_f','m') \
.terminal('d-d_f','m') \
.path('bankLim','bank','<>',60*pi/180,'rad',start_eps=1e-5) \
.path('gamLim','gam','<>',60*pi/180,'rad',start_eps=1e-5)\
ocp.scale(m='n', s='n/V', kg=1, rad=1, nd=1)
# ocp.scale(m='V', s=1, kg=1, rad=1, nd=1)
bvp_solver = beluga.bvp_algorithm('MultipleShooting',
tolerance=1e-4,
max_iterations=30,
verbose = True,
derivative_method='fd',
max_error=100,
)
bvp_solver = beluga.bvp_algorithm('qcpi',
tolerance=1e-4,
max_iterations=50,
verbose = True,
)
guess_maker = beluga.guess_generator('auto',
start=[-1000,-1000,-1000,pi/4], # Starting values for states in order
direction='forward',
costate_guess = 0.0,
control_guess = 0.0,
use_control_guess = True,
time_integrate=0.1
)
continuation_steps = beluga.init_continuation()
continuation_steps.add_step('bisection') \
.num_cases(10) \
.terminal('n', 0.0)\
.terminal('e', 0.0)\
.terminal('psi',pi/4)\
.terminal('d',-1000)
continuation_steps.add_step('bisection') \
.num_cases(21) \
.terminal('d',-900) \
.initial('n',-5000) \
.initial('e',-5000)
# continuation_steps.add_step('bisection') \
# .num_cases(5) \
# .constant('eps_bankLim',1e-4) \
# .constant('eps_gamLim',1e-4)
#
continuation_steps.add_step('bisection') \
.num_cases(11) \
.initial('n',-10000) \
.initial('e',-10000)
beluga.solve(ocp,
method='icrm',
bvp_algorithm=bvp_solver,
steps=continuation_steps,
guess_generator=guess_maker)
# beluga.solve(problem)
# # from timeit import timeit
#
# # print(timeit("get_problem()","from __main__ import get_problem",number=10))
# beluga.run(get_problem())
| StarcoderdataPython |
1630883 | <reponame>kristoffer-paulsson/bible-analyzer
#
# Copyright (c) 2021 by <NAME> <<EMAIL>>.
#
# Permission to use, copy, modify, and/or distribute this software for any purpose with
# or without fee is hereby granted, provided that the above copyright notice and this
# permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH REGARD TO
# THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
# EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL
# DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER
# IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN
# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
#
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
#
# Contributors:
# <NAME> - initial implementation
#
"""Module containing the CSV command class."""
import csv
import hashlib
import json
import re
from pathlib import Path
from pickle import Unpickler
from . import Command
from ..data import BOOKS
class CsvCommand(Command):
    """Exports cached book parsings to CSV (one file per corpus or book)."""

    def __call__(self):
        # "all" expands to both testaments; otherwise export the named corpus.
        if self._args.corpus == "all":
            self.iterate2("ot")
            self.iterate2("nt")
        else:
            self.iterate2(self._args.corpus)

    def iterate(self, corpus: str):
        """Legacy per-book export: one CSV per book, checksum line printed."""
        self.logger.info("Starting with parsing: {}".format(corpus.upper()))
        path = self._config.get("cache")
        for book in json.loads(BOOKS)[corpus]:
            filename = path.joinpath("parsing-{}.pickle".format(book))
            if not filename.is_file():
                self.logger.error("The parsing for {} is missing at: {}".format(book.capitalize(), filename))
            # NOTE(review): execution continues even when the file is
            # missing; export() will then fail opening it — confirm intended.
            print(self.export(filename, corpus, book))
        self.logger.info("Finished with corpus: {}".format(corpus.upper()))

    def export(self, filename: Path, corpus: str, book: str) -> str:
        """Write one book's verses to <book>.csv.

        Returns a "<sha256> <size> <relative path>" summary line.
        """
        with filename.open("rb") as cache:
            data = Unpickler(cache).load()
        csv_path = self._config.get("cache").joinpath(book + ".csv")
        with csv_path.open("w", encoding="utf-8") as csv_file:
            csv_file.truncate()
            writer = csv.DictWriter(csv_file, fieldnames=("chapter", "verse", "corpus"))
            writer.writeheader()
            for entry in data:
                # Normalise: lowercase, trim, strip punctuation marks.
                text = str(entry.text if entry.text else "").strip().lower()
                for mark in ("·", ".", ",", ":", ";", "(", ")"):
                    text = text.replace(mark, "")
                writer.writerow({"chapter": entry.chapter, "verse": entry.verse,
                                 "corpus": text})
        # "digest" instead of "hash": avoid shadowing the builtin.
        digest = hashlib.sha256()
        content = csv_path.read_bytes()
        digest.update(content)
        return "{} {} {}".format(digest.hexdigest(), len(content), csv_path.relative_to(self._config.get("cache")))

    def iterate2(self, corpus: str):
        """Export a whole corpus into a single <corpus>.csv file."""
        self.logger.info("Starting with parsing: {}".format(corpus.upper()))
        path = self._config.get("cache")
        csv_path = self._config.get("cache").joinpath(corpus + ".csv")
        with csv_path.open("w", encoding="utf-8") as csv_file:
            csv_file.truncate()
            writer = csv.DictWriter(csv_file, fieldnames=("book", "chapter", "verse", "corpus"))
            writer.writeheader()
            for book in json.loads(BOOKS)[corpus]:
                filename = path.joinpath("parsing-{}.pickle".format(book))
                if not filename.is_file():
                    self.logger.error("The parsing for {} is missing at: {}".format(book.capitalize(), filename))
                self.export2(filename, writer, book)
        self.logger.info("Finished with corpus: {}".format(corpus.upper()))

    def export2(self, filename: Path, writer: csv.DictWriter, book: str):
        """Append one book's verses to the shared corpus CSV writer."""
        with filename.open("rb") as cache:
            data = Unpickler(cache).load()
        for entry in data:
            text = str(entry.text if entry.text else "")
            # Drop verse-number markers like "[12]" / "[3a]".
            text = re.sub(r"\[\d+\w?\]", "", text)
            # Strip punctuation and editorial sigla.
            text = re.sub(r"[··\.,:;;*‡—\(\)\[\]]", "", text)
            text = text.strip().lower()
            writer.writerow({"book": book, "chapter": entry.chapter, "verse": entry.verse, "corpus": text})
| StarcoderdataPython |
8000020 | <reponame>Daymorn/StealthUO-Scripts
from __future__ import division
import datetime as _datetime
import struct as _struct
import time as _time
from os import linesep as _linesep
from ._datatypes import *
from ._protocol import EVENTS_NAMES as _EVENTS_NAMES
from ._protocol import ScriptMethod as _ScriptMethod
from ._protocol import get_connection as _get_connection
from .utils import ddt2pdt as _ddt2pdt
from .utils import pdt2ddt as _pdt2ddt
from .utils import iterable as _iterable
# Event-callback plumbing: Stealth pushes events by numeric slot index;
# the two methods below register/unregister the handler for one slot.
_clear_event_callback = _ScriptMethod(7)  # ClearEventProc
_clear_event_callback.argtypes = [_ubyte]  # EventIndex
_set_event_callback = _ScriptMethod(11)  # SetEventProc
_set_event_callback.argtypes = [_ubyte]  # EventIndex
def SetEventProc(EventName, Callback=None):
    # Attach Callback to the named event, or clear the handler when
    # Callback is None.  Raises ValueError for names not in _EVENTS_NAMES.
    conn = _get_connection()
    try:
        index = _EVENTS_NAMES.index(EventName.lower())
    except ValueError:
        raise ValueError('Unknown event "' + EventName + '".')
    # clear event
    if Callback is None:
        _clear_event_callback(index)
        # conn.callbacks[index] = None
    # set event
    else:
        if conn.callbacks[index] is None:
            # Only tell the server once per slot; later calls just swap the
            # local callback.
            _set_event_callback(index)
        conn.callbacks[index] = Callback
_connected = _ScriptMethod(9)  # GetConnectedStatus
_connected.restype = _bool
def Connected():
    # True while the client is connected to the game server.
    return _connected()
_add_to_system_journal = _ScriptMethod(10)  # AddToSystemJournal
_add_to_system_journal.argtypes = [_str]  # Text
def AddToSystemJournal(*args, **kwargs):
    # print()-style logging into Stealth's system journal: all positional
    # and keyword arguments are stringified and joined with `sep`
    # (default ', '), then `end` (default '') is appended.
    sep = kwargs.pop('sep', ', ')
    end = kwargs.pop('end', '')
    s_args = sep.join((str(arg) for arg in args))
    s_kwargs = sep.join((str(k) + '=' + str(v) for k, v in kwargs.items()))
    text = s_args + (sep if s_args and s_kwargs else '') + s_kwargs + end
    _add_to_system_journal(text)
_get_stealth_info = _ScriptMethod(12)  # GetStealthInfo
_get_stealth_info.restype = _buffer  # TAboutData
def GetStealthInfo():
    # Decode the TAboutData record: version triple, build number, build
    # date (Delphi TDateTime -> Python datetime) and GIT revision info.
    data = _get_stealth_info()
    result = dict()
    result['StealthVersion'] = _struct.unpack('3H', data[:6])
    result['Build'] = _struct.unpack('H', data[6:8])[0]
    result['BuildDate'] = _ddt2pdt(_struct.unpack('d', data[8:16])[0])
    result['GITRevNumber'] = _struct.unpack('H', data[16:18])[0]
    result['GITRevision'] = _str.from_buffer(data[18:]).value
    return result
# Connection management and profile switching.
_connect = _ScriptMethod(45)  # Connect
def Connect():
    _connect()
_disconnect = _ScriptMethod(46)  # Disconnect
def Disconnect():
    _disconnect()
_set_pause_on_disc = _ScriptMethod(24)  # SetPauseScriptOnDisconnectStatus
_set_pause_on_disc.argtypes = [_bool]  # Value
def SetPauseScriptOnDisconnectStatus(Value):
    _set_pause_on_disc(Value)
_get_pause_on_disc = _ScriptMethod(23)  # GetPauseScriptOnDisconnectStatus
_get_pause_on_disc.restype = _bool
def GetPauseScriptOnDisconnectStatus():
    return _get_pause_on_disc()
_set_reconnector = _ScriptMethod(22)  # SetARStatus
_set_reconnector.argtypes = [_bool]  # Value
def SetARStatus(Value):
    # AR = auto-reconnect (per the internal name _set_reconnector).
    _set_reconnector(Value)
_get_reconnector = _ScriptMethod(21)  # GetARStatus
_get_reconnector.restype = _bool
def GetARStatus():
    return _get_reconnector()
_get_self_name = _ScriptMethod(19)  # GetCharName
_get_self_name.restype = _str
def CharName():
    return _get_self_name()
_change_profile = _ScriptMethod(20)  # ChangeProfile
_change_profile.restype = _int
_change_profile.argtypes = [_str]  # PName
def ChangeProfile(PName):
    return _change_profile(PName)
_change_profile_ex = _ScriptMethod(352)  # ChangeProfileEx
_change_profile_ex.restype = _int
_change_profile_ex.argtypes = [_str,  # PName
                               _str,  # ShardName
                               _str]  # CharName
def ChangeProfileEx(PName, ShardName, CharName):
    # NOTE: the CharName parameter shadows the module-level CharName()
    # function inside this body; kept for interface compatibility.
    return _change_profile_ex(PName, ShardName, CharName)
_get_profile_name = _ScriptMethod(8)  # ProfileName
_get_profile_name.restype = _str
def ProfileName():
    return _get_profile_name()
# --- Character-state getters ------------------------------------------------
# Each value is fetched live from Stealth on every call; nothing is cached.
_get_self_id = _ScriptMethod(14)  # GetSelfID
_get_self_id.restype = _uint
def Self():
    # Serial (object ID) of the played character.
    return _get_self_id()
_get_self_sex = _ScriptMethod(25)  # GetSelfSex
_get_self_sex.restype = _ubyte
def Sex():
    return _get_self_sex()
_get_char_title = _ScriptMethod(26)  # GetCharTitle
_get_char_title.restype = _str
def GetCharTitle():
    return _get_char_title()
_get_gold_count = _ScriptMethod(27)  # GetSelfGold
_get_gold_count.restype = _ushort
def Gold():
    return _get_gold_count()
_get_armor_points = _ScriptMethod(28)  # GetSelfArmor
_get_armor_points.restype = _ushort
def Armor():
    return _get_armor_points()
_get_weight = _ScriptMethod(29)  # GetSelfWeight
_get_weight.restype = _ushort
def Weight():
    return _get_weight()
_get_max_weight = _ScriptMethod(30)  # GetSelfMaxWeight
_get_max_weight.restype = _ushort
def MaxWeight():
    return _get_max_weight()
_get_world_number = _ScriptMethod(18)  # GetWorldNum
_get_world_number.restype = _ubyte
def WorldNum():
    return _get_world_number()
_get_self_race = _ScriptMethod(31)  # GetSelfRace
_get_self_race.restype = _ubyte
def Race():
    return _get_self_race()
_get_max_pets = _ScriptMethod(32)  # GetSelfPetsMax
_get_max_pets.restype = _ubyte
def MaxPets():
    return _get_max_pets()
_get_pets_count = _ScriptMethod(33)  # GetSelfPetsCurrent
_get_pets_count.restype = _ubyte
def PetsCurrent():
    return _get_pets_count()
_get_fire_resist = _ScriptMethod(34)  # GetSelfFireResist
_get_fire_resist.restype = _ushort
def FireResist():
    return _get_fire_resist()
_get_cold_resist = _ScriptMethod(35)  # GetSelfColdResist
_get_cold_resist.restype = _ushort
def ColdResist():
    return _get_cold_resist()
_get_poison_resist = _ScriptMethod(36)  # GetSelfPoisonResist
_get_poison_resist.restype = _ushort
def PoisonResist():
    return _get_poison_resist()
_get_energy_resist = _ScriptMethod(37)  # GetSelfEnergyResist
_get_energy_resist.restype = _ushort
def EnergyResist():
    return _get_energy_resist()
_get_last_connection_time = _ScriptMethod(38)  # GetConnectedTime
_get_last_connection_time.restype = _double
def ConnectedTime():
    # Delphi TDateTime converted to a Python datetime.
    return _ddt2pdt(_get_last_connection_time())
_get_last_disconnection_time = _ScriptMethod(39)  # GetDisconnectedTime
_get_last_disconnection_time.restype = _double
def DisconnectedTime():
    return _ddt2pdt(_get_last_disconnection_time())
# "Last..." serials remembered by Stealth for the most recent actions.
_get_last_opened_container = _ScriptMethod(40)  # GetLastContainer
_get_last_opened_container.restype = _uint
def LastContainer():
    return _get_last_opened_container()
_get_last_targeted_object = _ScriptMethod(41)  # GetLastTarget
_get_last_targeted_object.restype = _uint
def LastTarget():
    return _get_last_targeted_object()
_get_last_attacked_object = _ScriptMethod(42)  # GetLastAttack
_get_last_attacked_object.restype = _uint
def LastAttack():
    return _get_last_attacked_object()
_get_last_status = _ScriptMethod(43)  # GetLastStatus
_get_last_status.restype = _uint
def LastStatus():
    return _get_last_status()
_get_last_used_object = _ScriptMethod(44)  # GetLastObject
_get_last_used_object.restype = _uint
def LastObject():
    return _get_last_used_object()
_get_buff_bar_info = _ScriptMethod(349)  # GetBuffBarInfo
_get_buff_bar_info.restype = _buffer  # TBuffBarInfo
def GetBuffBarInfo():
    # Decode the buff-bar buffer into a list of dicts, one per active buff.
    # Wire layout: a 1-byte record count, then packed records of
    # (ushort Attribute_ID, double TimeStart, ushort Seconds,
    #  uint ClilocID1, uint ClilocID2).
    result = []
    fmt = '=HdHII'
    size = _struct.calcsize(fmt)
    keys = ('Attribute_ID', 'TimeStart', 'Seconds', 'ClilocID1', 'ClilocID2')
    data = _get_buff_bar_info()
    if b'' == '':  # py2
        # On Python 2 bytes == str, so this branch runs and normalises the
        # raw buffer to bytes for uniform slicing.
        data = bytes(data)
    count = _struct.unpack('B', data[:1])[0]
    data = data[1:]
    for i in range(count):
        values = _struct.unpack(fmt, data[i * size:i * size + size])
        buff = dict(zip(keys, values))
        # Convert Delphi TDateTime to a Python datetime.
        buff['TimeStart'] = _ddt2pdt(buff['TimeStart'])
        result.append(buff)
    return result
_get_shard_name = _ScriptMethod(47)  # GetShardName
_get_shard_name.restype = _str
def ShardName():
    return _get_shard_name()
_get_profile_shard_name = _ScriptMethod(343)  # GetProfileShardName
_get_profile_shard_name.restype = _str
def ProfileShardName():
    return _get_profile_shard_name()
# Proxy settings currently configured for the profile.
_get_proxy_ip = _ScriptMethod(60)  # GetProxyIP
_get_proxy_ip.restype = _str
def ProxyIP():
    return _get_proxy_ip()
_get_proxy_port = _ScriptMethod(61)  # GetProxyPort
_get_proxy_port.restype = _ushort
def ProxyPort():
    return _get_proxy_port()
_is_proxy_using = _ScriptMethod(62)  # GetUseProxy
_is_proxy_using.restype = _bool
def UseProxy():
    return _is_proxy_using()
_get_backpack_id = _ScriptMethod(48)  # GetBackpackID
_get_backpack_id.restype = _uint
def Backpack():
    # Serial of the played character's backpack container.
    return _get_backpack_id()
def Ground():
    """Return 0, the pseudo-container value used to mean "the ground"."""
    return 0x0
# Primary stats and vitals of the played character.
_get_char_strength = _ScriptMethod(49)  # GetSelfStr
_get_char_strength.restype = _int
def Str():
    return _get_char_strength()
_get_char_intelligence = _ScriptMethod(50)  # GetSelfInt
_get_char_intelligence.restype = _int
def Int():
    return _get_char_intelligence()
_get_char_dexterity = _ScriptMethod(51)  # GetSelfDex
_get_char_dexterity.restype = _int
def Dex():
    return _get_char_dexterity()
_get_char_hp = _ScriptMethod(52)  # GetSelfLife
_get_char_hp.restype = _int
def Life():
    return _get_char_hp()
def HP():
    # Alias of Life().
    return _get_char_hp()
_get_char_mana = _ScriptMethod(53)  # GetSelfMana
_get_char_mana.restype = _int
def Mana():
    return _get_char_mana()
_get_char_stamina = _ScriptMethod(54)  # GetSelfStam
_get_char_stamina.restype = _int
def Stam():
    return _get_char_stamina()
_get_char_max_hp = _ScriptMethod(55)  # GetSelfMaxLife
_get_char_max_hp.restype = _int
def MaxLife():
    return _get_char_max_hp()
def MaxHP():
    # Alias of MaxLife().
    return _get_char_max_hp()
_get_char_max_mana = _ScriptMethod(56)  # GetSelfMaxMana
_get_char_max_mana.restype = _int
def MaxMana():
    return _get_char_max_mana()
_get_char_max_stamina = _ScriptMethod(57)  # GetMaxStam
_get_char_max_stamina.restype = _int
def MaxStam():
    return _get_char_max_stamina()
_get_char_luck = _ScriptMethod(58)  # GetSelfLuck
_get_char_luck.restype = _int
def Luck():
    return _get_char_luck()
_get_extended_info = _ScriptMethod(59)  # GetExtInfo
_get_extended_info.restype = _buffer  # TExtendedInfo
def GetExtInfo():
    # Decode the TExtendedInfo record into a dict of extended character
    # attributes (resists, caps, item-property bonuses, ...).
    keys = ('MaxWeight', 'Race', 'StatCap', 'PetsCurrent', 'PetsMax',
            'FireResist', 'ColdResist', 'PoisonResist', 'EnergyResist',
            'Luck', 'DamageMin', 'DamageMax', 'Tithing_points',
            'ArmorMax', 'fireresistMax', 'coldresistMax',
            'poisonresistMax', 'energyresistMax', 'DefenseChance',
            'DefensceChanceMax', 'Hit_Chance_Incr', 'Damage_Incr',
            'Swing_Speed_Incr', 'Lower_Reagent_Cost', 'Spell_Damage_Incr',
            'Faster_Cast_Recovery', 'Faster_Casting', 'Lower_Mana_Cost',
            'HP_Regen', 'Stam_Regen', 'Mana_Regen', 'Reflect_Phys_Damage',
            'Enhance_Potions', 'Strength_Incr', 'Dex_Incr', 'Int_Incr',
            'HP_Incr', 'Mana_Incr')
    # Format string mirrors the record layout above.  The misspelled
    # 'DefensceChanceMax' key is kept: callers may already depend on it.
    fmt = '=HBH2B4Hh2Hi26H'
    data = _get_extended_info()
    if b'' == '':  # py2
        data = bytes(data)
    values = _struct.unpack(fmt, data)
    return dict(zip(keys, values))
# Boolean status flags of the played character.
_is_hidden = _ScriptMethod(63)  # GetHiddenStatus
_is_hidden.restype = _bool
def Hidden():
    return _is_hidden()
_is_poisoned = _ScriptMethod(64)  # GetPoisonedStatus
_is_poisoned.restype = _bool
def Poisoned():
    return _is_poisoned()
_is_paralyzed = _ScriptMethod(65)  # GetParalyzedStatus
_is_paralyzed.restype = _bool
def Paralyzed():
    return _is_paralyzed()
_is_dead = _ScriptMethod(66)  # GetDeadStatus
_is_dead.restype = _bool
def Dead():
    return _is_dead()
_get_warmode = _ScriptMethod(171)  # IsWarMode
_get_warmode.restype = _bool
_get_warmode.argtypes = [_uint]  # ObjID
def WarMode():
    # IsWarMode takes any serial; here it is queried for the own character.
    return _get_warmode(Self())
_get_war_target = _ScriptMethod(67)  # GetWarTargetID
_get_war_target.restype = _uint
def WarTargetID():
    return _get_war_target()
_set_warmode = _ScriptMethod(68)  # SetWarMode
_set_warmode.argtypes = [_bool]  # Value
def SetWarMode(Value):
    _set_warmode(Value)
_attack = _ScriptMethod(69)  # Attack
_attack.argtypes = [_uint]  # AttackedID
def Attack(AttackedID):
    _attack(AttackedID)
_use_self_paperdoll = _ScriptMethod(70)  # UseSelfPaperdollScroll
def UseSelfPaperdollScroll():
    _use_self_paperdoll()
_use_paperdoll = _ScriptMethod(71)  # UseOtherPaperdollScroll
_use_paperdoll.argtypes = [_uint]  # ID
def UseOtherPaperdollScroll(ID):
    _use_paperdoll(ID)
_target_id = _ScriptMethod(72)  # GetTargetID
_target_id.restype = _uint
def TargetID():
    # Serial under the active target cursor; 0 when no cursor is shown
    # (TargetPresent/WaitForTarget rely on this zero convention).
    return _target_id()
def TargetPresent():  # GetTargetStatus
    # True while a target cursor is active.
    return bool(_target_id())
def WaitForTarget(MaxWaitTimeMS):
    """Wait until a target cursor appears or MaxWaitTimeMS elapses.

    Returns True when the cursor showed up before the deadline, False on
    timeout.  Polls every 10 ms via Wait() so events keep being processed.
    """
    # Compute the deadline once, instead of re-deriving it on every loop
    # iteration and again for the return value (the original also bound it
    # to the confusing local name `time`).
    deadline = _time.time() + MaxWaitTimeMS / 1000
    while not _target_id() and _time.time() < deadline:
        Wait(10)
    return _time.time() < deadline
_cancel_target = _ScriptMethod(73)  # CancelTarget
def CancelTarget():
    # Dismiss the target cursor and block until the client confirms
    # (TargetID() returning 0).
    _cancel_target()
    while _target_id():
        Wait(10)
_target_to_object = _ScriptMethod(74)  # TargetToObject
_target_to_object.argtypes = [_uint]  # ObjectID
def TargetToObject(ObjectID):
    # Answer the current target cursor with an object serial.
    _target_to_object(ObjectID)
_target_xyz = _ScriptMethod(75)  # TargetToXYZ
_target_xyz.argtypes = [_ushort,  # X
                        _ushort,  # Y
                        _byte]  # Z
def TargetToXYZ(X, Y, Z):
    _target_xyz(X, Y, Z)
_target_tile = _ScriptMethod(76)  # TargetToTile
_target_tile.argtypes = [_ushort,  # TileModel
                         _ushort,  # X
                         _ushort,  # Y
                         _byte]  # Z
def TargetToTile(TileModel, X, Y, Z):
    _target_tile(TileModel, X, Y, Z)
# NOTE(review): the WaitTarget* calls appear to pre-arm an automatic answer
# for an upcoming target request — confirm against the Stealth API docs.
_wait_target_object = _ScriptMethod(77)  # WaitTargetObject
_wait_target_object.argtypes = [_uint]  # ObjID
def WaitTargetObject(ObjID):
    _wait_target_object(ObjID)
_wait_target_tile = _ScriptMethod(78)  # WaitTargetTile
_wait_target_tile.argtypes = [_ushort,  # Tile
                              _ushort,  # X
                              _ushort,  # Y
                              _byte]  # Z
def WaitTargetTile(Tile, X, Y, Z):
    _wait_target_tile(Tile, X, Y, Z)
_wait_target_xyz = _ScriptMethod(79)  # WaitTargetXYZ
_wait_target_xyz.argtypes = [_ushort,  # X
                             _ushort,  # Y
                             _byte]  # Z
def WaitTargetXYZ(X, Y, Z):
    _wait_target_xyz(X, Y, Z)
_wait_target_self = _ScriptMethod(80)  # WaitTargetSelf
def WaitTargetSelf():
    _wait_target_self()
_wait_target_graphic = _ScriptMethod(81)  # WaitTargetType
_wait_target_graphic.argtypes = [_ushort]  # ObjType
def WaitTargetType(ObjType):
    _wait_target_graphic(ObjType)
_cancel_wait_target = _ScriptMethod(82)  # CancelWaitTarget
def CancelWaitTarget():
    _cancel_wait_target()
_wait_target_ground = _ScriptMethod(83)  # WaitTargetGround
_wait_target_ground.argtypes = [_ushort]  # ObjType
def WaitTargetGround(ObjType):
    _wait_target_ground(ObjType)
_wait_target_last = _ScriptMethod(84)  # WaitTargetLast
def WaitTargetLast():
    _wait_target_last()
_wait = _ScriptMethod(0)  # Wait
def Wait(WaitTimeMS):
    # Pause the script for WaitTimeMS milliseconds while still pumping the
    # protocol: every _wait() call lets Stealth process pending events.
    end = _time.time() + WaitTimeMS / 1000
    while _time.time() < end:
        _wait()  # pause script and event checks
        _time.sleep(0.010)
    else:
        # while/else always runs when the loop exits without break, so at
        # least one _wait() happens even when the delay is tiny or zero.
        _wait()  # condition does not work while delay is a very small number
_use_primary_ability = _ScriptMethod(85)  # UsePrimaryAbility
def UsePrimaryAbility():
    _use_primary_ability()
_use_secondary_ability = _ScriptMethod(86)  # UseSecondaryAbility
def UseSecondaryAbility():
    _use_secondary_ability()
_get_ability = _ScriptMethod(87)  # GetAbility
_get_ability.restype = _str
def GetActiveAbility():
    return _get_ability()
_toggle_fly = _ScriptMethod(88)  # ToggleFly
def ToggleFly():
    _toggle_fly()
# Skill helpers: the server resolves skill names to numeric IDs.
_get_skill_id_from_socket = _ScriptMethod(89)  # GetSkillID
_get_skill_id_from_socket.restype = _int  # SkillID
_get_skill_id_from_socket.argtypes = [_str]  # SkillName
def _get_skill_id(name):
    # Resolve a skill name, turning the server's negative "not found"
    # result into a ValueError.
    skill_id = _get_skill_id_from_socket(name)
    if skill_id < 0:
        raise ValueError('Unknown skill name "' + name + '".')
    return skill_id
_use_skill = _ScriptMethod(90)  # UseSkill
_use_skill.argtypes = [_int]  # SkillID
def UseSkill(SkillName):
    # Always returns True; unknown names raise ValueError instead.
    _use_skill(_get_skill_id(SkillName))
    return True
_lock_skill = _ScriptMethod(91)  # ChangeSkillLockState
_lock_skill.argtypes = [_int,  # SkillID
                        _ubyte]  # SkillState
def ChangeSkillLockState(SkillName, skillState):
    # NOTE(review): uses the unvalidated resolver, so an unknown name
    # passes a negative ID through instead of raising like UseSkill does.
    _lock_skill(_get_skill_id_from_socket(SkillName), skillState)
_get_skill_cap = _ScriptMethod(92)  # GetSkillCap
_get_skill_cap.restype = _double
_get_skill_cap.argtypes = [_int]  # SkillID
def GetSkillCap(SkillName):
    return _get_skill_cap(_get_skill_id_from_socket(SkillName))
_get_skill_value = _ScriptMethod(93)  # GetSkillValue
_get_skill_value.restype = _double
_get_skill_value.argtypes = [_int]  # SkillID
def GetSkillValue(SkillName):
    return _get_skill_value(_get_skill_id_from_socket(SkillName))
_get_skill_current_value = _ScriptMethod(351)  # GetSkillCurrentValue
_get_skill_current_value.restype = _double
_get_skill_current_value.argtypes = [_int]  # SkillID
def GetSkillCurrentValue(SkillName):
    return _get_skill_current_value(_get_skill_id_from_socket(SkillName))
_request_virtues = _ScriptMethod(94)  # ReqVirtuesGump
def ReqVirtuesGump():
    _request_virtues()
# Virtue name -> protocol ID.
_VIRTUES = {
    'compassion': 0x69,
    'honesty': 0x6A,
    'honor': 0x6B,
    'humility': 0x6C,
    'justice': 0x6D,
    'sacrifice': 0x6E,
    'spirituality': 0x6F,
    'valor': 0x70,
}
_use_virtue = _ScriptMethod(95)  # UseVirtue
_use_virtue.argtypes = [_uint]
def UseVirtue(VirtueName):
    # Case-insensitive; raises ValueError for unknown virtue names.
    if VirtueName.lower() not in _VIRTUES:
        error = 'UseVirtue error: Unknown name "' + VirtueName + '".'
        raise ValueError(error)
    _use_virtue(_VIRTUES[VirtueName.lower()])
# Spell name -> Stealth spell ID.  IDs group by school: 1-64 Magery circles
# 1-8, 101+ Necromancy, 201+ Chivalry (Paladin), 401+ Bushido, 501+
# Ninjitsu, 601+ Spellweaving, 678+ Mysticism, 701+ masteries/passives.
# Several entries are aliases (spaced/underscored spellings) of one ID.
_SPELLS = {
    # 1st circle
    'clumsy': 1,
    'create food': 2,
    'feeblemind': 3,
    'heal': 4,
    'magic arrow': 5,
    'night sight': 6,
    'reactive armor': 7,
    'weaken': 8,
    # 2nd circle
    'agility': 9,
    'cunning': 10,
    'cure': 11,
    'harm': 12,
    'magic trap': 13,
    'magic untrap': 14,
    'protection': 15,
    'strength': 16,
    # 3rd circle
    'bless': 17,
    'fireball': 18,
    'magic lock': 19,
    'poison': 20,
    'telekinesis': 21,
    'teleport': 22,
    'unlock': 23,
    'wall of stone': 24,
    # 4th circle
    'arch cure': 25,
    'arch protection': 26,
    'curse': 27,
    'fire field': 28,
    'greater heal': 29,
    'lightning': 30,
    'mana drain': 31,
    'recall': 32,
    # 5th circle
    'blade spirit': 33,
    'dispel field': 34,
    'incognito': 35,
    'magic reflection': 36,
    'spell reflection': 36,
    'mind blast': 37,
    'paralyze': 38,
    'poison field': 39,
    'summon creature': 40,
    # 6th circle
    'dispel': 41,
    'energy bolt': 42,
    'explosion': 43,
    'invisibility': 44,
    'mark': 45,
    'mass curse': 46,
    'paralyze field': 47,
    'reveal': 48,
    # 7th circle
    'chain lightning': 49,
    'energy field': 50,
    'flame strike': 51,
    'gate travel': 52,
    'mana vampire': 53,
    'mass dispel': 54,
    'meteor swarm': 55,
    'polymorph': 56,
    # 8th circle
    'earthquake': 57,
    'energy vortex': 58,
    'resurrection': 59,
    'summon air elemental': 60,
    'summon daemon': 61,
    'summon earth elemental': 62,
    'summon fire elemental': 63,
    'summon water elemental': 64,
    # Necromancy
    'animate dead': 101,
    'blood oath': 102,
    'corpse skin': 103,
    'curse weapon': 104,
    'evil omen': 105,
    'horrific beast': 106,
    'lich form': 107,
    'mind rot': 108,
    'pain spike': 109,
    'poison strike': 110,
    'strangle': 111,
    'summon familiar': 112,
    'vampiric embrace': 113,
    'vengeful spirit': 114,
    'wither': 115,
    'wraith form': 116,
    'exorcism': 117,
    # Paladin spells
    'cleanse by fire': 201,
    'close wounds': 202,
    'consecrate weapon': 203,
    'dispel evil': 204,
    'divine fury': 205,
    'enemy of one': 206,
    'holy light': 207,
    'noble sacrifice': 208,
    'remove curse': 209,
    'sacred journey': 210,
    # Bushido spells
    'honorable execution': 401,
    'confidence': 402,
    'evasion': 403,
    'counter attack': 404,
    'lightning strike': 405,
    'momentum strike': 406,
    # Ninjitsu spells
    'focus attack': 501,
    'death strike': 502,
    'animal form': 503,
    'ki attack': 504,
    'surprise attack': 505,
    'backstab': 506,
    'shadow jump': 507,
    'mirror image': 508,
    # Spellweaving spells
    'arcane circle': 601,
    'gift of renewal': 602,
    'immolating weapon': 603,
    'attunement': 604,
    'thunderstorm': 605,
    'nature fury': 606,
    'summon fey': 607,
    'summon fiend': 608,
    'reaper form': 609,
    'wildfire': 610,
    'essence of wind': 611,
    'dryad allure': 612,
    'ethereal voyage': 613,
    'word of death': 614,
    'gift of life': 615,
    'arcane empowerment': 616,
    # Mysticism spells
    'nether bolt': 678,
    'healing stone': 679,
    'pure magic': 680,
    'enchant': 681,
    'sleep': 682,
    'eagle strike': 683,
    'animated weapon': 684,
    'stone form': 685,
    'spell trigger': 686,
    'mass sleep': 687,
    'cleansing winds': 688,
    'bombard': 689,
    'spell plague': 690,
    'hail storm': 691,
    'nether cyclone': 692,
    'rising colossus': 693,
    # Shared Passives
    'enchanted summoning': 715,
    'enchanted_summoning': 715,
    'intuition': 718,
    'warriors gifts': 733,
    'warriors_gifts': 733,
    "warrior's gifts": 733,
    # Provocation
    'inspire': 701,
    'invigorate': 702,
    # Peacemaking
    'resilience': 703,
    'perseverance': 704,
    # Discordance
    'tribulation': 705,
    'despair': 706,
    # Magery
    'death_ray': 707,
    'death ray': 707,
    'ethereal_burst': 708,
    'ethereal burst': 708,
    'ethereal_blast': 708,
    'ethereal blast': 708,
    # Mysticism
    'nether_blast': 709,
    'nether blast': 709,
    'mystic_weapon': 710,
    'mystic weapon': 710,
    # Necromancy
    'command_undead': 711,
    'command undead': 711,
    'conduit': 712,
    # Spellweaving
    'mana_shield': 713,
    'mana shield': 713,
    'summon_reaper': 714,
    'summon reaper': 714,
    # Bushido
    'anticipate_hit': 716,
    'anticipate hit': 716,
    'warcry': 717,
    # Chivalry
    'rejuvenate': 719,
    'holy_fist': 720,
    'holy fist': 720,
    # Ninjitsu
    'shadow': 721,
    'white_tiger_form': 722,
    'white tiger form': 722,
    # Archery
    'flaming_shot': 723,
    'flaming shot': 723,
    'playing_the_odds': 724,
    'playing the odds': 724,
    # Fencing
    'thrust': 725,
    'pierce': 726,
    # Mace Fighting
    'stagger': 727,
    'toughness': 728,
    # Swordsmanship
    'onslaught': 729,
    'focused_eye': 730,
    'focused eye': 730,
    # Throwing
    'elemental_fury': 731,
    'elemental fury': 731,
    'called_shot': 732,
    'called shot': 732,
    # Parrying
    'shield_bash': 734,
    'shield bash': 734,
    'bodyguard': 735,
    'heighten_senses': 736,
    'heighten senses': 736,
    # Poisoning
    'tolerance': 737,
    'injected_strike': 738,
    'injected strike': 738,
    'potency': 739,
    # Wrestling
    'rampage': 740,
    'fists_of_fury': 741,
    'fists of fury': 741,
    'knockout': 742,
    # Animal Taming
    'whispering': 743,
    'boarding': 745,
    'combat_training': 744,
    'combat training': 744,
}
def _get_spell_id(name):
    # Map a (case-insensitive) spell name to its protocol ID; raises
    # ValueError for names missing from _SPELLS.
    name = name.lower()
    if name not in _SPELLS:
        raise ValueError('Unknown spell name "' + name + '".')
    return _SPELLS[name]
_cast_spell = _ScriptMethod(96)  # CastSpell
_cast_spell.argtypes = [_int]  # SpellID
def Cast(SpellName):
    # Cast a spell by name; always returns True (an unknown name raises
    # ValueError via _get_spell_id instead).
    _cast_spell(_get_spell_id(SpellName))
    return True
def CastToObj(SpellName, ObjID):
    """Pre-arm ObjID as the answer to the next target cursor, then cast SpellName.

    Raises ValueError (via _get_spell_id) for an unknown spell name.
    """
    _wait_target_object(ObjID)
    _cast_spell(_get_spell_id(SpellName))

def CastToObject(SpellName, ObjID):
    """Alias of CastToObj, kept for backward compatibility."""
    # Delegate instead of duplicating the body, so future fixes apply once.
    CastToObj(SpellName, ObjID)
_is_active_spell_ability = _ScriptMethod(98)  # IsActiveSpellAbility
_is_active_spell_ability.restype = _bool
_is_active_spell_ability.argtypes = [_int]  # SpellName
def IsActiveSpellAbility(SpellName):
    return _is_active_spell_ability(_get_spell_id(SpellName))
_clear_catch_bag = _ScriptMethod(100)  # UnsetCatchBag
def UnsetCatchBag():
    _clear_catch_bag()
_set_catch_bag = _ScriptMethod(99)  # SetCatchBag
_set_catch_bag.argtypes = [_uint]  # ObjectID
def SetCatchBag(ObjectID):
    # Returns a small status code: 0 = bag cleared (ObjectID == 0),
    # 1 = object not found (error only logged), 2 = catch bag set.
    if ObjectID == 0:
        _clear_catch_bag()
        return 0
    elif not _is_object_exists(ObjectID):
        error = 'SetCatchBag Error: Object {} not found.'.format(hex(ObjectID))
        AddToSystemJournal(error)
        return 1
    else:
        _set_catch_bag(ObjectID)
        return 2
_use_object = _ScriptMethod(101)  # UseObject
_use_object.argtypes = [_uint]  # ObjectID
def UseObject(ObjectID):
    # Use (activate) an object by serial.
    _use_object(ObjectID)
_use_type = _ScriptMethod(102)  # UseType
_use_type.restype = _uint
_use_type.argtypes = [_ushort,  # ObjType
                      _ushort]  # Color
def UseType(ObjType, Color):
    return _use_type(ObjType, Color)
def UseType2(ObjType):
    # UseType with the 0xFFFF "any color" wildcard.
    return _use_type(ObjType, 0xFFFF)
_use_from_ground = _ScriptMethod(103)  # UseFromGround
_use_from_ground.restype = _uint
_use_from_ground.argtypes = [_ushort,  # ObjType
                             _ushort]  # Color
def UseFromGround(ObjType, Color):
    return _use_from_ground(ObjType, Color)
_click_on_object = _ScriptMethod(104)  # ClickOnObject
_click_on_object.argtypes = [_uint]  # ObjectID
def ClickOnObject(ObjectID):
    # Missing objects are only logged, not raised.
    if not _is_object_exists(ObjectID):
        err = 'ClickOnObject error: Object {} not found.'.format(hex(ObjectID))
        AddToSystemJournal(err)
    else:
        _click_on_object(ObjectID)
_get_found_index = _ScriptMethod(105)  # GetFoundedParamID
_get_found_index.restype = _int
def FoundedParamID():
    return _get_found_index()
def FoundParamID():
    # Grammar-fixed alias of FoundedParamID().
    return _get_found_index()
# Line* accessors describe the journal line most recently matched/read.
_get_last_line_serial = _ScriptMethod(106)  # GetLineID
_get_last_line_serial.restype = _uint
def LineID():
    return _get_last_line_serial()
_get_last_line_graphic = _ScriptMethod(107)  # GetLineType
_get_last_line_graphic.restype = _ushort
def LineType():
    return _get_last_line_graphic()
_get_last_line_name = _ScriptMethod(114)  # GetLineName
_get_last_line_name.restype = _str
def LineName():
    # Sender name; 'System' marks system messages (see WaitJournalLineSystem).
    return _get_last_line_name()
_get_last_line_time = _ScriptMethod(108)  # GetLineTime
_get_last_line_time.restype = _double
def LineTime():
    # Delphi TDateTime converted to a Python datetime.
    return _ddt2pdt(_get_last_line_time())
_get_last_line_message_type = _ScriptMethod(109)  # GetLineMsgType
_get_last_line_message_type.restype = _ubyte
def LineMsgType():
    return _get_last_line_message_type()
_get_last_line_font_color = _ScriptMethod(110)  # GetLineTextColor
_get_last_line_font_color.restype = _ushort
def LineTextColor():
    return _get_last_line_font_color()
_get_last_line_font = _ScriptMethod(111)  # GetLineTextFont
_get_last_line_font.restype = _ushort
def LineTextFont():
    return _get_last_line_font()
_get_last_line_index = _ScriptMethod(112)  # GetLineIndex
_get_last_line_index.restype = _int
def LineIndex():
    return _get_last_line_index()
_get_last_line_count = _ScriptMethod(113)  # GetLineCount
_get_last_line_count.restype = _int
def LineCount():
    return _get_last_line_count()
_journal_ignore = _ScriptMethod(115)  # AddJournalIgnore
_journal_ignore.argtypes = [_str]  # Str
def AddJournalIgnore(Str):
    _journal_ignore(Str)
_clear_journal_ignore = _ScriptMethod(116)  # ClearJournalIgnore
def ClearJournalIgnore():
    _clear_journal_ignore()
_chat_ignore = _ScriptMethod(117)  # AddChatUserIgnore
_chat_ignore.argtypes = [_str]  # User
def AddChatUserIgnore(User):
    _chat_ignore(User)
_journal_add = _ScriptMethod(304)  # AddToJournal
_journal_add.argtypes = [_str]  # Msg
def AddToJournal(Msg):
    _journal_add(Msg)
_clear_chat_ignore = _ScriptMethod(118)  # ClearChatUserIgnore
def ClearChatUserIgnore():
    _clear_chat_ignore()
_clear_journal = _ScriptMethod(119)  # ClearJournal
def ClearJournal():
    _clear_journal()
_clear_system_journal = _ScriptMethod(346)  # ClearSystemJournal
def ClearSystemJournal():
    _clear_system_journal()
_last_journal_message = _ScriptMethod(120)  # LastJournalMessage
_last_journal_message.restype = _str
def LastJournalMessage():
    return _last_journal_message()
_get_journal_line_index = _ScriptMethod(121)  # InJournal
_get_journal_line_index.restype = _int
_get_journal_line_index.argtypes = [_str]  # Str
def InJournal(Str):
    # Non-negative when Str occurs in the journal; callers test `>= 0`
    # for "found" (same convention as InJournalBetweenTimes).
    return _get_journal_line_index(Str)
_get_journal_line_index_time = _ScriptMethod(122)  # InJournalBetweenTimes
_get_journal_line_index_time.restype = _int
_get_journal_line_index_time.argtypes = [_str,  # Str
                                         _double,  # TimeBegin
                                         _double]  # TimeEnd
def InJournalBetweenTimes(Str, TimeBegin, TimeEnd):
    # Python datetimes are converted to Delphi TDateTime for the wire.
    return _get_journal_line_index_time(Str, _pdt2ddt(TimeBegin),
                                        _pdt2ddt(TimeEnd))
_get_journal_line = _ScriptMethod(123)  # Journal
_get_journal_line.restype = _str
_get_journal_line.argtypes = [_uint]  # StringIndex
def Journal(StringIndex):
    return _get_journal_line(StringIndex)
_set_journal_line = _ScriptMethod(124)  # SetJournalLine
_set_journal_line.argtypes = [_uint,  # StringIndex
                              _str]  # Text
def SetJournalLine(StringIndex, Text):
    _set_journal_line(StringIndex, Text)
# Valid journal indices run from LowJournal() to HighJournal().
_low_journal_index = _ScriptMethod(125)  # LowJournal
_low_journal_index.restype = _int
def LowJournal():
    return _low_journal_index()
_high_journal_index = _ScriptMethod(126)  # HighJournal
_high_journal_index.restype = _int
def HighJournal():
    return _high_journal_index()
def _wait_journal_line(StartTime, Str, MaxWaitTimeMS, system_only):
    # Shared implementation for WaitJournalLine/WaitJournalLineSystem
    # (previously duplicated): poll the journal every 10 ms until Str shows
    # up after StartTime — optionally only from the 'System' sender — or
    # the deadline passes.  MaxWaitTimeMS == 0 means "practically forever"
    # (999 weeks).
    time = {'milliseconds': MaxWaitTimeMS} if MaxWaitTimeMS else {'weeks': 999}
    stop = StartTime + _datetime.timedelta(**time)
    while _datetime.datetime.now() <= stop:
        if InJournalBetweenTimes(Str, StartTime, stop) >= 0:
            if not system_only or LineName() == 'System':
                return True
        Wait(10)
    return False

def WaitJournalLine(StartTime, Str, MaxWaitTimeMS=0):
    """Wait for Str to appear in the journal after StartTime; True on success."""
    return _wait_journal_line(StartTime, Str, MaxWaitTimeMS, False)

def WaitJournalLineSystem(StartTime, Str, MaxWaitTimeMS=0):
    """Like WaitJournalLine, but only accept lines whose sender is 'System'."""
    return _wait_journal_line(StartTime, Str, MaxWaitTimeMS, True)
# Search (Find*) configuration knobs kept on the Stealth side.
_set_search_distance = _ScriptMethod(127)  # SetFindDistance
_set_search_distance.argtypes = [_uint]  # Value
def SetFindDistance(Value):
    _set_search_distance(Value)
_get_search_distance = _ScriptMethod(128)  # GetFindDistance
_get_search_distance.restype = _uint
def GetFindDistance():
    return _get_search_distance()
_set_search_vertical = _ScriptMethod(129)  # SetFindVertical
_set_search_vertical.argtypes = [_uint]  # Value
def SetFindVertical(Value):
    _set_search_vertical(Value)
_get_search_vertical = _ScriptMethod(130)  # GetFindVertical
_get_search_vertical.restype = _uint
def GetFindVertical():
    return _get_search_vertical()
_set_search_at_null = _ScriptMethod(336)  # SetFindInNulPoint
_set_search_at_null.argtypes = [_bool]  # Value
def SetFindInNulPoint(Value):
    _set_search_at_null(Value)
_get_search_at_null = _ScriptMethod(337)  # GetFindInNulPoint
_get_search_at_null.restype = _bool
def GetFindInNulPoint():
    return _get_search_at_null()
_find_graphic = _ScriptMethod(131)  # FindTypeEx
_find_graphic.restype = _uint
_find_graphic.argtypes = [_ushort,  # ObjType
                          _ushort,  # Color
                          _uint,  # Container
                          _bool]  # InSub
def FindTypeEx(ObjType, Color, Container, InSub=True):
    return _find_graphic(ObjType, Color, Container, InSub)
def FindType(ObjType, Container):
    # Shorthand: any color (0xFFFF wildcard), no sub-containers.
    return _find_graphic(ObjType, 0xFFFF, Container, False)
_find_graphics_array = _ScriptMethod(340)  # FindTypesArrayEx
_find_graphics_array.restype = _uint
_find_graphics_array.argtypes = [_uint,  # Len
                                 _buffer,  # ArrayBytes
                                 _uint,  # Len2
                                 _buffer,  # ArrayBytes2
                                 _uint,  # Len3
                                 _buffer,  # ArrayBytes3
                                 _bool]  # InSub
def FindTypesArrayEx(ObjTypes, Colors, Containers, InSub):
    # Pack each list as a (length, packed-bytes) pair: ushort ('H') for
    # types and colors, uint ('I') for container serials, then the InSub
    # flag — matching the argtypes declared above.
    args = []
    for array, fmt in ((ObjTypes, 'H'),
                       (Colors, 'H'),
                       (Containers, 'I')):
        args += [len(array), _struct.pack(str(len(array)) + fmt, *array)]
    args.append(InSub)
    return _find_graphics_array(*args)
_find_notoriety = _ScriptMethod(132)  # FindNotoriety
_find_notoriety.restype = _uint
_find_notoriety.argtypes = [_ushort,  # ObjType
                            _ubyte]  # Notoriety
def FindNotoriety(ObjType, Notoriety):
    return _find_notoriety(ObjType, Notoriety)
_find_at_point = _ScriptMethod(133)  # FindAtCoord
_find_at_point.restype = _uint
_find_at_point.argtypes = [_ushort,  # X
                           _ushort]  # Y
def FindAtCoord(X, Y):
    return _find_at_point(X, Y)
# Per-serial ignore list maintained on the Stealth side.
_search_ignore = _ScriptMethod(134)  # Ignore
_search_ignore.argtypes = [_uint]  # ObjID
def Ignore(ObjID):
    _search_ignore(ObjID)
_unset_search_ignore = _ScriptMethod(135)  # IgnoreOff
_unset_search_ignore.argtypes = [_uint]  # ObjID
def IgnoreOff(ObjID):
    _unset_search_ignore(ObjID)
_reset_search_ignore = _ScriptMethod(136)  # IgnoreReset
def IgnoreReset():
    _reset_search_ignore()
_get_ignore_list = _ScriptMethod(137)  # GetIgnoreList
_get_ignore_list.restype = _buffer  # TArray
def GetIgnoreList():
    # The buffer is a flat array of uint32 serials (4 bytes each).
    result = []
    data = _get_ignore_list()
    if data:
        fmt = str(len(data) // 4) + 'I'
        result.extend(_struct.unpack(fmt, data))
    return result
_get_found_objects_list = _ScriptMethod(138)  # GetFindedList
_get_found_objects_list.restype = _buffer  # TArray
def GetFoundList():
    # Serials found by the last Find* call, decoded from a flat uint32 array.
    result = []
    data = _get_found_objects_list()
    if data:
        fmt = str(len(data) // 4) + 'I'
        result.extend(_struct.unpack(fmt, data))
    return result
def GetFindedList():  # HATE THIS!!! but there is nothing to do(
    # Misspelled legacy alias kept for backward compatibility.
    return GetFoundList()
_get_found_object = _ScriptMethod(139)  # GetFindItem
_get_found_object.restype = _uint
def FindItem():
    return _get_found_object()
_count_found_objects = _ScriptMethod(140)  # GetFindCount
_count_found_objects.restype = _int
def FindCount():
    return _count_found_objects()
_get_found_quantity = _ScriptMethod(141)  # GetFindCount
_get_found_quantity.restype = _int
def FindQuantity():
    # NOTE(review): method 141's inline comment said "GetFindCount", same
    # as 140 above; given the distinct ID this is presumably
    # GetFindQuantity — confirm against the Stealth protocol table.
    return _get_found_quantity()
_count_found_quantities = _ScriptMethod(142)  # FindFullQuantity
_count_found_quantities.restype = _int
def FindFullQuantity():
    return _count_found_quantities()
# Predicted* — presumably the client-side predicted position/heading of the
# character; confirm in the Stealth docs.
_predicted_x = _ScriptMethod(143)  # PredictedX
_predicted_x.restype = _ushort
def PredictedX():
    return _predicted_x()
_predicted_y = _ScriptMethod(144)  # PredictedY
_predicted_y.restype = _ushort
def PredictedY():
    return _predicted_y()
_predicted_z = _ScriptMethod(145)  # PredictedZ
_predicted_z.restype = _byte
def PredictedZ():
    return _predicted_z()
_predicted_dir = _ScriptMethod(146)  # PredictedDirection
_predicted_dir.restype = _ubyte
def PredictedDirection():
    return _predicted_dir()
# ---------------------------------------------------------------------------
# Per-object property getters (position, names, stats, status flags).
# ---------------------------------------------------------------------------
_get_x = _ScriptMethod(15)  # GetX
_get_x.restype = _ushort
_get_x.argtypes = [_uint]  # ObjID
def GetX(ObjID):
    return _get_x(ObjID)
_get_y = _ScriptMethod(16)  # GetY
_get_y.restype = _ushort
_get_y.argtypes = [_uint]  # ObjID
def GetY(ObjID):
    return _get_y(ObjID)
_get_z = _ScriptMethod(17)  # GetZ
_get_z.restype = _byte
_get_z.argtypes = [_uint]  # ObjID
def GetZ(ObjID):
    return _get_z(ObjID)
_get_name = _ScriptMethod(147)  # GetName
_get_name.restype = _str
_get_name.argtypes = [_uint]  # ObjectID
def GetName(ObjectID):
    return _get_name(ObjectID)
_get_alt_name = _ScriptMethod(148)  # GetAltName
_get_alt_name.restype = _str
_get_alt_name.argtypes = [_uint]  # ObjectID
def GetAltName(ObjectID):
    return _get_alt_name(ObjectID)
_get_title = _ScriptMethod(149)  # GetTitle
_get_title.restype = _str
_get_title.argtypes = [_uint]  # ObjID
def GetTitle(ObjID):
    return _get_title(ObjID)
_get_tooltip = _ScriptMethod(150)  # GetTooltip
_get_tooltip.restype = _str
_get_tooltip.argtypes = [_uint]  # ObjID
def GetTooltip(ObjID):
    return _get_tooltip(ObjID)
def GetCliloc(ObjID):
    # Alias: the tooltip text is built from cliloc (localization) entries.
    return GetTooltip(ObjID)
_get_graphic = _ScriptMethod(151)  # GetType
_get_graphic.restype = _ushort
_get_graphic.argtypes = [_uint]  # ObjID
def GetType(ObjID):
    return _get_graphic(ObjID)
_get_tooltip_obj = _ScriptMethod(152)  # GetToolTipRec
_get_tooltip_obj.restype = _buffer  # Array of TClilocRec
_get_tooltip_obj.argtypes = [_uint]  # ObjID
def GetTooltipRec(ObjID):
    """Parse the object's tooltip records from the raw buffer.

    Buffer layout: int32 record count, then per record an int32 cliloc ID,
    an int32 string count, followed by that many length-prefixed strings
    (decoded via the _str marshalling type).
    Returns a list of {'Cliloc_ID': int, 'Params': [str, ...]} dicts.
    """
    result = []
    data = _get_tooltip_obj(ObjID)
    count = _struct.unpack_from('i', data)[0]
    offset = 4
    for i in range(count):
        cliloc, length = _struct.unpack_from('2i', data, offset)
        offset += 8
        strings = []
        for j in range(length):
            string = _str.from_buffer(data, offset)
            offset += _struct.calcsize(string.fmt)
            strings.append(string.value)
        result.append({'Cliloc_ID': cliloc, 'Params': strings})
    return result
_get_object_tooltip = _ScriptMethod(153)  # GetClilocByID
_get_object_tooltip.restype = _str
_get_object_tooltip.argtypes = [_uint]  # ClilocID
def GetClilocByID(ClilocID):
    return _get_object_tooltip(ClilocID)
_get_quantity = _ScriptMethod(154)  # GetQuantity
_get_quantity.restype = _int
_get_quantity.argtypes = [_uint]  # ObjID
def GetQuantity(ObjID):
    return _get_quantity(ObjID)
_is_object_exists = _ScriptMethod(155)  # IsObjectExists
_is_object_exists.restype = _bool
_is_object_exists.argtypes = [_uint]  # ObjID
def IsObjectExists(ObjID):
    return _is_object_exists(ObjID)
_is_npc = _ScriptMethod(172)  # IsNPC
_is_npc.restype = _bool
_is_npc.argtypes = [_uint]  # ObjID
def IsNPC(ObjID):
    return _is_npc(ObjID)
_get_price = _ScriptMethod(156)  # GetPrice
_get_price.restype = _uint
_get_price.argtypes = [_uint]  # ObjID
def GetPrice(ObjID):
    return _get_price(ObjID)
_get_direction = _ScriptMethod(157)  # GetDirection
_get_direction.restype = _ubyte
_get_direction.argtypes = [_uint]  # ObjID
def GetDirection(ObjID):
    return _get_direction(ObjID)
_get_distance = _ScriptMethod(158)  # GetDistance
_get_distance.restype = _int
_get_distance.argtypes = [_uint]  # ObjID
def GetDistance(ObjID):
    return _get_distance(ObjID)
_get_color = _ScriptMethod(159)  # GetColor
_get_color.restype = _ushort
_get_color.argtypes = [_uint]  # ObjID
def GetColor(ObjID):
    return _get_color(ObjID)
_get_strength = _ScriptMethod(160)  # GetStr
_get_strength.restype = _int
_get_strength.argtypes = [_uint]  # ObjID
def GetStr(ObjID):
    return _get_strength(ObjID)
_get_intelligence = _ScriptMethod(161)  # GetInt
_get_intelligence.restype = _int
_get_intelligence.argtypes = [_uint]  # ObjID
def GetInt(ObjID):
    return _get_intelligence(ObjID)
_get_dexterity = _ScriptMethod(162)  # GetDex
_get_dexterity.restype = _int
_get_dexterity.argtypes = [_uint]  # ObjID
def GetDex(ObjID):
    return _get_dexterity(ObjID)
_get_hp = _ScriptMethod(163)  # GetHP
_get_hp.restype = _int
_get_hp.argtypes = [_uint]  # ObjID
def GetHP(ObjID):
    """Return the object's hit points.

    A zero result for an existing NPC may just mean its stats have not
    been received yet, so request them, wait briefly, and retry once.
    """
    result = _get_hp(ObjID)
    if not result and _is_object_exists(ObjID) and _is_npc(ObjID):
        _request_stats(ObjID)
        Wait(100)
        result = _get_hp(ObjID)
    return result
_get_max_hp = _ScriptMethod(164)  # GetMaxHP
_get_max_hp.restype = _int
_get_max_hp.argtypes = [_uint]  # ObjID
def GetMaxHP(ObjID):
    return _get_max_hp(ObjID)
_get_mana = _ScriptMethod(165)  # GetMana
_get_mana.restype = _int
_get_mana.argtypes = [_uint]  # ObjID
def GetMana(ObjID):
    # Same request-stats-and-retry pattern as GetHP.
    result = _get_mana(ObjID)
    if not result and _is_object_exists(ObjID) and _is_npc(ObjID):
        _request_stats(ObjID)
        Wait(100)
        result = _get_mana(ObjID)
    return result
_get_max_mana = _ScriptMethod(166)  # GetMaxMana
_get_max_mana.restype = _int
_get_max_mana.argtypes = [_uint]  # ObjID
def GetMaxMana(ObjID):
    return _get_max_mana(ObjID)
_get_stamina = _ScriptMethod(167)  # GetStam
_get_stamina.restype = _int
_get_stamina.argtypes = [_uint]  # ObjID
def GetStam(ObjID):
    # Same request-stats-and-retry pattern as GetHP.
    result = _get_stamina(ObjID)
    if not result and _is_object_exists(ObjID) and _is_npc(ObjID):
        _request_stats(ObjID)
        Wait(100)
        result = _get_stamina(ObjID)
    return result
_get_max_stamina = _ScriptMethod(168)  # GetMaxStam
_get_max_stamina.restype = _int
_get_max_stamina.argtypes = [_uint]  # ObjID
def GetMaxStam(ObjID):
    return _get_max_stamina(ObjID)
_get_notoriety = _ScriptMethod(169)  # GetNotoriety
_get_notoriety.restype = _ubyte
_get_notoriety.argtypes = [_uint]  # ObjId
def GetNotoriety(ObjID):
    return _get_notoriety(ObjID)
_get_container = _ScriptMethod(170)  # GetParent
_get_container.restype = _uint
_get_container.argtypes = [_uint]  # ObjID
def GetParent(ObjID):
    return _get_container(ObjID)
def IsWarMode(ObjID):
    # NOTE: _get_warmode is a stub configured earlier in this module.
    return _get_warmode(ObjID)
_get_dead_status = _ScriptMethod(173)  # IsDead
_get_dead_status.restype = _bool
_get_dead_status.argtypes = [_uint]  # ObjID
def IsDead(ObjID):
    return _get_dead_status(ObjID)
_get_running_status = _ScriptMethod(174)  # IsRunning
_get_running_status.restype = _bool
_get_running_status.argtypes = [_uint]  # ObjID
def IsRunning(ObjID):
    return _get_running_status(ObjID)
_is_container = _ScriptMethod(175)  # IsContainer
_is_container.restype = _bool
_is_container.argtypes = [_uint]  # ObjID
def IsContainer(ObjID):
    return _is_container(ObjID)
_get_hidden_status = _ScriptMethod(176)  # IsHidden
_get_hidden_status.restype = _bool
_get_hidden_status.argtypes = [_uint]  # ObjID
def IsHidden(ObjID):
    return _get_hidden_status(ObjID)
_is_movable = _ScriptMethod(177)  # IsMovable
_is_movable.restype = _bool
_is_movable.argtypes = [_uint]  # ObjID
def IsMovable(ObjID):
    return _is_movable(ObjID)
_get_yellow_hits_status = _ScriptMethod(178)  # IsYellowHits
_get_yellow_hits_status.restype = _bool
_get_yellow_hits_status.argtypes = [_uint]  # ObjID
def IsYellowHits(ObjID):
    return _get_yellow_hits_status(ObjID)
_get_poisoned_status = _ScriptMethod(179)  # IsPoisoned
_get_poisoned_status.restype = _bool
_get_poisoned_status.argtypes = [_uint]  # ObjID
def IsPoisoned(ObjID):
    return _get_poisoned_status(ObjID)
_get_paralyzed_status = _ScriptMethod(180)  # IsParalyzed
_get_paralyzed_status.restype = _bool
_get_paralyzed_status.argtypes = [_uint]  # ObjID
def IsParalyzed(ObjID):
    return _get_paralyzed_status(ObjID)
_is_female = _ScriptMethod(181)  # IsFemale
_is_female.restype = _bool
_is_female.argtypes = [_uint]  # ObjID
def IsFemale(ObjID):
    return _is_female(ObjID)
# ---------------------------------------------------------------------------
# Simple actions and the drag/drop item-moving primitives.
# ---------------------------------------------------------------------------
_open_door = _ScriptMethod(182)  # OpenDoor
def OpenDoor():
    _open_door()
_bow = _ScriptMethod(183)  # Bow
def Bow():
    _bow()
_salute = _ScriptMethod(184)  # Salute
def Salute():
    _salute()
# The "pickuped" item is the item currently held on the cursor.
_get_picked_item = _ScriptMethod(185)  # GetPickupedItem
_get_picked_item.restype = _uint
def GetPickupedItem():
    return _get_picked_item()
_set_picked_item = _ScriptMethod(186)  # SetPickupedItem
_set_picked_item.argtypes = [_uint]  # ID
def SetPickupedItem(ID):
    _set_picked_item(ID)
_get_drop_check_coord = _ScriptMethod(187)  # GetDropCheckCoord
_get_drop_check_coord.restype = _bool
def GetDropCheckCoord():
    return _get_drop_check_coord()
_set_drop_check_coord = _ScriptMethod(188)  # SetDropCheckCoord
_set_drop_check_coord.argtypes = [_bool]  # Value
def SetDropCheckCoord(Value):
    _set_drop_check_coord(Value)
_get_drop_delay = _ScriptMethod(189)  # GetDropDelay
_get_drop_delay.restype = _uint
def GetDropDelay():
    return _get_drop_delay()
_set_drop_delay = _ScriptMethod(190)  # SetDropDelay
_set_drop_delay.argtypes = [_uint]  # Value
def SetDropDelay(Value):
    _set_drop_delay(Value)
_drag_item = _ScriptMethod(191)  # DragItem
_drag_item.restype = _bool
_drag_item.argtypes = [_uint,  # ItemID
                       _int]  # Count
def DragItem(ItemID, Count):
    return _drag_item(ItemID, Count)
_drop_item = _ScriptMethod(192)  # DropItem
_drop_item.restype = _bool
_drop_item.argtypes = [_uint,  # MoveIntoID
                       _int,  # X
                       _int,  # Y
                       _int]  # Z
def DropItem(MoveIntoID, X, Y, Z):
    return _drop_item(MoveIntoID, X, Y, Z)
def MoveItem(ItemID, Count, MoveIntoID, X, Y, Z):
    # Drag, short pause, drop; returns False if the drag itself failed.
    if not DragItem(ItemID, Count):
        return False
    Wait(100)
    return DropItem(MoveIntoID, X, Y, Z)
def Grab(ItemID, Count):
    # Move the item into the player's backpack.
    return MoveItem(ItemID, Count, Backpack(), 0, 0, 0)
def Drop(ItemID, Count, X, Y, Z):
    # Drop the item on the ground at the given world coordinates.
    return MoveItem(ItemID, Count, Ground(), X, Y, Z)
def DropHere(ItemID):
    # Drop the whole stack (Count=0) on the ground at the current spot.
    return MoveItem(ItemID, 0, Ground(), 0, 0, 0)
def MoveItems(Container, ItemsType, ItemsColor, MoveIntoID, X, Y, Z,
              DelayMS, MaxCount=0):
    """Move items matching ItemsType/ItemsColor out of Container.

    Searches Container (non-recursively) and moves each found item to
    MoveIntoID at (X, Y, Z), pausing DelayMS between moves.  MaxCount <= 0
    or larger than the result set moves everything found.

    Returns False when nothing matched, True otherwise.
    """
    FindTypeEx(ItemsType, ItemsColor, Container, False)
    items = GetFoundList()
    if not items:  # nothing found
        return False
    # Clamp the client-side drop delay into the sane [50, 10000] ms window.
    # (The previous expression re-mapped an exact 50 ms delay to 10000 ms.)
    drop_delay = max(50, min(GetDropDelay(), 10000))
    if drop_delay > DelayMS:
        # the client already waits drop_delay per move, so skip extra waits
        DelayMS = 0
    SetDropDelay(drop_delay)
    if not 0 < MaxCount < len(items):
        MaxCount = len(items)
    for i in range(MaxCount):
        MoveItem(items[i], 0, MoveIntoID, X, Y, Z)
        Wait(DelayMS)
    return True
def EmptyContainer(Container, DestContainer, delay_ms):
    """Move every item from Container into DestContainer.

    Matches any type (-1) and any color (-1); the 0xFFFF/0xFFFF drop
    coordinates let the client place each item automatically.  Pauses
    delay_ms between moves.  Returns MoveItems' boolean result.
    """
    any_type = any_color = -1
    auto_x = auto_y = 0xFFFF
    return MoveItems(Container, any_type, any_color, DestContainer,
                     auto_x, auto_y, 0, delay_ms)
# ---------------------------------------------------------------------------
# Context menus, secure trading and classic (old-style) menus.
# ---------------------------------------------------------------------------
_request_context_menu = _ScriptMethod(193)  # RequestContextMenu
_request_context_menu.argtypes = [_uint]  # ID
def RequestContextMenu(ID):
    _request_context_menu(ID)
_wait_context_menu = _ScriptMethod(194)  # SetContextMenuHook
_wait_context_menu.argtypes = [_uint,  # MenuID
                               _ubyte]  # EntryNumber
def SetContextMenuHook(MenuID, EntryNumber):
    _wait_context_menu(MenuID, EntryNumber)
_get_context_menu = _ScriptMethod(195)  # GetContextMenu
_get_context_menu.restype = _str
def GetContextMenu():
    return _get_context_menu()
_get_context_menu_record = _ScriptMethod(345)  # GetContextMenuRec
_get_context_menu_record.restype = _buffer  # TODO: What is this do?
def GetContextMenuRec():
    # Unimplemented: the docstring below is a draft of the intended buffer
    # parsing, kept for reference; the function currently returns None.
    """
    fmt = 'HH'
    data = _get_context_menu_record()
    keys = 'Tag', 'Flags'
    serial, count, tmp = _struct.unpack('>IBI', data[:9])
    l = []
    for i in range(count):
        l.append(_struct.unpack('HHIHH', data[9+i*12:9+i*12+12]))
    """
    return None
_clear_context_menu = _ScriptMethod(196)  # ClearContextMenu
def ClearContextMenu():
    _clear_context_menu()
_is_trade = _ScriptMethod(197)  # CheckTradeState
_is_trade.restype = _bool
def IsTrade():
    return _is_trade()
_get_trade_container_serial = _ScriptMethod(198)  # GetTradeContainer
_get_trade_container_serial.restype = _uint
_get_trade_container_serial.argtypes = [_ubyte,  # TradeNum
                                        _ubyte]  # Num
def GetTradeContainer(TradeNum, Num):
    return _get_trade_container_serial(TradeNum, Num)
_get_trade_opponent_serial = _ScriptMethod(199)  # GetTradeOpponent
_get_trade_opponent_serial.restype = _uint
_get_trade_opponent_serial.argtypes = [_ubyte]  # TradeNum
def GetTradeOpponent(TradeNum):
    return _get_trade_opponent_serial(TradeNum)
_get_trades_count = _ScriptMethod(200)  # GetTradeCount
_get_trades_count.restype = _ubyte
def TradeCount():
    return _get_trades_count()
_get_trade_opponent_name = _ScriptMethod(201)  # GetTradeOpponentName
_get_trade_opponent_name.restype = _str
_get_trade_opponent_name.argtypes = [_ubyte]  # TradeNum
def GetTradeOpponentName(TradeNum):
    return _get_trade_opponent_name(TradeNum)
_get_trade_state = _ScriptMethod(202)  # TradeCheck
_get_trade_state.restype = _bool
_get_trade_state.argtypes = [_ubyte,  # TradeNum
                             _ubyte]  # Num
def TradeCheck(TradeNum, Num):
    return _get_trade_state(TradeNum, Num)
_confirm_trade = _ScriptMethod(203)  # ConfirmTrade
_confirm_trade.argtypes = [_ubyte]  # TradeNum
def ConfirmTrade(TradeNum):
    _confirm_trade(TradeNum)
_cancel_trade = _ScriptMethod(204)  # CancelTrade
_cancel_trade.restype = _bool
_cancel_trade.argtypes = [_ubyte]  # TradeNum
def CancelTrade(TradeNum):
    return _cancel_trade(TradeNum)
_wait_menu = _ScriptMethod(205)  # WaitMenu
_wait_menu.argtypes = [_str,  # MenuCaption
                       _str]  # ElementCaption
def WaitMenu(MenuCaption, ElementCaption):
    _wait_menu(MenuCaption, ElementCaption)
_auto_menu = _ScriptMethod(206)  # AutoMenu
_auto_menu.argtypes = [_str,  # MenuCaption
                       _str]  # ElementCaption
def AutoMenu(MenuCaption, ElementCaption):
    _auto_menu(MenuCaption, ElementCaption)
_is_menu_hook = _ScriptMethod(207)  # MenuHookPresent
_is_menu_hook.restype = _bool
def MenuHookPresent():
    return _is_menu_hook()
_is_menu = _ScriptMethod(208)  # MenuPresent
_is_menu.restype = _bool
def MenuPresent():
    return _is_menu()
_cancel_menu = _ScriptMethod(209)  # CancelMenu
def CancelMenu():
    _cancel_menu()
def CancelAllMenuHooks():
    # Alias: same underlying script method as CancelMenu.
    _cancel_menu()
_close_menu = _ScriptMethod(210)  # CloseMenu
def CloseMenu():
    _close_menu()
_get_menu = _ScriptMethod(338)  # GetMenuItems
_get_menu.restype = _str
_get_menu.argtypes = [_str]  # MenuCaption
def GetMenuItems(MenuCaption):  # TODO: split items, return list
    return _get_menu(MenuCaption)
_get_last_menu = _ScriptMethod(339)  # GetLastMenuItems
_get_last_menu.restype = _str
def GetLastMenuItems():  # TODO: split items, return list
    return _get_last_menu()
# ---------------------------------------------------------------------------
# Gump interaction: one-shot waits, auto-repliers, and replies addressed to
# a specific gump by index.
# ---------------------------------------------------------------------------
_wait_gump = _ScriptMethod(211)  # WaitGumpInt
_wait_gump.argtypes = [_int]  # Value
def WaitGump(Value):
    # Value is coerced to int so callers may pass numeric strings.
    _wait_gump(int(Value))
_wait_gump_text_entry = _ScriptMethod(212)  # WaitGumpTextEntry
_wait_gump_text_entry.argtypes = [_str]  # Value
def WaitTextEntry(Value):
    _wait_gump_text_entry(Value)
_auto_text_entry = _ScriptMethod(213)  # GumpAutoTextEntry
_auto_text_entry.argtypes = [_int,  # TextEntryID
                             _str]  # Value
def GumpAutoTextEntry(TextEntryID, Value):
    _auto_text_entry(TextEntryID, Value)
_auto_radiobutton = _ScriptMethod(214)  # GumpAutoRadiobutton
_auto_radiobutton.argtypes = [_int,  # RadiobuttonID
                              _int]  # Value
def GumpAutoRadiobutton(RadiobuttonID, Value):
    _auto_radiobutton(RadiobuttonID, Value)
_auto_checkbox = _ScriptMethod(215)  # GumpAutoCheckBox
_auto_checkbox.argtypes = [_int,  # CBID
                           _int]  # Value
def GumpAutoCheckBox(CBID, Value):
    _auto_checkbox(CBID, Value)
_send_gump_button = _ScriptMethod(216)  # NumGumpButton
_send_gump_button.restype = _bool
_send_gump_button.argtypes = [_ushort,  # GumpIndex
                              _int]  # Value
def NumGumpButton(GumpIndex, Value):
    return _send_gump_button(GumpIndex, Value)
_send_gump_text_entry = _ScriptMethod(217)  # NumGumpTextEntry
_send_gump_text_entry.restype = _bool
_send_gump_text_entry.argtypes = [_ushort,  # GumpIndex
                                  _int,  # TextEntryID
                                  _str]  # Value
def NumGumpTextEntry(GumpIndex, TextEntryID, Value):
    return _send_gump_text_entry(GumpIndex, TextEntryID, Value)
_send_gump_radiobutton = _ScriptMethod(218)  # NumGumpRadiobutton
_send_gump_radiobutton.restype = _bool
_send_gump_radiobutton.argtypes = [_ushort,  # GumpIndex
                                   _int,  # RadiobuttonID
                                   _int]  # Value
def NumGumpRadiobutton(GumpIndex, RadiobuttonID, Value):
    return _send_gump_radiobutton(GumpIndex, RadiobuttonID, Value)
_send_gump_checkbox = _ScriptMethod(219)  # NumGumpCheckBox
_send_gump_checkbox.restype = _bool
_send_gump_checkbox.argtypes = [_ushort,  # GumpIndex
                                _int,  # CBID
                                _int]  # Value
def NumGumpCheckBox(GumpIndex, CBID, Value):
    return _send_gump_checkbox(GumpIndex, CBID, Value)
_get_gumps_count = _ScriptMethod(220)  # GetGumpsCount
_get_gumps_count.restype = _int
def GetGumpsCount():
    return _get_gumps_count()
_close_gump = _ScriptMethod(221)  # CloseSimpleGump
_close_gump.argtypes = [_ushort]  # GumpIndex
def CloseSimpleGump(GumpIndex):
    _close_gump(GumpIndex)
def IsGump():
    """Return True when at least one gump is currently open."""
    open_gumps = GetGumpsCount()
    return open_gumps > 0
_get_gump_serial = _ScriptMethod(222)  # GetGumpSerial
_get_gump_serial.restype = _uint
_get_gump_serial.argtypes = [_ushort]  # GumpIndex
def GetGumpSerial(GumpIndex):
    return _get_gump_serial(GumpIndex)
_get_gump_type = _ScriptMethod(223)  # GetGumpID
_get_gump_type.restype = _uint
_get_gump_type.argtypes = [_ushort]  # GumpIndex
def GetGumpID(GumpIndex):
    return _get_gump_type(GumpIndex)
_get_gump_no_close = _ScriptMethod(224)  # GetGumpNoClose
_get_gump_no_close.restype = _bool
_get_gump_no_close.argtypes = [_ushort]  # GumpIndex
def IsGumpCanBeClosed(GumpIndex):
    # NOTE(review): returns the raw GetGumpNoClose flag without inversion,
    # although the names suggest opposite meanings — verify against the
    # Stealth API documentation before relying on the polarity.
    return _get_gump_no_close(GumpIndex)
# The following four getters return one server string with lines joined by
# the platform line separator; each wrapper splits it into a list.
_get_gump_text = _ScriptMethod(225)  # GetGumpTextLines
_get_gump_text.restype = _str
_get_gump_text.argtypes = [_ushort]  # GumpIndex
def GetGumpTextLines(GumpIndex):
    result = _get_gump_text(GumpIndex)
    return result.split(_linesep)[:-1]  # drop the trailing '' after the last separator
_get_gump_full_lines = _ScriptMethod(226)  # GetGumpFullLines
_get_gump_full_lines.restype = _str
_get_gump_full_lines.argtypes = [_ushort]  # GumpIndex
def GetGumpFullLines(GumpIndex):
    result = _get_gump_full_lines(GumpIndex)
    return result.split(_linesep)[:-1]  # drop the trailing '' after the last separator
_get_gump_short_lines = _ScriptMethod(227)  # GetGumpShortLines
_get_gump_short_lines.restype = _str
_get_gump_short_lines.argtypes = [_ushort]  # GumpIndex
def GetGumpShortLines(GumpIndex):
    result = _get_gump_short_lines(GumpIndex)
    return result.split(_linesep)[:-1]  # drop the trailing '' after the last separator
_get_gump_buttons = _ScriptMethod(228)  # GetGumpButtonsDescription
_get_gump_buttons.restype = _str
_get_gump_buttons.argtypes = [_ushort]  # GumpIndex
def GetGumpButtonsDescription(GumpIndex):
    result = _get_gump_buttons(GumpIndex)
    return result.split(_linesep)[:-1]  # drop the trailing '' after the last separator
_get_gump_info = _ScriptMethod(229)  # GetGumpInfo
_get_gump_info.restype = _buffer  # TGumpInfo
_get_gump_info.argtypes = [_ushort]  # GumpIndex
# ---------------------------------------------------------------------------
# Element descriptors used by GetGumpInfo to decode the TGumpInfo buffer.
# Each class declares:
#   args      - marshalling types read in order for one element instance
#   container - key in the GetGumpInfo result dict that collects elements
#   keys      - field names zipped with the decoded values
# ---------------------------------------------------------------------------
class _Group:
    args = [_int] * 3
    container = 'groups'
    keys = 'GroupNumber', 'Page', 'ElemNum'
class _EndGroup(_Group):
    # Same layout as _Group, collected under a different key.
    container = 'EndGroups'
class _GumpButton:
    args = [_int] * 9
    container = 'GumpButtons'
    keys = ('X', 'Y', 'ReleasedID', 'PressedID', 'Quit', 'PageID',
            'ReturnValue', 'Page', 'ElemNum')
class _ButtonTileArt:
    args = [_int] * 12
    container = 'ButtonTileArts'
    keys = ('X', 'Y', 'ReleasedID', 'PressedID', 'Quit', 'PageID',
            'ReturnValue', 'ArtID', 'Hue', 'ArtX', 'ArtY', 'ElemNum')
class _CheckBox:
    args = [_int] * 8
    container = 'CheckBoxes'
    keys = ('X', 'Y', 'ReleasedID', 'PressedID', 'Status', 'ReturnValue',
            'Page', 'ElemNum')
class _ChekerTrans:
    args = [_int] * 6
    container = 'ChekerTrans'
    keys = 'X', 'Y', 'Width', 'Height', 'Page', 'ElemNum'
class _CroppedText:
    args = [_int] * 8
    container = 'CroppedText'
    keys = 'X', 'Y', 'Width', 'Height', 'Color', 'TextID', 'Page', 'ElemNum'
class _GumpPic:
    args = [_int] * 6
    container = 'GumpPics'
    keys = 'X', 'Y', 'ID', 'Hue', 'Page', 'ElemNum'
class _GumpPicTiled:
    # NOTE(review): this 'fmt' attribute appears unused by GetGumpInfo,
    # which only reads args/container/keys for element classes.
    fmt = '=7i'
    args = [_int] * 7
    container = 'GumpPicTiled'
    keys = 'X', 'Y', 'Width', 'Height', 'GumpID', 'Page', 'ElemNum'
class _Radiobutton:
    args = [_int] * 8
    container = 'RadioButtons'
    keys = ('X', 'Y', 'ReleasedID', 'PressedID', 'Status', 'ReturnValue',
            'Page', 'ElemNum')
class _ResizePic:
    args = [_int] * 7
    container = 'ResizePics'
    keys = 'X', 'Y', 'GumpID', 'Width', 'Height', 'Page', 'ElemNum'
class _GumpText:
    args = [_int] * 6
    container = 'GumpText'
    keys = 'X', 'Y', 'Color', 'TextID', 'Page', 'ElemNum'
class _TextEntry:
    args = [_int] * 7 + [_str, _int, _int]
    container = 'TextEntries'
    keys = ('X', 'Y', 'Width', 'Height', 'Color', 'ReturnValue',
            'DefaultTextID', 'RealValue', 'Page', 'ElemNum')
class _Text:
    # Plain text lines; GetGumpInfo special-cases this class (no keys dict).
    args = [_str]
    container = 'Text'
    keys = None
class _TextEntryLimited:
    args = [_int] * 10
    container = 'TextEntriesLimited'
    keys = ('X', 'Y', 'Width', 'Height', 'Color', 'ReturnValue',
            'DefaultTextID', 'Limit', 'Page', 'ElemNum')
class _TilePic:
    args = [_int] * 5
    container = 'TilePics'
    keys = 'X', 'Y', 'ID', 'Page', 'ElemNum'
class _TilePicHue:
    args = [_int] * 6
    container = 'TilePicHue'
    keys = 'X', 'Y', 'ID', 'Color', 'Page', 'ElemNum'
class _Tooltip:
    args = [_uint, _str, _int, _int]
    container = 'Tooltips'
    keys = 'ClilocID', 'Arguments', 'Page', 'ElemNum'
class _HtmlGump:
    """Descriptor for HTML gump elements: nine _int fields.

    The first two keys were lost to a '<KEY>' placeholder in the source
    (a syntax error); restored to 'X', 'Y' to match the nine-element args
    list and the sibling _XmfHTMLGumpColor layout.
    """
    args = [_int] * 9
    container = 'HtmlGump'
    keys = ('X', 'Y', 'Width', 'Height', 'TextID', 'Background', 'Scrollbar',
            'Page', 'ElemNum')
class _XmfHtmlGump:
    """Descriptor for XMF HTML gump elements (cliloc-backed text).

    The first two keys were lost to a '<KEY>' placeholder in the source
    (a syntax error); restored to 'X', 'Y' to match the nine-element args
    list and the sibling _XmfHTMLGumpColor layout.
    """
    args = [_int] * 4 + [_uint] + [_int] * 4
    container = 'XmfHtmlGump'
    keys = ('X', 'Y', 'Width', 'Height', 'ClilocID', 'Background', 'Scrollbar',
            'Page', 'ElemNum')
class _XmfHTMLGumpColor:
    # XMF HTML element with an explicit hue; ten fields total.
    args = [_int] * 4 + [_uint] + [_int] * 5
    container = 'XmfHTMLGumpColor'
    keys = ('X', 'Y', 'Width', 'Height', 'ClilocID', 'Background', 'Scrollbar',
            'Hue', 'Page', 'ElemNum')
class _XmfHTMLTok:
    """Descriptor for tokenized XMF HTML elements (cliloc + arguments).

    The first two keys were lost to a '<KEY>' placeholder in the source
    (a syntax error); restored to 'X', 'Y' to match the eleven-element
    args list and the sibling _XmfHTMLGumpColor layout.
    """
    args = [_int] * 7 + [_uint, _str, _int, _int]
    container = 'XmfHTMLTok'
    keys = ('X', 'Y', 'Width', 'Height', 'Background', 'Scrollbar', 'Color',
            'ClilocID', 'Arguments', 'Page', 'ElemNum')
class _ItemProperty:
    args = [_uint, _int]
    container = 'ItemProperties'
    keys = 'Prop', 'ElemNum'
class _Gump:
    # Fixed-size header of the TGumpInfo buffer, decoded with struct
    # (unlike the element classes, which use the marshalling types in args).
    fmt = '=2I2hi4?'
    args = [_uint, _uint, _short, _short, _int] + [_bool] * 4
    keys = ('Serial', 'GumpID', 'X', 'Y', 'Pages', 'NoMove', 'NoResize',
            'NoDispose', 'NoClose')
def GetGumpInfo(GumpIndex):
    """Decode the full TGumpInfo buffer for the gump at GumpIndex.

    Returns a dict with the header fields from _Gump.keys plus one list per
    element class (keyed by each class's 'container' attribute).  For every
    element class the buffer holds a ushort count followed by that many
    element records, whose fields are read in args order.
    """
    data = _get_gump_info(GumpIndex)
    values = _struct.unpack_from(_Gump.fmt, data, 0)
    result = dict(zip(_Gump.keys, values))
    offset = _struct.calcsize(_Gump.fmt)
    # parse elements
    # NOTE: order matters — it must match the wire layout of TGumpInfo.
    elements = (_Group, _EndGroup, _GumpButton, _ButtonTileArt, _CheckBox,
                _ChekerTrans, _CroppedText, _GumpPic, _GumpPicTiled,
                _Radiobutton, _ResizePic, _GumpText, _TextEntry, _Text,
                _TextEntryLimited, _TilePic, _TilePicHue, _Tooltip,
                _HtmlGump, _XmfHtmlGump, _XmfHTMLGumpColor, _XmfHTMLTok,
                _ItemProperty)
    for cls in elements:
        result[cls.container] = []
        # NOTE(review): count is the marshalling object itself, not .value;
        # range(count) presumably works because the type is index-able —
        # confirm against the marshalling classes defined earlier.
        count = _ushort.from_buffer(data, offset)
        offset += count.size
        for i in range(count):
            values = []
            for arg in cls.args:
                element = arg.from_buffer(data, offset)
                offset += element.size
                values.append(element.value)
            if cls is _Text:
                result[cls.container].append(*[values])  # there is only one element
            else:
                element = dict(zip(cls.keys, values))
                if 'ClilocID' in cls.keys and 'Arguments' in cls.keys:  # need to represent clilocs
                    # Resolve the cliloc template and substitute each '@'-
                    # separated argument into successive '~slot~' markers;
                    # arguments starting with '#' are themselves cliloc IDs.
                    text = GetClilocByID(element['ClilocID'])
                    args = element.get('Arguments', '')
                    args = args.split('@')[1:] or []
                    for arg in args:
                        if '~' in text:
                            if arg.startswith('#'):  # another cliloc
                                arg = GetClilocByID(int(arg.strip('#')))
                            s = text.index('~')
                            e = text.index('~', s + 1)
                            text = text.replace(text[s:e + 1], arg, 1) or arg  # TODO: wtf?
                    element['Arguments'] = text
                result[cls.container].append(element)
    return result
_ignore_gump_id = _ScriptMethod(230)  # AddGumpIgnoreByID
_ignore_gump_id.argtypes = [_uint]  # ID
def AddGumpIgnoreByID(ID):
    _ignore_gump_id(ID)
_ignore_gump_serial = _ScriptMethod(231)  # AddGumpIgnoreBySerial
_ignore_gump_serial.argtypes = [_uint]  # Serial
def AddGumpIgnoreBySerial(Serial):
    _ignore_gump_serial(Serial)
_gumps_ignore_reset = _ScriptMethod(232)  # ClearGumpsIgnore
def ClearGumpsIgnore():
    _gumps_ignore_reset()
# ---------------------------------------------------------------------------
# Paperdoll layer number constants.  Note the sequence skips 0x0F
# (0x0E BraceLayer is followed by 0x10 BeardLayer).
# ---------------------------------------------------------------------------
def RhandLayer():
    return 0x01
def LhandLayer():
    return 0x02
def ShoesLayer():
    return 0x03
def PantsLayer():
    return 0x04
def ShirtLayer():
    return 0x05
def HatLayer():
    return 0x06
def GlovesLayer():
    return 0x07
def RingLayer():
    return 0x08
def TalismanLayer():
    return 0x09
def NeckLayer():
    return 0x0A
def HairLayer():
    return 0x0B
def WaistLayer():
    return 0x0C
def TorsoLayer():
    return 0x0D
def BraceLayer():
    return 0x0E
def BeardLayer():
    return 0x10
def TorsoHLayer():
    return 0x11
def EarLayer():
    return 0x12
def ArmsLayer():
    return 0x13
def CloakLayer():
    return 0x14
def BpackLayer():
    return 0x15
def RobeLayer():
    return 0x16
def EggsLayer():
    return 0x17
def LegsLayer():
    return 0x18
def HorseLayer():
    return 0x19
def RstkLayer():
    return 0x1A
def NRstkLayer():
    return 0x1B
def SellLayer():
    return 0x1C
def BankLayer():
    return 0x1D
_get_obj_at_layer = _ScriptMethod(233)  # ObjAtLayerEx
_get_obj_at_layer.restype = _uint
_get_obj_at_layer.argtypes = [_ubyte,  # LayerType
                              _uint]  # PlayerID
def ObjAtLayerEx(LayerType, PlayerID):
    return _get_obj_at_layer(LayerType, PlayerID)
def ObjAtLayer(LayerType):
    # Convenience overload for the player's own character.
    return ObjAtLayerEx(LayerType, Self())
_get_layer = _ScriptMethod(234)  # GetLayer
_get_layer.restype = _ubyte
_get_layer.argtypes = [_uint]  # Obj
def GetLayer(Obj):
    return _get_layer(Obj)
_wear_item = _ScriptMethod(235)  # WearItem
_wear_item.argtypes = [_ubyte,  # Layer
                       _uint]  # Obj
def WearItem(Layer, Obj):
    """Equip Obj on Layer; requires an item already held on the cursor.

    Returns False without acting when nothing is picked up, Layer is 0,
    or the player character is unknown; clears the cursor item on success.
    """
    if GetPickupedItem() == 0 or Layer == 0 or Self() == 0:
        return False
    _wear_item(Layer, Obj)
    SetPickupedItem(0)
    return True
def Disarm():
    # Move both hand items (weapon/shield) into the backpack; True only if
    # every attempted move succeeded (vacuously True with empty hands).
    backpack = Backpack()
    tmp = []
    for layer in LhandLayer(), RhandLayer():
        item = ObjAtLayer(layer)
        if item:
            tmp.append(MoveItem(item, 1, backpack, 0, 0, 0))
    return all(tmp)
def disarm():
    # lowercase alias kept for backward compatibility
    return Disarm()
def Equip(Layer, Obj):
    # Drag one unit of Obj onto the cursor, then wear it on Layer.
    if Layer and DragItem(Obj, 1):
        return WearItem(Layer, Obj)
    return False
def equip(Layer, Obj):
    # lowercase alias kept for backward compatibility
    return Equip(Layer, Obj)
def Equipt(Layer, ObjType):
    # Equip the first backpack item of the given type, if any.
    item = FindType(ObjType, Backpack())
    if item:
        return Equip(Layer, item)
    return False
def equipt(Layer, ObjType):
    # lowercase alias kept for backward compatibility
    return Equipt(Layer, ObjType)
def UnEquip(Layer):
    # Move whatever occupies Layer into the backpack.
    item = ObjAtLayer(Layer)
    if item:
        return MoveItem(item, 1, Backpack(), 0, 0, 0)
    return False
_get_dress_delay = _ScriptMethod(236)  # GetDressSpeed
_get_dress_delay.restype = _ushort
def GetDressSpeed():
    return _get_dress_delay()
_set_dress_delay = _ScriptMethod(237)  # SetDressSpeed
_set_dress_delay.argtypes = [_ushort]  # Value
def SetDressSpeed(Value):
    _set_dress_delay(Value)
_get_client_version_int = _ScriptMethod(355)  # SCGetClientVersionInt
_get_client_version_int.restype = _int
def GetClientVersionInt():
    return _get_client_version_int()
# Layers that Undress() strips one by one on older clients.
_wearable_layers = (RhandLayer(), LhandLayer(), ShoesLayer(), PantsLayer(),
                    ShirtLayer(), HatLayer(), GlovesLayer(), RingLayer(),
                    NeckLayer(), WaistLayer(), TorsoLayer(), BraceLayer(),
                    TorsoHLayer(), EarLayer(), ArmsLayer(), CloakLayer(),
                    RobeLayer(), EggsLayer(), LegsLayer())
_unequip_itemsset_macro = _ScriptMethod(356)  # SCUnequipItemsSetMacro
def UnequipItemsSetMacro():
    _unequip_itemsset_macro()
def Undress():
    """Remove every wearable item from the player.

    Clients from 7.0.74.0 up support a single server-side "unequip set"
    macro (which handles all timing itself); older clients are stripped
    layer by layer with the configured dress-speed pause between moves.
    Returns True when every attempted move succeeded.
    """
    if GetClientVersionInt() >= 7007400:
        UnequipItemsSetMacro()
        return True
    delay = GetDressSpeed()
    char = Self()
    backpack = Backpack()
    results = []
    for layer in _wearable_layers:
        worn = ObjAtLayerEx(layer, char)
        if worn:
            results.append(MoveItem(worn, 1, backpack, 0, 0, 0))
            Wait(delay)
    return all(results)
_set_dress = _ScriptMethod(238)  # SetDress
def SetDress():
    # Remember the currently worn items as the saved dress set.
    _set_dress()
_equip_item_set_macro = _ScriptMethod(357)  # SCEquipItemsSetMacro
def EquipItemsSetMacro():
    _equip_item_set_macro()
_get_dress_set = _ScriptMethod(239)  # GetDressSet
_get_dress_set.restype = _buffer  # TLayersObjectsList
def EquipDressSet():
    """Re-equip the saved dress set; True if every equip succeeded.

    Clients from 7.0.74.0 up use a single server-side macro; older clients
    decode the saved (layer, item) pairs — one byte count, then 5-byte
    '<BI' records — and equip them one by one with the dress-speed pause.
    """
    res = []
    client_version_int = GetClientVersionInt()
    if client_version_int < 7007400:
        delay = GetDressSpeed()
        data = _get_dress_set()
        count = _struct.unpack('B', data[:1])[0]
        data = data[1:]
        offset = 0
        for i in range(count):
            layer, item = _struct.unpack_from('<BI', data, offset)
            offset += 5
            if item:
                res.append(Equip(layer, item))
                Wait(delay)
    else:
        EquipItemsSetMacro()
        res.append(True)
        # no need to wait - all this done inside
    return all(res)
def DressSavedSet():
    # Alias for EquipDressSet; discards its boolean result.
    EquipDressSet()
def Count(ObjType):
    # Total quantity of ObjType in the backpack (stacks summed).
    FindType(ObjType, Backpack())
    return FindFullQuantity()
def CountGround(ObjType):
    # Total quantity of ObjType on the ground nearby.
    FindType(ObjType, Ground())
    return FindFullQuantity()
def CountEx(ObjType, Color, Container):
    # Total quantity of ObjType/Color in Container (non-recursive).
    FindTypeEx(ObjType, Color, Container, False)
    return FindFullQuantity()
# Reagent item-type constants (graphic IDs).
def BP():
    """Black Pearl."""
    return 0x0F7A

def BM():
    """Blood Moss."""
    return 0x0F7B

def GA():
    """Garlic."""
    return 0x0F84

def GS():
    """Ginseng."""
    return 0x0F85

def MR():
    """Mandrake Root."""
    return 0x0F86

def NS():
    """Nightshade."""
    return 0x0F88

def SA():
    """Sulfurous Ash."""
    return 0x0F8C

def SS():
    """Spider's Silk."""
    return 0x0F8D
# Per-reagent backpack counters: non-recursive search in the backpack for
# the default (0) color, summing stack quantities.
def BPCount():
    FindTypeEx(BP(), 0, Backpack(), True)
    return FindFullQuantity()
def BMCount():
    FindTypeEx(BM(), 0, Backpack(), True)
    return FindFullQuantity()
def GACount():
    FindTypeEx(GA(), 0, Backpack(), True)
    return FindFullQuantity()
def GSCount():
    FindTypeEx(GS(), 0, Backpack(), True)
    return FindFullQuantity()
def MRCount():
    FindTypeEx(MR(), 0, Backpack(), True)
    return FindFullQuantity()
def NSCount():
    FindTypeEx(NS(), 0, Backpack(), True)
    return FindFullQuantity()
def SACount():
    FindTypeEx(SA(), 0, Backpack(), True)
    return FindFullQuantity()
def SSCount():
    FindTypeEx(SS(), 0, Backpack(), True)
    return FindFullQuantity()
# ---------------------------------------------------------------------------
# Vendor automation: auto-buy/auto-sell on the next opened shop gump.
# ---------------------------------------------------------------------------
_auto_buy = _ScriptMethod(240)  # AutoBuy
_auto_buy.argtypes = [_ushort,  # ItemType
                      _ushort,  # ItemColor
                      _ushort]  # Quantity
def AutoBuy(ItemType, ItemColor, Quantity):
    _auto_buy(ItemType, ItemColor, Quantity)
_get_shop_list = _ScriptMethod(241)  # GetShopList
_get_shop_list.restype = _str
def GetShopList():
    return _get_shop_list()
_clear_shop_list = _ScriptMethod(242)  # ClearShopList
def ClearShopList():
    _clear_shop_list()
_auto_buy_extended = _ScriptMethod(243)  # AutoBuyEx
_auto_buy_extended.argtypes = [_ushort,  # ItemType
                               _ushort,  # ItemColor
                               _ushort,  # Quantity
                               _uint,  # Price
                               _str]  # ItemName
def AutoBuyEx(ItemType, ItemColor, Quantity, Price, ItemName):
    _auto_buy_extended(ItemType, ItemColor, Quantity, Price, ItemName)
_get_auto_buy_delay = _ScriptMethod(244)  # GetAutoBuyDelay
_get_auto_buy_delay.restype = _ushort
def GetAutoBuyDelay():
    return _get_auto_buy_delay()
_set_auto_buy_delay = _ScriptMethod(245)  # SetAutoBuyDelay
_set_auto_buy_delay.argtypes = [_ushort]  # Value
def SetAutoBuyDelay(Value):
    _set_auto_buy_delay(Value)
_get_auto_sell_delay = _ScriptMethod(246)  # GetAutoSellDelay
_get_auto_sell_delay.restype = _ushort
def GetAutoSellDelay():
    return _get_auto_sell_delay()
_set_auto_sell_delay = _ScriptMethod(247)  # SetAutoSellDelay
_set_auto_sell_delay.argtypes = [_ushort]  # Value
def SetAutoSellDelay(Value):
    _set_auto_sell_delay(Value)
_auto_sell = _ScriptMethod(248)  # AutoSell
_auto_sell.argtypes = [_ushort,  # ItemType
                       _ushort,  # ItemColor
                       _ushort]  # Quantity
def AutoSell(ItemType, ItemColor, Quantity):
    _auto_sell(ItemType, ItemColor, Quantity)
_request_stats = _ScriptMethod(249)  # RequestStats
_request_stats.argtypes = [_uint]  # ObjID
def RequestStats(ObjID):
    # Ask the server to send the object's stats (used by GetHP & friends).
    _request_stats(ObjID)
_help_request = _ScriptMethod(250)  # HelpRequest
def HelpRequest():
    _help_request()
_quest_request = _ScriptMethod(251)  # QuestRequest
def QuestRequest():
    _quest_request()
_rename_mobile = _ScriptMethod(252)  # RenameMobile
_rename_mobile.argtypes = [_uint,  # Mob_ID
                           _str]  # NewName
def RenameMobile(Mob_ID, NewName):
    _rename_mobile(Mob_ID, NewName)
_mobile_can_be_renamed = _ScriptMethod(253)  # MobileCanBeRenamed
_mobile_can_be_renamed.restype = _bool
_mobile_can_be_renamed.argtypes = [_uint]  # Mob_ID
def MobileCanBeRenamed(Mob_ID):
    return _mobile_can_be_renamed(Mob_ID)
_lock_stat = _ScriptMethod(254)  # ChangeStatLockState
_lock_stat.argtypes = [_ubyte,  # statNum
                       _ubyte]  # statState
def SetStatState(statNum, statState):
    _lock_stat(statNum, statState)
_get_static_art_bitmap = _ScriptMethod(255)  # GetStaticArtBitmap
_get_static_art_bitmap.restype = _buffer  # Bitmap file in bytes
_get_static_art_bitmap.argtypes = [_uint,  # Id
                                   _ushort]  # Hue
def GetStaticArtBitmap(Id, Hue):
    return _get_static_art_bitmap(Id, Hue)
_print_script_methods = _ScriptMethod(256)  # PrintScriptMethodsList
_print_script_methods.argtypes = [_str,  # FileName
                                  _bool]  # SortedList
def PrintScriptMethodsList(FileName, SortedList):
    _print_script_methods(FileName, SortedList)
_alarm = _ScriptMethod(257)  # SetAlarm
def Alarm():
    _alarm()
_uo_say = _ScriptMethod(308)  # SendTextToUO
_uo_say.argtypes = [_str]  # Text
def UOSay(Text):
    _uo_say(Text)
_uo_say_color = _ScriptMethod(309)  # SendTextToUOColor
_uo_say_color.argtypes = [_str,  # Text
                          _ushort]  # Color
def UOSayColor(Text, Color):
    _uo_say_color(Text, Color)
_reg_stealth = 0, '0', 'reg_stealth', 'stealth'
_reg_char = 1, '1', 'reg_char', 'char'
_set_global = _ScriptMethod(310) # SetGlobal
_set_global.argtypes = [_ubyte, # GlobalRegion
_str, # VarName
_str] # VarValue
def SetGlobal(GlobalRegion, VarName, VarValue):
if isinstance(GlobalRegion, str):
GlobalRegion = GlobalRegion.lower()
for region in _reg_stealth, _reg_char:
if GlobalRegion in region:
_set_global(region[0], VarName, VarValue)
break
else:
raise ValueError('GlobalRegion must be "stealth" or "char".')
_get_global = _ScriptMethod(311)
_get_global.restype = _str
_get_global.argtypes = [_ubyte, # GlobalRegion
_str] # VarName
def GetGlobal(GlobalRegion, VarName):
if isinstance(GlobalRegion, str):
GlobalRegion = GlobalRegion.lower()
for region in _reg_stealth, _reg_char:
if GlobalRegion in region:
return _get_global(region[0], VarName)
else:
raise ValueError('GlobalRegion must be "stealth" or "char".')
_console_entry_reply = _ScriptMethod(312)
_console_entry_reply.argtypes = [_str] # Text
def ConsoleEntryReply(Text):
_console_entry_reply(Text)
_console_entry_unicode_reply = _ScriptMethod(313) # ConsoleEntryUnicodeReply
_console_entry_unicode_reply.argtypes = [_str] # Text
def ConsoleEntryUnicodeReply(Text):
_console_entry_unicode_reply(Text)
_game_server_ip_string = _ScriptMethod(341) # GameServerIPString
_game_server_ip_string.restype = _str
def GameServerIPString():
return _game_server_ip_string()
_easyuo_sub_key = 'Software\\EasyUO'
def SetEasyUO(num, Regvalue):
if b'' == '': # py2
import _winreg as winreg
else:
import winreg
key = winreg.HKEY_CURRENT_USER
access = winreg.KEY_WRITE
with winreg.OpenKey(key, _easyuo_sub_key, 0, access) as easyuo_key:
winreg.SetValueEx(easyuo_key, '*' + str(num), 0, winreg.REG_SZ, Regvalue)
def GetEasyUO(num):
if b'' == '': # py2
import _winreg as winreg
else:
import winreg
key = winreg.HKEY_CURRENT_USER
access = winreg.KEY_READ
with winreg.OpenKey(key, _easyuo_sub_key, 0, access) as easyuo_key:
type_, data = winreg.QueryValueEx(easyuo_key, '*' + str(num))
return data
def EUO2StealthType(EUO):
    """Decode an EasyUO base-26 string into a Stealth item type.

    Each character contributes (code - 65) * 26**position; the sum is
    then offset by -7 and XORed with 0x0045.  Decoded values above
    0xFFFF are clamped to 0 (item types are 16-bit).
    """
    # TODO: 2 and 3 compatible code: int(codecs.encode(b'A', 'hex'), 16)
    decoded = 0
    for place, char in enumerate(EUO):
        if b'' == '':  # py2
            code = int(char.encode('hex'), 16)
        else:
            code = int.from_bytes(char.encode(), 'little')
        decoded += (26 ** place) * (code - 65)
    decoded = (decoded - 7) ^ 0x0045
    return 0 if decoded > 0xFFFF else decoded
def EUO2StealthID(EUO):
    """Decode an EasyUO base-26 string into a Stealth object ID.

    Same encoding as EUO2StealthType but without the 16-bit clamp.
    """
    # TODO: 2 and 3 compatible code: int(codecs.encode(b'A', 'hex'), 16)
    total = 0
    weight = 1
    for char in EUO:
        if b'' == '':  # py2
            code = int(char.encode('hex'), 16)
        else:
            code = int.from_bytes(char.encode(), 'little')
        total += weight * (code - 65)
        weight *= 26
    return (total - 7) ^ 0x0045
_http_get = _ScriptMethod(258) # HTTP_Get
_http_get.argtypes = [_str] # URL
def HTTP_Get(URL):
_http_get(URL)
_http_post = _ScriptMethod(259) # HTTP_Post
_http_post.restype = _str
_http_post.argtypes = [_str, # URL
_str] # PostData
def HTTP_Post(URL, PostData):
return _http_post(URL, PostData)
_http_body = _ScriptMethod(260) # HTTP_Body
_http_body.restype = _str
def HTTP_Body():
return _http_body()
_http_header = _ScriptMethod(261) # HTTP_Header
_http_header.restype = _str
def HTTP_Header():
return _http_header()
_party_invite = _ScriptMethod(262) # InviteToParty
_party_invite.argtypes = [_uint] # ID
def InviteToParty(ID):
_party_invite(ID)
_party_kick = _ScriptMethod(263) # RemoveFromParty
_party_kick.argtypes = [_uint] # ID
def RemoveFromParty(ID):
_party_kick(ID)
_party_msg_to = _ScriptMethod(264) # PartyMessageTo
_party_msg_to.argtypes = [_uint, # ID
_str] # Msg
def PartyMessageTo(ID, Msg):
_party_msg_to(ID, Msg)
_party_msg = _ScriptMethod(265) # PartySay
_party_msg.argtypes = [_str] # Msg
def PartySay(Msg):
_party_msg(Msg)
_party_can_loot = _ScriptMethod(266) # PartyCanLootMe
_party_can_loot.argtypes = [_bool] # Value
def PartyCanLootMe(Value):
_party_can_loot(Value)
_party_accept = _ScriptMethod(267) # PartyAcceptInvite
def PartyAcceptInvite():
_party_accept()
_party_reject = _ScriptMethod(268) # PartyDeclineInvite
def PartyDeclineInvite():
_party_reject()
_party_leave = _ScriptMethod(269) # PartyLeave
def PartyLeave():
_party_leave()
_is_in_party = _ScriptMethod(271) # InParty
_is_in_party.restype = _bool
def InParty():
return _is_in_party()
_get_party_members = _ScriptMethod(270) # PartyMembersList
_get_party_members.restype = _buffer # Array of Cardinal
def PartyMembersList():
result = []
data = _get_party_members()
if data:
fmt = 'I' * (len(data) // 4)
result.extend(_struct.unpack(fmt, data))
return result
_get_icq_connection_state = _ScriptMethod(272) # GetConnectedStatus
_get_icq_connection_state.restype = _bool
def ICQConnected():
return _get_icq_connection_state()
_icq_connect = _ScriptMethod(273) # ICQ_Connect
_icq_connect.argtypes = [_uint, # UIN
_str] # Password
def ICQConnect(UIN, Password):
_icq_connect(UIN, Password)
_icq_disconnect = _ScriptMethod(274) # ICQ_Disconnect
def ICQDisconnect():
_icq_disconnect()
_icq_set_status = _ScriptMethod(275) # ICQ_SetStatus
_icq_set_status.argtypes = [_ubyte] # Num
def ICQSetStatus(Num):
_icq_set_status(Num)
_icq_set_x_status = _ScriptMethod(276) # ICQ_SetXStatus
_icq_set_x_status.argtypes = [_ubyte] # Num
def ICQSetXStatus(Num):
_icq_set_x_status(Num)
_icq_send_message = _ScriptMethod(277) # ICQ_SendText
_icq_send_message.argtypes = [_uint, # DestinationUIN
_str] # Text
def ICQSendText(DestinationUIN, Text):
_icq_send_message(DestinationUIN, Text)
_messengers = {0: 1, # default - telegram
1: 1, 'Telegram': 1, 'telegram': 1,
2: 2, 'Viber': 2, 'viber': 2,
3: 3, 'Discord': 3, 'discord': 3}
_messenger_get_connected = _ScriptMethod(501) # Messenger_GetConnected
_messenger_get_connected.restype = _bool
_messenger_get_connected.argtypes = [_ubyte] # MesID
def MessengerGetConnected(MesID):
if MesID not in _messengers.keys():
error = 'MessengerGetConnected: MesID must be "Telegram", "Viber" or "Discord"'
raise ValueError(error)
return _messenger_get_connected(_messengers[MesID])
_messenger_set_connected = _ScriptMethod(502) # Messenger_SetConnected
_messenger_set_connected.argtypes = [_ubyte, # MesID
_bool] # Value
def MessengerSetConnected(MesID, Value):
if MesID not in _messengers.keys():
error = 'MessengerGetConnected: MesID must be "Telegram", "Viber" or "Discord"'
raise ValueError(error)
_messenger_set_connected(_messengers[MesID], Value)
_messenger_get_token = _ScriptMethod(503) # Messenger_GetToken
_messenger_get_token.restype = _str
_messenger_get_token.argtypes = [_ubyte] # MesID
def MessengerGetToken(MesID):
if MesID not in _messengers.keys():
error = 'MessengerGetConnected: MesID must be "Telegram", "Viber" or "Discord"'
raise ValueError(error)
return _messenger_get_token(_messengers[MesID])
_messenger_set_token = _ScriptMethod(504) # Messenger_SetToken
_messenger_set_token.argtypes = [_ubyte, # MesID
_str] # Value
def MessengerSetToken(MesID, Value):
if MesID not in _messengers.keys():
error = 'MessengerGetConnected: MesID must be "Telegram", "Viber" or "Discord"'
raise ValueError(error)
_messenger_set_token(_messengers[MesID], Value)
_messenger_get_name = _ScriptMethod(505) # Messenger_GetName
_messenger_get_name.restype = _str
_messenger_get_name.argtypes = [_ubyte] # MesID
def MessengerGetName(MesID):
if MesID not in _messengers.keys():
error = 'MessengerGetConnected: MesID must be "Telegram", "Viber" or "Discord"'
raise ValueError(error)
return _messenger_get_name(_messengers[MesID])
_messenger_send_message = _ScriptMethod(506) # Messenger_SendMessage
_messenger_send_message.argtypes = [_ubyte, # MesID
_str, # Msg
_str] # UserID
def MessengerSendMessage(MesID, Msg, UserID):
if MesID not in _messengers.keys():
error = 'MessengerGetConnected: MesID must be "Telegram", "Viber" or "Discord"'
raise ValueError(error)
_messenger_send_message(_messengers[MesID], Msg, UserID)
_tile_groups = {0: 0, 'tfLand': 0, 'tfland': 0, 'Land': 0, 'land': 0,
1: 1, 'tfStatic': 1, 'tfstatic': 1, 'Static': 1, 'static': 1}
_get_tile_flags = _ScriptMethod(278) # GetTileFlags
_get_tile_flags.restype = _uint
_get_tile_flags.argtypes = [_ubyte, # TileGroup
_ushort] # Tile
def GetTileFlags(TileGroup, Tile):
if TileGroup not in _tile_groups.keys():
raise ValueError('GetTileFlags: TileGroup must be "Land" or "Static"')
group = _tile_groups[TileGroup]
return _get_tile_flags(group, Tile)
_uint_to_flags = _ScriptMethod(350)  # ConvertIntegerToFlags
_uint_to_flags.restype = _str
_uint_to_flags.argtypes = [_ubyte,  # Group
                           _uint]  # Flags
def ConvertIntegerToFlags(Group, Flags):
    """Expand a packed tile-flags integer into a list of flag-name strings.

    :param Group: tile group -- any key accepted by _tile_groups
                  ("Land"/"Static" in their various spellings or 0/1).
    :param Flags: the packed flags value.
    :raises ValueError: if Group is not a known tile group.
    """
    if Group not in _tile_groups:
        # Bug fix: the message previously said "GetTileFlags:" (copy-paste
        # from the function above); it now names this function.
        raise ValueError('ConvertIntegerToFlags: Group must be "Land" or "Static"')
    # The server terminates the list with a trailing separator, producing
    # one empty string at the end -- drop it.
    return _uint_to_flags(_tile_groups[Group], Flags).split(_linesep)[:-1]
_get_land_tile_data = _ScriptMethod(280) # GetLandTileData
_get_land_tile_data.restype = _buffer # TLandTileData
_get_land_tile_data.argtypes = [_ushort] # Tile
def GetLandTileData(Tile):
result = {}
data = _get_land_tile_data(Tile)
if data:
fmt = '2IH20s'
keys = 'Flags', 'Flags2', 'TextureID', 'Name'
values = _struct.unpack(fmt, data)
result.update(zip(keys, values))
result['Flags'] = ConvertIntegerToFlags(0, result['Flags'])
result['Flags2'] = ConvertIntegerToFlags(0, result['Flags2'])
result['Name'] = result['Name'].rstrip(b'\x00')
if b'' != '': # py3
result['Name'] = result['Name'].decode()
return result
_get_static_tile_data = _ScriptMethod(281) # GetStaticTileData
_get_static_tile_data.restype = _buffer # TStaticTileDataNew
_get_static_tile_data.argtypes = [_ushort] # Tile
def GetStaticTileData(Tile):
result = {}
data = _get_static_tile_data(Tile)
if data:
fmt = 'Q2i4B20s'
keys = 'Flags', 'Weight', 'Height', 'RadarColorRGBA', 'Name'
tmp = _struct.unpack(fmt, data)
values = tmp[:3] + (tmp[3:7],) + tmp[7:]
result.update(zip(keys, values))
result['Flags'] = ConvertIntegerToFlags(1, result['Flags'])
result['Name'] = result['Name'].rstrip(b'\x00')
if b'' != '': # py3
result['Name'] = result['Name'].decode()
return result
_get_cell = _ScriptMethod(13) # GetCell
_get_cell.restype = _buffer # TMapCell
_get_cell.argtypes = [_ushort, # X
_ushort, # Y
_ubyte] # WorldNum
def GetCell(X, Y, WorldNum):
result = {}
data = _get_cell(X, Y, WorldNum)
if data:
fmt = 'Hb'
keys = 'Tile', 'Z'
values = _struct.unpack(fmt, data)
result.update(zip(keys, values))
return result
_get_layer_count = _ScriptMethod(282) # GetLayerCount
_get_layer_count.restype = _ubyte
_get_layer_count.argtypes = [_ushort, # X
_ushort, # Y
_ubyte] # WorldNum
def GetLayerCount(X, Y, WorldNum):
return _get_layer_count(X, Y, WorldNum)
_read_static_xy = _ScriptMethod(283) # ReadStaticsXY
_read_static_xy.restype = _buffer # Array of TStaticItemRealXY
_read_static_xy.argtypes = [_ushort, # X
_ushort, # Y
_ubyte] # WorldNum
def ReadStaticsXY(X, Y, WorldNum):
result = []
data = _read_static_xy(X, Y, WorldNum)
if data:
fmt = '=3HbH'
keys = 'Tile', 'X', 'Y', 'Z', 'Color'
for pos in range(0, len(data), _struct.calcsize(fmt)):
values = _struct.unpack_from(fmt, data, pos)
item = dict(zip(keys, values))
result.append(item)
return result
_get_surface_z = _ScriptMethod(284)  # GetSurfaceZ
_get_surface_z.restype = _byte
_get_surface_z.argtypes = [_ushort,  # X
                           _ushort,  # Y
                           _ubyte]  # WorldNum
def GetSurfaceZ(X, Y, WorldNum):
    """Return the surface Z coordinate (signed byte) of map cell (X, Y)
    on the given world/facet number.
    """
    # Bug fix: WorldNum here is the int parameter (it shadows the
    # module-level WorldNum() function), so the original call
    # _get_surface_z(X, Y, WorldNum()) raised TypeError on every use.
    return _get_surface_z(X, Y, WorldNum)
_is_cell_passable = _ScriptMethod(285) # IsWorldCellPassable
_is_cell_passable.restype = _buffer # Boolean, ShortInt 4 bytes
_is_cell_passable.argtypes = [_ushort, # CurrX
_ushort, # CurrY
_byte, # CurrZ
_ushort, # DestX
_ushort, # DestY
_ubyte] # WorldNum
def IsWorldCellPassable(CurrX, CurrY, CurrZ, DestX, DestY, WorldNum):
data = _is_cell_passable(CurrX, CurrY, CurrZ, DestX, DestY, WorldNum)
return _struct.unpack('?b', data)
_get_statics_array = _ScriptMethod(286) # GetStaticTilesArray
_get_statics_array.restype = _buffer # Array of TFoundTile
_get_statics_array.argtypes = [_ushort, # Xmin
_ushort, # Ymin
_ushort, # Xmax
_ushort, # Ymax
_ubyte, # WorldNum
_ushort, # Len
_buffer] # TileTypes: Array of Word
def GetStaticTilesArray(Xmin, Ymin, Xmax, Ymax, WorldNum, TileTypes):
if not _iterable(TileTypes):
TileTypes = [TileTypes]
result = []
data = _get_statics_array(Xmin, Ymin, Xmax, Ymax, WorldNum, len(TileTypes),
_struct.pack('H' * len(TileTypes), *TileTypes))
if data:
fmt = '3Hb'
for pos in range(0, len(data), _struct.calcsize(fmt)):
result.append(_struct.unpack_from(fmt, data, pos))
return result
_get_lands_array = _ScriptMethod(287) # GetLandTilesArray
_get_lands_array.restype = _buffer # Array of TFoundTile
_get_lands_array.argtypes = [_ushort, # Xmin
_ushort, # Ymin
_ushort, # Xmax
_ushort, # Ymax
_ubyte, # WorldNum
_ushort, # Len
_buffer] # TileTypes: Array of Word
def GetLandTilesArray(Xmin, Ymin, Xmax, Ymax, WorldNum, TileTypes):
if not _iterable(TileTypes):
TileTypes = [TileTypes]
result = []
data = _get_lands_array(Xmin, Ymin, Xmax, Ymax, WorldNum,
len(TileTypes),
_struct.pack('H' * len(TileTypes), *TileTypes))
if data:
fmt = '3Hb'
for pos in range(0, len(data), _struct.calcsize(fmt)):
result.append(_struct.unpack_from(fmt, data, pos))
return result
_client_print = _ScriptMethod(289) # ClientPrint
_client_print.argtypes = [_str] # Text
def ClientPrint(Text):
_client_print(Text)
_client_print_ex = _ScriptMethod(290) # ClientPrintEx
_client_print_ex.argtypes = [_uint, # SenderID
_ushort, # Color
_ushort, # Font
_str] # Text
def ClientPrintEx(SenderID, Color, Font, Text):
_client_print_ex(SenderID, Color, Font, Text)
_wnd = {0: 0, '0': 0, 'wtpaperdoll': 0, 'paperdoll': 0,
1: 1, '1': 1, 'wtstatus': 1, 'status': 1,
2: 2, '2': 2, 'wtcharprofile': 2, 'charprofile': 2, 'profile': 2,
3: 3, '3': 3, 'wtcontainer': 3, 'container': 3}
_close_client_ui_window = _ScriptMethod(291) # CloseClientUIWindow
_close_client_ui_window.argtypes = [_ubyte, # UIWindowType
_uint] # ID
def CloseClientUIWindow(UIWindowType, ID):
if isinstance(UIWindowType, str):
UIWindowType = UIWindowType.lower()
if UIWindowType not in _wnd.keys():
raise ValueError('CloseClientUIWindow: UIWindowType must be '
'"Paperdoll", "Status", "CharProfile" or "Container"')
_close_client_ui_window(_wnd[UIWindowType], ID)
_client_target_object_request = _ScriptMethod(292) # ClientRequestObjectTarget
def ClientRequestObjectTarget():
_client_target_object_request()
_client_target_tile_request = _ScriptMethod(293) # ClientRequestTileTarget
def ClientRequestTileTarget():
_client_target_tile_request()
_client_is_target_response = _ScriptMethod(294) # ClientTargetResponsePresent
_client_is_target_response.restype = _bool
def ClientTargetResponsePresent():
return _client_is_target_response()
_client_target_response = _ScriptMethod(295) # ClientTargetResponse
_client_target_response.restype = _buffer # TTargetInfo
def ClientTargetResponse():
result = {}
data = _client_target_response()
if data:
fmt = 'I3Hb'
keys = 'ID', 'Tile', 'X', 'Y', 'Z'
values = _struct.unpack(fmt, data)
result.update(zip(keys, values))
return result
def WaitForClientTargetResponse(MaxWaitTimeMS):
end = _time.time() + MaxWaitTimeMS / 1000
while _time.time() < end:
if ClientTargetResponsePresent():
return True
Wait(10)
return False
_check_lag_begin = _ScriptMethod(297)  # CheckLagBegin
_check_lag_end = _ScriptMethod(298)  # CheckLagEnd
_is_check_lag_ended = _ScriptMethod(299)  # IsCheckLagEnd
_is_check_lag_ended.restype = _bool
def CheckLag(timeoutMS=10000):
    """Start a Stealth check-lag request and poll until it completes.

    Returns True as soon as the check is reported finished, False once
    timeoutMS milliseconds elapse without completion.
    """
    end = _time.time() + timeoutMS / 1000
    result = False
    _check_lag_begin()
    # NOTE(review): this loop polls with no sleep (busy-wait), unlike
    # WaitForClientTargetResponse which sleeps 10 ms per poll -- confirm
    # whether that is intentional.
    while _time.time() < end:
        if _is_check_lag_ended():
            return True
        # Presumably renews/cancels the pending check on each poll --
        # confirm against the Stealth API docs before changing call order.
        _check_lag_end()
    return result
_get_quest_arrow = _ScriptMethod(300) # GetQuestArrow
_get_quest_arrow.restype = _buffer # TPoint
def GetQuestArrow():
data = _get_quest_arrow()
if data:
return _struct.unpack('ii', data)
return ()
_get_silent_mode = _ScriptMethod(301) # GetSilentMode
_get_silent_mode.restype = _bool
def GetSilentMode():
return _get_silent_mode()
_clear_info_window = _ScriptMethod(348) # ClearInfoWindow
def ClearInfoWindow():
_clear_info_window()
_set_silent_mode = _ScriptMethod(302) # SetSilentMode
_set_silent_mode.argtypes = [_bool] # Value
def SetSilentMode(Value):
_set_silent_mode(Value)
_fill_info_window = _ScriptMethod(303) # FillInfoWindow
_fill_info_window.argtypes = [_str] # s
def FillNewWindow(s):
_fill_info_window(s)
_get_stealth_path = _ScriptMethod(305) # GetStealthPath
_get_stealth_path.restype = _str
def StealthPath():
return _get_stealth_path()
def CurrentScriptPath():
return __file__
_get_stealth_profile_path = _ScriptMethod(306) # GetStealthProfilePath
_get_stealth_profile_path.restype = _str
def GetStealthProfilePath():
return _get_stealth_profile_path()
_get_shard_path = _ScriptMethod(307) # GetShardPath
_get_shard_path.restype = _str
def GetShardPath():
return _get_shard_path()
_step = _ScriptMethod(324) # Step
_step.restype = _ubyte
_step.argtypes = [_ubyte, # Direction
_bool] # Running
def Step(Direction, Running=False):
return _step(Direction, Running)
_step_q = _ScriptMethod(325) # StepQ
_step_q.restype = _int
_step_q.argtypes = [_ubyte, # Direction
_bool] # Running
def StepQ(Direction, Running):
return _step_q(Direction, Running)
_move_xyz = _ScriptMethod(326) # MoveXYZ
_move_xyz.restype = _bool
_move_xyz.argtypes = [_ushort, # Xdst
_ushort, # Ydst
_byte, # Zdst
_int, # AccuracyXY
_int, # AccuracyZ
_bool] # Running
def MoveXYZ(Xdst, Ydst, Zdst, AccuracyXY, AccuracyZ, Running):
return _move_xyz(Xdst, Ydst, Zdst, AccuracyXY, AccuracyZ, Running)
def newMoveXYZ(Xdst, Ydst, Zdst, AccuracyXY, AccuracyZ, Running, Callback=None):
    """Walk the character to (Xdst, Ydst, Zdst), re-planning as needed.

    A path is computed with GetPathArray3D and followed step by step; it
    is recomputed whenever a step fails, an upcoming cell turns out to be
    impassable, or the predicted position diverges from the plan.

    :param AccuracyXY: allowed X/Y distance from the destination.
    :param AccuracyZ: allowed Z distance from the destination.
    :param Running: walk (False) or run (True).
    :param Callback: optional callable(x, y, z) invoked after every step;
                     returning False aborts the walk.
    :return: True when the destination is reached within AccuracyXY,
             False when no path exists or Callback aborted the walk.
    """
    def debug(msg):
        # Debug output is gated by the module-level MoveXYZ.debug flag.
        if MoveXYZ.debug:
            AddToSystemJournal('MoveXYZ: ' + msg)
    def do_step(dir, run):
        # Retry StepQ until it yields a definitive result: >= 0 means the
        # step succeeded, -2 is a hard failure; other negative values mean
        # "not ready yet, retry".  (Renamed from `step`, which shadowed
        # its own result variable.)
        while 42:  # while True
            res = StepQ(dir, run)
            if res == -2 or res >= 0:
                return res >= 0
            Wait(10)
    if not hasattr(MoveXYZ, 'debug'):
        MoveXYZ.debug = False
    find_path = True
    while 42:  # while True
        # pause while not connected
        while not Connected():
            Wait(100)
        # try to find a path if required
        if find_path:
            find_path = False
            path = GetPathArray3D(PredictedX(), PredictedY(), PredictedZ(),
                                  Xdst, Ydst, Zdst,
                                  WorldNum(), AccuracyXY, AccuracyZ, Running)
            # there is no path to a target location
            if len(path) <= 0:
                debug('There is no path to a target location.')
                return False
            debug('Path found. Length = ' + str(len(path)))
        # check path passability for a few steps
        cx, cy, cz = PredictedX(), PredictedY(), PredictedZ()
        for i in range(4):
            try:
                x, y, z = path[i]
                # Bug fix: IsWorldCellPassable returns a (passable, z)
                # tuple, which is ALWAYS truthy, so the original `if`
                # never took the "not passable" branch.  Test the boolean
                # element explicitly.
                if IsWorldCellPassable(cx, cy, cz, x, y, WorldNum())[0]:
                    cx, cy, cz = x, y, z
                else:
                    debug('Point ({0}, {1}, {2}) is not passable.'.format(x, y, z))
                    find_path = True
                    break
            except IndexError:
                break
        if find_path:
            continue
        # stamina check
        # NOTE(review): this waits once and then proceeds regardless --
        # confirm whether a retry loop was intended here.
        if not Dead() and Stam() < GetMoveCheckStamina():
            Wait(100)
        # lets walk :)
        mx, my = PredictedX(), PredictedY()
        x, y, z = path.pop(0)
        dx = mx - x
        dy = my - y
        dir = CalcDir(mx, my, x, y)
        # if something wrong (zero or multi-tile step, or no direction),
        # re-plan from scratch
        if (dx == 0 and dy == 0) or (abs(dx) > 1 or abs(dy) > 1) or dir == 100:
            debug('dx = {0}, dy = {1}, dir = {2}'.format(dx, dy, dir))
            find_path = True
            continue
        # try to turn if required (the first step only turns the character)
        if PredictedDirection() != dir:
            if not do_step(dir, Running):
                find_path = True
                continue
        # try to do a step
        if not do_step(dir, Running):
            find_path = True
            continue
        # call a callback object if it is not None;
        # if the callback returns False - abort
        if Callback is not None:
            if not Callback(x, y, z):
                return False
        # looks like it is done
        if not path:
            mx, my = PredictedX(), PredictedY()
            # ensure this
            if abs(mx - Xdst) <= AccuracyXY and abs(my - Ydst) <= AccuracyXY:
                debug('Location reached!')
                return True
            # nope (
            debug('Wtf? Recompute path.')
            find_path = True
def newMoveXY(Xdst, Ydst, Optimized, Accuracy, Running):
return MoveXYZ(Xdst, Ydst, 0, Accuracy, 255, Running)
def NewMoveXY(Xdst, Ydst, Optimized, Accuracy, Running):
return newMoveXY(Xdst, Ydst, Optimized, Accuracy, Running)
_move_xy = _ScriptMethod(327) # MoveXY
_move_xy.restype = _bool
_move_xy.argtypes = [_ushort, # Xdst
_ushort, # Ydst
_bool, # Optimized
_int, # AccuracyXY
_bool] # Running
def MoveXY(Xdst, Ydst, Optimized, Accuracy, Running):
return _move_xy(Xdst, Ydst, Optimized, Accuracy, Running)
_set_impassable_location = _ScriptMethod(328) # SetBadLocation
_set_impassable_location.argtypes = [_ushort, # X
_ushort] # Y
def SetBadLocation(X, Y):
_set_impassable_location(X, Y)
_set_passable_location = _ScriptMethod(329) # SetGoodLocation
_set_passable_location.argtypes = [_ushort, # X
_ushort] # Y
def SetGoodLocation(X, Y):
_set_passable_location(X, Y)
_clear_impassable_locations = _ScriptMethod(330) # ClearBadLocationList
def ClearBadLocationList():
_clear_impassable_locations()
_set_impassable_object = _ScriptMethod(331) # SetBadObject
_set_impassable_object.argtypes = [_ushort, # Type
_ushort, # Color
_ubyte] # Radius
def SetBadObject(Type, Color, Radius):
_set_impassable_object(Type, Color, Radius)
_clear_impassable_objects = _ScriptMethod(332) # ClearBadObjectList
def ClearBadObjectList():
_clear_impassable_objects()
_los_check_type = {1: 1, '1': 1, 'lossphere': 1, 'sphere': 1,
2: 2, '2': 2, 'lossphereadv': 2, 'sphereadv': 2,
3: 3, '3': 3, 'lospol': 3, 'pol': 3,
4: 4, '4': 4, 'losrunuo': 4, 'runuo': 4, 'servuo': 4}
_los_check_options = {0: 0, '0': 0, None: 0,
0x100: 0x100,
'losspherecheckcorners': 0x100,
'spherecheckcorners': 0x100,
0x200: 0x200,
'lospolusenoshoot': 0x200,
'polusenoshoot': 0x200,
0x400: 0x400,
'lospollosthroughwindow': 0x400,
'pollosthroughwindow': 0x400}
_check_los = _ScriptMethod(333) # CheckLOS
_check_los.restype = _bool
_check_los.argtypes = [_ushort, # xf
_ushort, # yf
_byte, # zf
_ushort, # xt
_ushort, # yt
_byte, # zt
_ubyte, # WorldNum
_ubyte, # LOSCheckType
_uint] # LOSOptions
def CheckLOS(xf, yf, zf, xt, yt, zt, WorldNum, LOSCheckType, LOSOptions=None):
if not _iterable(LOSOptions) or isinstance(LOSOptions, str):
LOSOptions = [LOSOptions]
if isinstance(LOSCheckType, str):
LOSCheckType = LOSCheckType.lower()
if LOSCheckType not in _los_check_type.keys():
raise ValueError('CheckLOS: LOSCheckType must be "Sphere", "SphereAdv"'
', "Pol" or "RunUO".')
options = 0
for option in LOSOptions:
if isinstance(option, str):
option = option.lower()
if option not in _los_check_options.keys():
raise ValueError('CheckLOS: LOSOptions must be set of '
'"SphereCheckCorners", "PolUseNoShoot", '
'"PolLosThroughWindow" or None.')
options |= _los_check_options[option]
return _check_los(xf, yf, zf, xt, yt, zt, WorldNum, LOSCheckType, options)
_get_path_array = _ScriptMethod(334) # GetPathArray
_get_path_array.restype = _buffer # Array of TMyPoint
_get_path_array.argtypes = [_ushort, # DestX
_ushort, # DestY
_bool, # Optimized
_int] # Accuracy
def GetPathArray(DestX, DestY, Optimized, Accuracy):
result = []
data = _get_path_array(DestX, DestY, Optimized, Accuracy)
if data:
fmt = '2Hb'
for pos in range(0, len(data), _struct.calcsize(fmt)):
result.append(_struct.unpack_from(fmt, data, pos))
return result
_get_path_array_3d = _ScriptMethod(335) # GetPathArray3D
_get_path_array_3d.restype = _buffer # Array of TMyPoint
_get_path_array_3d.argtypes = [_ushort, # StartX
_ushort, # StartY
_byte, # StartZ
_ushort, # FinishX
_ushort, # FinishY
_byte, # FinishZ
_ubyte, # WorldNum
_int, # AccuracyXY
_int, # AccuracyZ
_bool] # Run
def GetPathArray3D(StartX, StartY, StartZ, FinishX, FinishY, FinishZ, WorldNum,
AccuracyXY, AccuracyZ, Run):
result = []
data = _get_path_array_3d(StartX, StartY, StartZ, FinishX, FinishY,
FinishZ, WorldNum, AccuracyXY, AccuracyZ, Run)
if data:
fmt = '2Hb'
for pos in range(0, len(data), _struct.calcsize(fmt)):
result.append(_struct.unpack_from(fmt, data, pos))
return result
def Dist(x1, y1, x2, y2):
    """Chebyshev distance between two tiles: the larger of the two axis
    deltas (the number of steps a character needs, diagonals allowed).
    """
    return max(abs(x2 - x1), abs(y2 - y1))
def CalcCoord(x, y, Dir):
    """Return the tile adjacent to (x, y) one step in direction *Dir*.

    Dir follows the UO convention (0=N, 1=NE, 2=E, 3=SE, 4=S, 5=SW,
    6=W, 7=NW).  Any Dir above 7 means "no movement" and (x, y) is
    returned unchanged.
    """
    if Dir > 7:
        return x, y
    # (dx, dy) per direction, clockwise starting at north
    step_by_dir = {0: (0, -1), 1: (1, -1), 2: (1, 0), 3: (1, 1),
                   4: (0, 1), 5: (-1, 1), 6: (-1, 0), 7: (-1, -1)}
    dx, dy = step_by_dir[Dir]
    return x + dx, y + dy
def CalcDir(Xfrom, Yfrom, Xto, Yto):
    """Return the UO direction (0..7) for one step from (Xfrom, Yfrom)
    toward (Xto, Yto), or 100 when the two points coincide.

    A heavily dominant axis (ratio >= 2, with +0.1 guarding division by
    zero) snaps to a cardinal direction; otherwise a diagonal is chosen.
    """
    dx = abs(Xto - Xfrom)
    dy = abs(Yto - Yfrom)
    if dx == 0 and dy == 0:
        return 100
    if (dx / (dy + 0.1)) >= 2:  # mostly horizontal
        return 6 if Xto < Xfrom else 2
    if (dy / (dx + 0.1)) >= 2:  # mostly vertical
        return 0 if Yto < Yfrom else 4
    if Xto < Xfrom:
        return 7 if Yto < Yfrom else 5
    return 1 if Yto < Yfrom else 3
_set_run_unmount_timer = _ScriptMethod(316) # SetRunUnmountTimer
_set_run_unmount_timer.argtypes = [_ushort] # Value
def SetRunUnmountTimer(Value):
_set_run_unmount_timer(Value)
_set_walk_mount_timer = _ScriptMethod(317) # SetWalkMountTimer
_set_walk_mount_timer.argtypes = [_ushort] # Value
def SetWalkMountTimer(Value):
_set_walk_mount_timer(Value)
_set_run_mount_timer = _ScriptMethod(318) # SetRunMountTimer
_set_run_mount_timer.argtypes = [_ushort] # Value
def SetRunMountTimer(Value):
_set_run_mount_timer(Value)
_set_walk_unmount_timer = _ScriptMethod(319) # SetWalkUnmountTimer
_set_walk_unmount_timer.argtypes = [_ushort] # Value
def SetWalkUnmountTimer(Value):
_set_walk_unmount_timer(Value)
_get_run_mount_timer = _ScriptMethod(320) # GetRunMountTimer
_get_run_mount_timer.restype = _ushort
def GetRunMountTimer():
return _get_run_mount_timer()
_get_walk_mount_timer = _ScriptMethod(321) # GetWalkMountTimer
_get_walk_mount_timer.restype = _ushort
def GetWalkMountTimer():
return _get_walk_mount_timer()
_get_run_unmount_timer = _ScriptMethod(322) # GetRunUnmountTimer
_get_run_unmount_timer.restype = _ushort
def GetRunUnmountTimer():
return _get_run_unmount_timer()
_get_walk_unmount_timer = _ScriptMethod(323) # GetWalkUnmountTimer
_get_walk_unmount_timer.restype = _ushort
def GetWalkUnmountTimer():
return _get_walk_unmount_timer()
_get_last_step_q_used_door = _ScriptMethod(344) # GetLastStepQUsedDoor
_get_last_step_q_used_door.restype = _uint
def GetLastStepQUsedDoor():
return _get_last_step_q_used_door()
_stop_mover = _ScriptMethod(353) # MoverStop
def StopMover():
_stop_mover()
def MoverStop():
StopMover()
_set_reconnector_ext = _ScriptMethod(354) # SetARExtParams
_set_reconnector_ext.argtypes = [_str, # ShardName
_str, # CharName
_bool] # UseAtEveryConnect
def SetARExtParams(ShardName, CharName, UseAtEveryConnect):
_set_reconnector_ext(ShardName, CharName, UseAtEveryConnect)
_use_item_on_mobile = _ScriptMethod(359) # SCUseItemOnMobile
_use_item_on_mobile.argtypes = [_uint, # ItemSerial
_uint] # TargetSerial
def UseItemOnMobile(ItemSerial, TargetSerial):
_use_item_on_mobile(ItemSerial, TargetSerial)
_bandage_self = _ScriptMethod(360) # SCBandageSelf
def BandageSelf():
_bandage_self()
_global_chat_join_channel = _ScriptMethod(361) # SCGlobalChatJoinChannel
_global_chat_join_channel.argtypes = [_str] # ChName
def GlobalChatJoinChannel(ChName):
_global_chat_join_channel(ChName)
global_chat_leave_channel = _ScriptMethod(362) # SCGlobalChatLeaveChannel
def GlobalChatLeaveChannel():
global_chat_leave_channel()
_global_chat_send_msg = _ScriptMethod(363) # SCGlobalChatSendMsg
_global_chat_send_msg.argtypes = [_str] # MsgText
def GlobalChatSendMsg(MsgText):
_global_chat_send_msg(MsgText)
global_chat_active_channel = _ScriptMethod(364) # SCGlobalChatActiveChannel
global_chat_active_channel.restype = _str
def GlobalChatActiveChannel():
return global_chat_active_channel()
global_chat_channel_list = _ScriptMethod(365) # SCGlobalChatChannelsList
global_chat_channel_list.restype = _str
def GlobalChatChannelsList():
result = global_chat_channel_list()
return result.split(_linesep)[:-1] # cause '' was in the end of list
_set_open_doors = _ScriptMethod(400) # SetMoveOpenDoor
_set_open_doors.argtypes = [_bool] # Value
def SetMoveOpenDoor(Value):
_set_open_doors(Value)
_get_open_doors = _ScriptMethod(401) # GetMoveOpenDoor
_get_open_doors.restype = _bool
def GetMoveOpenDoor():
return _get_open_doors()
_set_move_through_npc = _ScriptMethod(402) # SetMoveThroughNPC
_set_move_through_npc.argtypes = [_ushort] # Value
def SetMoveThroughNPC(Value):
_set_move_through_npc(Value)
_get_move_through_npc = _ScriptMethod(403) # GetMoveThroughNPC
_get_move_through_npc.restype = _ushort
def GetMoveThroughNPC():
return _get_move_through_npc()
_set_move_through_corner = _ScriptMethod(404) # SetMoveThroughCorner
_set_move_through_corner.argtypes = [_bool] # Value
def SetMoveThroughCorner(Value):
_set_move_through_corner(Value)
_get_move_through_corner = _ScriptMethod(405) # GetMoveThroughCorner
_get_move_through_corner.restype = _bool
def GetMoveThroughCorner():
return _get_move_through_corner()
_set_move_heuristic_mult = _ScriptMethod(406) # SetMoveHeuristicMult
_set_move_heuristic_mult.argtypes = [_int] # Value
def SetMoveHeuristicMult(Value):
_set_move_heuristic_mult(Value)
_get_move_heuristic_mult = _ScriptMethod(407) # GetMoveHeuristicMult
_get_move_heuristic_mult.restype = _int
def GetMoveHeuristicMult():
return _get_move_heuristic_mult()
_set_move_check_stamina = _ScriptMethod(408) # SetMoveCheckStamina
_set_move_check_stamina.argtypes = [_ushort] # Value
def SetMoveCheckStamina(Value):
_set_move_check_stamina(Value)
_get_move_check_stamina = _ScriptMethod(409) # GetMoveCheckStamina
_get_move_check_stamina.restype = _ushort
def GetMoveCheckStamina():
return _get_move_check_stamina()
_set_move_turn_cost = _ScriptMethod(410) # SetMoveTurnCost
_set_move_turn_cost.argtypes = [_int] # Value
def SetMoveTurnCost(Value):
_set_move_turn_cost(Value)
_get_move_turn_cost = _ScriptMethod(411) # GetMoveTurnCost
_get_move_turn_cost.restype = _int
def GetMoveTurnCost():
return _get_move_turn_cost()
_set_move_between_two_corners = _ScriptMethod(412) # SetMoveBetweenTwoCorners
_set_move_between_two_corners.argtypes = [_bool] # Value
def SetMoveBetweenTwoCorners(Value):
_set_move_between_two_corners(Value)
_get_move_between_two_corners = _ScriptMethod(413) # GetMoveBetweenTwoCorners
_get_move_between_two_corners.restype = _bool
def GetMoveBetweenTwoCorners():
return _get_move_between_two_corners()
def StartStealthSocketInstance(*args, **kwargs):
Wait(10)
def CorrectDisconnection():
_get_connection().close()
def PlayWav(FileName):
import platform
if platform.system() == 'Windows':
import winsound
winsound.PlaySound(FileName, winsound.SND_FILENAME)
else:
error = 'PlayWav supports only windows.'
AddToSystemJournal(error)
_get_multis = _ScriptMethod(347)  # GetMultis
_get_multis.restype = _buffer
def GetMultis():
    """Return a list of dicts describing the multi objects known to the
    client, one dict per multi with keys ID, X, Y, Z, XMin, XMax, YMin,
    YMax, Width and Height.
    """
    data = _get_multis()
    result = []
    # Robustness: an empty buffer would make unpack_from raise.
    if not data:
        return result
    count, = _struct.unpack_from('I', data, 0)
    # Record layout: uint ID, ushort X, Y, byte Z, then six ushorts
    # XMin, XMax, YMin, YMax, Width, Height ('=I2Hb6H', packed).
    fmt = '=I2Hb6H'
    size = _struct.calcsize(fmt)
    # Bug fix: the key tuple was a corrupted, unterminated string literal
    # ('("<KEY>'); reconstructed to match the 10 fields of fmt.
    keys = ("ID", "X", "Y", "Z",
            "XMin", "XMax", "YMin", "YMax",
            "Width", "Height")
    for i in range(count):
        # Records start right after the 4-byte count header.
        obj = dict(zip(keys, _struct.unpack_from(fmt, data, i * size + 4)))
        result.append(obj)
    return result
_get_menu_items_ex = _ScriptMethod(358)  # GetMenuItemsEx
_get_menu_items_ex.restype = _buffer
_get_menu_items_ex.argtypes = [_str]
def GetMenuItemsEx(MenuCaption):
    """
    GetMenuItemsEx(MenuCaption: str) => Array of MenuItems
    MenuItems:
        model: int (item type i guess)
        color: int
        text: str
    Example:
        menu_items = GetMenuItemsEx('Inscription items')
        print(menu_items[0].text)
        >> 1 Blank scroll
    """
    # Lightweight record type for one menu entry; __str__/__repr__ render
    # model and color in hex for readability.
    class MenuItems:
        model = None
        color = None
        text = None
        def __str__(self):
            template = 'Model: {0}, Color: {1}, Text: {2}'
            return '{ ' + template.format(hex(self.model), hex(self.color), self.text) + ' }'
        def __repr__(self):
            return self.__str__()
    data = _get_menu_items_ex(MenuCaption)
    result = []
    # The first 2 bytes carry the item count; it is skipped and the buffer
    # is instead consumed until exhausted.
    # count = _struct.unpack_from('H', data, 0)
    offset = 2
    while offset < len(data):
        # Each record: model (ushort), color (ushort), then a string.
        model, color = _struct.unpack_from('HH', data, offset)
        offset += 4
        # Presumably a length-prefixed string whose .size covers the
        # prefix plus payload -- confirm against the _str implementation.
        text = _str.from_buffer(data, offset)
        offset += text.size
        item = MenuItems()
        item.model = model
        item.color = color
        item.text = text.value
        result.append(item)
    return result
_close_client_gump = _ScriptMethod(342) # CloseClientGump
_close_client_gump.argtypes = [_uint] # ID
def CloseClientGump(ID):
_close_client_gump(ID)
_get_next_step_z = _ScriptMethod(366) # GetNextStepZ
_get_next_step_z.restype = _byte
_get_next_step_z.argtypes = [_ushort, # CurrX
_ushort, # CurrY
_ushort, # DestX
_ushort, # DestY
_ubyte, # WorldNum
_byte] # CurrZ
def GetNextStepZ(CurrX, CurrY, DestX, DestY, WorldNum, CurrZ):
return _get_next_step_z(CurrX, CurrY, DestX, DestY, WorldNum, CurrZ)
_client_hide = _ScriptMethod(368) # ClientHide
_client_hide.restype = _bool
_client_hide.argtypes = [_uint] # ID
def ClientHide(ID):
    """Hide the object with the given ID on the client; returns a bool."""
    return _client_hide(ID)
_get_skill_lock_state = _ScriptMethod(369) # GetSkillLockState
_get_skill_lock_state.restype = _byte
_get_skill_lock_state.argtypes = [_str] # SkillName
def GetSkillLockState(SkillName):
    """Return the lock state of the named skill as a byte."""
    return _get_skill_lock_state(SkillName)
_get_stat_lock_state = _ScriptMethod(372) # GetStatLockState
_get_stat_lock_state.restype = _byte
_get_stat_lock_state.argtypes = [_str] # stat name
def GetStatLockState(SkillName):
    """Return the lock state of the named stat as a byte.

    Bug fix: the original discarded the script-method result and implicitly
    returned None; mirror GetSkillLockState and return the value.
    (Parameter is kept as ``SkillName`` for backward compatibility, although
    it actually names a stat.)
    """
    return _get_stat_lock_state(SkillName)
| StarcoderdataPython |
172009 | #!/bin/python
# Python 2.7
import os

# Resolve the deployment stage from the environment, defaulting to
# "development". Note: `or` (rather than a getenv default) is deliberate,
# so an *empty* STAGE variable also falls back to the default.
stage = (os.getenv("STAGE") or "development").upper()
output = "We're running in {}".format(stage)
# Any stage beginning with PROD gets a loud warning prefix.
if stage.startswith("PROD"):
    output = "DANGER!!! - " + output
print(output)
| StarcoderdataPython |
3485874 | <gh_stars>10-100
from moocng.http.exceptions import Http410
from moocng.http.middleware import HttpErrorCaptureMiddleware
| StarcoderdataPython |
107991 | import fiona
import numpy as np
import pandas as pd
import geopandas as gpd
import geojson
from shapely.geometry import Point, LineString
from six import iteritems
from six.moves import reduce
from itertools import chain, count, permutations
import os, sys
# Collapse geometry types into the two categories this script handles:
# (Multi)LineStrings are bucketed as "LineString", Points stay "Point".
type_map = dict(MultiLineString="LineString",
                LineString="LineString",
                Point="Point")
def sort_features(fn, features):
    """Read geojson file *fn* and append its features to *features*,
    bucketed by simplified geometry type, renumbering ids per bucket."""
    with open(fn) as fp:
        collection = geojson.load(fp)
    # Continue id numbering after the highest id already stored per bucket.
    offset = {geom: fs[-1]['id'] + 1 for geom, fs in iteritems(features)}
    for n, feature in enumerate(collection['features']):
        geom = type_map[feature['geometry']['type']]
        feature['id'] = offset.setdefault(geom, 0) + n
        features.setdefault(geom, []).append(feature)
    return features
def trim_fixedid(name):
    """Strip a trailing '-fixedid' marker from *name*, if present."""
    marker = '-fixedid'
    if name.endswith(marker):
        return name[:-len(marker)]
    return name
def gen_outfn(fn, suffix, tmpdir=None):
    """Derive an output filename from *fn* by appending *suffix* to its stem
    (after stripping any '-fixedid' marker), placed in *tmpdir* when given,
    otherwise next to *fn*. Any stale file at that path is deleted first."""
    name, ext = os.path.splitext(os.path.basename(fn))
    directory = tmpdir if tmpdir is not None else os.path.dirname(fn)
    outfn = os.path.join(directory, trim_fixedid(name) + suffix + ext)
    if os.path.exists(outfn):
        os.unlink(outfn)
    return outfn
def stitch_tiles(fn):
    """Merge per-tile line fragments in *fn* into one LineString per oid.

    Normalizes column names across source layers, cleans attribute types,
    explodes MultiLineStrings, then geometrically stitches all fragments
    sharing an oid. Writes the result as GeoJSON and returns its path.
    """
    df = gpd.read_file(fn)
    # Different source tiles use different column spellings; unify them.
    df = df.rename(columns={'OBJECTID': 'oid',
                            'ogc_fid': 'oid',
                            'underconstruction': 'under_construction',
                            'UnderConst': 'under_construction',
                            'Symbol': 'symbol',
                            'ABR': 'country',
                            'VoltageLev': 'voltagelevel',
                            'T9_Code': 't9_code',
                            'Visible': 'visible',
                            'Current_': 'current',
                            'NumberOfCi': 'numberofcircuits',
                            'Undergroun': 'underground',
                            'Tie_line': 'tie_line',
                            'Text_': 'text_',
                            'LengthKm': 'shape_length'})
    df['tie_line'] = df['tie_line'].fillna(0.).astype(int)
    for f in ('visible', 'underground', 'under_construction'):
        df[f] = df[f].astype(int)
    df['numberofcircuits'] = df['numberofcircuits'].astype(int)
    df['shape_length'] = df['shape_length'].astype(float)
    # Keep only the first comma-separated token of the symbol, and drop a
    # trailing ' Under Construction' qualifier (already captured above).
    df['symbol'] = df['symbol'].str.split(',').str[0]
    uc_b = df['symbol'].str.endswith(' Under Construction')
    df.loc[uc_b, 'symbol'] = df.loc[uc_b, 'symbol'].str[:-len(' Under Construction')]
    # Break MultiLineStrings
    e = (df.loc[df.type == 'MultiLineString', 'geometry'])
    if not e.empty:
        # One row per component line, repeating the parent's attributes.
        extra = df.drop("geometry", axis=1).join(
            pd.Series(chain(*e), np.repeat(e.index, e.map(len)), name="geometry"),
            how="right"
        )
        df = df[df.type != 'MultiLineString'].append(extra, ignore_index=True)
    # Coordinates of *l* from its far end up to (but excluding) point *p*,
    # oriented so the walk starts at the endpoint farther from *other*.
    def up_to_point(l, p, other):
        if l.boundary[1].distance(other) > l.boundary[0].distance(other):
            l = LineString(l.coords[::-1])
        for n, r in enumerate(l.coords):
            if l.project(Point(r)) > l.project(p):
                return l.coords[:n]
        return l.coords[:]
    # Join two fragments at their (near-)intersection into one LineString.
    def stitch_lines(a, b):
        if a.buffer(1e-2).contains(b):
            return a
        p = a.intersection(b)
        if p.is_empty:
            # No exact crossing: lines must nearly touch; meet in the middle.
            d = a.distance(b)/(2-1e-2)
            assert d < .5e-2
            p = a.buffer(d).intersection(b.buffer(d)).centroid
        p = p.representative_point()
        return LineString(up_to_point(a, p, b) + up_to_point(b, p, a)[::-1])
    # Drop fragments fully contained (within tolerance) in another fragment.
    def unique_lines(df):
        dfbuf = df.buffer(1e-2)
        for i, geom in df.geometry.iteritems():
            ndfbuf = dfbuf.drop(i)
            if ndfbuf.contains(geom).any():
                dfbuf = ndfbuf
        return df.loc[dfbuf.index]
    # Try stitch orders until one succeeds; raise if no permutation works.
    def stitch_where_possible(df):
        if len(df) == 1: return df.geometry.iloc[0]
        df = unique_lines(df)
        for p in permutations(df.geometry.iloc[1:]):
            try:
                #print("Stitching {}: {} lines".format(df.ogc_fid.iloc[0], len(df)))
                return reduce(stitch_lines, p, df.geometry.iloc[0])
            except AssertionError:
                pass
        else:
            raise Exception("Could not stitch lines with `oid = {}`".format(df.oid.iloc[0]))
    stitched = df.groupby('oid').apply(stitch_where_possible)
    df = gpd.GeoDataFrame(df.groupby('oid').first().assign(geometry=stitched)).reset_index()
    outfn = gen_outfn(fn, '-fixed')
    df.set_index('oid', drop=False).to_file(outfn, driver='GeoJSON')
    return outfn
def strip_duplicate_stations(fn):
    """Normalize station property names in geojson file *fn*, drop features
    with an already-seen oid, tidy a few string fields, and write the result
    to a new '-fixed' file whose path is returned."""
    with open(fn) as fp:
        collection = geojson.load(fp)
    # Map the various source spellings onto canonical property names.
    key_map = {'OBJECTID': 'oid',
               'ogc_fid': 'oid',
               'Under_cons': 'under_construction',
               'name_all': 'name',
               'Name_Eng': 'name',
               'Symbol': 'symbol',
               'Country': 'country',
               'Tie_line_s': 'tie_line_s',
               'Visible': 'visible',
               'MW': 'capacity',
               'mw': 'capacity'}
    seen_oids = set()
    deduped = []
    for feature in collection['features']:
        props = feature['properties']
        for old, new in key_map.items():
            if old in props:
                props[new] = props.pop(old)
        feature['id'] = oid = props['oid']
        if oid in seen_oids:
            continue
        seen_oids.add(oid)
        # Keep only the first comma-separated token of the symbol.
        if 'symbol' in props:
            props['symbol'] = props['symbol'].split(',', 1)[0]
        # Trim stray whitespace from free-text identifiers.
        if 'TSO' in props:
            props['TSO'] = props['TSO'].strip()
        if 'EIC_code' in props:
            props['EIC_code'] = props['EIC_code'].strip()
        deduped.append(feature)
    collection['features'] = deduped
    outfn = gen_outfn(fn, '-fixed')
    with open(outfn, 'w') as fp:
        geojson.dump(collection, fp)
    return outfn
if __name__ == '__main__':
    # Usage: script.py <output-basename> <input.geojson> [more inputs...]
    base = sys.argv[1]
    input_files = sys.argv[2:]
    features = {}
    for input_fn in input_files:
        sort_features(input_fn, features)
    # Write one '-fixedid' file per geometry bucket, then post-process it.
    for geom_type, feature_list in iteritems(features):
        fixed_id_fn = gen_outfn(base + '.geojson', '-{}-fixedid'.format(geom_type))
        with open(fixed_id_fn, 'w') as fp:
            geojson.dump({'type': 'FeatureCollection', 'features': feature_list}, fp)
        if geom_type == 'LineString':
            fixed_fn = stitch_tiles(fixed_id_fn)
            print("Stitched tiles into {}.".format(fixed_fn))
        elif geom_type == 'Point':
            fixed_fn = strip_duplicate_stations(fixed_id_fn)
            print("Stripped station duplicates into {}.".format(fixed_fn))
        else:
            print("Incompatible geometry type {} in {}.".format(geom_type, fixed_id_fn))
| StarcoderdataPython |
8007944 | import asyncio
import aiohttp
import json
import math
from server import lamps
from server import configuration
from server.log import log
class LeagueApi:
    """ League of Legends active game API. """
    # Live-client endpoint served locally by the game client while in-game.
    url = 'https://127.0.0.1:2999/liveclientdata/activeplayer'
    loop = None
    def __init__(self, loop):
        # Last-known health percentage; starts at full.
        self.health = 100
        self.loop = loop
    async def health_percent(self):
        """ called to retrieve data from the API """
        # verify_ssl=False — presumably because the local client serves a
        # self-signed certificate; TODO confirm. Raises if no game is active.
        async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session:
            return await self.get(session)
    async def get(self, session):
        """Fetch champion stats and return current health as an integer
        percentage (rounded up)."""
        async with session.get(self.url) as resp:
            stats = json.loads(await resp.text())['championStats']
            health = math.ceil((stats['currentHealth'] * 100 / stats['maxHealth']))
            return health
def color(health):
    """Build a colour schema fading from green (100% health) to red (0%)."""
    schema = configuration.SchemaConfiguration()
    # Cheapest possible two-stop gradient: the green channel scales with
    # health and red is its complement; blue stays zero.
    green = math.ceil(health * 2.55)
    red = 255 - green
    schema.set_hue('#{:02x}{:02x}00'.format(red, green))
    schema.set_brightness(1.0)
    schema.set_saturation(1.0)
    return schema
async def run(loop, lamp):
    """Poll the active League game twice a second and paint *lamp* with a
    red-to-green colour tracking the player's health percentage. When no
    game is active (the HTTP call fails), back off for five seconds."""
    lifx = lamps.CircadianLifx()
    league = LeagueApi(loop)
    lamp = lifx.get_device_by_name(lamp)
    while True:
        try:
            health = await league.health_percent()
            lifx.set_color(lamp, color(health))
            await asyncio.sleep(0.5)
        # Fix: this was a bare `except:`, which also swallowed BaseExceptions
        # such as asyncio.CancelledError and KeyboardInterrupt, making the
        # task impossible to cancel cleanly. Catch only ordinary errors.
        except Exception:
            log("no active game - sleeping for 5 seconds.")
            await asyncio.sleep(5)
    # restore original state.
| StarcoderdataPython |
6416798 | <gh_stars>0
"""It’s easy to modify the code for creating k-fold cross-validation to create stratified k-folds.
We are only changing from model_selection.KFold to model_selection.StratifiedKFold and in the kf.split(...) function,
we specify the target column on which we want to stratify.
We assume that our CSV dataset has a column called “target” and it is a classification problem!
"""
import pandas as pd
from sklearn import model_selection
if __name__ == "__main__":
    # assume the training data has a column called 'target'
    # and the problem is a classification problem
    # NOTE(review): the original comment mentioned "train.csv", but the file
    # actually read is winequality-red.csv and its label column is 'quality'.
    df = pd.read_csv("winequality-red.csv")
    # we create a new column called kfold and fill it with -1
    df["kfold"] = -1
    # randomize the rows of the data
    df = df.sample(frac=1).reset_index(drop=True)
    # fetch the target column on which we want to stratify
    # y is a numpy array of the target column
    y = df.quality.values
    # initiate the stratified kfold class from the model selection module
    # No of folds is 5 here
    kf = model_selection.StratifiedKFold(n_splits=5)
    # fill the new kfold column in the dataset
    for fold, (t_, v_) in enumerate(kf.split(X=df, y=y)):
        df.loc[v_, "kfold"] = fold
    # save the new csv with the kfold column (note: the DataFrame index is
    # written too; pass index=False if that is unwanted)
    df.to_csv("train_strat_kfold.csv")
| StarcoderdataPython |
234263 | default_app_config = 'addons.s3compatb3.apps.S3CompatB3AddonAppConfig'
| StarcoderdataPython |
8190249 |
class DUNet(nn.Module):
    """Double U-Net: a first U-Net pass whose output (concatenated with the
    input) feeds a second U-Net pass with dense skip connections from both
    passes.

    NOTE(review): encode()/decode() stash the selected encoder/decoder on
    `self` (self.encoder / self.decoders), so instances are not safe for
    concurrent forward passes — confirm if that matters for your usage.
    `create_encoder` / `create_decoder` are defined elsewhere in this module.
    """
    def __init__(self, in_channels, out_channels, kernel_size=3, filters=[16, 32, 64], layers=3,
                 weight_norm=True, batch_norm=True, activation=nn.ReLU, final_activation=None):
        super().__init__()
        assert len(filters) > 0
        self.final_activation = final_activation
        # Second encoder sees the input concatenated with the first pass's
        # output, hence in_channels*2.
        self.encoder1 = create_encoder(in_channels, filters, kernel_size, weight_norm, batch_norm, activation, layers)
        self.encoder2 = create_encoder(in_channels*2, filters, kernel_size, weight_norm, batch_norm, activation, layers)
        # One single-channel decoder per output channel, for each pass.
        decoders1 = []
        decoders2 = []
        for i in range(out_channels):
            decoders1.append(create_decoder(1, filters, kernel_size, weight_norm, batch_norm, activation, layers, concat_layer=2))
            decoders2.append(create_decoder(1, filters, kernel_size, weight_norm, batch_norm, activation, layers, concat_layer=3))
        self.decoders1 = nn.Sequential(*decoders1)
        self.decoders2 = nn.Sequential(*decoders2)
    def encode(self, x, switch):
        """Run encoder 1 (switch==0) or 2 (switch==1), collecting the
        pre-pool tensors, max-pool indices and sizes for the decoders."""
        if switch==0:
            self.encoder = self.encoder1
        elif switch==1:
            self.encoder = self.encoder2
        tensors = []
        indices = []
        sizes = []
        for encoder in self.encoder:
            x = encoder(x)
            sizes.append(x.size())
            tensors.append(x)
            x, ind = F.max_pool2d(x, 2, 2, return_indices=True)
            indices.append(ind)
        return x, tensors, indices, sizes
    def decode(self, _x, _tensors, _indices, _sizes, switch):
        """First-pass decoding: each per-channel decoder unpools and
        concatenates the matching encoder skip tensor at every level;
        channel outputs are concatenated along dim=1."""
        if switch==0:
            self.decoders = self.decoders1
        elif switch==1:
            self.decoders = self.decoders2
        y = []
        for _decoder in self.decoders:
            x = _x
            # Copy the stacks so each decoder can pop independently.
            tensors = _tensors[:]
            indices = _indices[:]
            sizes = _sizes[:]
            for decoder in _decoder:
                tensor = tensors.pop()
                size = sizes.pop()
                ind = indices.pop()
                x = F.max_unpool2d(x, ind, 2, 2, output_size=size)
                x = torch.cat([tensor, x], dim=1)
                x = decoder(x)
            y.append(x)
        return torch.cat(y, dim=1)
    def decode_(self, _tensors1, _x2, _tensors2, _indices2, _sizes2, switch):
        """Second-pass decoding: like decode(), but concatenates skip
        tensors from BOTH encoder passes at every level."""
        if switch==0:
            self.decoders = self.decoders1
        elif switch==1:
            self.decoders = self.decoders2
        y = []
        for _decoder in self.decoders:
            x = _x2
            tensors1 = _tensors1[:]
            tensors2 = _tensors2[:]
            indices2 = _indices2[:]
            sizes2 = _sizes2[:]
            for decoder in _decoder:
                tensor1 = tensors1.pop()
                tensor2 = tensors2.pop()
                size2 = sizes2.pop()
                ind2 = indices2.pop()
                x = F.max_unpool2d(x, ind2, 2, 2, output_size=size2)
                x = torch.cat([tensor1, tensor2, x], dim=1)
                x = decoder(x)
            y.append(x)
        return torch.cat(y, dim=1)
    def forward(self, x):
        """Two chained U-Net passes; the second consumes [x, first_output]."""
        x1, tensors1, indices1, sizes1 = self.encode(x,0)
        y1 = self.decode(x1, tensors1, indices1, sizes1,0)
        y1 = torch.cat([x, y1], dim=1)
        # print(torch.add(x,y1).shape)
        x2, tensors2, indices2, sizes2 = self.encode(y1,1)
        y2 = self.decode_(tensors1, x2, tensors2, indices2, sizes2, 1)
        if self.final_activation is not None:
            y2 = self.final_activation(y2)
        return y2
| StarcoderdataPython |
1918627 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sys
import bme280
import smbus2
# I2C address of the BME280 sensor (0x76 is the common default) and the
# Raspberry Pi I2C bus number used by smbus2.
DEFAULT_ADDRESS = 0x76
DEFAULT_PORT = 1
def to_str(data):
    """Format a BME280 sample as 'temperature,humidity,pressure',
    each value rounded to the nearest integer."""
    readings = (data.temperature, data.humidity, data.pressure)
    return ",".join(str(round(value)) for value in readings)
def main():
    """Read one BME280 sample and print it to stdout.

    Returns 0 on success, -1 on any error (the error is printed to stderr).

    Bug fix: if SMBus() itself raised, `_bus` was never bound and the
    `finally` block raised a NameError that masked the original error.
    Initialize it to None and only close an opened bus.
    """
    _bus = None
    try:
        _bus = smbus2.SMBus(DEFAULT_PORT)
        bme280.load_calibration_params(_bus, DEFAULT_ADDRESS)
        # print bme values to std out
        print(to_str(bme280.sample(_bus, DEFAULT_ADDRESS)))
    except Exception as e:
        print(e, file=sys.stderr)
        return -1
    finally:
        if _bus is not None:
            _bus.close()
    return 0
if __name__ == "__main__":
    # Propagate main()'s status code (0 on success, -1 on error) to the shell.
    sys.exit(main())
| StarcoderdataPython |
3252146 | <reponame>impastasyndrome/DS-ALGO-OFFICIAL
class Solution:
    def convertToTitle(self, n):
        """Convert a positive column number to its Excel-style title.

        :type n: int
        :rtype: str

        Excel columns are a bijective base-26 system (A=1 ... Z=26), so we
        subtract 1 before each divmod to shift into ordinary base-26.
        """
        letters = []
        while n > 0:
            n, remainder = divmod(n - 1, 26)
            letters.append(chr(ord("A") + remainder))
        return "".join(reversed(letters))
| StarcoderdataPython |
327078 | import math
from math import cos, fabs, radians, sin, sqrt
import hypothesis.strategies as st
import pytest # type: ignore
from hypothesis import assume, example, given, note
from ppb_vector import Vector
from utils import angle_isclose, angles, floats, isclose, vectors
# Rotations by multiples of 90° of a lattice vector are exactly
# representable, so these cases may be compared with ==.
data_exact = [
    (Vector(1, 1), -90, Vector(1, -1)),
    (Vector(1, 1), 0, Vector(1, 1)),
    (Vector(1, 1), 90, Vector(-1, 1)),
    (Vector(1, 1), 180, Vector(-1, -1)),
]
@pytest.mark.parametrize("input, angle, expected", data_exact,
                         ids=[str(angle) for _, angle, _ in data_exact])
def test_exact_rotations(input, angle, expected):
    """Quarter-turn rotations must be exact, and angle() must invert rotate()."""
    assert input.rotate(angle) == expected
    assert input.angle(expected) == angle
# angle (in degrees) -> (sin, cos)
# values from 0 to 45°
# lifted from https://en.wikibooks.org/wiki/Trigonometry/Selected_Angles_Reference
# NOTE: each entry is stored as (cos, sin) despite the header order above —
# the unpacking below consistently reads `(cos_t, sin_t)`.
remarkable_angles = {
    15: ((sqrt(6) + sqrt(2)) / 4, (sqrt(6) - sqrt(2)) / 4),
    22.5: (sqrt(2 + sqrt(2)) / 2, sqrt(2 - sqrt(2)) / 2),
    30: (sqrt(3) / 2, 0.5),
    45: (sqrt(2) / 2, sqrt(2) / 2),
}
# extend up to 90°
# using the reflection identities cos(90-θ)=sin θ, sin(90-θ)=cos θ
remarkable_angles.update({
    90 - angle: (sin_t, cos_t)
    for angle, (cos_t, sin_t) in remarkable_angles.items()
})
# extend up to 180°
remarkable_angles.update({
    angle + 90: (-sin_t, cos_t)
    for angle, (cos_t, sin_t) in remarkable_angles.items()
})
# extend up to 360°
remarkable_angles.update({
    angle + 180: (-cos_t, -sin_t)
    for angle, (cos_t, sin_t) in remarkable_angles.items()
})
# extend to negative angles
remarkable_angles.update({
    -angle: (cos_t, -sin_t)
    for angle, (cos_t, sin_t) in remarkable_angles.items()
})
@pytest.mark.parametrize("angle, trig", remarkable_angles.items(),
                         ids=[str(x) for x in remarkable_angles])
def test_remarkable_angles(angle, trig):
    """Test that our table of remarkable angles agrees with Vector._trig.
    This is useful both as a consistency test of the table,
    and as a test of Vector._trig (which Vector.rotate uses).
    """
    cos_t, sin_t = trig
    cos_m, sin_m = Vector._trig(angle)
    assert isclose(sin_t, sin_m, abs_tol=0, rel_tol=1e-14)
    assert isclose(cos_t, cos_m, abs_tol=0, rel_tol=1e-14)
# Expected rotations of (1,0) and (1,1) by each remarkable angle, computed
# from the closed-form rotation matrix; exact equality is not expected, so
# the test below uses isclose.
data_close = [
    (Vector(1, 0), angle, Vector(cos_t, sin_t))
    for (angle, (cos_t, sin_t)) in remarkable_angles.items()
] + [
    (Vector(1, 1), angle, Vector(cos_t - sin_t, cos_t + sin_t))
    for (angle, (cos_t, sin_t)) in remarkable_angles.items()
]
@pytest.mark.parametrize("input, angle, expected", data_close,
                         ids=[f"({v.x},{v.y}).rotate({angle})" for v, angle, _ in data_close])
def test_close_rotations(input, angle, expected):
    """rotate() must match the analytic result within tolerance, and angle()
    must recover (approximately) the rotation angle."""
    assert input.rotate(angle).isclose(expected)
    assert angle_isclose(input.angle(expected), angle)
@given(angle=angles())
def test_trig_stability(angle):
    """cos² + sin² == 1
    We are testing that this equation holds, as otherwise rotations
    would (slightly) change the length of vectors they are applied to.
    Moreover, Vector._trig should get closer to fulfilling it than
    math.{cos,sin}.
    """
    r_cos, r_sin = Vector._trig(angle)
    r_len = r_cos * r_cos + r_sin * r_sin
    # Don't use exponents here. Multiplication is generally more stable.
    assert math.isclose(r_len, 1, rel_tol=1e-18)
    # Compare against the naive math.cos/math.sin baseline: _trig's error
    # must be no worse.
    t_cos, t_sin = cos(radians(angle)), sin(radians(angle))
    t_len = t_cos * t_cos + t_sin * t_sin
    assert fabs(1 - r_len) <= fabs(1 - t_len)
@given(angle=angles(), n=st.integers(min_value=0, max_value=100_000))
def test_trig_invariance(angle: float, n: int):
    """Test that cos(θ), sin(θ) ≃ cos(θ + n*360°), sin(θ + n*360°)"""
    r_cos, r_sin = Vector._trig(angle)
    n_cos, n_sin = Vector._trig(angle + 360 * n)
    # Tolerance scales with n: each extra full turn may accumulate error.
    note(f"δcos: {r_cos - n_cos}")
    assert isclose(r_cos, n_cos, rel_to=[n / 1e9])
    note(f"δsin: {r_sin - n_sin}")
    assert isclose(r_sin, n_sin, rel_to=[n / 1e9])
@given(v=vectors(), angle=angles(), n=st.integers(min_value=0, max_value=100_000))
def test_rotation_invariance(v: Vector, angle: float, n: int):
    """Check that rotating by angle and angle + n×360° have the same result."""
    rot_once = v.rotate(angle)
    rot_many = v.rotate(angle + 360 * n)
    note(f"δ: {(rot_once - rot_many).length}")
    assert rot_once.isclose(rot_many, rel_tol=n / 1e9)
@given(initial=vectors(), angle=angles())
def test_rotation_angle(initial, angle):
    """initial.angle( initial.rotate(angle) ) == angle"""
    # Tiny vectors have numerically meaningless directions; skip them.
    assume(initial.length > 1e-5)
    assert angle_isclose(initial.angle(initial.rotate(angle)), angle)
@given(angle=angles(), loops=st.integers(min_value=0, max_value=500))
def test_rotation_stability(angle, loops):
    """Rotating loops times by angle is equivalent to rotating by loops*angle."""
    initial = Vector(1, 0)
    fellswoop = initial.rotate(angle * loops)
    note(f"One Fell Swoop: {fellswoop}")
    stepwise = initial
    for _ in range(loops):
        stepwise = stepwise.rotate(angle)
    note(f"Step-wise: {stepwise}")
    assert fellswoop.isclose(stepwise, rel_tol=1e-8)
    # Rotation must preserve length to near machine precision.
    assert math.isclose(fellswoop.length, initial.length, rel_tol=1e-15)
@given(initial=vectors(), angles=st.lists(angles()))
def test_rotation_stability2(initial, angles):
    """Rotating by a sequence of angles is equivalent to rotating by the total."""
    total_angle = sum(angles)
    fellswoop = initial.rotate(total_angle)
    note(f"One Fell Swoop: {fellswoop}")
    stepwise = initial
    for angle in angles:
        stepwise = stepwise.rotate(angle)
    note(f"Step-wise: {stepwise}")
    # Increase the tolerance on this comparison,
    # as stepwise rotations induce rounding errors
    assert fellswoop.isclose(stepwise, rel_tol=1e-6)
    assert math.isclose(fellswoop.length, initial.length, rel_tol=1e-15)
@given(x=vectors(), y=vectors(), scalar=floats(), angle=angles())
# In this example:
#  * x * l == -y
#  * Rotation must not be an multiple of 90deg
#  * Must be sufficiently large
@example(x=Vector(1e10, 1e10), y=Vector(1e19, 1e19), scalar=-1e9, angle=45)
def test_rotation_linearity(x, y, scalar, angle):
    """(l*x + y).rotate is equivalent to l*x.rotate + y.rotate"""
    # Rotation is a linear map; catastrophic cancellation (see the @example)
    # is the interesting failure mode, hence the rel_to anchors below.
    inner = (scalar * x + y).rotate(angle)
    outer = scalar * x.rotate(angle) + y.rotate(angle)
    note(f"scalar * x + y: {scalar * x + y}")
    note(f"scalar * x.rotate(): {scalar * x.rotate(angle)}")
    note(f"y.rotate(): {y.rotate(angle)}")
    note(f"Inner: {inner}")
    note(f"Outer: {outer}")
    assert inner.isclose(outer, rel_to=[x, scalar * x, y])
| StarcoderdataPython |
5080254 | <filename>lib/dramatis/runtime/actor/actor.py<gh_stars>1-10
from __future__ import with_statement
from logging import warning
from threading import Lock
from threading import currentThread
from sys import exc_info
from traceback import print_exc
import dramatis
import dramatis.runtime
class Actor(object):
    """Runtime half of a dramatis actor (Python 2 code).

    Pairs a user-supplied behavior object with a message queue, a gate that
    decides which messages may currently be delivered, and scheduling state
    ("runnable"/"blocked"). Messages arrive via *_send methods, are queued
    or scheduled under self._mutex, and are executed by deliver().
    NOTE(review): Gate, Task, Scheduler and the Interface/Name wrappers are
    defined elsewhere in the dramatis package.
    """
    def __init__(self,behavior = None):
        self._call_threading_enabled = False
        self._call_thread = None
        self._behavior = behavior
        self._gate = dramatis.runtime.Gate()
        self._interface = dramatis.Actor.Interface(self)
        if not behavior:
            # Unbound actor: refuse object messages until bind() is called.
            self._gate.refuse("object")
        else:
            if isinstance(behavior,dramatis.Actor.Behavior):
                if behavior.actor.name:
                    raise dramatis.error.Bind( "behavior already bound" )
                # Re-parent the behavior's class so its `actor` property
                # resolves to this actor's interface.
                oi = self._interface
                class actor_class ( behavior.__class__.__bases__[0] ):
                    @property
                    def actor( cls ):
                        return oi
                behavior.__class__ = actor_class
            self._gate.always( ( [ "object", "dramatis_exception" ] ), True )
        self.block()
        self._queue = []
        self._mutex = Lock()
        self._continuations = {}
        dramatis.runtime.Scheduler.current.append( self )
        if hasattr(behavior,"dramatis_bound"):
            behavior.dramatis_bound()
    def become(self, behavior):
        """Swap in a new behavior object, detaching the old one's interface."""
        if behavior == self._behavior: return
        if isinstance( behavior, dramatis.Actor.Behavior ):
            if behavior.actor.name:
                raise dramatis.error.Bind( "cannot become bound behavior" )
            new_oi = self._interface
            class actor_class ( behavior.__class__.__bases__[0] ):
                @property
                def actor( cls ):
                    return new_oi
            behavior.__class__ = actor_class
        if isinstance( self._behavior, dramatis.Actor.Behavior ):
            # Detach the outgoing behavior by pointing it at a null interface.
            old_oi = dramatis.Actor.Interface( None )
            class actor_class ( self._behavior.__class__.__bases__[0] ):
                @property
                def actor( cls ):
                    return old_oi
            self._behavior.__class__ = actor_class
        self._behavior = behavior
        if hasattr(behavior,"dramatis_bound"):
            behavior.dramatis_bound()
        self.schedule()
    @property
    def name(self):
        # Lazily created public handle for this actor.
        if( not hasattr(self,"_name") ):
            self._name = dramatis.Actor.Name( self )
        return self._name
    @property
    def runnable(self):
        # warning( "runnable? " + str(self) + " " + self.state )
        return self.state == "runnable"
    behavior = property( lambda(self): self._behavior )
    def _set_call_threading_enabled( self, v ):
        self._call_threading_enabled = v
    call_threading_enabled = property(
        lambda(self): self._call_threading_enabled,
        lambda(self,v): self._set_call_threading_enabled(v) )
    def make_runnable(self):
        # warning( "make_runnable " + str(self) + " " )
        self.state = "runnable"
    def is_blocked(self):
        # warning( "blocked? " + str(self) + " " + self.state )
        return self.state == "blocked"
    def block(self):
        # warning('block ' + str(self) + " ")
        self.state = "blocked"
    def current_call_thread(self,that):
        # True when *that* is the thread currently making a call into us.
        return self._call_thread and self._call_thread == that
    def actor_send( self, args, opts ):
        return self.common_send( "actor", args, opts )
    def object_send(self,name,args,kwds,opts):
        # A continuation_send option reroutes the message through the
        # continuation channel, prefixing the continuation id.
        t = None
        args = (name,)+args
        o = opts.get("continuation_send")
        if o:
            t = "continuation"
            args = (o,)+args
        else:
            t = "object"
        return self.common_send( t, args, opts )
    def common_send(self,dest,args,opts):
        """Queue or immediately schedule a Task; returns task.queued()."""
        # warning( "common send " + str(currentThread()) + " " + dest + " " + str(args) + " " + str(opts) )
        task = dramatis.runtime.Task( self, dest, args, opts )
        with self._mutex:
            if ( not self.runnable and
                 ( self._gate.accepts( *( ( task.dest, task.method ) + task.arguments ) ) or self.current_call_thread( task.call_thread ) ) ):
                self.make_runnable()
                dramatis.runtime.Scheduler.current.schedule( task )
            else:
                self._queue.append(task)
        v = task.queued()
        # warning( "returning " + str(v) )
        return v
    def deliver( self, dest, args, continuation, call_thread ):
        """Execute one message against the actor/behavior/continuation and
        feed the result (or exception) into *continuation*."""
        old_call_thread = self._call_thread
        old_behavior = self._behavior
        try:
            self._call_thread = call_thread
            method = args[0]
            args = args[1:]
            result = None
            # warning( "deliver " + dest + " " + method + " " + str(args) + " " + str(self._behavior) )
            if ( dest == "actor" ):
                result = self.__getattribute__(method).__call__( *args )
            elif ( dest == "object" ):
                # warning( "before call " + str(self._behavior) + " " + str( self._behavior.__getattribute__(method) ) )
                v = self._behavior.__getattribute__(method).__call__( *args )
                if v is self._behavior:
                    v = self.name
                result = v
                # warning( "after call " + str(self._behavior) )
            elif ( dest == "continuation" ):
                continuation_name = method
                c = self._continuations[continuation_name]
                if not c: raise "hell 0 #{Thread.current}"
                method = args[0]
                args = args[1:]
                if( method == "result" ):
                    method = "continuation_result"
                elif( method == "exception" ):
                    method = "continuation_exception"
                else: raise "hell *"
                if c.__getattribute__(method).__call__(*args):
                    old_behavior = None
                    del self._continuations[ continuation_name ]
            else: raise "hell 1: " + str(self._dest)
            continuation.result( result )
        except Exception, exception:
            try:
                # warning( "trying to except " + repr(exception) )
                # print_exc()
                dramatis.error.traceback( exception ).set( exc_info()[2] )
                continuation.exception( exception )
            except Exception, e:
                # warning( "double exception fault: " + repr(e) )
                # print_exc()
                raise e
        finally:
            self._call_thread = old_call_thread
            # warning( "final schedule " + str( self._behavior ) )
            # Only reschedule if the behavior did not change underneath us.
            if old_behavior is self._behavior:
                self.schedule()
            # warning( "after final schedule " + str( self._behavior ) )
    def object_initialize( self, *args ):
        self._gate.accept( "object" )
        self._behavior.__init__( *args )
    def actor_yield(self): pass
    def bind( self, behavior ):
        """Attach a behavior to a previously unbound actor; returns its name."""
        if self._behavior: raise dramatis.error.Bind()
        self._behavior = behavior
        self._gate.accept( "object" )
        self.schedule()
        return self.name
    def exception( self, exception ):
        # Let the behavior handle it if it can; otherwise escalate to the runtime.
        try:
            self._behavior.dramatis_exception( exception )
        except AttributeError:
            dramatis.Runtime.current.exception( exception )
        return self
    def deadlock( self, exception ):
        """Fail every queued task with *exception* (drains the queue)."""
        tasks = []
        with self._mutex:
            tasks = list(self._queue)
            self._queue[:] = []
        for task in tasks:
            try:
                task.exception( exception )
            except Exception, e:
                raise e
    def register_continuation( self, c ):
        self._continuations[str(c)] = c
    def schedule( self, continuation = None ):
        """Pick the first queued task the gate accepts and schedule it;
        block the actor if nothing is deliverable."""
        with self._mutex:
            task = None
            index = 0
            while task == None and index < len(self._queue):
                candidate = self._queue[index]
                if( self._gate.accepts( *( ( candidate.dest, candidate.method ) + candidate.arguments ) ) or
                    self.current_call_thread( candidate.call_thread ) ):
                    task = candidate
                    self._queue.pop(index)
                index += 1
            if( task ):
                # warning( "schedule next " + str( task ) )
                dramatis.runtime.Scheduler.current.schedule( task )
            else:
                # warning( "schedule block " + str(self) )
                self.block()
                # warning( "schedule blocked " + str(self) )
| StarcoderdataPython |
3285495 | <filename>project/aat/migrations/0003_auto_20170914_1537.py
# -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-14 15:37
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated Django migration: adds two optional path fields
    # (csv_path, faces_path) to the RecognizerPretrainedData model.
    dependencies = [
        ('aat', '0002_recognizerpretraineddata'),
    ]
    operations = [
        migrations.AddField(
            model_name='recognizerpretraineddata',
            name='csv_path',
            field=models.CharField(blank=True, max_length=150),
        ),
        migrations.AddField(
            model_name='recognizerpretraineddata',
            name='faces_path',
            field=models.CharField(blank=True, max_length=150),
        ),
    ]
| StarcoderdataPython |
8056405 | <gh_stars>0
'''
@Author: ConghaoWong
@Date: 2019-12-20 09:38:24
LastEditors: <NAME>
LastEditTime: 2020-09-16 16:31:38
@Description: main of Erina
'''
import argparse
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # suppress TensorFlow log output
import time
import numpy as np
import tensorflow as tf
from matplotlib.axes._axes import _log as matplotlib_axes_logger
from helpmethods import dir_check
from models import BGM, Linear
from PrepareTrainData import DataManager
matplotlib_axes_logger.setLevel('ERROR') # silence matplotlib plotting warnings
os.environ["KMP_DUPLICATE_LIB_OK"]="TRUE" # workaround for the kNN/OpenMP duplicate-lib issue
# Timestamp used to name this run's log directory.
TIME = time.strftime('%Y%m%d-%H%M%S',time.localtime(time.time()))
def get_parser():
    """Build the argparse parser holding every training/test option."""
    parser = argparse.ArgumentParser(description='linear')
    # environment settings and test options
    parser.add_argument('--gpu', type=int, default=2)
    parser.add_argument('--load', type=str, default='null')
    parser.add_argument('--draw_results', type=int, default=False)
    parser.add_argument('--save_base_dir', type=str, default='./logs')
    parser.add_argument('--log_dir', type=str, default='null')
    parser.add_argument('--sr_enable', type=int, default=False)
    # model basic settings
    parser.add_argument('--obs_frames', type=int, default=8)
    parser.add_argument('--pred_frames', type=int, default=12)
    parser.add_argument('--test_set', type=int, default=2)
    parser.add_argument('--save_best', type=int, default=True)
    # training data settings
    parser.add_argument('--train_type', type=str, default='all')
    # 'one': train on a single dataset using a train/test split
    # 'all': train on every dataset except the test set
    parser.add_argument('--train_base', type=str, default='agent')
    # parser.add_argument('--frame', type=str, default='01234567')
    parser.add_argument('--train_percent', type=float, default=[0.0], nargs='+') # fraction of the data used for training; 0 means use all
    parser.add_argument('--step', type=int, default=4) # sliding-window step over the dataset
    parser.add_argument('--reverse', type=int, default=False) # reverse training data along the time axis
    parser.add_argument('--add_noise', type=int, default=False) # add noise to the training data
    parser.add_argument('--rotate', type=int, default=False) # rotate training data (start point kept fixed)
    parser.add_argument('--normalization', type=int, default=False)
    # test settings when training
    parser.add_argument('--test', type=int, default=True)
    parser.add_argument('--start_test_percent', type=float, default=0.0)
    parser.add_argument('--test_step', type=int, default=3) # while training, run a test every test_step epochs
    # training settings
    parser.add_argument('--epochs', type=int, default=500)
    parser.add_argument('--batch_size', type=int, default=500)
    parser.add_argument('--dropout', type=float, default=0.5)
    parser.add_argument('--lr', type=float, default=1e-3)
    # save/load settings
    parser.add_argument('--model_name', type=str, default='model')
    parser.add_argument('--save_model', type=int, default=True)
    parser.add_argument('--save_per_step', type=bool, default=True)
    # Linear args
    parser.add_argument('--diff_weights', type=float, default=0.95)
    # BGM args
    parser.add_argument('--model', type=str, default='bgm')
    # Social args
    # parser.add_argument('--max_neighbor', type=int, default=6)
    parser.add_argument('--init_position', type=float, default=20)
    # parser.add_argument('--future_interaction', type=int, default=True)
    parser.add_argument('--calculate_social', type=int, default=False)
    # SR args
    parser.add_argument('--grid_shape_x', type=int, default=700)
    parser.add_argument('--grid_shape_y', type=int, default=700)
    parser.add_argument('--grid_length', type=float, default=0.1) # real-world length of one grid cell
    parser.add_argument('--avoid_size', type=int, default=15) # grid radius used for active avoidance
    parser.add_argument('--interest_size', type=int, default=20) # grid size of the originally interesting prediction region
    # parser.add_argument('--social_size', type=int, default=1) # grid radius of mutual non-intrusion
    parser.add_argument('--max_refine', type=float, default=0.8) # maximum refinement (correction) size
    # Guidance Map args
    parser.add_argument('--gridmapsize', type=int, default=32)
    return parser
def gpu_config(args):
    """Pin the process to the GPU selected by ``args.gpu`` and enable
    on-demand memory growth on every visible GPU."""
    os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu)
    for gpu in tf.config.experimental.list_physical_devices(device_type='GPU'):
        tf.config.experimental.set_memory_growth(gpu, True)
def load_args(save_args_path, current_args):
    """Load a saved args namespace and overlay run-time-only options.

    Bug fix: the original ignored ``save_args_path`` and rebuilt the path
    from ``current_args.load + 'args.npy'``; the only visible caller passes
    exactly that expression, so honouring the parameter is behaviour-compatible.

    Args:
        save_args_path: path to the ``.npy`` file written by ``np.save``.
        current_args: namespace whose gpu/load/draw_results/sr_enable values
            override the saved ones (they describe the *current* run).

    Returns:
        The saved namespace, patched with the current run-time options.
    """
    save_args = np.load(save_args_path, allow_pickle=True).item()
    save_args.gpu = current_args.gpu
    save_args.load = current_args.load
    save_args.draw_results = current_args.draw_results
    save_args.sr_enable = current_args.sr_enable
    return save_args
def main():
    """Entry point: parse args, configure the GPU, load data (or a saved
    run), set up the log directory, then build and run the chosen model."""
    args = get_parser().parse_args()
    # args.frame = [int(i) for i in args.frame]
    gpu_config(args)
    if args.load == 'null':
        # Fresh run: build training data from scratch.
        inputs = DataManager(args).train_info
    else:
        # Resuming: training inputs unused; restore the saved arguments.
        inputs = 0
        args = load_args(args.load+'args.npy', args)
    if args.log_dir == 'null':
        log_dir_current = TIME + args.model_name + args.model + str(args.test_set)
        args.log_dir = os.path.join(dir_check(args.save_base_dir), log_dir_current)
    else:
        args.log_dir = dir_check(args.log_dir)
    # NOTE(review): any --model value other than 'bgm'/'linear' leaves
    # `model` unbound and raises NameError below — confirm if intentional.
    if args.model == 'bgm':
        model = BGM
    elif args.model == 'linear':
        model = Linear
    model(train_info=inputs, args=args).run_commands()
if __name__ == "__main__":
    main()
| StarcoderdataPython |
11301418 | <reponame>executablebooks/sphinx-jupyterbook-latex
import sys
from typing import cast
from sphinx.application import Sphinx
from sphinx.builders.latex import LaTeXBuilder
from sphinx.config import Config
from sphinx.util import logging
from sphinx.util.fileutil import copy_asset_file
from . import __version__, theme
from .transforms import LatexRootDocPostTransforms, MystNbPostTransform
if sys.version_info < (3, 9):
import importlib_resources as resources
else:
import importlib.resources as resources
logger = logging.getLogger(__name__)
def override_latex_config(app: Sphinx, config: Config) -> None:
    """This ``config-inited`` event overrides aspects of the sphinx latex config.
    - ``latex_engine`` -> ``xelatex``
    - ``latex_theme`` -> ``jupyterBook``
    - appends necessary LaTeX commands to the preamble
    """
    # only allow latex builder to access rest of the features
    config["latex_engine"] = "xelatex"
    config["latex_theme"] = "jupyterBook"
    config["numfig"] = True
    latex_elements = cast(dict, config["latex_elements"])
    # preamble to overwrite things from sphinx latex writer
    # (any user-supplied preamble is preserved and ours is appended after it)
    config_preamble = (
        latex_elements["preamble"] if "preamble" in config["latex_elements"] else ""
    )
    latex_elements["preamble"] = (
        config_preamble
        + r"""
% Start of preamble defined in sphinx-jupyterbook-latex %
\usepackage[Latin,Greek]{ucharclasses}
\usepackage{unicode-math}
% fixing title of the toc
\addto\captionsenglish{\renewcommand{\contentsname}{Contents}}
\hypersetup{
    pdfencoding=auto,
    psdextra
}
% End of preamble defined in sphinx-jupyterbook-latex %
"""
    )
    # at the moment, True means list for this config
    if (type(config["jblatex_show_tocs"]) is bool) and config["jblatex_show_tocs"]:  # type: ignore[comparison-overlap] # noqa: E501
        config["jblatex_show_tocs"] = "list"
def setup_latex_transforms(app: Sphinx) -> None:
    """This ``builder-inited`` event sets up aspects of the extension,
    reserved only for when a LaTeX builder is specified.
    """
    # Everything below applies only to LaTeX output; bail out for html etc.
    if not isinstance(app.builder, LaTeXBuilder):
        return

    # note: bold is a dynamically created function
    from sphinx.util.console import bold  # type: ignore[attr-defined]

    # decide whether we will convert top-level toctree captions to parts
    app.env.jblatex_captions_to_parts = False  # type: ignore[attr-defined]
    app.env.img_converter_ext = False  # type: ignore[attr-defined]
    if app.config["jblatex_captions_to_parts"] is True:  # type: ignore[comparison-overlap]
        # Explicitly requested by the user.
        app.config["latex_toplevel_sectioning"] = "part"
        app.config["numfig_secnum_depth"] = 2  # equation number with chapter numbers
        app.env.jblatex_captions_to_parts = True
    elif app.config["jblatex_captions_to_parts"] is None:
        # if using the sphinx-external-toc, we can look if parts are being specified
        # TODO this should probably be made more robust
        sitemap = getattr(app.config, "external_site_map", None)
        if sitemap is not None:
            if sitemap.file_format == "jb-book" and len(sitemap.root.subtrees) > 1:
                app.config["latex_toplevel_sectioning"] = "part"
                app.config[
                    "numfig_secnum_depth"
                ] = 2  # equation number with chapter numbers
                app.env.jblatex_captions_to_parts = True
            elif sitemap.file_format == "jb-book":
                app.config["latex_toplevel_sectioning"] = "chapter"
            elif sitemap.file_format == "jb-article":
                app.config["latex_toplevel_sectioning"] = "section"

    # Copy the class theme to the output directory.
    # note: importlib.resources is the formal method to access files within packages
    with resources.as_file(resources.files(theme).joinpath("jupyterBook.cls")) as path:
        copy_asset_file(str(path), app.outdir)

    # only load when myst-nb is present
    if MystNbPostTransform.check_dependency():
        app.add_post_transform(MystNbPostTransform)
    if app.config["jblatex_load_imgconverter"]:
        app.setup_extension("sphinx.ext.imgconverter")
        app.env.img_converter_ext = "sphinx.ext.imgconverter"  # type: ignore[attr-defined]

    # Summarise the effective configuration once per build.
    logger.info(
        bold("sphinx-jupyterbook-latex v%s:")
        + "engine='%s', toplevel_section='%s', imgconverter='%s', show_tocs='%s'",
        __version__,
        app.config["latex_engine"],
        app.config["latex_toplevel_sectioning"],
        app.env.img_converter_ext,  # type: ignore[attr-defined]
        app.config["jblatex_show_tocs"],
    )
    app.add_post_transform(LatexRootDocPostTransforms)
| StarcoderdataPython |
6669945 | <reponame>Bhclira/NExT
# Program: print the sum of all even numbers between two user-supplied values
# (the upper bound itself is excluded, matching range() semantics).
num1 = int(input('\nDigite o Primeiro Número: '))
num2 = int(input('Digite o Segundo Número: '))

# First even value >= num1 (in Python, num1 % 2 is 0 or 1 even for negatives).
primeiro_par = num1 + (num1 % 2)
soma = 0
for par in range(primeiro_par, num2, 2):
    print(f'{par}', end=' -> ')
    soma += par
print(f'\nA soma dos Números Pares: {soma}')
3389668 | from editor.constants import *
class ActionManager:
    """Keeps undo/redo history for the editor.

    The actual undo/redo operations are not implemented yet; this class only
    maintains the two action stacks and the retention limit.
    """

    def __init__(self, actns_count):
        # Independent stacks for performed and reverted actions.
        self.undo_list, self.redo_list = [], []
        # Upper bound on how many actions are retained.
        self.max_actions_count = actns_count

    def undo(self):
        """Revert the most recent action (not implemented)."""

    def redo(self):
        """Re-apply the most recently undone action (not implemented)."""
| StarcoderdataPython |
1940005 | from pathlib import Path
from fhir.resources.valueset import ValueSet as _ValueSet

from oops_fhir.utils import ValueSet

from oops_fhir.r4.code_system.medication_knowledge_characteristic_codes import (
    medicationKnowledgeCharacteristicCodes as medicationKnowledgeCharacteristicCodes_,
)


__all__ = ["medicationKnowledgeCharacteristicCodes"]

# The FHIR ValueSet definition ships as a sibling ``.json`` file next to this
# module; parse it once at import time.
_resource = _ValueSet.parse_file(Path(__file__).with_suffix(".json"))


class medicationKnowledgeCharacteristicCodes(medicationKnowledgeCharacteristicCodes_):
    """
    Medication knowledge characteristic codes

    MedicationKnowledge Characteristic Codes

    Status: draft - Version: 4.0.1

    http://hl7.org/fhir/ValueSet/medicationknowledge-characteristic
    """

    class Meta:
        # Parsed FHIR resource backing this value set.
        resource = _resource
| StarcoderdataPython |
3356135 | <gh_stars>0
from pandapower.plotting.generic_geodata import *
from pandapower.plotting.collections import *
from pandapower.plotting.colormaps import * | StarcoderdataPython |
3470682 | <filename>TrainingCNN.py
# -*- coding: utf-8 -*-
"""
Created on Sun Mar 21 18:43:20 2021
@author: <NAME>
"""
############################################################################################
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras import regularizers
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense, Flatten, Conv2D
from tensorflow.keras.callbacks import ModelCheckpoint
############################################################################################
def get_checkpoint_best_only():
    """Build a callback that keeps only the weights of the best epoch.

    "Best" is judged by validation mean-squared-error; weights are written
    to ``checkpoints_best_only/checkpoint``, checked once per epoch.
    """
    return ModelCheckpoint(
        filepath='checkpoints_best_only/checkpoint',
        save_weights_only=True,
        save_freq='epoch',
        monitor='val_mean_squared_error',
        save_best_only=True,
        verbose=1,
    )
def get_checkpoint_every_epoch():
    """Build a callback that saves the model weights after every epoch.

    Weights go to ``checkpoints_every_epoch/checkpoint_XXX`` where ``XXX``
    is the zero-padded epoch number.
    """
    checkpoints_every_epoch_path = 'checkpoints_every_epoch/checkpoint_{epoch:03d}'
    # BUG FIX: the keyword is ``save_freq`` (as used in get_checkpoint_best_only);
    # ``frequency`` is not a ModelCheckpoint parameter and is rejected/ignored
    # depending on the Keras version, so the intended per-epoch saving never
    # took effect reliably.
    checkpoints_every_epoch = ModelCheckpoint(filepath=checkpoints_every_epoch_path,
                                              save_freq='epoch',
                                              save_weights_only=True,
                                              verbose=1)
    return checkpoints_every_epoch
############################################################################################
# ------------------------------------------------------------------
# Data loading: two arrays were saved back-to-back into one .npy
# stream (features first, then labels), so np.load is called twice
# on the same open file handle.
# ------------------------------------------------------------------
#Import training data
with open('BinSL_TRAINextract.npy', 'rb') as f:
    TRAIN_ILDIPD_FeatureCON = np.load(f)
    TRAIN_ILDIPD_LabelCON = np.load(f)
print('Loading training data has done!')
print('Total samples: ' + str(TRAIN_ILDIPD_FeatureCON.shape))
print('Total labels: ' + str(TRAIN_ILDIPD_LabelCON.shape))

#Split into training (75%) and validation (25%) set by sklearn train_test_split()
train_images, x_v, train_labels, y_v = train_test_split(TRAIN_ILDIPD_FeatureCON,TRAIN_ILDIPD_LabelCON,test_size = 0.25,train_size =0.75)
print('Total training samples: ' + str(train_images.shape))
print('Total validation samples: ' + str(x_v.shape))
print('TensorFlow version: ' + tf.__version__)
############################################################################################
# ------------------------------------------------------------------
# Model: four conv blocks (each followed by batch normalisation) and
# a dense head regressing one scalar.  Inputs are 321x50, 2 channels.
# ------------------------------------------------------------------
# Build the Sequential convolutional neural network model
model = Sequential([
    Conv2D(32, (5,5), kernel_initializer=tf.keras.initializers.he_uniform(), kernel_regularizer=regularizers.l2(0.001), activation='relu',strides=3, input_shape=(321,50,2)),
    tf.keras.layers.BatchNormalization(),
    Conv2D(64, (3,3), kernel_regularizer=regularizers.l2(0.001), activation='relu',strides=2),
    tf.keras.layers.BatchNormalization(),
    Conv2D(96, (3,3), kernel_regularizer=regularizers.l2(0.001), activation='relu',strides=2),
    tf.keras.layers.BatchNormalization(),
    Conv2D(128, (3,3), kernel_regularizer=regularizers.l2(0.001), activation='relu',strides=2),
    tf.keras.layers.BatchNormalization(),
    Flatten(),
    Dense(1024,activation='relu'),
    Dropout(0.3),
    Dense(512,activation='relu'),
    Dense(256,activation='relu'),
    Dense(1),
])
model.summary()

# Regression setup: MSE is both the loss and the tracked metric.
opt = tf.keras.optimizers.Adam(learning_rate=0.001)
mse = tf.keras.metrics.MeanSquaredError()
model.compile(optimizer=opt,
              loss='mean_squared_error',
              metrics=[mse]
              )
print(model.loss)
print(model.optimizer)
print(model.metrics)
print(model.optimizer.lr)

# Save weights every epoch plus a separate "best so far" checkpoint.
callbacks = [get_checkpoint_best_only(),get_checkpoint_every_epoch()]
# Fitting
history = model.fit(train_images, train_labels, epochs=500, validation_data=(x_v, y_v), batch_size=64, callbacks=callbacks)
3276117 | from watchdog.events import FileSystemEvent, FileCreatedEvent, FileDeletedEvent, FileModifiedEvent
from os.path import basename
from os import stat
import logging
import requests
from hashlib import sha256
class FileEventHandler(object):
    """ EventHandler class for watchdog.
    Contains method for interacting with the server.
    Gets event and fires of client.
    """

    def __init__(self, baseurl, chunk_size, folder):
        # Server endpoint prefix, e.g. "http://host:port/".
        self.baseUrl = baseurl
        # Block size in bytes used for checksumming and incremental uploads.
        self.chunkSize = chunk_size
        # Watched local directory; joined with bare filenames below, so it is
        # expected to end with a path separator — TODO confirm against caller.
        self.folder = folder

    # Entrypoint
    def dispatch(self, event: FileSystemEvent):
        """Route a watchdog event: created/modified -> (incremental) upload,
        deleted -> remote delete; anything else is only logged."""
        if isinstance(event, FileCreatedEvent) or isinstance(event, FileModifiedEvent):
            file = basename(event.src_path)
            if self.remote_exists(file):
                # File already on the server: transfer only changed chunks.
                local_cs = self.get_local_checksum(file)
                remote_cs = self.get_remote_checksum(file)
                changed_blocks = self.compare(local_cs, remote_cs)
                if changed_blocks:
                    logging.info("File changed - doing incremental change")
                    self.incremental_send(file, changed_blocks)
                # Trim the remote copy in case the local file shrank.
                self.truncate(file, stat(self.folder + file).st_size)
            else:
                logging.info("Sending new file")
                self.send_file(file)
        elif isinstance(event, FileDeletedEvent):
            logging.info("File deleted")
            self.delete_file(basename(event.src_path))
        else:
            logging.debug(f"Other event: {event}")

    def compare(self, local_cs, remote_cs):
        """Return the list of chunk indexes whose local checksum differs from
        the remote one (including chunks the remote does not have yet)."""
        # import pudb; pu.db
        logging.debug(f'Local checksums: {local_cs}')
        logging.debug(f'Remote checksums: {remote_cs}')
        if local_cs['checksum'] == remote_cs['checksum']:
            # zero changed blocks, they are identical
            return []
        else:
            remote_blocks = len(remote_cs['chunks'])
            logging.debug(f'Remote blocks for compare {remote_blocks}')
            changed_blocks = []
            for idx, chunk in enumerate(local_cs['chunks']):
                logging.debug(f"Comparing block {idx}")
                if idx >= remote_blocks:
                    logging.debug("No block on remote side to compare with.")
                    changed_blocks.append(idx)
                elif chunk != remote_cs['chunks'][idx]:
                    changed_blocks.append(idx)
            return changed_blocks

    def remote_exists(self, filename):
        """True if the server already has a checksum for *filename*."""
        url = self.baseUrl + 'checksum/' + basename(filename)
        r = requests.get(url)
        if r.ok:
            return True
        else:
            return False

    def truncate(self, filename, lenght):
        """Ask the server to cut its copy of *filename* down to *lenght* bytes."""
        filename = filename
        print(f"Truncating (unknown) to {lenght}")
        url = self.baseUrl + 'truncate/'
        # NOTE: the misspelt JSON key 'lenght' is part of the wire protocol;
        # do not "fix" it without changing the server side too.
        requests.post(url, json={'filename': filename, 'lenght': lenght})

    def get_local_checksum(self, filename):
        """Checksum the local file.

        Returns {'checksum': <whole-file sha256 hex>,
                 'chunks': [<sha256 hex per chunkSize block>, ...]}.
        """
        cs_whole = sha256()
        chunk_checksums = []
        with open(self.folder + filename, 'rb') as fh:
            for chunk in read_in_chunks(fh, chunk_size=self.chunkSize):
                cs_whole.update(chunk)
                cs_chunk = sha256()
                cs_chunk.update(chunk)
                chunk_checksums.append(cs_chunk.hexdigest())
        return {"checksum": cs_whole.hexdigest(),
                "chunks": chunk_checksums}

    def get_remote_checksum(self, filename):
        """ fetch the remote checksum so we can compare """
        url = self.baseUrl + 'checksum/' + basename(filename)
        r = requests.get(url)
        return r.json()

    def send_block(self, filename: str, block: int, content: bytes):
        """Upload one chunk (index *block*) of *filename* to the server."""
        filename = basename(filename)
        url = self.baseUrl + 'upload_chunk/' + filename + '/' + str(block)
        r = requests.post(url, data=content)
        logging.debug(f'Sending block {block} on file (unknown) with len {len(content)}: {r.text}')

    def incremental_send(self, filename: str, blocks: list):
        """Read each changed chunk from the local file and upload it."""
        local_file = self.folder + filename
        logging.info(f"Doing incremental change on {local_file}")
        logging.debug(f"Changed blocks: {blocks}")
        with open(local_file, 'rb') as fh:
            for block in blocks:
                # Chunk index -> byte offset.
                fh.seek(block * self.chunkSize)
                content = fh.read(self.chunkSize)
                self.send_block(filename, block, content)
        return True

    def send_file(self, file: str):
        """Upload the whole file in one multipart POST."""
        url = self.baseUrl + 'upload/'
        with open(self.folder + file, 'rb') as fh:
            files = {'file': fh}
            r = requests.post(url, files=files)
            logging.info(f"Sending file {file}: {r.text}")

    def delete_file(self, file: str):
        """Tell the server to remove *file*."""
        url = self.baseUrl + 'delete/'
        r = requests.post(url, json={'filename' : file})
        logging.info(f"Deleteing file {file}: {r.text}")
def read_in_chunks(file_object, chunk_size=8192):
    """Yield successive *chunk_size* pieces of *file_object* until EOF.

    Works with any object exposing ``read(size)`` (text or binary); the
    final chunk may be shorter than *chunk_size*.  Default chunk size: 8k.
    """
    piece = file_object.read(chunk_size)
    while piece:
        yield piece
        piece = file_object.read(chunk_size)
| StarcoderdataPython |
11232845 | <gh_stars>10-100
from __future__ import print_function
import math
import torch.nn as nn
import numpy as np
import pdb
class Loss(object):
    """Abstract accumulator around a torch loss criterion.

    Subclasses implement ``get_loss``/``eval_batch``; this base keeps the
    running loss (``acc_loss``) and a normalisation count (``norm_term``).
    """

    def __init__(self, name, criterion):
        # Human-readable loss name.
        self.name = name
        # Must be a torch loss module (subclass of nn.modules.loss._Loss).
        self.criterion = criterion
        if not issubclass(type(self.criterion), nn.modules.loss._Loss):
            raise ValueError("Criterion has to be a subclass of torch.nn._Loss")
        # Accumulated loss: int 0 until the first batch is evaluated, a
        # tensor afterwards.
        self.acc_loss = 0
        self.norm_term = 0

    def reset(self):
        """Forget everything accumulated so far."""
        self.acc_loss = 0
        self.norm_term = 0

    def get_loss(self):
        """Return the (normalised) scalar loss; subclass responsibility."""
        raise NotImplementedError

    def eval_batch(self, outputs, target):
        """Accumulate the loss of one batch; subclass responsibility."""
        raise NotImplementedError

    def cuda(self):
        """Move the underlying criterion to the GPU."""
        self.criterion.cuda()

    def backward(self):
        """Backpropagate the accumulated loss.

        Raises:
            ValueError: if no batch was evaluated yet (acc_loss is still int).
        """
        if type(self.acc_loss) is int:
            raise ValueError("No loss to back propagate.")
        self.acc_loss.backward()
class NLLLoss(Loss):
    """Batch-averaged negative log-likelihood loss accumulator.

    Optionally zeroes out the class weight at index *mask* (e.g. a padding
    symbol) so masked targets contribute nothing to the loss.
    """

    _NAME = "Avg NLLLoss"

    def __init__(self, weight=None, mask=None, size_average=True):
        self.mask = mask
        self.size_average = size_average
        if mask is not None:
            # Masking works by zeroing that class's weight, so a weight
            # vector is mandatory whenever a mask is given.
            if weight is None:
                raise ValueError("Must provide weight with a mask.")
            weight[mask] = 0
        criterion = nn.NLLLoss(weight=weight, size_average=size_average)
        super(NLLLoss, self).__init__(self._NAME, criterion)

    def get_loss(self):
        """Scalar loss so far, averaged over batches when size_average is set."""
        if isinstance(self.acc_loss, int):
            # No batch evaluated yet.
            return 0
        total = self.acc_loss.item()
        return total / self.norm_term if self.size_average else total

    def eval_batch(self, outputs, target):
        """Add one batch's NLL loss to the running total."""
        self.acc_loss += self.criterion(outputs, target)
        self.norm_term += 1
| StarcoderdataPython |
8180915 | <filename>perfrunner/helpers/sync.py<gh_stars>10-100
import threading
class SyncHotWorkload:
    """Periodically raises the shared ``timer_elapse`` flag to signal that
    the hot working set should move."""

    def __init__(self, current_hot_load_start, timer_elapse):
        # Pending threading.Timer, or None while no timer is armed.
        self.timer = None
        # Shared values used to coordinate with the workload workers.
        self.current_hot_load_start = current_hot_load_start
        self.timer_elapse = timer_elapse

    def start_timer(self, ws):
        """Raise the elapse flag now and, when the workload settings define a
        move interval, re-arm a timer to fire again after that interval."""
        self.timer_elapse.value = 1
        interval = ws.working_set_move_time
        if interval:
            self.timer = threading.Timer(interval, self.start_timer, args=[ws])
            self.timer.start()

    def stop_timer(self):
        """Cancel any pending timer."""
        if self.timer:
            self.timer.cancel()
            self.timer = None
| StarcoderdataPython |
5187599 | <filename>src/xsd_frontend/base.py
from django.views.generic.base import View
from django.views.generic.list import ListView
from django.shortcuts import redirect
from .models import UpdateRequest
from .forms import UpdateRequestReply
class BaseUpdateRequestList(ListView):
    """Generic list view of UpdateRequests for one site area.

    Child views must set ``template_name``, ``area`` and ``form_action``
    (and may set ``custom_include`` for extra template content).
    """
    model = UpdateRequest
    template_name = ""  # Must be set by child views
    area = ""  # Again set by child view
    form_action = ""  # Where to send the form
    custom_include = ""
    context_object_name = "update_requests"  # This is consistent

    def get_queryset(self):
        # Restrict to this view's area, newest first; prefetch the author
        # relation to avoid per-row queries in the template.
        queryset = super(BaseUpdateRequestList, self).get_queryset()
        queryset = queryset.filter(area=self.area).order_by('-sent').prefetch_related('request_made_by')
        return queryset

    def get_context_data(self, **kwargs):
        # Add a blank reply form plus the view-specific template hooks.
        context = super(BaseUpdateRequestList, self).get_context_data(**kwargs)
        context['response_form'] = UpdateRequestReply()
        context['form_action'] = self.form_action
        context['update_request_custom'] = self.custom_include
        return context
class BaseUpdateRequestRespond(View):
    """Handle the staff reply form for an UpdateRequest.

    Child views set ``success_url`` (the matching UpdateRequestList view is
    a good bet); the response redirects back to the request's anchor there.
    """

    success_url = ""

    def post(self, request, *args, **kwargs):
        ur_pk = int(request.POST['pk'])
        ur = UpdateRequest.objects.get(pk=ur_pk)
        ur.response_body = request.POST['response_body']
        if 'completed' in request.POST:
            ur.completed = request.POST['completed']
        elif ur.completed:
            # Checkbox absent from the submitted form means "not completed".
            ur.completed = False
        ur.save()
        return redirect("{}#ur{}".format(self.success_url, ur_pk))
| StarcoderdataPython |
178870 | <filename>main.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jun 4 16:25:33 2021
@author: jay
"""
import argparse
from utils.config import process_config
from agents import *
from sklearn.model_selection import train_test_split
import h5py
import numpy as np
import torch
def main():
    """Entry point: parse the config, load the HDF5 dataset, split it into
    train/val/test and hand everything to the configured agent."""
    # parse the path of the json config file
    arg_parser = argparse.ArgumentParser(description="")
    arg_parser.add_argument(
        'config',
        metavar='config_json_file',
        default='None',
        help='The Configuration file in json format')
    args = arg_parser.parse_args()

    # parse the config json file
    config = process_config(args.config)

    # Create the Agent and pass all the configuration to it then run it..
    # (the agent class is looked up by name among module-level imports)
    agent_class = globals()[config.agent]

    with h5py.File(config.data_root,'r') as hdf: #Read hdf5 file and converts into a numpy aray
        ls=list(hdf.keys())
        print('Dataset List: \n', ls)
        X = np.array(hdf.get('extracted_x'))#extrated_x
        y = np.array(hdf.get('target_y'))#target_y

    X = torch.from_numpy(X)
    y = torch.from_numpy(y)
    #X = X.cuda()
    #y = y.cuda()

    # First carve off ~9.1% as the test set, then 10% of the remainder as
    # validation; shuffle is off so the splits are reproducible slices.
    config.X_train,config.X_test,config.y_train,config.y_test = train_test_split(X,y,test_size=0.090909,shuffle=False)
    config.X_train, config.X_val, config.y_train, config.y_val = train_test_split(config.X_train, config.y_train, test_size=0.1, shuffle = False)
    #y_train = y_train.type(torch.LongTensor)
    #y_test = y_test.type(torch.LongTensor)
    #y_val = y_val.type(torch.LongTensor)
    config.num_rows, config.num_cols = config.X_train.shape
    config.num_rows_2, config.num_cols_2 = config.X_test.shape
    config.num_rows_3, config.num_cols_3 = config.X_val.shape

    agent = agent_class(config)
    agent.run()
    agent.finalize()
if __name__ == '__main__':
main()
| StarcoderdataPython |
260524 | <reponame>jldantas/leet<filename>leet/backends/cb.py
# -*- coding: utf-8 -*-
"""Implements the Carbon Black Response, using Live Response backend.
This module contains the three necessary classes to implement the CB backend:
- CBMachine -> Represents a machine to CB
- CBSession -> Represents a LR session
- Backend -> The main entry point for the backend
"""
import logging
import datetime
from cbapi.response import CbResponseAPI, Sensor
import cbapi.errors
from ..base import LeetBackend, LeetMachine, LeetSOType, LeetSession, LeetFileAttributes
from ..errors import LeetSessionError, LeetCommandError
_MOD_LOGGER = logging.getLogger(__name__)
class CBMachine(LeetMachine):
    """A LeetMachine implementation for the CB Backend.

    Attributes:
        sensor (cbapi.response.Sensor): A sensor as seen by the CB API.
        can_connect (bool): If the machine is available to be connected.
    """

    def __init__(self, hostname, backend_name, sensor):
        """Creates a new CBMachine object.

        Args:
            hostname (str): The hostname of the machine
            backend_name (str): The unique name for the backend
            sensor (cbapi.response.Sensor): The sensor object that represents
                a machine in CB
        """
        super().__init__(hostname, backend_name)
        self.sensor = sensor
        # CB reports os_type 1 for Windows sensors.
        if self.sensor.os_type == 1:
            self.so_type = LeetSOType.WINDOWS

    @property
    def can_connect(self):
        """If the machine is available to be connected."""
        return self.sensor.status == "Online"

    def refresh(self):
        """See base class documentation"""
        self.sensor.refresh()

    def connect(self):
        """See base class documentation"""
        try:
            return CBSession(self.sensor.lr_session(), self)
        except cbapi.errors.TimeoutError as e:
            raise LeetSessionError("Timed out when requesting a session to cbapi") from e
        except cbapi.errors.ObjectNotFoundError as e:
            raise LeetSessionError("Max limit of sessions opened") from e
class CBSession(LeetSession):
    """Represents a new session using the CB backend.

    This basically wraps a live response session into a leet session, allowing
    decoupling of the plugin and the backend. It handles all the necessary
    code provide what is defined in the base class and makes sure any errors
    raised are correctly coverted to the respective Leet errors.
    """
    #TODO test what error is raised if session is interrupted in the middle

    def __init__(self, lr_session, machine_info):
        """Returns a CBSession object.

        Args:
            lr_session (cbapi.live_response_api.LiveResponse): A live response
                session
            machine_info (CBMachine): A machine info object
        """
        super().__init__(lr_session, machine_info)
        # Name -> live-response method; _execute dispatches through this
        # table so every call site shares the same error translation.
        self._mapping_table = {
            "list_processes" : self.raw_session.list_processes,
            "get_file" : self.raw_session.get_file,
            "put_file" : self.raw_session.put_file,
            "delete_file" : self.raw_session.delete_file,
            "start_process" : self.raw_session.create_process,
            "make_dir" : self.raw_session.create_directory,
            "dir_list" : self.raw_session.list_directory
        }

    def start_process(self, cmd_string, cwd=None, background=False):
        """See base class documentation"""
        # Foreground processes wait for completion (600s limit) and return
        # their output; background ones neither wait nor capture output.
        return self._execute("start_process", cmd_string, not background, None, cwd, 600, not background)

    def delete_file(self, remote_file_path):
        """See base class documentation"""
        self._execute("delete_file", remote_file_path)

    def put_file(self, fp, remote_file_path, overwrite=False):
        """See base class documentation"""
        # Delete first when overwriting, then make sure the destination
        # directory exists before uploading.
        if self.exists(remote_file_path) and overwrite:
            self._execute("delete_file", remote_file_path)
        remote_path = self.path_separator.join(remote_file_path.split(self.path_separator)[:-1])
        if not self.exists(remote_path):
            self.make_dir(remote_path)
        self._execute("put_file", fp, remote_file_path)

    def make_dir(self, remote_path, recursive=True):
        """See base class documentation"""
        path_parts = remote_path.split(self.path_separator)
        #if the last split is empty, probably it was passed with trailling
        #separator
        if not path_parts[-1]:
            path_parts = path_parts[:-1]
        #This skips the root of the path
        check = []
        necessary_create = False
        check.append(path_parts.pop(0))
        if recursive:
            # Walk down the path until the first missing component.
            for i, part in enumerate(path_parts):
                check.append(part)
                if not self.exists(self.path_separator.join(check)):
                    #the moment we can't find a path, we need to create everything
                    #from there forward
                    necessary_create = True
                    break
            if necessary_create:
                check.pop(-1)
                for missing_path in path_parts[i:]:
                    check.append(missing_path)
                    path = self.path_separator.join(check)
                    _MOD_LOGGER.debug("Trying to create path '%s' on the remote host", path)
                    self._execute("make_dir", path)
            else:
                _MOD_LOGGER.debug("No path need to be created.")
        else:
            self._execute("make_dir", remote_path)

    def exists(self, remote_file_path):
        """See base class documentation"""
        # Look for the final component inside a listing of its parent.
        if remote_file_path[-1] == self.path_separator:
            idx = -2
        else:
            idx = -1
        split_path = remote_file_path.split(self.path_separator)
        #passing a root path (c:, d:, /, etc) is a logic error and raises an
        #exception
        if len(split_path) == 1:
            raise LeetCommandError("Can't verify existence of root paths.")
        file_name = split_path[idx]
        path = self.path_separator.join(split_path[:idx]) + self.path_separator
        try:
            list_dir = self._execute("dir_list", path)
            #list_dir = self.raw_session.list_directory(path)
        except LeetCommandError as e:
        # except cbapi.live_response_api.LiveResponseError as e:
            # Parent directory not listable -> the path does not exist.
            return False
        return bool([a for a in list_dir if a["filename"] == file_name])

    def get_file(self, remote_file_path):
        """See base class documentation"""
        #TODO check if the file exist first?
        return self._execute("get_file", remote_file_path)

    def _execute(self, *args):
        """See base class documentation"""
        #TODO should live response errors be mapped to plugin errors?
        _MOD_LOGGER.debug("Executing on session: %s", args)
        try:
            # args[0] selects the live-response call; the rest are its args.
            if len(args) == 1:
                return self._mapping_table[args[0]]()
            else:
                return self._mapping_table[args[0]](*args[1:])
        #TODO it can also raise ApiError on 404 to server?
        except cbapi.errors.TimeoutError as e:
            raise LeetSessionError("Timed out when requesting a session to cbapi") from e
        except cbapi.live_response_api.LiveResponseError as e:
            raise LeetCommandError(str(e)) from e
            #raise LeetPluginError(str(e)) from e
        # except KeyError as e:
        #     raise LeetSessionError("Unknown function.", True) from e

    def _parse_file_attributes(self, attributes):
        # Translate CB's attribute strings into LeetFileAttributes flags;
        # attributes not mapped here are dropped.
        attr = []
        attr_list = set(attributes)
        if "HIDDEN" in attr_list:
            attr.append(LeetFileAttributes.HIDDEN)
        if "DIRECTORY" in attr_list:
            attr.append(LeetFileAttributes.DIRECTORY)
        if "SYSTEM" in attr_list:
            attr.append(LeetFileAttributes.SYSTEM)
        return attr

    def list_dir(self, remote_path):
        """See base class documentation"""
        # Sample return of a CB dirlist
        # {'last_access_time': 1458169329, 'last_write_time': 1458169329, 'filename': '$Recycle.Bin', 'create_time': 1247541536, 'attributes': ['HIDDEN', 'SYSTEM', 'DIRECTORY'], 'size': 0},
        # {'last_access_time': 1515105722, 'last_write_time': 1515105722, 'filename': 'Boot', 'create_time': 1449789900, 'attributes': ['HIDDEN', 'SYSTEM', 'DIRECTORY'], 'size': 0},
        # {'last_access_time': 1515105722, 'last_write_time': 1290309831, 'filename': 'bootmgr', 'create_time': 1449789900, 'attributes': ['READONLY', 'HIDDEN', 'SYSTEM', 'ARCHIVE'], 'size': 383786},
        # {'last_access_time': 1247548136, 'last_write_time': 1247548136, 'filename': 'Documents and Settings', 'create_time': 1247548136, 'alt_name': 'DOCUME~1', 'attributes': ['HIDDEN', 'SYSTEM', 'DIRECTORY', 'REPARSE_POINT', 'NOT_CONTENT_INDEXED'], 'size': 0}
        list_dir = []
        cb_list_dir = self._execute("dir_list", remote_path)
        # A single DIRECTORY entry means the path itself was a directory;
        # re-list with a trailing separator to get its contents.
        if len(cb_list_dir) == 1 and "DIRECTORY" in cb_list_dir[0]["attributes"]:
            cb_list_dir = self._execute("dir_list", remote_path + self.path_separator)
        for entry in cb_list_dir:
            data = {"name": entry["filename"],
                    "size": entry["size"],
                    "attributes": self._parse_file_attributes(entry["attributes"]),
                    "create_time": datetime.datetime.utcfromtimestamp(entry["create_time"]),
                    "modification_time": datetime.datetime.utcfromtimestamp(entry["last_write_time"]),
                    }
            list_dir.append(data)
        return list_dir

    def list_processes(self):
        """See base class documentation"""
        processes = []
        process_list = self._execute("list_processes")
        for process in process_list:
            # NOTE(review): "command_line" is split on the path separator and
            # only the last piece kept — looks like it extracts the trailing
            # token of the command line; confirm intent against consumers.
            processes.append({"username": process["username"],
                              "pid": process["pid"],
                              "ppid": process["parent"],
                              "start_time": datetime.datetime.utcfromtimestamp(process["create_time"]),
                              "command_line": process["command_line"].split(self.path_separator)[-1],
                              "path": process["path"],
                              })
        return processes

    def __enter__(self):
        """Enter context"""
        return self

    def __exit__(self, exeception_type, exception_value, traceback):
        """Exit context"""
        self.raw_session.close()
class Backend(LeetBackend):
    """Implements the CB backend communication.

    This class starts the connection to the backend server and enables direct
    interaction with it.
    """

    def __init__(self, profile_name):
        """Returns a Backend object.

        Args:
            profile_name (str): The profile name that this class will connect,
                as seen in the 'credentials.response' file.
        """
        super().__init__("CB-" + profile_name, 7) #TODO move max_session to a configuration/variable
        self._profile_name = profile_name
        # Lazily initialised in start(); None until then.
        self._cb = None

    @property
    def url(self):
        """The Carbon Black server URL"""
        return self._cb.url

    def start(self):
        """Starts the internal thread (see base class documentation) and
        start the connection to the CB server.
        """
        super().start()
        self._cb = CbResponseAPI(profile=self._profile_name)
        return self

    def _get_sensor(self, hostname):
        """Return the sensor related to the hostname. If more than one sensor
        is found, it will return the one that did the most recent check-in.

        Args:
            hostname (str): The machine name

        Returns:
            [Sensor]: The list of sensors
        """
        recent_sensor = None
        query = "hostname:" + hostname
        sensors = self._cb.select(Sensor).where(query)
        for sensor in sensors:
            if recent_sensor is None:
                recent_sensor = sensor
            else:
                # Keep whichever sensor checked in most recently.
                if sensor.last_checkin_time > recent_sensor.last_checkin_time:
                    recent_sensor = sensor
        return recent_sensor

    def _search_machines(self, search_request):
        """See base class documentation"""
        # One CBMachine per hostname that resolves to a known sensor.
        machine_list = []
        for hostname in search_request.hostnames:
            sensor = self._get_sensor(hostname)
            if sensor is not None:
                machine_list.append(CBMachine(hostname, self.backend_name, sensor))
        return machine_list
| StarcoderdataPython |
1907325 | # -*- coding: utf-8 -*-
from .sign import sign_content
__name__ = "uonet-request-signer"
__version__ = "1.0.0"
__all__ = ["sign_content"]
| StarcoderdataPython |
6691224 | <filename>tief/association/association_rule.py
from .apriori import support
import pandas as pd
import itertools
def confidence(_item, next_item):
    """
    Return confidence value: support(_item + next_item) / support(_item).

    _item: list of string, ex ['123'] or ['123', '124'] (the antecedent)
    next_item: list of string, ex ['123'] or ['123', '124'] (the consequent)
    """
    combined = _item + next_item
    return support(combined) / support(_item)
def has_duplicates(iterable):
    """True if flattening *iterable* one level yields any repeated element.

    Works even when *iterable* is an iterator, since it is fully consumed.
    """
    flattened = list(itertools.chain.from_iterable(iterable))
    return len(set(flattened)) != len(flattened)
def permutations(iterable, r=None):
    """Yield r-length permutations of *iterable*'s elements as lists,
    skipping any permutation whose elements share a member when flattened
    one level (e.g. two itemsets containing the same item).

    Yields in the same order as ``itertools.permutations``.  This replaces
    a hand-rolled copy of the CPython permutation algorithm with the
    standard-library generator plus the duplicate filter, which is both
    shorter and less error-prone.
    """
    for perm in itertools.permutations(iterable, r):
        # Flatten one level and keep the permutation only when every
        # flattened element is distinct.
        flattened = list(itertools.chain.from_iterable(perm))
        if len(set(flattened)) == len(flattened):
            yield list(perm)
def getPermutations(iterable, r=None):
    """Materialise :func:`permutations` (the duplicate-filtered variant) into a list."""
    return list(permutations(iterable, r))
def association_rule(itemset=None, min_confidence=0.5):
    """Build a DataFrame of association rules ``A --> B`` from *itemset*.

    Every ordered pair of element-disjoint itemsets is evaluated; a rule is
    kept when ``confidence(A, B) >= min_confidence``.

    Args:
        itemset: list of itemsets (defaults to an empty list).
        min_confidence: minimum confidence threshold for a rule to be kept.

    Returns:
        pandas.DataFrame with notation, antecedent/consequent, their
        supports and the confidence.
    """
    # BUG FIX: avoid the shared mutable-default-argument pitfall
    # (``itemset=[]`` is evaluated once and shared between calls).
    if itemset is None:
        itemset = []
    candidate_pairs = getPermutations(itemset, 2)
    table = []
    for antecedent, consequent in candidate_pairs:
        conf = confidence(antecedent, consequent)
        if conf >= min_confidence:
            table.append([
                str(antecedent) + " --> " + str(consequent),
                antecedent,
                consequent,
                support(antecedent),
                support(consequent),
                conf,
            ])
    return pd.DataFrame(table, columns=['Notasi', 'Antecedent', 'Consequents', 'Antecedent Support', 'Consequents Support', 'Confidence'])
| StarcoderdataPython |
362028 | <filename>Type Trainer.py
import curses
from curses import wrapper, initscr, endwin
from time import sleep, time
from art import text2art
import locale
from math import log
from json import load
from os.path import isfile
from _thread import start_new_thread as nt
from copy import deepcopy
# First-run bootstrap: pre-render the 128 ASCII glyphs of the "universal"
# font into .chars (a python dict literal), because calling text2art per
# keystroke would be far too slow.
if not isfile(".chars"):
    print ("There is no character file! Creating .char (cause the program will be slow af otherwhise)...")
    with open(".chars", "w", encoding="utf8") as charf:
        univ_chars = {}
        charf.write("{")
        for i in range(128):
            charf.write(str(i) + ":\"\"\"" + text2art(chr(i), "universal").replace("\"", "\\\"") + "\"\"\",")
        charf.write("}")
# Default config: average speed, position counter and score all zero.
if not isfile(".conf"):
    print ("There is no config! Creating config...")
    with open(".conf", "w", encoding="utf8") as cnf:
        cnf.write("0;0;0")
# Load the glyph caches.  NOTE(review): eval on file contents is trusted
# here because the files are generated locally.
with open(".chars", "r", encoding="utf8") as charf:
    chars = eval(charf.read())
with open("special characters", "r", encoding="utf8") as s:
    sc = eval(s.read())
input("resize screen if needed")
locale.setlocale(locale.LC_ALL, '')
code = locale.getpreferredencoding()
# .save holds one "word;amount;average;priority" record per line; unpack it
# into the column-oriented sv dict used throughout the program.
with open(".save", "r", encoding="utf8") as s:
    sv2 = s.read().split("\n")
sv = {
    "words": [],
    "amount": [],
    "average": [],
    "priority": []
}
for i in range(len(sv2)):
    sv2[i] = sv2[i].split(";")
    sv["words"].append(sv2[i][0])
    sv["amount"].append(int(sv2[i][1]))
    sv["average"].append(float(sv2[i][2]))
    sv["priority"].append(int(sv2[i][3]))
# Restore the persisted session stats.
with open(".conf", "r") as f:
    r = f.read().split(";")
av_speed = float(r[0])
counter = int(r[1])
score = float(r[2])
def save_conf(av_speed, counter, score):
    """Persist the session stats to .conf as 'speed;counter;score'."""
    with open(".conf", "w") as s:
        s.write(";".join([str(av_speed), str(counter), str(score)]))
def save_save(sv, diff):
    """Merge *diff* into *sv* and rewrite the .save file.

    diff maps column name -> {index: new value}; after merging, the file is
    rewritten as one 'word;amount;average;priority' record per line.
    """
    for i in diff:
        for j in diff[i]:
            sv[i][j] = diff[i][j]
    with open(".save", "w", encoding="utf8") as s:
        # NOTE: a transient "out" column is added to sv here (side effect
        # visible to the caller).
        sv["out"] = []
        for i in range(len(sv["words"])):
            sv["out"].append(";".join([sv["words"][i],str(sv["amount"][i]),str(sv["average"][i]),str(sv["priority"][i])]))
        s.write("\n".join(sv["out"]))
def round2(num):
    """Truncate *num* to two decimal places (toward zero, not rounded)."""
    hundredths = int(num * 100)
    return hundredths / 100
def my_t2a(text, size, do_round=True):
    """Render *text* as ASCII art.

    size == 0: use the pre-generated "universal" font glyphs from the module
    global ``chars`` and return 12 rows (one list per row).
    size == 1: render with the "bulbhead" font via text2art, returning a list
    of row strings; ints are stringified, floats truncated to two decimals
    unless *do_round* is False.
    """
    if size == 0:
        # 12 rows: the height of the pre-rendered universal-font glyphs.
        res = [[""] for i in range(12)]
        for i in text:
            tmp = chars[ord(i)].split("\n")
            for j in range(len(tmp)):
                # NOTE(review): res[j] is a list, so += extends it with the
                # individual characters of the string tmp[j] (not string
                # concatenation); callers iterate glyphs character-wise, so
                # this appears intentional — confirm before changing.
                res[j] += tmp[j]
        return res
    elif size == 1:
        font = "bulbhead"
        if isinstance(text, int):
            text = str(text)
        elif isinstance(text, float):
            if do_round:
                text = str(round2(text))
            else:
                text = str(text)
        elif not isinstance(text, str):
            raise TypeError
        return text2art(text, font).split("\n")
def strt_scrn():
    """Initialise curses and return the main screen with keypad mode enabled."""
    stdscr = initscr()
    stdscr.keypad(True)
    return stdscr
def give_word():
    """Advance the global *counter* until it divides some word's priority
    evenly, and return that word's index (first match wins)."""
    global counter
    while True:
        remainders = [counter % priority for priority in sv["priority"]]
        if 0 in remainders:
            return remainders.index(0)
        counter += 1
def create_header(av_speed_ART, spl_prev_ART, av_speed_word_ART, score_ART, amount, counter, past_priority, current_priority):
    """Draw the stats banner (speeds, score, counters) at the top of the
    screen using the pre-rendered ASCII-art rows (module global stdscr)."""
    # Numeric fields are rendered as plain strings under the art rows.
    counter = str(counter)
    amount = str(amount)
    current_priority = str(current_priority)
    past_priority = str(past_priority)
    stdscr.addstr("esc - menu", curses.A_BLINK)
    stdscr.addstr("\n\n")
    # Print the four art blocks side by side, padding each to a
    # 32-character column.
    for i in range(len(spl_prev_ART)):
        stdscr.addstr(" ")
        stdscr.addstr(av_speed_ART[i], curses.A_BLINK)
        stdscr.addstr(" " * (32 - len(av_speed_ART[i])))
        stdscr.addstr(av_speed_word_ART[i], curses.A_BLINK)
        stdscr.addstr(" " * (32 - len(av_speed_word_ART[i])))
        stdscr.addstr(spl_prev_ART[i], curses.A_BLINK)
        stdscr.addstr(" " * (32 - len(spl_prev_ART[i])))
        stdscr.addstr(score_ART[i] + "\n", curses.A_BLINK)
    # Plain-text counters, each padded to an 8-character column.
    stdscr.addstr("\n "+ "Total: ")
    stdscr.addstr(amount, curses.A_BLINK)
    stdscr.addstr(" " * (8 - len(amount))+ "Pos: ")
    stdscr.addstr(counter, curses.A_BLINK)
    stdscr.addstr(" " * (8 - len(counter))+ "Prv Pnts: ")
    stdscr.addstr(str(past_priority), curses.A_BLINK)
    stdscr.addstr(" " * (8 - len(past_priority))+ "Crr Pnts: ")
    stdscr.addstr(current_priority, curses.A_BLINK)
    stdscr.addstr("\n\n")
def create_text(Art, y):
    """Print the banner rows of the current word on ``stdscr``; the first
    *y* letter columns (already typed correctly) blink."""
    global stdscr
    for row in Art:
        stdscr.addstr(" ")
        for idx, cell in enumerate(row):
            if idx < y:
                stdscr.addstr(cell, curses.A_BLINK)
            else:
                stdscr.addstr(cell)
        stdscr.addstr("\n")
# Module-level curses bootstrap: create the screen and switch the terminal
# into raw-ish input mode before main() runs (mirrored by the teardown at
# the bottom of the file).
stdscr = strt_scrn()
curses.noecho()  # do not echo typed keys
curses.cbreak()  # deliver keys immediately, without waiting for Enter
curses.start_color()
curses.use_default_colors()
def main(stdscr):
    """Run the typing-trainer session loop.

    Presents words chosen by give_word(), measures per-letter typing speed,
    updates the per-word rolling averages/priorities in the module-level
    ``sv`` dict, and persists the accumulated changes via ``nt(save_conf, ...)``
    and ``nt(save_save, ...)`` every 10 words.  Esc opens a quit/continue menu.

    NOTE(review): the function ends by calling main(stdscr) again — each
    quit-and-restart adds a stack frame (possible RecursionError on very
    long sessions).
    """
    global av_speed
    # ``diff`` accumulates only the entries changed this session, keyed the
    # same way as ``sv``; ``sv2`` snapshots the starting state for saving.
    diff = {}
    for i in sv:
        diff[i] = {}
    sv2 = deepcopy(sv)
    curses.start_color()
    curses.use_default_colors()
    # Words with a non-zero average have been practised before and count
    # toward the score denominator.
    active = set()
    for i in range(len(sv["average"])):
        if sv["average"][i] != 0:
            active.add(i)
    stdscr.clear()
    y=0
    x=""
    past_priority = 0
    spl_prev = "----"
    score = (1/(sum([sv["average"][i] for i in active])/len(active)))**3*len(sv["words"])**0.5
    while True:
        # Inner batch of 10 words between saves.
        for no_need in range(10):
            score = (1/(sum([sv["average"][i] / len(sv["words"][i]) for i in active])/len(active)))**3*len(sv["words"])**0.5
            stdscr.clear()
            Art = []
            ind = give_word()
            active.add(ind)
            Word = sv["words"][ind]
            # Build the banner art: umlauts come from ``sc``, everything
            # else from the ``chars`` table.
            for i in Word:
                if i in "äöüÖÜÄß":
                    Art.append(sc[i].split("\n"))
                else:
                    Art.append(chars[ord(i)].split("\n"))
            # Transpose letter-major art into 12 row-major lists.
            Art2 = [[] for i in range (12)]
            for i in Art:
                for j in range(len(i)):
                    Art2[j].append(i[j])
            Art = Art2
            amount = sum(sv["amount"])
            av_speed_word = sv["average"][ind] / len(Word)
            current_priority = sv["priority"][ind]
            av_speed_ART = my_t2a(av_speed, 1)
            av_speed_word_ART = my_t2a(av_speed_word, 1)
            spl_prev_ART = my_t2a(spl_prev, 1)
            score_ART = my_t2a(score, 1)
            create_header(av_speed_ART, spl_prev_ART, av_speed_word_ART, score_ART, amount, counter, past_priority, current_priority)
            create_text(Art, 0)
            # Per-letter input loop: only the next expected letter advances y.
            while True:
                x=stdscr.getkey()
                if y >= len(Word):
                    y=0
                    break
                if x == chr(27):
                    break
                elif x == Word[y]:
                    if y==0:
                        # Timer starts on the first correct letter.
                        start = time()
                    y+=1
                else:
                    continue
                stdscr.clear()
                av_speed_ART = my_t2a(av_speed, 1)
                if y > 1:
                    # NOTE(review): end1 is assigned at the bottom of this
                    # loop; on unusual key orderings it could be read before
                    # assignment — confirm unreachable in practice.
                    spl_prev_ART = my_t2a((end1-start)/(y-1), 1)
                else:
                    spl_prev_ART = my_t2a("----", 1)
                create_header(av_speed_ART, spl_prev_ART, av_speed_word_ART, score_ART, amount, counter, past_priority, current_priority)
                create_text(Art, y)
                end1 = time()
            if x == chr(27):
                break
            # Word finished: update the rolling average (window capped at 20
            # samples) and recompute the word's scheduling priority.
            end = time()
            speed = end - start
            am = sv["amount"][ind]
            if am > 19:
                am = 19
            sv["average"][ind] = (sv["average"][ind] * am + speed) / (am + 1)
            diff["average"][ind] = sv["average"][ind]
            sv["amount"][ind] += 1
            diff["amount"][ind] = sv["amount"][ind]
            if sv["amount"][ind] < 10:
                sv["priority"][ind] += 101
                past_priority = 100
            else:
                add_p = (av_speed/((speed + sv["average"][ind]*9)/len(Word) + av_speed*90)*100)** (10 + log(sv["amount"][ind], 10))
                sv["priority"][ind] = int((1 + add_p / (1 + abs(add_p)))**30/100)
                past_priority = sv["priority"][ind]
            diff["priority"][ind] = sv["priority"][ind]
            diff["words"][ind] = sv["words"][ind]
            spl_prev = speed / len(Word)
            # Global speed is an exponentially weighted average, weighted by
            # word length.
            av_speed = (av_speed * (1000-len(Word)) + speed)/1000
        if x == chr(27):
            # Esc menu: q quits the session, c resumes.
            stdscr.clear()
            stdscr.addstr("q - quit", curses.A_BLINK)
            stdscr.addstr(" ")
            stdscr.addstr("c - continue", curses.A_BLINK)
            stdscr.addstr("\n")
            while True:
                x=stdscr.getkey()
                if x in "cq":
                    break
                else:
                    stdscr.addstr("\ninvalid input", curses.A_BLINK)
            if x == "q":
                break
            if x == "c":
                y=0
        # Persist config and the session's accumulated diffs (``nt`` is
        # presumably a thread/async helper defined elsewhere — confirm).
        nt(save_conf, (av_speed, counter, score))
        nt(save_save, (sv2, deepcopy(diff)))
    main(stdscr)
# Final save + curses teardown, run once main() returns.
# NOTE(review): ``score`` is assigned inside main(); this module-level read
# presumably relies on an earlier module-level definition outside this view —
# confirm.
with open(".conf", "w") as s:
    s.write(str(av_speed) + ";" + str(counter) + ";" + str(score))
with open(".save", "w", encoding="utf8") as s:
    # Serialise each word as "word;amount;average;priority", one per line.
    sv["out"] = []
    for i in range(len(sv["words"])):
        sv["out"].append(";".join([sv["words"][i],str(sv["amount"][i]),str(sv["average"][i]),str(sv["priority"][i])]))
    s.write("\n".join(sv["out"]))
# Restore normal terminal modes (mirrors the bootstrap at module top).
curses.nocbreak()
stdscr.keypad(False)
curses.echo()
curses.endwin()
"""Remove EquipmentDataField default_val."""
# pylint: disable=invalid-name
from django.db import migrations
class Migration(migrations.Migration):
    """Remove EquipmentDataField default_val."""
    # Must run after the migration that deleted the daily-aggregate model.
    dependencies = [
        ('IoT_DataMgmt',
         '0087_delete_EquipmentInstanceDataFieldDailyAgg')
    ]
    # Drop the obsolete default_val field from EquipmentDataField.
    operations = [
        migrations.RemoveField(
            model_name='equipmentdatafield',
            name='default_val')
    ]
# -*- coding: utf-8 -*-
"""
This script reads USGS streamflow data.
Created on Fri Feb 14 00:21:31 2020
@author: <NAME>
usage:
python swat_plot.py
"""
import pandas as pd
import datetime
from sys import version
if version > '3':
from urllib.request import urlopen
else:
from urllib2 import urlopen
#%% read usgs gauge data
def read_usgs_rdb(filepath_or_link):
    """Read a USGS tab-delimited (RDB) file into a DataFrame.

    Skips '#' comment lines and the RDB field-format row that follows the
    header (e.g. '5s', '12n'), and converts every column whose name ends in
    '_va' (value columns) to numeric where possible.

    Args:
        filepath_or_link: path, URL, or file-like object accepted by
            pandas.read_csv.

    Returns:
        pandas.DataFrame with '_va' columns numeric when parseable.
    """
    # [1:] drops the format-specification row under the header.
    df = pd.read_csv(filepath_or_link, comment='#', header=0, sep='\t')[1:]
    for col in df.columns:
        if col.endswith('_va'):
            # Equivalent to the deprecated to_numeric(errors='ignore'):
            # keep the original values when the column is not numeric.
            try:
                df[col] = pd.to_numeric(df[col])
            except (ValueError, TypeError):
                pass
    return df
def create_streamflow_url(gauge_list, begin_date='1900-01-01', end_date='2019-12-31', step='D'):
    """Build the NWIS waterdata URL for daily ('D') or monthly ('M') flow.

    Args:
        gauge_list: list of USGS gauge-number strings.
        begin_date / end_date: 'YYYY-MM-DD' period bounds.
        step: 'D' for daily values, 'M' for monthly statistics.

    Returns:
        str: the request URL.

    Raises:
        TypeError: if gauge_list is not a list.
        ValueError: if gauge_list is empty or step is unsupported.
    """
    if not isinstance(gauge_list, list):
        raise TypeError('The gagelist must be a list type.')
    if gauge_list == []:
        raise ValueError('The gagelist must not be an empty list.')
    if step == 'D':
        gages = ('&site_no={}' * len(gauge_list)).format(*gauge_list)
        period = '&period=&begin_date={}&end_date={}'.format(begin_date, end_date)
        url = 'https://waterdata.usgs.gov/nwis/dv?&cb_00060=on&format=rdb{}&referred_module=sw{}'.format(gages, period)
    elif step == 'M':
        # '{:.7}' truncates 'YYYY-MM-DD' to 'YYYY-MM' as the monthly
        # service expects.
        gages = ''.join(['&site_no={0}&por_{0}_93535=594467,00060,93535,{1:.7},{2:.7}'.format(g, begin_date, end_date) for g in gauge_list])
        url = 'https://waterdata.usgs.gov/nwis/monthly?referred_module=sw&format=rdb{}'.format(gages)
    else:
        # Previously fell through to an UnboundLocalError on ``url``.
        raise ValueError("step must be 'D' (daily) or 'M' (monthly), got {!r}".format(step))
    return url
def read_usgs_flow(gauge_list, begin_date='1900-01-01', end_date='2019-12-31', step='D'):
    """Download USGS observed streamflow for *gauge_list*.

    Args:
        gauge_list: list of USGS gauge-number strings.
        begin_date / end_date: 'YYYY-MM-DD' period bounds (validated here).
        step: 'D' for daily values, 'M' for monthly statistics (the monthly
            frame gets a first-of-month datetime index).

    Returns:
        (df, gauge_names): the streamflow DataFrame and a dict mapping each
        gauge number to its station description scraped from the file header.
    """
    print('Requested gauges: ', gauge_list)
    print('Requested period: ', begin_date, end_date)
    # Validate the date format early; the parsed values are only a check.
    # (Was a bare ``except:`` — now only parsing failures are translated.)
    try:
        datetime.datetime.strptime(begin_date, '%Y-%m-%d')
        datetime.datetime.strptime(end_date, '%Y-%m-%d')
    except (TypeError, ValueError) as exc:
        raise ValueError ("The input formats for the begin_date:{} and end_date:{} must be YYYY-MM-DD".format(begin_date, end_date)) from exc
    url = create_streamflow_url(gauge_list, begin_date, end_date, step)
    print('\nDownloading USGS observed streamflow data:')
    print(url, '\n')
    df = read_usgs_rdb(url).drop('agency_cd', axis=1)
    if step != 'D':
        # Monthly data: rebuild a first-of-month timestamp from year/month
        # columns, then drop the now-redundant service columns.
        df['datetime'] = df.apply(lambda x: datetime.datetime(int(x.year_nu), int(x.month_nu), 1), axis=1)
        df.drop(['year_nu', 'month_nu', 'parameter_cd', 'ts_id'], axis=1, inplace=True)
        df.set_index('datetime', inplace=True)
    # Re-fetch the raw file to scrape the commented station-name header lines.
    f_url = urlopen(url)
    lines = [f_url.readline() for i in range(1000 + len(gauge_list))]
    f_url.close()
    gauge_names = dict()

    def find_site_name(gauge_no):
        # The first header line mentioning the gauge number carries its name.
        for l in lines:
            if gauge_no.encode() in l:
                return l

    for g in gauge_list:
        l = find_site_name(g)
        gauge_names[g] = l.strip().lstrip(b'#').strip().decode()
    return df, gauge_names
"""
This script was made by Nick at 19/07/20.
To implement code for inference with your model.
"""
from argparse import ArgumentParser, Namespace
import os
import matplotlib.pyplot as plt
import numpy as np
import pytorch_lightning as pl
import torch
from src.utils import Config, get_dataloader
# Fix all RNG seeds and force deterministic cuDNN kernels so repeated
# inference runs are reproducible.
pl.seed_everything(777)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
def parse_args() -> Namespace:
    """Parse the command-line configuration paths for inference."""
    parser = ArgumentParser(description="Inference Autoencoders")
    options = (
        ("--cfg-dataset", "./configs/dataset/mnist.yml", "select dataset"),
        ("--cfg-model", "./configs/model/AE.yml", "select model"),
    )
    for flag, default, help_text in options:
        parser.add_argument(flag, default=default, type=str, help=help_text)
    return parser.parse_args()
def show_result(input_img, output_img):
    """Display the input image and its reconstruction side by side."""
    fig = plt.figure()
    # ("Ouput" typo kept verbatim from the original UI text.)
    panels = ((1, input_img, "Input"), (2, output_img, "Ouput"))
    for position, image, title in panels:
        axis = fig.add_subplot(1, 2, position)
        axis.imshow(image)
        axis.set_title(title)
        axis.axis("off")
    plt.show()
def run(cfg: dict):
    """Load a trained autoencoder from its checkpoint, reconstruct one
    validation image, and show input vs. output.

    NOTE(review): *cfg* is annotated ``dict`` but is accessed with
    attributes (cfg.model.ckpt.path ...) — presumably the project's Config
    object; confirm the annotation.
    """
    # Load checkpoint
    checkpoint_path = os.path.join(cfg.model.ckpt.path, cfg.model.ckpt.filename)
    # Resolve the model class by name from the ``src`` package.
    Model = getattr(__import__("src"), cfg.model.name)
    model = Model(cfg.model.params)
    model = model.load_from_checkpoint(
        checkpoint_path=checkpoint_path,
    )
    # Select test image: first image of the first validation batch.
    _, val_dataloader = get_dataloader(cfg)
    test_image = None
    for data in val_dataloader:
        images, _ = data
        test_image = images[0, :, :, :].unsqueeze(0)
        break
    # Inference
    x = torch.Tensor(test_image)
    y = model(x)
    # CHW -> HWC for matplotlib display.
    output = np.transpose(y[0].cpu().detach().numpy(), [1, 2, 0])
    test_image = np.transpose(test_image[0, :, :, :].cpu().numpy(), [1, 2, 0])
    show_result(test_image, output)
if __name__ == "__main__":
    # Entry point: merge the dataset and model configs, then run inference.
    args = parse_args()
    cfg = Config()
    cfg.add_dataset(args.cfg_dataset)
    cfg.add_model(args.cfg_model)
    run(cfg)
# Generated by Django 3.2.7 on 2022-01-22 10:09
from django.db import migrations, models
class Migration(migrations.Migration):
    """Alter Channels.ar_amt_target to BigIntegerField and
    Channels.ar_out_target to IntegerField."""

    dependencies = [
        ('gui', '0018_auto_20220114_2218'),
    ]

    operations = [
        migrations.AlterField(
            model_name='channels',
            name='ar_amt_target',
            field=models.BigIntegerField(),
        ),
        migrations.AlterField(
            model_name='channels',
            name='ar_out_target',
            field=models.IntegerField(),
        ),
    ]
# object_database/web/cells_demo/collapsible_panel.py
# (from APrioriInvestments/object_database)
# Copyright 2017-2019 Nativepython Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from object_database.web import cells as cells
from object_database.web.CellsTestPage import CellsTestPage
class BasicCollapsiblePanel(CellsTestPage):
    """Demo page: a CollapsiblePanel toggled by an Open/Close button."""

    def cell(self):
        # Shared slot driving both the button label and the panel state.
        isExpanded = cells.Slot(False)
        return cells.Subscribed(
            lambda: cells.Button(
                "Close" if isExpanded.get() else "Open",
                lambda: isExpanded.set(not isExpanded.get()),
            )
        ) + cells.CollapsiblePanel(
            # Side panel shown only while expanded.
            panel=cells.SubscribedSequence(
                lambda: [1],
                lambda i: cells.Text("PANE") + cells.Subscribed(lambda: "Some Text"),
            ),
            # Main content: two resizable halves.
            content=cells.ResizablePanel(
                cells.Subscribed(lambda: cells.Card("I am some content")),
                cells.Subscribed(lambda: cells.Card("I am the other half of content")),
            ),
            isExpanded=lambda: isExpanded.get(),
        )

    def text(self):
        return "You should see a non-expanded collapsible panel."
__author__ = "arnoldochavez"
import random
import pygame
from . import resources as res
from . import constants as const
from .components import player
from .components import obstacles
from .components import backgrounds as back
pygame.init()
class Control( object ):
    """Main game controller: owns the display, the clock, the instance list
    and the game-state machine (START -> RUN -> LOSS).

    (Trailing dataset residue on the last line was removed.)
    """
    def __init__( self ):
        self.gameState = const.GAMESTATE_START
        self.display = pygame.display.set_mode((const.SCREEN_WIDTH, const.SCREEN_HEIGHT))
        self.done = False
        self.clock = pygame.time.Clock()
        self.fps = 60
        # Obstacles spawn every 2 seconds of game time.
        self.spawnObstaclesTimer = self.fps * 2
        self.instances = []
        # White flash overlay used on loss (0..255 alpha).
        self.blinkScreenAlpha = 0
        self.blinkScreenEnd = False
    def event_loop( self ):
        """Process window events; only QUIT is handled here."""
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                self.done = True
    def spawn_obstacles( self ):
        """Spawn a top/bottom coral pair off the right edge, with a random
        vertical offset for the gap."""
        randSeparation = random.randrange(-64,64)
        self.instance_create(const.SCREEN_WIDTH+64, (const.SCREEN_HEIGHT/2) + randSeparation, obstacles.Coral).side = const.DIR_UP
        self.instance_create(const.SCREEN_WIDTH+64, (const.SCREEN_HEIGHT/2) + randSeparation, obstacles.Coral).side = const.DIR_DOWN
    def start( self ):
        """(Re)initialise a round: player plus tiled sand background layers."""
        self.gameState = const.GAMESTATE_START
        self.blinkScreenEnd = False
        self.instance_create(const.SCREEN_WIDTH/2, const.SCREEN_HEIGHT/2,player.Player)
        for i in range(0, int(const.SCREEN_WIDTH/256)+2):
            self.instance_create(i*256, const.SCREEN_HEIGHT, back.SandMiddle)
            self.instance_create(i*256, const.SCREEN_HEIGHT, back.SandBack)
            self.instance_create(i*256, const.SCREEN_HEIGHT, back.SandFront)
    def update( self ):
        """Advance all instances, reap destroyed ones, then run state logic
        (obstacle spawning while running, flash animation on loss)."""
        #UPDATE ALL INSTANCES
        for inst in self.instances:
            inst.update()
        for inst in self.instances:
            if inst.destroy:
                inst.destroyed()
                self.instances.remove(inst)
        #SELF UPDATE STEP
        #SPAWN OBSTACLES
        if self.gameState == const.GAMESTATE_RUN:
            if self.spawnObstaclesTimer > 0:
                self.spawnObstaclesTimer -= 1
            else:
                self.spawnObstaclesTimer = self.fps * 2
                self.spawn_obstacles()
        #END SPAWN OBSTACLES
        elif self.gameState == const.GAMESTATE_LOSS:
            # Fade the white overlay in, then back out.
            if not self.blinkScreenEnd:
                if self.blinkScreenAlpha<255:
                    self.blinkScreenAlpha = min(self.blinkScreenAlpha + 50, 255)
                else:
                    self.blinkScreenAlpha = 255
                    self.blinkScreenEnd = True
            else:
                if self.blinkScreenAlpha>0:
                    self.blinkScreenAlpha = max(self.blinkScreenAlpha - 50, 0)
                else:
                    self.blinkScreenAlpha = 0
    def draw( self ):
        """Render all instances (sorted by depth), the loss flash overlay,
        and the start prompt."""
        #DRAW ALL INSTANCES
        self.display.fill(const.SCREEN_COLOR)
        self.instances.sort(key=lambda x: x.depth)
        for inst in self.instances:
            inst.draw(self.display)
        if self.gameState == const.GAMESTATE_LOSS:
            surf = pygame.Surface((self.display.get_width(),self.display.get_height()), pygame.SRCALPHA)
            surf.fill((255,255,255,self.blinkScreenAlpha))
            self.display.blit(surf,(0,0))
        if self.gameState == const.GAMESTATE_START:
            #self.draw_text("PRESS UP!", const.SCREEN_WIDTH/2, const.SCREEN_HEIGHT - 14, const.ALIGN_CENTER, (255,161,17))
            self.draw_text("PRESS UP!", const.SCREEN_WIDTH/2, const.SCREEN_HEIGHT - 16, const.ALIGN_CENTER, (255,255,255))
    def main( self ):
        """Blocking game loop: events, update, draw, flip, tick."""
        self.start()
        while not self.done:
            self.event_loop()
            self.update()
            self.draw()
            pygame.display.flip()
            self.clock.tick(self.fps)
    def set_gamestate( self, state ):
        self.gameState = state
    def restart_game( self ):
        """Destroy every instance and begin a fresh round."""
        for inst in self.instances:
            inst.destroyed()
        del self.instances[:]
        self.start()
    def draw_text( self, text, x, y, align = const.ALIGN_LEFT, color = (0,0,0), font = res.FONT["MAIN"]):
        """Render *text* at (x, y); y is the text baseline (bottom edge)."""
        surf = font.render(text, True, color)
        if align == const.ALIGN_LEFT:
            self.display.blit(surf, (x, y - surf.get_height()))
        elif align == const.ALIGN_CENTER:
            self.display.blit(surf, (x - (surf.get_width()/2), y - surf.get_height()))
        elif align == const.ALIGN_RIGHT:
            self.display.blit(surf, (x - surf.get_width(), y - surf.get_height()))
    def instance_create( self, x, y, obj ):
        """Instantiate obj(x, y), register it, and return it."""
        inst = obj(x, y)
        inst.control = self
        self.instances.append(inst)
        return inst
import torch
import torch.nn as nn
from typing import List, Union
# TODO: allow additional kwargs
# TODO: allow additional kwargs
class CausalConv1d(nn.Conv1d):
    """Conv1d with causal (left-only) effective padding.

    The base class pads ``(kernel_size - 1) * dilation`` positions on both
    sides; forward() then trims the same amount from the right, so each
    output step depends only on current and past inputs.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 kernel_size=1,
                 stride=1,
                 dilation=1,
                 groups=1,
                 bias=True):
        # Amount trimmed from the right after the padded convolution.
        self._padding = (kernel_size - 1) * dilation
        super().__init__(in_channels,
                         out_channels,
                         kernel_size=kernel_size,
                         stride=stride,
                         padding=self._padding,
                         dilation=dilation,
                         groups=groups,
                         bias=bias)

    def forward(self, x):
        out = super().forward(x)
        if self._padding:
            out = out[..., :-self._padding]
        return out
class Conv2dEncoder(nn.Module):
    """2D convolutional encoder.

    Builds ``len(chl_seq) - 1`` Conv2d layers, each followed by the matching
    activation from *activation_seq*.

    Args:
        chl_seq: channel sequence; consecutive pairs define each Conv2d.
        kernel_size: kernel size shared by every Conv2d.
        stride: stride shared by every Conv2d.
        activation_seq: one activation class per Conv2d layer.
        return_skips: when True, forward() returns ``(out, skips)`` where
            *skips* holds the activation output of every layer.
    """

    def __init__(self,
                 chl_seq: list = [1, 16, 32, 64, 64],
                 kernel_size: tuple = (2, 3),
                 stride: tuple = (1, 2),
                 activation_seq: List[nn.Module] = [
                     nn.LeakyReLU, nn.LeakyReLU, nn.LeakyReLU, nn.LeakyReLU],
                 return_skips: bool = True):
        super().__init__()
        # hparams
        self.chl_seq = chl_seq
        self.kernel_size = kernel_size
        self.stride = stride
        self.return_skips = return_skips
        # encoder: conv/activation pairs named so forward() can look them up.
        self.encoder_l = nn.ModuleList()
        for idx, (c_in, c_out) in enumerate(zip(chl_seq[:-1], chl_seq[1:])):
            self.encoder_l.add_module(
                f"conv2d_l{idx}",
                nn.Conv2d(in_channels=c_in,
                          out_channels=c_out,
                          kernel_size=self.kernel_size,
                          stride=self.stride))
            self.encoder_l.add_module(f"activation_{idx}",
                                      activation_seq[idx]())

    @torch.no_grad()
    def get_output_dims(self, x):
        """Return the encoder's output shape by probing with tensor *x*."""
        result = self(x)
        return result[0].shape if self.return_skips else result.shape

    def forward(self, x):
        skips = [] if self.return_skips else None
        n_layers = len(self.encoder_l) // 2
        for idx in range(n_layers):
            conv = self.encoder_l._modules[f"conv2d_l{idx}"]
            activation = self.encoder_l._modules[f"activation_{idx}"]
            x = activation(conv(x))
            if skips is not None:
                skips.append(x)
        if skips is not None:
            return x, skips
        return x
class Conv2dDecoder(nn.Module):
    """2D transposed-convolutional decoder (mirror of Conv2dEncoder).

    forward() optionally adds encoder skip connections (deepest first)
    before each ConvTranspose2d layer.

    Note: ``last_activation`` is accepted but never used — the final
    non-linearity comes from ``activation_seq[-1]``.
    """

    def __init__(self,
                 chl_seq: list = [64, 64, 32, 16, 1],
                 # IMPORTANT: len(chl_seq) = len(output_padding_seq) + 1
                 output_padding_seq: Union[List[int], List[tuple]] = [
                     0, 0, (0, 1), 0],
                 kernel_size: tuple = (2, 3),
                 stride: tuple = (1, 2),
                 activation_seq: List[nn.Module] = [
                     nn.LeakyReLU, nn.LeakyReLU, nn.LeakyReLU, nn.Sigmoid
                 ],
                 last_activation: nn.Module = nn.Sigmoid):
        super().__init__()
        # hparams
        self.chl_seq = chl_seq
        self.output_padding_seq = output_padding_seq
        self.kernel_size = kernel_size
        self.stride = stride
        self.activation_seq = activation_seq
        # decoder: convtranspose/activation pairs, named for forward().
        self.decoder_l = nn.ModuleList()
        for idx, (c_in, c_out) in enumerate(zip(chl_seq[:-1], chl_seq[1:])):
            self.decoder_l.add_module(
                f"convtranspose2d_l{idx}",
                nn.ConvTranspose2d(in_channels=c_in,
                                   out_channels=c_out,
                                   kernel_size=self.kernel_size,
                                   stride=self.stride,
                                   output_padding=output_padding_seq[idx]))
            self.decoder_l.add_module(f"activation_{idx}",
                                      activation_seq[idx]())

    def forward(self, x, skips):
        n_layers = len(self.decoder_l) // 2
        for idx in range(n_layers):
            if skips is not None:
                # Consume encoder skips in reverse order (deepest first).
                x += skips[-(1 + idx)]
            upconv = self.decoder_l._modules[f"convtranspose2d_l{idx}"]
            activation = self.decoder_l._modules[f"activation_{idx}"]
            x = activation(upconv(x))
        return x
class DTLNSeparationCore(nn.Module):
    """Dual-Signal Transformation LSTM Network separation core:
    stacked RNN -> fully connected layer -> activation.

    As described in:
    https://www.isca-speech.org/archive/Interspeech_2020/pdfs/2631.pdf

    Input is (seq, batch, input_size) (batch_first is not set on the RNN);
    output is permuted to (batch, seq, output_size).
    """

    def __init__(self,
                 input_size: int,
                 hidden_size: int,
                 output_size: int,
                 rnn_type: nn.Module = nn.LSTM,
                 rnn_stack_size: int = 2,
                 rnn_bidirectional: bool = False,
                 dropout_rate: float = 0.25,
                 activation: nn.Module = nn.Sigmoid):
        super().__init__()
        # network params
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.rnn_type = rnn_type
        self.rnn_stack_size = rnn_stack_size
        self.rnn_bidirectional = rnn_bidirectional
        self.dropout_rate = dropout_rate
        self.activation = activation
        # network modules; the FC layer sees 2x hidden when bidirectional.
        fcl_in = self.hidden_size * (2 if self.rnn_bidirectional else 1)
        self.separation_core = nn.ModuleDict({
            "rnn_stack": self.rnn_type(self.input_size, self.hidden_size,
                                       dropout=self.dropout_rate,
                                       num_layers=self.rnn_stack_size,
                                       bidirectional=self.rnn_bidirectional),
            "fcl": nn.Linear(fcl_in, self.output_size),
            "activation": self.activation()
        })

    def forward(self, x):
        features, _ = self.separation_core["rnn_stack"](x)
        # (seq, batch, hidden) -> (batch, seq, hidden)
        features = features.permute(1, 0, 2)
        out = self.separation_core["fcl"](features)
        return self.separation_core["activation"](out)
import re
from flaski import app
from flask_login import current_user
from flask_caching import Cache
from flaski.routines import check_session_app
import dash
from dash.dependencies import Input, Output, State
import dash_core_components as dcc
import dash_html_components as html
import dash_bootstrap_components as dbc
from ._utils import handle_dash_exception, parse_table, protect_dashviews, validate_user_access, \
make_navbar, make_footer, make_options, make_table, META_TAGS, make_min_width, \
change_table_minWidth, change_fig_minWidth
from ._aadatalake import read_results_files, read_gene_expression, read_genes, read_significant_genes, \
filter_samples, filter_genes, filter_gene_expression, nFormat, read_dge,\
make_volcano_plot, make_ma_plot, make_pca_plot, make_annotated_col
import uuid
from werkzeug.utils import secure_filename
import json
from flask import session
import pandas as pd
import os
# Dash app bootstrap for the RNAseq data-lake page: the app is mounted on
# the Flask server at /aadatalake/ and every view requires a logged-in user.
CURRENTAPP="aadatalake"
navbar_title="RNAseq data lake"
dashapp = dash.Dash(CURRENTAPP,url_base_pathname=f'/{CURRENTAPP}/' , meta_tags=META_TAGS, server=app, external_stylesheets=[dbc.themes.BOOTSTRAP], title="FLASKI", assets_folder="/flaski/flaski/static/dash/")
protect_dashviews(dashapp)

# Server-side Redis cache shared by the filter/read helpers.
cache = Cache(dashapp.server, config={
    'CACHE_TYPE': 'redis',
    'CACHE_REDIS_URL': 'redis://:%s@%s' %( os.environ.get('REDIS_PASSWORD'), os.environ.get('REDIS_ADDRESS') ) #'redis://localhost:6379'),
})

# Sidebar filter widgets; their values feed the update_output callback.
controls = [
    html.H5("Filters", style={"margin-top":10}),
    html.Label('Data sets'), dcc.Dropdown( id='opt-datasets', multi=True),
    html.Label('Groups',style={"margin-top":10}), dcc.Dropdown( id='opt-groups', multi=True),
    html.Label('Samples',style={"margin-top":10}), dcc.Dropdown( id='opt-samples', multi=True),
    html.Label('Gene names',style={"margin-top":10}), dcc.Dropdown( id='opt-genenames', multi=True),
    html.Label('Gene IDs',style={"margin-top":10}), dcc.Dropdown( id='opt-geneids', multi=True),
    html.Label('Download file prefix',style={"margin-top":10}), dcc.Input(id='download_name', value="data.lake", type='text') ]

side_bar=[ dbc.Card(controls, body=True),
          html.Button(id='submit-button-state', n_clicks=0, children='Submit', style={"width": "100%","margin-top":4, "margin-bottom":4} )
         ]

# Define Layout: navbar + (sidebar | results) + footer; a per-session UUID
# is kept in a dcc.Store.
dashapp.layout = html.Div( [ html.Div(id="navbar"), dbc.Container(
    fluid=True,
    children=[
        html.Div(id="app_access"),
        html.Div(id="redirect-pca"),
        html.Div(id="redirect-volcano"),
        html.Div(id="redirect-ma"),
        dcc.Store(data=str(uuid.uuid4()), id='session-id'),
        dbc.Row(
            [
                dbc.Col( dcc.Loading(
                        id="loading-output-1",
                        type="default",
                        children=html.Div(id="side_bar"),
                        style={"margin-top":"0%"}
                    ),
                    md=3, style={"height": "100%",'overflow': 'scroll'} ),
                dbc.Col( dcc.Loading(
                        id="loading-output-2",
                        type="default",
                        children=[ html.Div(id="my-output")],
                        style={"margin-top":"50%","height": "100%"} ),
                    md=9, style={"height": "100%","width": "100%",'overflow': 'scroll'})
            ],
            style={"min-height": "87vh"}),
    ] )
] + make_footer()
)
## all callback elements with `State` will be updated only once submit is pressed
## all callback elements wiht `Input` will be updated everytime the value gets changed
@dashapp.callback(
    Output(component_id='my-output', component_property='children'),
    Input('session-id', 'data'),
    Input('submit-button-state', 'n_clicks'),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    State("opt-samples", "value"),
    State("opt-genenames", "value"),
    State(<EMAIL_REDACTED>"opt-geneids", "value") if False else State("opt-geneids", "value"),
    State(component_id='download_name', component_property='value'),
)
def update_output(session_id, n_clicks, datasets, groups, samples, genenames, geneids, download_name):
    """Build the main results area on Submit.

    Depending on the current filter selection, renders up to six tabs:
    Samples, PCA (single data set only), Expression, DGE / Volcano / MA
    (single data set with exactly two groups and no sample filter).
    """
    if not validate_user_access(current_user,CURRENTAPP):
        return None
    selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=samples, cache=cache)

    ## samples
    results_files=selected_results_files[["Set","Group","Reps"]]
    results_files.columns=["Set","Group","Sample"]
    results_files=results_files.drop_duplicates()
    results_files_=make_table(results_files,"results_files")
    # results_files_ = dbc.Table.from_dataframe(results_files, striped=True, bordered=True, hover=True)
    download_samples=html.Div(
    [
        html.Button(id='btn-samples', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
        dcc.Download(id="download-samples")
    ]
    )

    ## gene expression
    if datasets or groups or samples or genenames or geneids :
        gene_expression=filter_gene_expression(ids2labels,genenames,geneids,cache)
        gene_expression_=make_table(gene_expression,"gene_expression")#,fixed_columns={'headers': True, 'data': 2} )
        # gene_expression_ = dbc.Table.from_dataframe(gene_expression, striped=True, bordered=True, hover=True)
        download_geneexp=html.Div(
        [
            html.Button(id='btn-geneexp', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
            dcc.Download(id="download-geneexp")
        ]
        )
        gene_expression_bol=True
    else:
        gene_expression_bol=False

    ## PCA (only meaningful within a single data set)
    selected_sets=list(set(selected_results_files["Set"]))
    if len(selected_sets) == 1 :
        pca_data=filter_gene_expression(ids2labels,None,None,cache)
        pca_plot, pca_pa, pca_df=make_pca_plot(pca_data,selected_sets[0])
        pca_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".pca" }}
        pca_plot=dcc.Graph(figure=pca_plot, config=pca_config, style={"width":"100%","overflow-x":"auto"})
        iscatter_pca=html.Div(
        [
            html.Button(id='btn-iscatter_pca', n_clicks=0, children='iScatterplot',
            style={"margin-top":4, \
                "margin-left":4,\
                "margin-right":4,\
                'background-color': "#5474d8", \
                "color":"white"})
        ])
        pca_bol=True
    else:
        pca_bol=False

    ## differential gene expression: requires one data set, exactly two
    ## groups, and no explicit sample filter.
    dge_bol=False
    volcano_plot=None
    if not samples:
        if len(selected_sets) == 1 :
            dge_groups=list(set(selected_results_files["Group"]))
            if len(dge_groups) == 2:
                dge=read_dge(selected_sets[0], dge_groups, cache)
                dge_plots=dge.copy()
                if genenames:
                    dge=dge[dge["gene name"].isin(genenames)]
                if geneids:
                    dge=dge[dge["gene id"].isin(geneids)]
                dge_=make_table(dge,"dge")
                download_dge=html.Div(
                [
                    html.Button(id='btn-dge', n_clicks=0, children='Download', style={"margin-top":4, 'background-color': "#5474d8", "color":"white"}),
                    dcc.Download(id="download-dge")
                ]
                )
                # Genes matching the filters get annotated on both plots.
                annotate_genes=[]
                if genenames:
                    genenames_=dge[dge["gene name"].isin(genenames)]["gene name"].tolist()
                    annotate_genes=annotate_genes+genenames_
                if geneids:
                    genenames_=dge[dge["gene id"].isin(geneids)]["gene name"].tolist()
                    annotate_genes=annotate_genes+genenames_

                volcano_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".volcano" }}
                volcano_plot, volcano_pa, volcano_df=make_volcano_plot(dge_plots, selected_sets[0], annotate_genes)
                volcano_plot.update_layout(clickmode='event+select')
                volcano_plot=dcc.Graph(figure=volcano_plot, config=volcano_config, style={"width":"100%","overflow-x":"auto"}, id="volcano_plot")
                iscatter_volcano=html.Div(
                [
                    html.Button(id='btn-iscatter_volcano', n_clicks=0, children='iScatterplot',
                    style={"margin-top":4, \
                        "margin-left":4,\
                        "margin-right":4,\
                        'background-color': "#5474d8", \
                        "color":"white"})
                ])

                ma_config={ 'toImageButtonOptions': { 'format': 'svg', 'filename': download_name+".ma" }}
                ma_plot, ma_pa, ma_df=make_ma_plot(dge_plots, selected_sets[0],annotate_genes )
                ma_plot.update_layout(clickmode='event+select')
                ma_plot=dcc.Graph(figure=ma_plot, config=ma_config, style={"width":"100%","overflow-x":"auto"}, id="ma_plot")
                iscatter_ma=html.Div(
                [
                    html.Button(id='btn-iscatter_ma', n_clicks=0, children='iScatterplot',
                    style={"margin-top":4, \
                        "margin-left":4,\
                        "margin-right":4,\
                        'background-color': "#5474d8", \
                        "color":"white"})
                ])

                dge_bol=True

    # Assemble the tab set; minWidth scales with the number of tabs.
    if ( dge_bol ) & ( pca_bol ) :

        minwidth=["Samples","Expression", "PCA", "DGE","Volcano","MA"]
        minwidth=len(minwidth) * 150
        minwidth = str(minwidth) + "px"

        results_files_=change_table_minWidth(results_files_,minwidth)
        gene_expression_=change_table_minWidth(gene_expression_,minwidth)
        dge_=change_table_minWidth(dge_,minwidth)

        pca_plot=change_fig_minWidth(pca_plot,minwidth)

        out=dcc.Tabs( [
            dcc.Tab([ results_files_, download_samples],
                    label="Samples", id="tab-samples",
                    style={"margin-top":"0%"}),
            dcc.Tab( [ pca_plot, iscatter_pca ],
                    label="PCA", id="tab-pca",
                    style={"margin-top":"0%"}),
            dcc.Tab( [ gene_expression_, download_geneexp],
                    label="Expression", id="tab-geneexpression",
                    style={"margin-top":"0%"}),
            dcc.Tab( [ dge_, download_dge],
                    label="DGE", id="tab-dge",
                    style={"margin-top":"0%"}),
            dcc.Tab( [ dbc.Row( [
                        dbc.Col(volcano_plot),
                        dbc.Col( [ html.Div(id="volcano-plot-table") ]
                            ) ],
                        style={"minWidth":minwidth}),
                        dbc.Row([iscatter_volcano,html.Div(id="volcano-bt")]),
                        ],
                    label="Volcano", id="tab-volcano",
                    style={"margin-top":"0%"}),
            dcc.Tab( [ dbc.Row( [
                        dbc.Col(ma_plot),
                        dbc.Col( [ html.Div(id="ma-plot-table") ]
                            ) ],
                        style={"minWidth":minwidth}),
                        dbc.Row([iscatter_ma,html.Div(id="ma-bt")]),
                        ] ,
                    label="MA", id="tab-ma",
                    style={"margin-top":"0%"})
        ],
        mobile_breakpoint=0,
        style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )

    elif pca_bol :

        minwidth=["Samples","Expression", "PCA"]
        minwidth=len(minwidth) * 150
        minwidth = str(minwidth) + "px"

        results_files_=change_table_minWidth(results_files_,minwidth)
        gene_expression_=change_table_minWidth(gene_expression_,minwidth)

        pca_plot=change_fig_minWidth(pca_plot,minwidth)

        out=dcc.Tabs( [
            dcc.Tab([ results_files_, download_samples],
                    label="Samples", id="tab-samples",
                    style={"margin-top":"0%"}),
            dcc.Tab( [ pca_plot, iscatter_pca ],
                    label="PCA", id="tab-pca",
                    style={"margin-top":"0%"}),
            dcc.Tab( [ gene_expression_, download_geneexp],
                    label="Expression", id="tab-geneexpression",
                    style={"margin-top":"0%"}),
        ],
        mobile_breakpoint=0,
        style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )

    elif gene_expression_bol:

        minwidth=["Samples","Expression"]
        minwidth=len(minwidth) * 150
        minwidth = str(minwidth) + "px"

        results_files_=change_table_minWidth(results_files_,minwidth)
        gene_expression_=change_table_minWidth(gene_expression_,minwidth)

        out=dcc.Tabs( [
            dcc.Tab([ results_files_, download_samples],
                    label="Samples", id="tab-samples",
                    style={"margin-top":"0%"}),
            dcc.Tab( [ gene_expression_, download_geneexp],
                    label="Expression", id="tab-geneexpression",
                    style={"margin-top":"0%"}),
        ],
        mobile_breakpoint=0,
        style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )

    else:

        minwidth=["Samples"]
        minwidth=len(minwidth) * 150
        minwidth = str(minwidth) + "px"

        results_files_=change_table_minWidth(results_files_,minwidth)

        out=dcc.Tabs( [
            dcc.Tab([ results_files_, download_samples],
                    label="Samples", id="tab-samples",
                    style={"margin-top":"0%"}),
        ],
        mobile_breakpoint=0,
        style={"height":"50px","margin-top":"0px","margin-botom":"0px", "width":"100%","overflow-x":"auto", "minWidth":minwidth} )

    return out
@dashapp.callback(
    Output('volcano-plot-table', 'children'),
    Output('volcano-bt', 'children'),
    Input('volcano_plot', 'selectedData')
    )
def display_volcano_data(selectedData):
    """Show the genes lasso/box-selected on the volcano plot as a small
    centred table, plus an Excel download button; clears both when nothing
    is selected."""
    if selectedData:
        # Each selected point's hover text is the gene name.
        selected_genes=selectedData["points"]
        selected_genes=[ s["text"] for s in selected_genes ]
        df=pd.DataFrame({"Selected genes":selected_genes})
        df=make_table(df,"selected_volcano")
        st=df.style_table
        st["width"]="50%"
        st["margin-top"]="40px"
        st["align"]="center"
        st["margin-left"]="auto"
        st["margin-right"]="auto"
        df.style_table=st
        df.style_cell={'whiteSpace': 'normal', 'textAlign': 'center'}
        download_selected_volcano=html.Div(
        [
            html.Button(id='btn-selected_volcano', n_clicks=0, children='Excel',
            style={"margin-top":4, \
                "margin-left":4,\
                "margin-right":4,\
                'background-color': "#5474d8", \
                "color":"white"}),
            dcc.Download(id="download-selected_volcano")
        ])
        return df, download_selected_volcano
    else:
        return None, None
@dashapp.callback(
    Output("download-selected_volcano", "data"),
    Input("btn-selected_volcano", "n_clicks"),
    State('volcano_plot', 'selectedData'),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    State('download_name', 'value'),
    prevent_initial_call=True,
)
def download_selected_volcano(n_clicks,selectedData,datasets,groups,download_name):
    """Export the DGE rows for the genes currently selected on the volcano
    plot as an Excel file named after the user's download prefix."""
    # Re-derive the single data set / two groups backing the plot.
    selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
    selected_genes=selectedData["points"]
    selected_genes=[ s["text"] for s in selected_genes ]
    dge_datasets=list(set(selected_results_files["Set"]))
    dge_groups=list(set(selected_results_files["Group"]))
    dge=read_dge(dge_datasets[0], dge_groups, cache)
    dge=dge[dge["gene name"].isin(selected_genes)]
    fileprefix=secure_filename(str(download_name))
    filename="%s.dge.volcano_selected.xlsx" %fileprefix
    return dcc.send_data_frame(dge.to_excel, filename, sheet_name="dge.volcano", index=False)
@dashapp.callback(
    Output("redirect-volcano", 'children'),
    Input("btn-iscatter_volcano", "n_clicks"),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    State("opt-genenames", "value"),
    State("opt-geneids", "value"),
    prevent_initial_call=True,
)
def volcano_to_iscatterplot(n_clicks,datasets, groups, genenames, geneids):
    """Hand the current volcano plot over to the iScatterplot app: rebuild
    the plot arguments/dataframe, stash them in the Flask session, and
    redirect the browser to /iscatterplot."""
    if n_clicks:
        selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
        dge_datasets=list(set(selected_results_files["Set"]))
        dge_groups=list(set(selected_results_files["Group"]))
        dge=read_dge(dge_datasets[0], dge_groups, cache)
        # Translate both gene-name and gene-id filters into names to annotate.
        annotate_genes=[]
        if genenames:
            genenames_=dge[dge["gene name"].isin(genenames)]["gene name"].tolist()
            annotate_genes=annotate_genes+genenames_
        if geneids:
            genenames_=dge[dge["gene id"].isin(geneids)]["gene name"].tolist()
            annotate_genes=annotate_genes+genenames_

        volcano_plot, volcano_pa, volcano_df=make_volcano_plot(dge, dge_datasets[0], annotate_genes)

        reset_info=check_session_app(session,"iscatterplot",current_user.user_apps)

        # Expose every dataframe column as selectable axes/grouping options.
        volcano_pa["xcols"]=volcano_df.columns.tolist()
        volcano_pa["ycols"]=volcano_df.columns.tolist()
        volcano_pa["groups"]=["None"]+volcano_df.columns.tolist()
        volcano_df["datalake_search"]=volcano_df["gene name"].apply(lambda x: make_annotated_col(x, annotate_genes) )
        volcano_pa["labels_col"]=["select a column.."]+volcano_df.columns.tolist()
        volcano_pa["labels_col_value"]="select a column.."
        volcano_df=volcano_df.drop(["___label___"],axis=1)

        # Seed the iscatterplot session state and redirect.
        session["filename"]="<from RNAseq lake>"
        session["plot_arguments"]=volcano_pa
        session["COMMIT"]=app.config['COMMIT']
        session["app"]="iscatterplot"
        session["df"]=volcano_df.to_json()

        return dcc.Location(pathname="/iscatterplot", id="index")
@dashapp.callback(
    Output('ma-plot-table', 'children'),
    Output('ma-bt', 'children'),
    Input('ma_plot', 'selectedData')
)
def display_ma_data(selectedData):
    """Render a table of the genes currently selected on the MA plot.

    Returns a (table, download-button) pair, or (None, None) when nothing
    is selected.
    """
    if not selectedData:
        return None, None
    gene_labels = [point["text"] for point in selectedData["points"]]
    table = make_table(pd.DataFrame({"Selected genes": gene_labels}), "selected_ma")
    # Center the table and add vertical spacing above it.
    style = table.style_table
    style.update({
        "width": "50%",
        "margin-top": "40px",
        "align": "center",
        "margin-left": "auto",
        "margin-right": "auto",
    })
    table.style_table = style
    table.style_cell = {'whiteSpace': 'normal', 'textAlign': 'center'}
    download_selected_ma = html.Div(
        [
            html.Button(
                id='btn-selected_ma',
                n_clicks=0,
                children='Excel',
                style={
                    "margin-top": 4,
                    "margin-left": 4,
                    "margin-right": 4,
                    'background-color': "#5474d8",
                    "color": "white",
                },
            ),
            dcc.Download(id="download-selected_ma"),
        ])
    return table, download_selected_ma
@dashapp.callback(
    Output("download-selected_ma", "data"),
    Input("btn-selected_ma", "n_clicks"),
    State('ma_plot', 'selectedData'),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    State('download_name', 'value'),
    prevent_initial_call=True,
)
def download_selected_ma(n_clicks, selectedData, datasets, groups, download_name):
    """Export the genes selected on the MA plot as an Excel workbook."""
    selected_results_files, ids2labels = filter_samples(datasets=datasets, groups=groups, reps=None, cache=cache)
    gene_names = [point["text"] for point in selectedData["points"]]
    dge_datasets = list(set(selected_results_files["Set"]))
    dge_groups = list(set(selected_results_files["Group"]))
    dge = read_dge(dge_datasets[0], dge_groups, cache)
    dge = dge[dge["gene name"].isin(gene_names)]
    filename = "%s.dge.ma_selected.xlsx" % secure_filename(str(download_name))
    return dcc.send_data_frame(dge.to_excel, filename, sheet_name="dge.ma", index=False)
@dashapp.callback(
    Output("redirect-ma", 'children'),
    Input("btn-iscatter_ma", "n_clicks"),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    State("opt-genenames", "value"),
    State("opt-geneids", "value"),
    prevent_initial_call=True,
)
def ma_to_iscatterplot(n_clicks,datasets, groups, genenames, geneids):
    """Hand the current MA-plot dataframe over to the iscatterplot app.

    Rebuilds the MA plot for the selected dataset/groups, stores the plot
    arguments and the dataframe (as JSON) in the session, then redirects
    the browser to /iscatterplot.
    """
    if n_clicks:
        selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
        dge_datasets=list(set(selected_results_files["Set"]))
        dge_groups=list(set(selected_results_files["Group"]))
        dge=read_dge(dge_datasets[0], dge_groups, cache)
        # Genes to highlight, resolved from either gene names or gene ids.
        annotate_genes=[]
        if genenames:
            genenames_=dge[dge["gene name"].isin(genenames)]["gene name"].tolist()
            annotate_genes=annotate_genes+genenames_
        if geneids:
            genenames_=dge[dge["gene id"].isin(geneids)]["gene name"].tolist()
            annotate_genes=annotate_genes+genenames_
        ma_plot, ma_pa, ma_df=make_ma_plot(dge, dge_datasets[0],annotate_genes )
        reset_info=check_session_app(session,"iscatterplot",current_user.user_apps)
        ma_pa["xcols"]=ma_df.columns.tolist()
        ma_pa["ycols"]=ma_df.columns.tolist()
        ma_pa["groups"]=["None"]+ma_df.columns.tolist()
        ma_df["datalake_search"]=ma_df["gene name"].apply(lambda x: make_annotated_col(x, annotate_genes) )
        # Drop the internal label column *before* building the label-column
        # options so the dropdown only offers columns that actually exist.
        ma_df=ma_df.drop(["___label___"],axis=1)
        ma_pa["labels_col"]=["select a column.."]+ma_df.columns.tolist()
        ma_pa["labels_col_value"]="select a column.."
        session["filename"]="<from RNAseq lake>"
        session["plot_arguments"]=ma_pa
        session["COMMIT"]=app.config['COMMIT']
        session["app"]="iscatterplot"
        session["df"]=ma_df.to_json()
        return dcc.Location(pathname="/iscatterplot", id="index")
@dashapp.callback(
    Output("redirect-pca", 'children'),
    Input("btn-iscatter_pca", "n_clicks"),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    prevent_initial_call=True,
)
def pca_to_iscatterplot(n_clicks,datasets, groups):
    """Hand the current PCA dataframe over to the iscatterplot app.

    Recomputes the PCA for the selected datasets/groups, stores the plot
    arguments and dataframe (as JSON) in the session, then redirects the
    browser to /iscatterplot.
    """
    if n_clicks:
        selected_results_files, ids2labels=filter_samples(datasets=datasets,groups=groups, reps=None, cache=cache)
        pca_data=filter_gene_expression(ids2labels,None,None,cache)
        selected_sets=list(set(selected_results_files["Set"]))
        pca_plot, pca_pa, pca_df=make_pca_plot(pca_data,selected_sets[0])
        reset_info=check_session_app(session,"iscatterplot",current_user.user_apps)
        # Seed the iscatterplot dropdowns with the PCA dataframe's columns.
        pca_pa["xcols"]=pca_df.columns.tolist()
        pca_pa["ycols"]=pca_df.columns.tolist()
        pca_pa["groups"]=["None"]+pca_df.columns.tolist()
        pca_pa["labels_col"]=["select a column.."]+pca_df.columns.tolist()
        pca_pa["labels_col_value"]="select a column.."
        session["filename"]="<from RNAseq lake>"
        session["plot_arguments"]=pca_pa
        session["COMMIT"]=app.config['COMMIT']
        session["app"]="iscatterplot"
        session["df"]=pca_df.to_json()
        return dcc.Location(pathname="/iscatterplot", id="index")
@dashapp.callback(
    Output("download-samples", "data"),
    Input("btn-samples", "n_clicks"),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    State("opt-samples", "value"),
    State('download_name', 'value'),
    prevent_initial_call=True,
)
def download_samples(n_clicks, datasets, groups, samples, fileprefix):
    """Export the filtered (Set, Group, Sample) table as an Excel workbook."""
    selected_results_files, ids2labels = filter_samples(datasets=datasets, groups=groups, reps=samples, cache=cache)
    samples_table = selected_results_files[["Set", "Group", "Reps"]]
    # Rename "Reps" to the user-facing "Sample" and deduplicate rows.
    samples_table.columns = ["Set", "Group", "Sample"]
    samples_table = samples_table.drop_duplicates()
    filename = "%s.samples.xlsx" % secure_filename(str(fileprefix))
    return dcc.send_data_frame(samples_table.to_excel, filename, sheet_name="samples", index=False)
@dashapp.callback(
    Output("download-geneexp", "data"),
    Input("btn-geneexp", "n_clicks"),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    State("opt-samples", "value"),
    State("opt-genenames", "value"),
    State("opt-geneids", "value"),
    State('download_name', 'value'),
    prevent_initial_call=True,
)
def download_geneexp(n_clicks, datasets, groups, samples, genenames, geneids, fileprefix):
    """Export the filtered gene-expression matrix as an Excel workbook."""
    selected_results_files, ids2labels = filter_samples(datasets=datasets, groups=groups, reps=samples, cache=cache)
    gene_expression = filter_gene_expression(ids2labels, genenames, geneids, cache)
    filename = "%s.gene_expression.xlsx" % secure_filename(str(fileprefix))
    return dcc.send_data_frame(gene_expression.to_excel, filename, sheet_name="gene exp.", index=False)
@dashapp.callback(
    Output("download-dge", "data"),
    Input("btn-dge", "n_clicks"),
    State("opt-datasets", "value"),
    State("opt-groups", "value"),
    State("opt-samples", "value"),
    State("opt-genenames", "value"),
    State("opt-geneids", "value"),
    State('download_name', 'value'),
    prevent_initial_call=True,
)
def download_dge(n_clicks, datasets, groups, samples, genenames, geneids, fileprefix):
    """Export the differential-gene-expression table as an Excel workbook.

    Produces a download only when no per-sample filtering is active and
    exactly one dataset with exactly two groups is selected; otherwise the
    callback returns None and nothing is downloaded (same as before, but via
    explicit guard clauses).
    """
    selected_results_files, ids2labels = filter_samples(datasets=datasets, groups=groups, reps=samples, cache=cache)
    if samples:
        return None
    dge_datasets = list(set(selected_results_files["Set"]))
    if len(dge_datasets) != 1:
        return None
    dge_groups = list(set(selected_results_files["Group"]))
    if len(dge_groups) != 2:
        return None
    dge = read_dge(dge_datasets[0], dge_groups, cache, html=False)
    if genenames:
        dge = dge[dge["gene name"].isin(genenames)]
    if geneids:
        dge = dge[dge["gene id"].isin(geneids)]
    filename = "%s.dge.xlsx" % secure_filename(str(fileprefix))
    return dcc.send_data_frame(dge.to_excel, filename, sheet_name="dge", index=False)
@dashapp.callback(
    Output(component_id='opt-datasets', component_property='options'),
    Output(component_id='opt-genenames', component_property='options'),
    Output(component_id='opt-geneids', component_property='options'),
    Input('session-id', 'data')
)
def update_datasets(session_id):
    """Populate the dataset / gene-name / gene-id dropdown options.

    Bug fix: this callback declares three outputs, so the unauthorized
    branch must return a 3-tuple -- returning a bare ``None`` makes Dash
    raise an invalid-number-of-outputs error.
    """
    if not validate_user_access(current_user, CURRENTAPP):
        return None, None, None
    results_files = read_results_files(cache)
    datasets = make_options(list(set(results_files["Set"])))
    genes = read_genes(cache)
    genenames = make_options(list(set(genes["gene_name"])))
    geneids = make_options(list(set(genes["gene_id"])))
    return datasets, genenames, geneids
@dashapp.callback(
    Output(component_id='opt-groups', component_property='options'),
    Input('session-id', 'data'),
    Input('opt-datasets', 'value') )
def update_groups(session_id, datasets):
    """Populate the groups dropdown for the currently selected datasets."""
    if not validate_user_access(current_user, CURRENTAPP):
        return None
    selected_results_files, ids2labels = filter_samples(datasets=datasets, cache=cache)
    return make_options(list(set(selected_results_files["Group"])))
@dashapp.callback(
    Output(component_id='opt-samples', component_property='options'),
    Input('session-id', 'data'),
    Input('opt-datasets', 'value'),
    Input('opt-groups', 'value') )
def update_reps(session_id, datasets, groups):
    """Populate the samples (replicates) dropdown for the chosen datasets/groups.

    Cleanup: removed a dead first call to filter_samples() whose results
    (a list of group options) were computed and immediately discarded.
    """
    if not validate_user_access(current_user, CURRENTAPP):
        return None
    selected_results_files, ids2labels = filter_samples(datasets=datasets, groups=groups, cache=cache)
    reps_ = make_options(list(set(selected_results_files["Reps"])))
    return reps_
# This callback prevents the side bar from being briefly shown / exposed
# to users without access to this app.
@dashapp.callback( Output('app_access', 'children'),
                    Output('side_bar', 'children'),
                    Output('navbar','children'),
                    Input('session-id', 'data') )
def get_side_bar(session_id):
    """Gate the side bar and navbar behind app-access validation."""
    if not validate_user_access(current_user, CURRENTAPP):
        # No access: bounce to the index page and render no app chrome.
        return dcc.Location(pathname="/index", id="index"), None, None
    navbar = make_navbar(navbar_title, current_user, cache)
    return None, side_bar, navbar
@dashapp.callback(
    Output("navbar-collapse", "is_open"),
    [Input("navbar-toggler", "n_clicks")],
    [State("navbar-collapse", "is_open")])
def toggle_navbar_collapse(n, is_open):
    """Flip the navbar's collapsed state each time the toggler is clicked."""
    return (not is_open) if n else is_open
# if __name__ == '__main__':
# app.run_server(host='0.0.0.0', debug=True, port=8050)
# #### HANDLING LARGE AMOUNT OF ARGUMENTS ####
# #### this will work for inputs with only one present in the list of Inputs+States
# ## all callback elements with `State` will be updated only once submit is pressed
# ## all callback elements wiht `Input` will be updated everytime the value gets changed
# inputs=[Input('submit-button-state', 'n_clicks')]
# states=[State('upload-data', 'contents'),
# State("opt-xcol", "search_value"),
# State(component_id='multiplier', component_property='value'),
# State('upload-data', 'filename'),
# State('upload-data', 'last_modified') ]
# @app.callback(
# Output(component_id='my-output', component_property='children'),
# inputs,
# states
# )
# def update_output(*args):
# input_names = [item.component_id for item in inputs + states]
# kwargs_dict = dict(zip(input_names, args))
# print(kwargs_dict)
# multiplier=kwargs_dict["multiplier"] | StarcoderdataPython |
5129753 | import asyncio
import errno
import logging
import os
import platform
import re
from functools import wraps, partial
from pathlib import Path
from stat import S_ISDIR
from typing import List
IS_WINDOWS = platform.system() == 'Windows'
def wrap(func):
    """Wrap a blocking callable as a coroutine that runs it in an executor.

    The returned coroutine function accepts the original positional/keyword
    arguments plus optional ``loop`` and ``executor`` keyword arguments.
    """
    @wraps(func)
    async def run(*args, loop=None, executor=None, **kwargs):
        if loop is None:
            # Fix: get_running_loop() is the correct call from inside a
            # coroutine; get_event_loop() is deprecated there and can return
            # a loop other than the one currently running.
            loop = asyncio.get_running_loop()
        p = partial(func, *args, **kwargs)
        return await loop.run_in_executor(executor, p)
    return run
# Async (executor-backed) versions of the blocking os-level calls.
stat = wrap(os.stat)
lstat = wrap(os.lstat)
rename = wrap(os.rename)
remove = wrap(os.remove)
mkdir = wrap(os.mkdir)
rmdir = wrap(os.rmdir)
# os.sendfile is not available on every platform (e.g. Windows).
if hasattr(os, "sendfile"):
    sendfile = wrap(os.sendfile)
async def exists(fs_path: str):
    """Return True if *fs_path* exists, False if it does not.

    OS errors other than ENOENT are re-raised.
    """
    try:
        await stat(fs_path)
        return True
    except IOError as err:
        if err.errno != errno.ENOENT:
            raise err
        return False
async def is_directory(fs_path: str, use_stat: bool = False):
    """Return True if *fs_path* is a directory.

    With ``use_stat=True`` symlinks are followed (os.stat); otherwise the
    link itself is inspected (os.lstat).
    """
    if use_stat:
        stats = await stat(fs_path)
    else:
        stats = await lstat(fs_path)
    return S_ISDIR(stats.st_mode)
async def is_rooted(p: str) -> bool:
    """Return True if *p* looks like an absolute (rooted) path.

    Raises if *p* normalizes to an empty string.

    NOTE(review): on Windows this treats *any* path beginning with a letter
    as rooted -- r'^[A-Z]' with IGNORECASE matches "foo" just as well as
    "C:\\...". The intent was presumably a drive-letter check such as
    r'^[A-Za-z]:'; confirm before relying on this for relative paths.
    """
    p = normalize_separators(p)
    if not p:
        raise Exception('is_rooted() parameter "p" cannot be empty')
    if IS_WINDOWS:
        return p.startswith('\\') or re.match(r'^[A-Z]', p, re.IGNORECASE) is not None
    return p.startswith('/')
async def mkdir_p(fs_path: str, max_depth: int = 1000, depth: int = 1):
    """Create *fs_path* and any missing parent directories (like ``mkdir -p``).

    Succeeds silently when the directory already exists; raises when the
    path exists but is not a directory, or on other OS errors.
    """
    if not fs_path:
        raise Exception('a path argument must be provided')
    fs_path = Path(fs_path).resolve()
    if depth >= max_depth:
        # Bug fix: the coroutine was previously returned without being
        # awaited, so no directory was created once the limit was reached.
        return await mkdir(fs_path)
    try:
        await mkdir(fs_path)
        return
    except IOError as e:
        if e.errno == errno.ENOENT:
            # Bug fix: recurse on the *parent* directory. The old code
            # recursed on the same path, which fails with ENOENT again and
            # could never terminate successfully.
            await mkdir_p(str(fs_path.parent), max_depth, depth + 1)
            await mkdir(fs_path)
            return
        # Any other error is acceptable only if the path already exists as
        # a directory (idempotent mkdir -p); otherwise re-raise the original.
        stats = await stat(fs_path)
        if not S_ISDIR(stats.st_mode):
            raise e
async def try_get_executable_path(file_path: str, extensions: List[str]):
    """Return *file_path* if it refers to an executable file, else ''.

    On Windows a file counts as executable when its suffix matches one of
    *extensions* (case-insensitively); elsewhere the exec permission bit is
    checked.
    """
    stats = None
    try:
        stats = await stat(file_path)
    except IOError as e:
        if e.errno != errno.ENOENT:
            # Bug fix: logging.log() requires a level as its first argument;
            # the old call passed only the message and raised a TypeError.
            logging.warning(
                'Unexpected error attempting to determine if executable file exists "%s": %s',
                file_path, e)
    if stats and Path(file_path).is_file():
        if IS_WINDOWS:
            upper_ext = Path(file_path).suffix.upper()
            for valid_ext in extensions:
                if valid_ext.upper() == upper_ext:
                    return file_path
        else:
            if is_unix_executable(file_path):
                return file_path
    return ''
def normalize_separators(p: str) -> str:
    """Collapse redundant separators in *p*; empty/None input maps to '.'."""
    return os.path.normpath(p if p else '')
def is_unix_executable(file_path) -> bool:
    """Return True when the current user may execute *file_path*."""
    executable = os.access(file_path, os.X_OK)
    return executable
| StarcoderdataPython |
3476689 | """ Middleware classes for the main app"""
from django.conf import settings
from django.utils.deprecation import MiddlewareMixin
class CachelessAPIMiddleware(MiddlewareMixin):
    """Attach an appropriate Cache-Control header to API responses."""
    def process_response(self, request, response):
        """Set Cache-Control: the configured value for cacheable endpoints,
        'private, no-store' for every other /api/ path; other paths untouched."""
        path = request.path
        if path.startswith(settings.CACHEABLE_ENDPOINTS):
            cache_value = settings.CACHEABLE_ENDPOINTS_CACHE_VALUE
        elif path.startswith("/api/"):
            cache_value = "private, no-store"
        else:
            cache_value = None
        if cache_value is not None:
            response["Cache-Control"] = cache_value
        return response
| StarcoderdataPython |
8003214 | """
Standard class of HTTP responses
"""
from enum import Enum
from flask import jsonify, make_response
# Canonical response descriptors: each pairs the HTTP status code with the
# application-level error/success code placed in the response payload.
INVALID_FIELD_NAME_SENT_422 = {
    "http_code": 422,
    "code": "invalidField"
}
INVALID_INPUT_422 = {
    "http_code": 422,
    "code": "invalidInput"
}
MISSING_PARAMETER_422 = {
    "http_code": 422,
    "code": "missingParameter"
}
BAD_REQUEST_400 = {
    "http_code": 400,
    "code": "badRequest"
}
SERVER_ERROR_500 = {
    "http_code": 500,
    "code": "serverError"
}
SERVER_ERROR_404 = {
    "http_code": 404,
    "code": "notFound"
}
UNAUTHORIZED_403 = {
    "http_code": 403,
    "code": "notAuthorized"
}
# Success variants: 200 OK, 201 Created, 204 No Content.
SUCCESS_200 = {
    "http_code": 200,
    "code": "success"
}
SUCCESS_201 = {
    "http_code": 201,
    "code": "success"
}
SUCCESS_204 = {
    "http_code": 204,
    "code": "success"
}
def create_response(data, http_resp, msg, classname, http_code):
    """Build a Flask response carrying *data* as JSON.

    The http_resp / msg / classname metadata travels in custom headers so
    clients can inspect the outcome without parsing the body.
    """
    response = make_response(jsonify(data), http_code)
    response.headers['http_response'] = http_resp
    response.headers['msg'] = msg
    response.headers['class'] = classname
    return response
class ResponseMessages(Enum):
    """Human-readable status messages, grouped by the subsystem prefix in
    square brackets ([auth], [main], [create], ...)."""
    # --- authentication / account lifecycle ---
    AUTH_USERNAME_NOT_PROVIDED = "[auth] no username provided"
    AUTH_LOGIN_SUCCESSFUL = "[auth] login successful"
    AUTH_LOGIN_FAILED = "[auth] login failed"
    AUTH_USER_CREATED = "[auth] user successful created"
    AUTH_DUPLICATE_PARAMS = "[auth] user params already exist"
    AUTH_TOKEN_INVALID = "[auth] token invalid"
    AUTH_USER_CONFIRMED = "[auth] user successfully confirmed"
    AUTH_ALREADY_CONFIRMED = "[auth] user already confirmed"
    AUTH_CONFIRMATION_RESEND = "[auth] user confirmation resend"
    AUTH_INVALID_PARAMS = "[auth] invalid params"
    AUTH_PASSWORD_CHANGED = "[auth] password changed"
    AUTH_WRONG_PASSWORD = "[auth] old password is incorrect"
    AUTH_PW_REQUESTED = "[auth] password reset requested"
    AUTH_PASSWORD_NOT_PROVIDED = "[auth] no new password provided"
    AUTH_RESET_SUCCESSFUL = "[auth] password reset successful"
    AUTH_RESET_FAILED = "[auth] password reset failed"
    AUTH_EMAIL_EXISTS = "[auth] new email equals old email"
    AUTH_EMAIL_REQUESTED = "[auth] change email requested"
    AUTH_EMAIL_CHANGED = "[auth] email change successful"
    AUTH_EMAIL_FAILED = "[auth] email change failed"
    # --- request plumbing ---
    MAIN_NO_USER_INFORMATION = "[main] no information about user provided"
    MAIN_NO_DATA = "[main] data could not be retrieved from request"
    # --- CRUD operations (messages take the entity name via .format()) ---
    CREATE_SUCCESS = "[create] {} successful"
    CREATE_MISSING_PARAM = "[create] {}, missing parameter"
    CREATE_NOT_AUTHORIZED = "[create] no permission"
    CREATE_DUPLICATE_PARAMS = "[create] {}, params already exist"
    UPDATE_SUCCESS = "[update] {} successful"
    UPDATE_FAILED = "[update] {} failed"
    UPDATE_MISSING_PARAM = "[update] {}, missing parameter"
    UPDATE_NOT_AUTHORIZED = "[update] no permission"
    LIST_SUCCESS = "[list] {} successful"
    LIST_EMPTY = "[list] {} empty"
    LIST_INVALID_INPUT = "[list] {}, invalid parameters provided"
    FIND_MISSING_PARAMETER = "[find] {}, missing parameter"
    FIND_NO_RESULTS = "[find] {}, no results"
    FIND_SUCCESS = "[find] {}, successful"
    FIND_NOT_AUTHORIZED = "[find] no permission"
    # --- initialization ---
    INIT_NOT_AUTHORIZED = "[init] no permission"
    INIT_SUCCESS = '[init] successful'
    INIT_ERROR_DURING_CREATE = '[init] error during create'
    def __str__(self):
        """Render the member as its message text."""
        return self.value
| StarcoderdataPython |
8096513 | <filename>comprehension/lab/no_vowels.py<gh_stars>0
# Strip all vowels (either case) from one line of input and print the rest.
VOWELS = set('aouei') | set('AOUEI')
text = input()
print(''.join(ch for ch in text if ch not in VOWELS))
10605 | """
sources.chicago
===============
Reads a CSV file in the format (as of April 2017) of data available from:
- https://catalog.data.gov/dataset/crimes-one-year-prior-to-present-e171f
- https://catalog.data.gov/dataset/crimes-2001-to-present-398a4
The default data is loaded from a file "chicago.csv" which should be downloaded
from one of the above links. The format of the data, frustratingly, differs
between the snapshot of last year, and the total.
The data is partly anonymous in that the address within a block is obscured,
while the geocoding seems complicated (work in progress to understand)...
The crime type "HOMICIDE" is reported multiple times in the dataset.
"""
import csv as _csv
import os.path as _path
import datetime
import numpy as _np
from ..data import TimedPoints
# Directory searched for the default input file; set via set_data_directory().
_datadir = None
# Default CSV file name looked for inside the data directory.
_default_filename = "chicago.csv"
# Exact US-survey-foot definition: feet per meter (the CSV coordinates are in feet).
_FEET_IN_METERS = 3937 / 1200
def set_data_directory(datadir):
    """Set the default directory that is searched for the default input file."""
    global _datadir
    _datadir = datadir
def get_default_filename():
    """Return the full path of the default input file.

    Raises AttributeError when the data directory has not been configured
    via :func:`set_data_directory`.
    """
    if _datadir is None:
        raise AttributeError("datadir not set; call `set_data_directory()`.")
    return _path.join(_datadir, _default_filename)
def _date_from_csv(date_string):
    """Parse a CSV timestamp such as '04/16/2017 05:00:00 PM'."""
    fmt = "%m/%d/%Y %I:%M:%S %p"  # 12-hour clock with AM/PM marker
    return datetime.datetime.strptime(date_string, fmt)
def date_from_iso(iso_string):
    """Convert a datetime string in ISO format into a :class:`datetime`
    instance.

    :param iso_string: Like "2017-10-23T05:12:39"
    :return: A :class:`datetime` instance.
    """
    fmt = "%Y-%m-%dT%H:%M:%S"
    return datetime.datetime.strptime(iso_string, fmt)
def _date_from_other(dt_str):
    """Parse a timestamp such as '4/16/13 5:00' (two-digit year, 24h clock).

    NOTE: the year is assumed to lie in 20xx, matching the original parser.
    """
    try:
        date, time = dt_str.split()
        month, day, year = date.split("/")
        hour, minutes = time.split(":")
        return datetime.datetime(year=int(year)+2000, month=int(month), day=int(day),
                                 hour=int(hour), minute=int(minutes))
    except Exception as ex:
        # Fix: chain the original exception so the real failure is visible
        # in the traceback instead of being replaced wholesale.
        raise Exception("Failed to parse {}, cause {}/{}".format(dt_str, type(ex), ex)) from ex
# Per-format column-name mappings. Keys are the `type` argument accepted by
# load()/generate_GeoJSON_Features(): "snapshot" is the one-year extract,
# "all" the 2001-to-present extract, "gen" a generic layout.
_FIELDS = {
    "snapshot" : {
        "_DESCRIPTION_FIELD" : ' PRIMARY DESCRIPTION',
        "_X_FIELD" : 'X COORDINATE',
        "_Y_FIELD" : 'Y COORDINATE',
        "_TIME_FIELD" : 'DATE OF OCCURRENCE',
        "_GEOJSON_LOOKUP" : {"case": 'CASE#',
            "address": "BLOCK",
            "location": ' LOCATION DESCRIPTION',
            "crime": ' PRIMARY DESCRIPTION',
            "type": ' SECONDARY DESCRIPTION',
            "timestamp": 'DATE OF OCCURRENCE'},
        "GEOJSON_COORDS" : ('LONGITUDE', 'LATITUDE'),
        "DT_CONVERT" : _date_from_csv
    },
    "all" : {
        "_DESCRIPTION_FIELD" : 'Primary Type',
        "_X_FIELD" : 'X Coordinate',
        "_Y_FIELD" : 'Y Coordinate',
        "_TIME_FIELD" : 'Date',
        "_GEOJSON_LOOKUP" : {"case": 'Case Number',
            "address": "Block",
            "location": 'Location Description',
            "crime": 'Primary Type',
            "type": 'Description',
            "timestamp": 'Date'},
        "GEOJSON_COORDS" : ('Longitude', 'Latitude'),
        "DT_CONVERT" : _date_from_csv
    },
    "gen" : {
        "_DESCRIPTION_FIELD" : 'CRIME',
        "_X_FIELD" : 'X',
        "_Y_FIELD" : 'Y',
        "_TIME_FIELD" : 'TIMESTAMP',
        "_GEOJSON_LOOKUP" : {"case": 'CASE',
            "address": "BLOCK",
            "location": 'LOCATION',
            "crime": 'CRIME',
            "type": 'SUB-TYPE',
            "timestamp": 'TIMESTAMP'},
        "GEOJSON_COORDS" : ('X', 'Y'),
        "DT_CONVERT" : _date_from_csv
    }
}
# "all_other": same columns as "all" but with the alternative date format
# (e.g. '4/16/13 5:00').
_FIELDS["all_other"] = dict(_FIELDS["all"])
_FIELDS["all_other"]["DT_CONVERT"] = _date_from_other
def _convert_header(header, dic):
    """Map the required column names (from *dic*) to their indices in *header*.

    Raises when a required column is missing.
    """
    required = (dic["_DESCRIPTION_FIELD"], dic["_X_FIELD"], dic["_Y_FIELD"], dic["_TIME_FIELD"])
    lookup = dict()
    for field in required:
        if field not in header:
            raise Exception("No field '{}' found in header".format(field))
        lookup[field] = header.index(field)
    return lookup
def default_burglary_data():
    """Load the default data, if available, giving just "THEFT" data.

    :return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
    """
    # Deliberately best-effort: any failure (data dir not set, file missing,
    # malformed CSV) yields None instead of raising.
    try:
        return load(get_default_filename(), {"THEFT"})
    except Exception:
        return None
def _get_dic(type):
    """Return the field-name mapping for the given format *type*.

    Raises ValueError for unknown types.
    """
    if type not in _FIELDS:
        raise ValueError("Don't understand type {}".format(type))
    return _FIELDS[type]
def _load_to_list(file, dic, primary_description_names):
    """Read the open CSV *file* into a list of (datetime, x, y) triples.

    Rows whose primary description is not in *primary_description_names*,
    or which lack coordinates, are skipped.
    """
    reader = _csv.reader(file)
    # First row is the header; map required column names to indices.
    lookup = _convert_header(next(reader), dic)
    dt_convert = dic["DT_CONVERT"]
    data = []
    for row in reader:
        description = row[lookup[dic["_DESCRIPTION_FIELD"]]].strip()
        if not description in primary_description_names:
            continue
        x = row[lookup[dic["_X_FIELD"]]].strip()
        y = row[lookup[dic["_Y_FIELD"]]].strip()
        t = row[lookup[dic["_TIME_FIELD"]]].strip()
        # Keep only rows that actually carry coordinates.
        if x != "" and y != "":
            data.append((dt_convert(t), float(x), float(y)))
    return data
def load(file, primary_description_names, to_meters=True, type="snapshot"):
    """Load data from a CSV file in the expected format.

    :param file: Name of the CSV file load, or a file-like object.
    :param primary_description_names: Set of names to search for in the
      "primary description field". E.g. pass `{"THEFT"}` to return only the
      "theft" crime type.
    :param to_meters: Convert the coordinates to meters; True by default.
    :param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming the the data "last year" or "2001 to present".

    :return: An instance of :class:`open_cp.data.TimedPoints` or `None`.
    """
    dic = _get_dic(type)
    if isinstance(file, str):
        with open(file) as file:
            data = _load_to_list(file, dic, primary_description_names)
    else:
        data = _load_to_list(file, dic, primary_description_names)
    # Sort chronologically (triples are (timestamp, x, y)).
    data.sort(key = lambda triple : triple[0])
    xcoords = _np.empty(len(data))
    ycoords = _np.empty(len(data))
    for i, (_, x, y) in enumerate(data):
        xcoords[i], ycoords[i] = x, y
    times = [t for t, _, _ in data]
    if to_meters:
        # Source coordinates are in feet; divide by feet-per-meter.
        xcoords /= _FEET_IN_METERS
        ycoords /= _FEET_IN_METERS
    return TimedPoints.from_coords(times, xcoords, ycoords)
def _convert_header_for_geojson(header, dic):
    """Build (property-name -> column index, coordinate index list) lookups.

    Raises ValueError with a diagnostic message when an expected column is
    absent from *header*.
    """
    try:
        column_lookup = {}
        for key, col_head in dic["_GEOJSON_LOOKUP"].items():
            column_lookup[key] = header.index(col_head)
        coord_lookup = [header.index(chead) for chead in dic["GEOJSON_COORDS"]]
        return column_lookup, coord_lookup
    except (KeyError, ValueError) as ex:
        # Bug fix: list.index raises ValueError (not KeyError) when a column
        # is missing, so the old `except KeyError` let missing columns escape
        # as a raw ValueError instead of this diagnostic.
        raise ValueError("Header not in expected format: {} caused by {}/{}".format(
            header, type(ex), ex)) from ex
def _generate_GeoJSON_Features(file, dic):
    """Yield one GeoJSON feature dict per data row of the open CSV *file*.

    Rows with no coordinates get a `None` geometry; timestamps are
    normalized to ISO strings.
    """
    dt_convert = dic["DT_CONVERT"]
    reader = _csv.reader(file)
    # First row is the header; build index lookups from it.
    column_lookup, coord_lookup = _convert_header_for_geojson(next(reader), dic)
    for row in reader:
        properties = {key : row[i] for key, i in column_lookup.items()}
        properties["timestamp"] = dt_convert(properties["timestamp"]).isoformat()
        if row[coord_lookup[0]] == "":
            # No location data recorded for this event.
            geometry = None
        else:
            coordinates = [float(row[i]) for i in coord_lookup]
            geometry = {"type":"Point", "coordinates":coordinates}
        yield {"geometry": geometry, "properties": properties,
            "type": "Feature"}
def generate_GeoJSON_Features(file, type="snapshot"):
    """Generate a sequence of GeoJSON "features" from the CSV file.

    See :func:`load_to_GeoJSON`.

    :param file: Either a filename, or a file object.
    """
    dic = _get_dic(type)
    if not isinstance(file, str):
        yield from _generate_GeoJSON_Features(file, dic)
    else:
        with open(file) as handle:
            yield from _generate_GeoJSON_Features(handle, dic)
def load_to_GeoJSON(filename, type="snapshot"):
    """Load the specified CSV file to a list of GeoJSON (see
    http://geojson.org/) features. Events with no location data have `None`
    as the geometry. Timestamps are converted to standard ISO string format.

    The returned "properties" have these keys:
    - "case" for the "CASE#" field
    - "crime" for the "PRIMARY DESCRIPTION" field
    - "type" for the "SECONDARY DESCRIPTION" field
    - "location" for the "LOCATION DESCRIPTION" field
    - "timestamp" for the "DATE OF OCCURRENCE" field
    - "address" for the "BLOCK" field

    :param filename: Filename of the CSV file to process
    :param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming the the data "last year" or "2001 to present".

    :return: List of Python dictionaries in GeoJSON format.
    """
    # Materialize the lazy generator into a concrete list.
    return list(generate_GeoJSON_Features(filename, type))
# Optional dependencies: geopandas/shapely are needed only by the
# geoDataFrame helpers below; fall back to None markers when unavailable.
try:
    import geopandas as gpd
    import shapely.geometry as _geometry
except:
    gpd = None
    _geometry = None
def convert_null_geometry_to_empty(frame):
    """Utility method. Convert any geometry in the geoDataFrame which is
    "null" (`None` or empty) to a Point type geometry which is empty. The
    returned geoDateFrame is suitable for projecting and other geometrical
    transformations.
    """
    replaced = frame.geometry.map(
        lambda geom: _geometry.Point() if (geom is None or geom.is_empty) else geom)
    return frame.set_geometry(replaced)
def convert_null_geometry_to_none(frame):
    """Utility method. Convert any geometry in the geoDataFrame which is
    "null" (`None` or empty) to `None`. The returned geoDateFrame is
    suitable for saving.
    """
    replaced = frame.geometry.map(
        lambda geom: None if (geom is None or geom.is_empty) else geom)
    return frame.set_geometry(replaced)
def load_to_geoDataFrame(filename, datetime_as_string=True,
        type="snapshot", empty_geometry="none"):
    """Return the same data as :func:`load_to_GeoJSON` but as a geoPandas
    data-frame.

    :param filename: Filename of the CSV file to process
    :param datetime_as_string: Write the timestamp as an ISO formatted string.
      Defaults to True which is best for saving the dataframe as e.g. a shape
      file. Set to False to get timestamps as python objects, which is best
      for using (geo)pandas to analyse the data.
    :param type: Either "snapshot" or "all" depending on whether the data
      has headers conforming the the data "last year" or "2001 to present".
    :param empty_geometry: Either "none" to return `None` as the geometry of
      crimes which have no location data in the CSV file (this is correct if
      you wish to save the data-frame); or "empty" to return an empty `Point`
      type (which is correct, for example, if you wish to re-project the
      data-frame). Yes, GeoPandas appears to be annoying like this.
    """
    geo_data = load_to_GeoJSON(filename, type=type)
    if not datetime_as_string:
        for feature in geo_data:
            # Bug fix: the module-level parser is named `date_from_iso`;
            # the old call to `_date_from_iso` raised a NameError.
            feature["properties"]["timestamp"] = date_from_iso(feature["properties"]["timestamp"])
    frame = gpd.GeoDataFrame.from_features(geo_data)
    if empty_geometry == "none":
        pass
    elif empty_geometry == "empty":
        frame = convert_null_geometry_to_empty(frame)
    else:
        raise ValueError("Unknown `empty_geometry` parameter `{}`".format(empty_geometry))
    frame.crs = {"init":"epsg:4326"}
    return frame
# Lazily-loaded frame of Chicago community areas annotated with their "side";
# populated on first use by _load_sides().
_sides = None
def _load_sides():
    """Load Chicago_Areas.geojson once, tag each community area with its
    "side", and project to EPSG:2790 (Illinois, metres)."""
    global _sides
    if _sides is not None:
        return
    global _datadir
    geojson = _path.join(_datadir, "Chicago_Areas.geojson")
    frame = gpd.read_file(geojson)
    # Community-area numbers belonging to each "side" of the city.
    side_mapping = {
        "Far North" : [1,2,3,4,9,10,11,12,13,14,76,77],
        "Northwest" : [15,16,17,18,19,20],
        "North" : [5,6,7,21,22],
        "West" : list(range(23, 32)),
        "Central" : [8,32,33],
        "South" : list(range(34,44)) + [60, 69],
        "Southwest" : [56,57,58,59] + list(range(61,69)),
        "Far Southwest" : list(range(70,76)),
        "Far Southeast" : list(range(44,56))
    }
    # Reverse lookup: map each area number to the side containing it.
    frame["side"] = frame.area_numbe.map(lambda x : next(key
        for key, item in side_mapping.items() if int(x) in item) )
    # Keep only the geometry + side columns.
    _sides = frame.drop(["area", "area_num_1", "comarea", "comarea_id",
        "perimeter", "shape_area", "shape_len"], axis=1)
    _sides.crs = {"init": "epsg:4326"}
    _sides = _sides.to_crs({"init": "epsg:2790"})
def get_side(name):
    """Return a geometry (a polygon, typically) of the outline of the shape
    of the given "side" of Chicago, projected to {"init":"epsg:2790"}, which
    is Illinois in metres.

    Needs the file "Chicago_Areas.geojson" to be in the "datadir". This can
    be downloaded from:
    https://data.cityofchicago.org/Facilities-Geographic-Boundaries/Boundaries-Community-Areas-current-/cauq-8yn6

    :param name: One of "Far North", "Northwest", "North", "West", "Central",
      "South", "Southwest", "Far Southwest", "Far Southeast"
    """
    _load_sides()
    # unary_union merges the side's community-area polygons into one geometry.
    return _sides[_sides.side == name].unary_union
| StarcoderdataPython |
3363489 | <filename>python/tests/core/conftest.py
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
import os
import random
import tempfile
import time
from iceberg.api import Files, PartitionSpec, Schema
from iceberg.api.types import BooleanType, LongType, NestedField
from iceberg.core import (BaseSnapshot,
BaseTable,
ConfigProperties,
GenericManifestFile,
SnapshotLogEntry,
TableMetadata,
TableMetadataParser,
TableOperations)
from iceberg.exceptions import AlreadyExistsException, CommitFailedException
import pytest
# Minimal one-column schema shared by the test tables.
SCHEMA = Schema([NestedField.optional(1, "b", BooleanType.get())])
# In-memory stand-ins for a metastore: table name -> latest metadata / version.
METADATA = dict()
VERSIONS = dict()
class LocalTableOperations(TableOperations):
    """Minimal TableOperations backed by the local filesystem.

    Only the file-oriented operations are implemented; the metadata
    operations raise because the tests using this class never call them.
    """
    def current(self):
        raise RuntimeError("Not implemented for tests")
    def refresh(self):
        raise RuntimeError("Not implemented for tests")
    def commit(self, base, metadata):
        raise RuntimeError("Not implemented for tests")
    def new_input_file(self, path):
        return Files.local_input(path)
    def new_metadata_file(self, filename):
        # NOTE(review): tempfile.mkstemp returns an (fd, path) tuple and also
        # leaks the open fd here -- passing the tuple straight to
        # Files.local_output looks suspect; confirm the expected argument type.
        return Files.local_output(tempfile.mkstemp(prefix=filename))
    def delete_file(self, path):
        # Idempotent delete: missing files are silently ignored.
        if os.path.exists(path):
            os.remove(path)
    def new_snapshot_id(self):
        raise RuntimeError("Not implemented for tests")
def create(temp, name, schema, spec):
    """Create a new test table at *temp* and return a TestTable handle.

    Raises AlreadyExistsException when metadata for *name* already exists.
    """
    ops = TestTableOperations(name, temp)
    if ops.current() is not None:
        raise AlreadyExistsException("Table %s already exists at location: %s" % (name, temp))
    ops.commit(None, TableMetadata.new_table_metadata(ops, schema, spec, str(temp)))
    return TestTable(ops, name)
def begin_create(temp, name, schema, spec):
    # Transactional table creation is not supported yet; the sketch below
    # shows the intended implementation once BaseTransaction is wired up.
    # NOTE(review): the sketched `if ops.current() is None` looks inverted
    # compared to create() above (`is not None`) — confirm before enabling.
    raise RuntimeError("Not yet implemented")
    # ops = TestTableOperations(name, temp)
    # if ops.current() is None:
    #     raise AlreadyExistsException("Table %s already exists at location: %s" % (name, temp))
    #
    # metadata = TableMetadata.new_table_metadata(ops, schema, spec, str(temp))
    # return BaseTransaction.create_table_transaction(ops, metadata)
class TestTable(BaseTable):
    # Thin BaseTable subclass that keeps its TableOperations publicly
    # reachable so tests can drive commits/refreshes directly.
    def __init__(self, ops, name):
        super(TestTable, self).__init__(ops, name)
        self.ops = ops
class TestTableOperations(TableOperations):
    """In-memory TableOperations used by the table tests.

    Metadata and version counters live in the module-level METADATA /
    VERSIONS dicts, keyed by table name, so multiple instances can share
    one "catalog".
    """

    def __init__(self, table_name, location):
        self.last_snapshot_id = 0
        # Number of upcoming commits that should fail with an injected error.
        self._fail_commits = 0
        self.table_name = table_name
        self.metadata = os.path.join(location, "metadata")
        os.makedirs(self.metadata)
        self._current = None
        self.refresh()
        if self._current is not None:
            # Resume snapshot-id allocation after the newest known snapshot.
            # (Fixed: the original read `self.current.snapshots`, i.e. an
            # attribute on the *bound method*, not on the metadata object.)
            for snap in self._current.snapshots:
                self.last_snapshot_id = max(self.last_snapshot_id, snap.snapshot_id)

    def current(self):
        return self._current

    def refresh(self):
        self._current = METADATA.get(self.table_name)
        return self._current

    def commit(self, base, metadata):
        """Commit *metadata* on top of *base*, or raise on stale/concurrent state.

        Raises:
            RuntimeError: commit based on stale metadata, or injected failure.
            CommitFailedException: the table changed between check and refresh.
        """
        if base != self.current():
            raise RuntimeError("Cannot commit changes based on stale metadata")
        self.refresh()
        # Fixed: the original compared against the bound method
        # (`base == self.current`), which is never equal, so the success
        # branch below was unreachable and every commit failed.
        if base == self.current():
            if self._fail_commits > 0:
                # Fixed: `self._fail_commits - 1` was a no-op expression.
                self._fail_commits -= 1
                raise RuntimeError("Injected failure")
            version = VERSIONS.get(self.table_name)
            VERSIONS[self.table_name] = 0 if version is None else version + 1
            METADATA[self.table_name] = metadata
            self._current = metadata
        else:
            # Fixed: message is now actually %-formatted (the original passed
            # the timestamp as a second constructor argument).
            raise CommitFailedException(
                "Commit failed: table was updated at %s"
                % self._current.last_updated_millis)

    def new_input_file(self, path):
        return Files.local_input(path)

    def new_metadata_file(self, filename):
        return Files.local_output(os.path.join(self.metadata, filename))

    def delete_file(self, path):
        # Fixed: os.remove() returns None, so the original
        # `if not os.remove(path)` raised after every *successful* delete.
        try:
            os.remove(path)
        except OSError:
            raise RuntimeError("Failed to delete file: %s" % path)

    def new_snapshot_id(self):
        # Monotonically increasing per-instance snapshot ids.
        next_snapshot_id = self.last_snapshot_id + 1
        self.last_snapshot_id = next_snapshot_id
        return next_snapshot_id
@pytest.fixture(scope="session")
def expected():
    """Baseline TableMetadata for a fresh, unpartitioned single-column table."""
    return TableMetadata.new_table_metadata(None, SCHEMA, PartitionSpec.unpartitioned(), "file://tmp/db/table")
@pytest.fixture(scope="session",
                params=[True, False])
def prop(request):
    """Yield both metadata-compression settings, removing leftovers afterwards."""
    config = {ConfigProperties.COMPRESS_METADATA: request.param}
    yield request.param
    # NOTE(review): this removes a file whose *name* is the metadata file
    # extension (in the CWD) — confirm that is the intended cleanup target.
    if os.path.exists(TableMetadataParser.get_file_extension(config)):
        os.remove(TableMetadataParser.get_file_extension(config))
@pytest.fixture(scope="session")
def ops():
    """Session-wide LocalTableOperations instance for metadata round-trips."""
    return LocalTableOperations()
@pytest.fixture(scope="session")
def expected_metadata():
    """TableMetadata with two chained snapshots and a forward snapshot log.

    NOTE(review): the bare name `ops` passed to TableMetadata below resolves
    to the module-level pytest fixture *function*, not an injected
    LocalTableOperations instance — confirm whether `ops` should be a
    fixture parameter of this fixture instead.
    """
    spec_schema = Schema(NestedField.required(1, "x", LongType.get()),
                         NestedField.required(2, "y", LongType.get()),
                         NestedField.required(3, "z", LongType.get()))
    spec = PartitionSpec \
        .builder_for(spec_schema) \
        .with_spec_id(5) \
        .build()
    # Fixed seed so the "previous" snapshot timestamp offset is reproducible.
    random.seed(1234)
    previous_snapshot_id = int(time.time()) - random.randint(0, 3600)
    previous_snapshot = BaseSnapshot(None, previous_snapshot_id, None,
                                     timestamp_millis=previous_snapshot_id,
                                     manifests=[GenericManifestFile(file=Files.local_input("file:/tmp/manfiest.1.avro"),
                                                                    spec_id=spec.spec_id)])
    current_snapshot_id = int(time.time())
    current_snapshot = BaseSnapshot(None, current_snapshot_id, previous_snapshot_id,
                                    timestamp_millis=current_snapshot_id,
                                    manifests=[GenericManifestFile(file=Files.local_input("file:/tmp/manfiest.2.avro"),
                                                                   spec_id=spec.spec_id)])
    snapshot_log = [SnapshotLogEntry(previous_snapshot.timestamp_millis, previous_snapshot.snapshot_id),
                    SnapshotLogEntry(current_snapshot.timestamp_millis, current_snapshot.snapshot_id)]
    return TableMetadata(ops, None, "s3://bucket/test/location",
                         int(time.time()), 3, spec_schema, 5, [spec], {"property": "value"}, current_snapshot_id,
                         [previous_snapshot, current_snapshot], snapshot_log)
@pytest.fixture(scope="session")
def expected_metadata_sorting():
    """TableMetadata whose snapshot log is populated in *reverse* order.

    The log list is appended to *after* TableMetadata construction, so the
    metadata object shares the same (initially empty) list object.
    NOTE(review): as in expected_metadata, `ops` here is the module-level
    fixture function object, not an injected instance — verify intent.
    """
    spec_schema = Schema(NestedField.required(1, "x", LongType.get()),
                         NestedField.required(2, "y", LongType.get()),
                         NestedField.required(3, "z", LongType.get()))
    spec = PartitionSpec \
        .builder_for(spec_schema) \
        .with_spec_id(5) \
        .build()
    random.seed(1234)
    previous_snapshot_id = int(time.time()) - random.randint(0, 3600)
    previous_snapshot = BaseSnapshot(ops, previous_snapshot_id, None,
                                     timestamp_millis=previous_snapshot_id,
                                     manifests=[GenericManifestFile(file=Files.local_input("file:/tmp/manfiest.1.avro"),
                                                                    spec_id=spec.spec_id)])
    current_snapshot_id = int(time.time())
    current_snapshot = BaseSnapshot(ops, current_snapshot_id, previous_snapshot_id,
                                    timestamp_millis=current_snapshot_id,
                                    manifests=[GenericManifestFile(file=Files.local_input("file:/tmp/manfiest.2.avro"),
                                                                   spec_id=spec.spec_id)])
    reversed_snapshot_log = list()
    metadata = TableMetadata(ops, None, "s3://bucket/test/location",
                             int(time.time()), 3, spec_schema, 5, [spec], {"property": "value"}, current_snapshot_id,
                             [previous_snapshot, current_snapshot], reversed_snapshot_log)
    # Newest first — intentionally out of chronological order.
    reversed_snapshot_log.append(SnapshotLogEntry(current_snapshot.timestamp_millis, current_snapshot.snapshot_id))
    reversed_snapshot_log.append(SnapshotLogEntry(previous_snapshot.timestamp_millis, previous_snapshot.snapshot_id))
    return metadata
@pytest.fixture(scope="session")
def missing_spec_list():
    """TableMetadata whose partition specs are passed as a tuple, not a list,
    with an identity-partitioned spec (exercises non-list spec containers).

    NOTE(review): `ops` is the module-level fixture function object here,
    not an injected LocalTableOperations — verify intent.
    """
    schema = Schema(NestedField.required(1, "x", LongType.get()),
                    NestedField.required(2, "y", LongType.get()),
                    NestedField.required(3, "z", LongType.get()))
    spec = PartitionSpec.builder_for(schema).identity("x").with_spec_id(6).build()
    random.seed(1234)
    previous_snapshot_id = int(time.time()) - random.randint(0, 3600)
    previous_snapshot = BaseSnapshot(ops, previous_snapshot_id, None,
                                     timestamp_millis=previous_snapshot_id,
                                     manifests=[GenericManifestFile(file=Files.local_input("file:/tmp/manfiest.1.avro"),
                                                                    spec_id=spec.spec_id)])
    current_snapshot_id = int(time.time())
    current_snapshot = BaseSnapshot(ops, current_snapshot_id, previous_snapshot_id,
                                    timestamp_millis=current_snapshot_id,
                                    manifests=[GenericManifestFile(file=Files.local_input("file:/tmp/manfiest.2.avro"),
                                                                   spec_id=spec.spec_id)])
    return TableMetadata(ops, None, "s3://bucket/test/location", int(time.time()), 3, schema, 6,
                         (spec,), {"property": "value"}, current_snapshot_id, [previous_snapshot, current_snapshot],
                         [])
@pytest.fixture(scope="session")
def expected_base_snapshot():
    """A BaseSnapshot with two manifest paths and a timestamp-derived id."""
    return BaseSnapshot(LocalTableOperations(), int(time.time()), manifests=["file:/tmp/manfiest.1.avro",
                                                                             "file:/tmp/manfiest.2.avro"])
| StarcoderdataPython |
11283335 | <filename>bitmovin_api_sdk/encoding/encodings/muxings/text/customdata/__init__.py
from bitmovin_api_sdk.encoding.encodings.muxings.text.customdata.customdata_api import CustomdataApi
| StarcoderdataPython |
11214311 | <filename>Python/benchmarking/req_test_creator.py<gh_stars>0
import os
# release_creator lives one directory up; change CWD before importing it.
os.chdir("..")
from release_creator import build_utility_application
"""
Create the ReqTest application.
"""
# Build the Windows ReqTest utility application from the given Unity scene.
build_utility_application("Windows", "ReqTest", "Assets/TDWTest/ReqTest.unity", "TEST")
| StarcoderdataPython |
8020408 | """Add UserParticipation table
Revision ID: 017a0dd30585
Revises: <PASSWORD>
Create Date: 2018-10-14 11:25:03.460864
"""
# revision identifiers, used by Alembic.
import sqlalchemy as sa
import transaction
from alembic import op
from dbas.database import DBDiscussionSession
# Alembic revision identifiers (consumed by the migration framework).
revision = '017a0dd30585'
down_revision = '<KEY>8'
branch_labels = None
depends_on = None
def upgrade():
    """Create the user_participation table and backfill it from history paths."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.create_table('user_participation',
                    sa.Column('user_uid', sa.Integer(), nullable=False),
                    sa.Column('issue_uid', sa.Integer(), nullable=False),
                    sa.ForeignKeyConstraint(['issue_uid'], ['issues.uid'], ),
                    sa.ForeignKeyConstraint(['user_uid'], ['users.uid'], ),
                    sa.PrimaryKeyConstraint('user_uid', 'issue_uid')
                    )
    # ### end Alembic commands ###
    # Rebind the scoped session to the migration's connection so the raw SQL
    # below runs inside this migration.
    DBDiscussionSession.remove()
    DBDiscussionSession.configure(bind=op.get_bind())
    issue_query = "SELECT issues.uid, issues.slug FROM discussion.public.issues"
    slug2uid = {slug: uid for (uid, slug) in DBDiscussionSession.execute(issue_query).fetchall()}
    history_query = "SELECT history.author_uid, history.path FROM discussion.public.history;"
    historys = DBDiscussionSession.execute(history_query).fetchall()
    insert_query = "INSERT INTO discussion.public.user_participation (user_uid, issue_uid) VALUES (?, ?) ON CONFLICT DO NOTHING;"
    for author_uid, path in historys:
        # /discuss/<slug>/ ...
        slug = path.split('/', 2)[1]
        # in case that the line above gets something weird (path IS sometimes malformed)
        issue_id = slug2uid.get(slug)
        if issue_id:
            # NOTE(review): SQLAlchemy's execute() usually takes bind params
            # as a tuple/dict, and PostgreSQL's paramstyle is %s, not '?' —
            # verify author_uid/issue_id are actually bound as intended.
            DBDiscussionSession.execute(insert_query, author_uid, issue_id)
    transaction.commit()
def downgrade():
    """Drop the user_participation table (reverses upgrade)."""
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_table('user_participation')
    # ### end Alembic commands ###
11393417 | from services.yfinance import get_price
from .optimizer import get_ebitda_df
from .heatmap import esg_data_df, merged_esg_scores
import pandas as pd
def get_esg_score(request):
    """Return a one-row frame joining a ticker's ESG scores with company info.

    The ticker is read from the JSON request body; company metadata comes
    from the local companies_br.csv file.
    """
    ticker = request.get_json()["ticker"]

    def _as_row(series):
        # Turn an index-selected Series back into a 1-row frame keyed by ticker.
        return series.to_frame().T.reset_index().rename(columns={"index": "ticker"})

    esg_row = _as_row(esg_data_df().loc[ticker, :])
    companies = pd.read_csv("./data/companies_br.csv").set_index("company_id")
    company_row = _as_row(companies.set_index("ticker").loc[ticker])
    return esg_row.merge(company_row, how="inner", on="ticker")
def get_historical_price(request):
    """Fetch B3 (.SA) price history for the requested ticker, types normalised."""
    payload = request.get_json()
    prices = get_price("{}.SA".format(payload["ticker"]))
    # Everything except DATE is numeric; DATE is reformatted to ISO days.
    prices.iloc[:, 1:] = prices.iloc[:, 1:].astype(float)
    prices["DATE"] = pd.to_datetime(prices["DATE"]).dt.strftime("%Y-%m-%d")
    return prices
def get_ebitda_growth(request):
    """Return one ticker's EBITDA series with the index exposed as a DATE column."""
    ticker = request.get_json()["ticker"]
    ebitda_row = get_ebitda_df().loc[ticker, :]
    return ebitda_row.reset_index().rename(columns={"index": "DATE"})
def get_esg_growth(request):
    """Return per-year E, S, G and total ESG scores for one ticker.

    S&P Global rows are pivoted to (aspect, assessment_year) columns; each
    dimension is then flattened into a per-year frame tagged with a short
    metric name, and the four frames are stacked.
    """
    ticker = request.get_json()["ticker"]
    esg_df = merged_esg_scores(None)
    sp_global = esg_df[(esg_df["parent_aspect"] == "S&P Global ESG Score") |
                       (esg_df["aspect"] == "S&P Global ESG Score")]
    sp_global = sp_global[sp_global["ticker"] == ticker]
    pivoted_df = sp_global.pivot_table(index='ticker',
                                       columns=['aspect', 'assessment_year'],
                                       values='score_value')
    # (source aspect label, short metric name) pairs for each dimension.
    dimensions = [("Environmental Dimension", "Environmental"),
                  ("Social Dimension", "Social"),
                  ("Governance & Economic Dimension", "Governance"),
                  ("S&P Global ESG Score", "ESG Score")]
    frames = []
    for aspect_label, metric_name in dimensions:
        # .copy() so the "metric" assignment never writes into a view of
        # pivoted_df (fixes SettingWithCopyWarning / potentially lost writes).
        dimension_df = pivoted_df[aspect_label].copy()
        dimension_df["metric"] = metric_name
        frames.append(dimension_df)
    return pd.concat(frames).reset_index()
| StarcoderdataPython |
427 | # Generated by Django 3.1 on 2020-09-08 07:43
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Initial schema for the insight app: users, opening systems, openings,
    games and per-game analyses (auto-generated by Django 3.1)."""
    initial = True
    dependencies = [
    ]
    operations = [
        # Named opening system an Opening belongs to.
        migrations.CreateModel(
            name='OpeningSystem',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
            ],
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
            ],
        ),
        # Chess opening: ECO code plus its move sequence.
        migrations.CreateModel(
            name='Opening',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=40)),
                ('eco', models.CharField(max_length=3)),
                ('moves', models.TextField()),
                ('opening_system', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.openingsystem')),
            ],
        ),
        # A recorded game, including the raw source text.
        migrations.CreateModel(
            name='Game',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('elo_mean', models.IntegerField(default=0)),
                ('elo_diff', models.IntegerField(default=0)),
                ('result', models.CharField(max_length=40)),
                ('timecontrol', models.CharField(max_length=40)),
                ('timestamp', models.DateTimeField()),
                ('raw', models.TextField()),
                ('opening', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.opening')),
            ],
        ),
        # Derived per-game analysis metrics.
        migrations.CreateModel(
            name='Analyse',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('turnover_move', models.IntegerField(default=0)),
                ('turnover_evaluation', models.IntegerField(default=0)),
                ('unbalance_material', models.IntegerField(default=0)),
                ('unbalance_officers', models.IntegerField(default=0)),
                ('unbalance_exchange', models.IntegerField(default=0)),
                ('game', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='insight.game')),
            ],
        ),
    ]
| StarcoderdataPython |
11273547 | <gh_stars>1-10
import pytest
import fair
from fair.RCPs import rcp3pd, rcp45, rcp6, rcp85, rcp26, rcp60
import numpy as np
import os
from fair.constants import molwt, radeff, lifetime
from fair.tools.constrain import hist_temp
from fair.tools.gwp import gwp
def test_ten_GtC_pulse():
    """Regression test: constant 10 GtC/yr emissions from year 125 onwards,
    plus a 14-year sinusoidal non-CO2 forcing cycle of amplitude 0.5."""
    n_years = 250
    emissions = np.zeros(n_years)
    emissions[125:] = 10.0
    other_rf = 0.5 * np.sin(2 * np.pi * np.arange(n_years) / 14.0)
    C, F, T = fair.forward.fair_scm(
        emissions=emissions, other_rf=other_rf, useMultigas=False,
        r0=32.4, tcr_dbl=70)
    datadir = os.path.join(os.path.dirname(__file__), 'ten_GtC_pulse/')
    for actual, stem in ((C, 'C'), (F, 'F'), (T, 'T')):
        assert np.allclose(actual, np.load(datadir + stem + '.npy'))
def test_multigas_fullemissions_error():
    """CO2-only mode must reject a full multigas emissions array."""
    with pytest.raises(ValueError):
        fair.forward.fair_scm(
            emissions=rcp3pd.Emissions.emissions, useMultigas=False)
# There must be a good way to avoid duplication here
def test_rcp3pd():
    """Regression-test the RCP2.6/3PD scenario against stored baseline arrays."""
    C, F, T = fair.forward.fair_scm(
        emissions=rcp3pd.Emissions.emissions,
        b_aero=np.array([-35.29e-4 * 1.3741 * molwt.SO2 / molwt.S,
                         0.0,
                         -5.034e-4 * 1.3741,
                         -5.763e-4 * 1.3741 * molwt.NO / molwt.N,
                         453e-4 * 1.3741,
                         -37.83e-4 * 1.3741,
                         -10.35e-4 * 1.3741]),
        efficacy=np.ones(13),
    )
    datadir = os.path.join(os.path.dirname(__file__), 'rcp3pd/')
    for actual, stem in ((C, 'C'), (F, 'F'), (T, 'T')):
        assert np.allclose(actual, np.load(datadir + stem + '.npy'))
def test_rcp45():
    """Regression-test the RCP4.5 scenario against stored baseline arrays."""
    C, F, T = fair.forward.fair_scm(
        emissions=rcp45.Emissions.emissions,
        b_aero=np.array([-35.29e-4 * 1.3741 * molwt.SO2 / molwt.S,
                         0.0,
                         -5.034e-4 * 1.3741,
                         -5.763e-4 * 1.3741 * molwt.NO / molwt.N,
                         453e-4 * 1.3741,
                         -37.83e-4 * 1.3741,
                         -10.35e-4 * 1.3741]),
        efficacy=np.ones(13),
    )
    datadir = os.path.join(os.path.dirname(__file__), 'rcp45/')
    for actual, stem in ((C, 'C'), (F, 'F'), (T, 'T')):
        assert np.allclose(actual, np.load(datadir + stem + '.npy'))
def test_rcp6():
    """Regression-test the RCP6 scenario against stored baseline arrays."""
    C, F, T = fair.forward.fair_scm(
        emissions=rcp6.Emissions.emissions,
        b_aero=np.array([-35.29e-4 * 1.3741 * molwt.SO2 / molwt.S,
                         0.0,
                         -5.034e-4 * 1.3741,
                         -5.763e-4 * 1.3741 * molwt.NO / molwt.N,
                         453e-4 * 1.3741,
                         -37.83e-4 * 1.3741,
                         -10.35e-4 * 1.3741]),
        efficacy=np.ones(13),
    )
    datadir = os.path.join(os.path.dirname(__file__), 'rcp6/')
    for actual, stem in ((C, 'C'), (F, 'F'), (T, 'T')):
        assert np.allclose(actual, np.load(datadir + stem + '.npy'))
def test_rcp85():
    """Regression-test the RCP8.5 scenario against stored baseline arrays."""
    C, F, T = fair.forward.fair_scm(
        emissions=rcp85.Emissions.emissions,
        b_aero=np.array([-35.29e-4 * 1.3741 * molwt.SO2 / molwt.S,
                         0.0,
                         -5.034e-4 * 1.3741,
                         -5.763e-4 * 1.3741 * molwt.NO / molwt.N,
                         453e-4 * 1.3741,
                         -37.83e-4 * 1.3741,
                         -10.35e-4 * 1.3741]),
        efficacy=np.ones(13),
    )
    datadir = os.path.join(os.path.dirname(__file__), 'rcp85/')
    for actual, stem in ((C, 'C'), (F, 'F'), (T, 'T')):
        assert np.allclose(actual, np.load(datadir + stem + '.npy'))
# rcp3pd and rcp6 have been renamed. The modules should still work otherwise
# the tests would not have got to this point. But we import directly here to
# ensure compatibility.
def test_rcp_aliases():
    """The renamed rcp26/rcp60 modules must reproduce the rcp3pd/rcp6 baselines."""
    for module, dirname in ((rcp26, 'rcp3pd'), (rcp60, 'rcp6')):
        C, F, T = fair.forward.fair_scm(
            emissions=module.Emissions.emissions,
            b_aero=np.array([-35.29e-4 * 1.3741 * molwt.SO2 / molwt.S,
                             0.0,
                             -5.034e-4 * 1.3741,
                             -5.763e-4 * 1.3741 * molwt.NO / molwt.N,
                             453e-4 * 1.3741,
                             -37.83e-4 * 1.3741,
                             -10.35e-4 * 1.3741]),
            efficacy=np.ones(13),
        )
        datadir = os.path.join(os.path.dirname(__file__), dirname + '/')
        for actual, stem in ((C, 'C'), (F, 'F'), (T, 'T')):
            assert np.allclose(actual, np.load(datadir + stem + '.npy'))
def test_co2_concentration_driven():
    """Concentration-driven CO2-only run: C passes through unchanged and T
    matches the stored baseline."""
    C, F, T = fair.forward.fair_scm(
        emissions_driven=False,
        C=rcp45.Concentrations.co2,
        useMultigas=False,
    )
    assert (C == rcp45.Concentrations.co2).all()
    baseline = os.path.join(os.path.dirname(__file__), 'rcp45/', 'T_concdriven.npy')
    assert np.allclose(T, np.load(baseline))
def test_multigas_concentration_driven():
    """Multigas concentration-driven run must match the stored T baseline."""
    aerosol_forcing = rcp45.Forcing.aero + rcp45.Forcing.cloud
    C, F, T = fair.forward.fair_scm(
        emissions_driven=False,
        C=rcp45.Concentrations.gases,
        F_tropO3=rcp45.Forcing.tropo3,
        F_aerosol=aerosol_forcing,
        F_bcsnow=rcp45.Forcing.bcsnow,
        useMultigas=True,
    )
    baseline = os.path.join(os.path.dirname(__file__), 'rcp45/', 'T_concdriven_multi.npy')
    assert np.allclose(T, np.load(baseline))
def test_inverse_fair():
    """Inverse FaIR reproduces stored output for an idealised 1 %/yr CO2 ramp."""
    n_timesteps = 140
    concentrations = 1.01 ** np.arange(n_timesteps) * 278.
    E, F, T = fair.inverse.inverse_fair_scm(C=concentrations,
                                            tcrecs=np.array([1.7, 3.0]))
    datadir = os.path.join(os.path.dirname(__file__), '1pctCO2/')
    for actual, stem in ((E, 'E'), (F, 'F'), (T, 'T')):
        assert np.allclose(actual, np.load(datadir + stem + '.npy'))
def test_forward_versus_reverse():
    """Does inverse FaIR recover the same emissions as forward FaIR?

    Both directions use numerical root finding, so exact correspondence is
    unlikely; accept a small tolerance.
    """
    emissions_in = rcp85.Emissions.co2
    other_rf = np.sin(np.arange(736)) * 0.2
    C_fwd, F_fwd, T_fwd = fair.forward.fair_scm(
        emissions=emissions_in, other_rf=other_rf, useMultigas=False)
    E_inv, F_inv, T_inv = fair.inverse.inverse_fair_scm(
        C=C_fwd, other_rf=other_rf)
    for forward, inverse in ((emissions_in, E_inv), (F_fwd, F_inv), (T_fwd, T_inv)):
        assert np.allclose(forward, inverse, atol=0.01, rtol=0.01)
def test_restart_co2_continuous():
    """A CO2-only run restarted halfway must match the uninterrupted run."""
    C_full, F_full, T_full = fair.forward.fair_scm(
        emissions=rcp45.Emissions.co2[:20],
        useMultigas=False,
    )
    C_a, F_a, T_a, restart = fair.forward.fair_scm(
        emissions=rcp45.Emissions.co2[:10],
        useMultigas=False,
        restart_out=True,
    )
    C_b, F_b, T_b = fair.forward.fair_scm(
        emissions=rcp45.Emissions.co2[10:20],
        useMultigas=False,
        restart_in=restart,
    )
    assert np.all(C_full == np.concatenate((C_a, C_b)))
    assert np.all(F_full == np.concatenate((F_a, F_b)))
    assert np.all(T_full == np.concatenate((T_a, T_b)))
def test_inverse_restart():
    """Inverse FaIR restarted halfway must match the uninterrupted run."""
    conc = rcp85.Concentrations.co2
    E_full, F_full, T_full = fair.inverse.inverse_fair_scm(C=conc[:20])
    E_a, F_a, T_a, restart = fair.inverse.inverse_fair_scm(
        C=conc[:10], restart_out=True)
    E_b, F_b, T_b = fair.inverse.inverse_fair_scm(
        C=conc[10:20], restart_in=restart)
    assert np.all(E_full == np.concatenate((E_a, E_b)))
    assert np.all(F_full == np.concatenate((F_a, F_b)))
    assert np.all(T_full == np.concatenate((T_a, T_b)))
def test_constrain():
    """The historical-temperature constrainer accepts a plausible RCP4.5 run
    and rejects an implausible (all-zero) one; the mean/slope statistics do
    not depend on the inflate option."""
    datadir = os.path.join(os.path.dirname(__file__),
                           '../../fair/tools/tempobs/')
    obs = np.loadtxt(datadir + 'had4_krig_annual_v2_0_0.csv')
    years, tempobs = obs[:, 0], obs[:, 1]
    C, F, T = fair.forward.fair_scm(emissions=rcp45.Emissions.emissions)
    modelled = T[85:252]
    accept_default, sm_default, _, so_default, _ = hist_temp(
        tempobs, modelled, years)
    assert accept_default == True
    accept_plain, sm_plain, _, so_plain, _ = hist_temp(
        tempobs, modelled, years, inflate=False)
    assert sm_default == sm_plain
    assert so_default == so_plain
    assert accept_plain == True
    accept_zero, _, _, _, _ = hist_temp(tempobs, np.zeros(167), years)
    assert accept_zero == False
def test_gwp():
    """Spot-check the GWP calculator against IPCC AR5 values."""
    # Methane uses a "perturbation lifetime" and a 0.65 feedback factor.
    assert np.round(gwp(100, 12.4, radeff.CH4, molwt.CH4, f=0.65)) == 28
    # For N2O the AR5 table appears off by one year, most likely because the
    # feedback was rounded somewhere.  The factor follows eq. 8.SM.20 of the
    # AR5 Chapter 8 supplement: 1 - (1 - 0.36*1.65*radeff.CH4/radeff.N2O).
    n2o_20 = gwp(20, lifetime.N2O, radeff.N2O, molwt.N2O, f=-0.071874)
    n2o_100 = gwp(100, lifetime.N2O, radeff.N2O, molwt.N2O, f=-0.071874)
    assert np.round(n2o_20) == 263
    assert np.round(n2o_100) == 264
    # A straightforward long-lived gas with no feedback term.
    cfc11 = gwp(100, lifetime.CFC11, radeff.CFC11, molwt.CFC11)
    assert np.round(cfc11, decimals=-1) == 4660
11863 | __author__ = '<NAME> - www.tonybeltramelli.com'
# scripted agents taken from PySC2, credits to DeepMind
# https://github.com/deepmind/pysc2/blob/master/pysc2/agents/scripted_agent.py
import numpy as np
import uuid
from pysc2.agents import base_agent
from pysc2.lib import actions
from pysc2.lib import features
# Screen feature-layer indices.
_SCREEN_PLAYER_RELATIVE = features.SCREEN_FEATURES.player_relative.index
_SCREEN_SELECTED = features.SCREEN_FEATURES.selected.index
# Values of the player_relative layer.
_PLAYER_FRIENDLY = 1
_PLAYER_NEUTRAL = 3
_PLAYER_HOSTILE = 4
# Action function ids and common argument constants.
_NO_OP = actions.FUNCTIONS.no_op.id
_MOVE_SCREEN = actions.FUNCTIONS.Move_screen.id
_ATTACK_SCREEN = actions.FUNCTIONS.Attack_screen.id
_SELECT_ARMY = actions.FUNCTIONS.select_army.id
_NOT_QUEUED = [0]
_SELECT_ALL = [0]
class ScriptedAgent(base_agent.BaseAgent):
    """Rule-based agent for several SC2 minigames that also records
    (observation, available_actions, action, params) tuples to disk for
    imitation learning.  The minigame is selected via `self.game`, which is
    set by the subclasses below; `self.states` buffers recorded steps.
    """
    def step(self, obs):
        """Choose a scripted action for the current timestep and log it."""
        super(ScriptedAgent, self).step(obs)
        # we expand dims because keras wants 4 dims for convolutions
        # observation = np.expand_dims(obs.observation["screen"][_SCREEN_PLAYER_RELATIVE], axis=3)
        # Stack the player-relative and selection layers into an HxWx2 array.
        screens = [obs.observation["screen"][_SCREEN_PLAYER_RELATIVE],
                   obs.observation["screen"][_SCREEN_SELECTED]]
        observation = np.stack(screens, axis=2)
        if self.game == "beacon":
            # MoveToBeacon: walk the selected army to the beacon's mean position.
            if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
                player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
                neutral_y, neutral_x = (player_relative == 3).nonzero()
                if not neutral_y.any():
                    action = _NO_OP
                    params = []
                else:
                    target = [int(neutral_x.mean()), int(neutral_y.mean())]
                    action = _MOVE_SCREEN
                    params = [[0], target]
            else:
                action = _SELECT_ARMY
                params = [[0]]
        elif self.game == "mineral":
            # Head for the shard minimising x+y (top-left-most by that heuristic).
            if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
                player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
                neutral_y, neutral_x = (player_relative == 3).nonzero()
                player_y, player_x = (player_relative == 1).nonzero()
                if not neutral_y.any() or not player_y.any():
                    action = _NO_OP
                    params = []
                else:
                    action = _MOVE_SCREEN
                    index_x = np.argmin(neutral_x)
                    index_y = np.argmin(neutral_y)
                    # Pick whichever candidate (min-x or min-y) has smaller x+y.
                    index = index_x if neutral_x[index_x] + neutral_y[index_x] < neutral_x[index_y] + neutral_y[index_y] else index_y
                    target = [neutral_x[index], neutral_y[index]]
                    params = [[0], target]
            else:
                action = _SELECT_ARMY
                params = [[0]]
        elif self.game == "minerals":
            # Move to the shard closest (Euclidean) to the army's mean position.
            if actions.FUNCTIONS.Move_screen.id in obs.observation["available_actions"]:
                player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
                neutral_y, neutral_x = (player_relative == 3).nonzero()
                player_y, player_x = (player_relative == 1).nonzero()
                if not neutral_y.any() or not player_y.any():
                    action = _NO_OP
                    params = []
                else:
                    player = [int(player_x.mean()), int(player_y.mean())]
                    closest, min_dist = None, None
                    for p in zip(neutral_x, neutral_y):
                        dist = np.linalg.norm(np.array(player) - np.array(p))
                        if not min_dist or dist < min_dist:
                            closest, min_dist = p, dist
                    action = _MOVE_SCREEN
                    params = [[0], closest]
            else:
                action = _SELECT_ARMY
                params = [[0]]
        elif self.game == "roaches":
            # DefeatRoaches: attack the roach with the largest y coordinate.
            if _ATTACK_SCREEN in obs.observation["available_actions"]:
                player_relative = obs.observation["screen"][_SCREEN_PLAYER_RELATIVE]
                roach_y, roach_x = (player_relative == _PLAYER_HOSTILE).nonzero()
                if not roach_y.any():
                    action = _NO_OP
                    params = [_NOT_QUEUED]
                else:
                    index = np.argmax(roach_y)
                    target = [roach_x[index], roach_y[index]]
                    action = _ATTACK_SCREEN
                    params = [_NOT_QUEUED, target]
            elif _SELECT_ARMY in obs.observation["available_actions"]:
                action = _SELECT_ARMY
                params = [_SELECT_ALL]
            else:
                action = _NO_OP
                params = [_NOT_QUEUED]
        # Record the step; flush to a uniquely named .npy file every 64 steps.
        # NOTE(review): assumes a "dataset_<game>/" directory already exists.
        self.states.append(np.array([observation, obs.observation["available_actions"], action, params]))
        if len(self.states) == 64:
            new_file_name = str(uuid.uuid1())
            np.save("dataset_{}/{}".format(self.game, new_file_name), np.array(self.states))
            self.states = []
        return actions.FunctionCall(action, params)
class AgentRoaches(ScriptedAgent):
    """Scripted agent preconfigured for the DefeatRoaches minigame."""

    def __init__(self):
        super(AgentRoaches, self).__init__()
        self.game = "roaches"
        self.states = []
class AgentBeacon(ScriptedAgent):
    """Scripted agent preconfigured for the MoveToBeacon minigame."""

    def __init__(self):
        super(AgentBeacon, self).__init__()
        self.game = "beacon"
        self.states = []
class AgentMineral(ScriptedAgent):
    """Scripted agent using the x+y shard-selection heuristic."""

    def __init__(self):
        super(AgentMineral, self).__init__()
        self.game = "mineral"
        self.states = []
class AgentMinerals(ScriptedAgent):
    """Scripted agent using the closest-shard (Euclidean) heuristic."""

    def __init__(self):
        super(AgentMinerals, self).__init__()
        self.game = "minerals"
        self.states = []
| StarcoderdataPython |
def gc_content(seq):
    """Return the percentage of 'G'/'C' characters in *seq*, rounded to 2 dp.

    An empty (falsy) sequence yields 0.  Lower-case bases are not counted,
    matching the original behaviour.
    """
    if not seq:
        return 0
    gc_count = 0
    length = 0
    for base in seq:
        gc_count += base in 'GC'  # bool adds as 0/1
        length += 1
    return round(100.0 * gc_count / length, 2)
| StarcoderdataPython |
6574373 | <gh_stars>0
# Side length of the square game board (cells per row/column).
BOARD_LENGTH = 5
# Number of aligned marks required to win.
VICTORY_STRIKE = 4
# Enable verbose debug output.
DEBUG = False
1920744 | <reponame>another-s347/learning-to-communicate-pytorch<gh_stars>0
"""
DRQN-based agent that learns to communicate with other agents to play
the Switch game.
"""
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable
from pysc2.lib import features
import numpy as np
from sc2.flat_features import FLAT_FEATURES
from sc2.convgru import ConvGRU
# Number of discrete SC2 function actions the network chooses between.
NUM_FUNCTIONS = 2
# Run on GPU when available, otherwise fall back to CPU.
device = torch.device(
    'cuda') if torch.cuda.is_available() else torch.device('cpu')
class Sc2CNet(nn.Module):
def __init__(self, opt):
super(Sc2CNet, self).__init__()
self.opt = opt
self.comm_size = opt.game_comm_bits
self.init_param_range = (-0.08, 0.08)
self.data_format = 'NCHW'
self.ch = opt.static_shape_channels
self.res = opt.resolution
self.size2d = [opt.resolution, opt.resolution]
self.screen_embed_spatial_conv = {}
self.create_screen_embed_obs()
self.screen_input_conv = nn.Sequential(
nn.Conv2d(in_channels=52, out_channels=16,
kernel_size=7, stride=1, padding=2),
nn.BatchNorm2d(16),
nn.ReLU(),
nn.Conv2d(in_channels=16, out_channels=32,
kernel_size=3, stride=1, padding=2),
nn.BatchNorm2d(32),
nn.ReLU()
)
# self.fn_conv = nn.Conv2d(32, num_units, kernel_size=1, stride=1)
self.convgru = ConvGRU(input_size=(24, 24),
input_dim=34,
hidden_dim=[34,34],
kernel_size=(3,3),
num_layers=2,
dtype=torch.FloatTensor,
batch_first=True,
bias = True,
return_all_layers = False)
self.fc1 = nn.Sequential(
nn.Flatten(),
nn.Linear(in_features=19584, out_features=256),
nn.ReLU(),
)
self.fc2 = nn.Linear(in_features=256, out_features=1)
self.fn_non_spatial_output = nn.Sequential(
nn.Linear(256, NUM_FUNCTIONS+opt.game_comm_bits),
nn.Softmax()
)
self.world_output = nn.Sequential(
nn.Conv2d(34, 1, kernel_size=1, stride=1),
nn.Flatten(),
nn.Linear(opt.resolution * opt.resolution,
opt.resolution*opt.resolution)
)
def get_params(self):
return list(self.parameters())
def create_screen_embed_obs(self):
for s in features.SCREEN_FEATURES:
if s.type == features.FeatureType.CATEGORICAL:
dims = np.round(np.log2(s.scale)).astype(np.int32).item()
dims = max(dims, 1)
self.screen_embed_spatial_conv[s.index] = nn.Sequential(
nn.Conv2d(
in_channels=s.scale,
out_channels=dims,
kernel_size=1,
stride=1
),
nn.BatchNorm2d(dims),
nn.ReLU()
)
self.add_module(
f"screen_embed_spatial_conv:{s.name}", self.screen_embed_spatial_conv[s.index])
def screen_embed_obs(self, x):
print(x.shape)
feats = list(x.split(1, dim=-1))
out_list = []
for s in features.SCREEN_FEATURES:
f = feats[s.index]
if s.type == features.FeatureType.CATEGORICAL:
f = torch.squeeze(f, -1).type(torch.LongTensor)
indices = torch.nn.functional.one_hot(f, num_classes=s.scale)
x = self.from_nhwc(indices.type(torch.FloatTensor))
out = self.screen_embed_spatial_conv[s.index](x)
out = self.to_nhwc(out)
elif s.type == features.FeatureType.SCALAR:
out = self.log_transform(
f.type(torch.FloatTensor), s.scale)
out_list.append(out)
return torch.cat(out_list, dim=-1).to(device)
def log_transform(self, x, scale):
return torch.log(x + 1.)
def flat_embed_obs(self, x):
spec = FLAT_FEATURES
feats = list(x.split(1, dim=-1))
out_list = []
for s in spec:
f = feats[s.index]
if s.type == features.FeatureType.CATEGORICAL:
dims = np.round(np.log2(s.scale)).astype(np.int32).item()
dims = max(dims, 1)
indices = torch.nn.functional.one_hot(
torch.squeeze(f, -1), s.scale)
out = self.embed_flat_fc[s.index](indices)
elif s.type == features.FeatureType.SCALAR:
out = self.log_transform(f.type(torch.FloatTensor), s.scale)
out_list.append(out)
return torch.cat(out_list, dim=-1).to(device)
def concat2d(self, lst):
if self.data_format == 'NCHW':
return torch.cat(lst, dim=1).to(device)
return torch.cat(lst, dim=3).to(device)
def broadcast_along_channels(self, flat, size2d):
if self.data_format == 'NCHW':
return flat.unsqueeze(2).unsqueeze(3).repeat(1, 1, size2d[0], size2d[1])
return flat.unsqueeze(1).unsqueeze(2).repeat(1, size2d[0], size2d[1], 1)
def to_nhwc(self, map2d):
if self.data_format == 'NCHW':
return map2d.permute(0, 2, 3, 1)
return map2d
def from_nhwc(self, map2d):
if self.data_format == 'NCHW':
return map2d.permute(0, 3, 1, 2)
return map2d
def reset_parameters(self):
opt = self.opt
# self.messages_mlp.linear1.reset_parameters()
# self.rnn.reset_parameters()
# self.agent_lookup.reset_parameters()
# self.state_lookup.reset_parameters()
# self.prev_action_lookup.reset_parameters()
# if self.prev_message_lookup:
# self.prev_message_lookup.reset_parameters()
# if opt.comm_enabled and opt.model_dial:
# self.messages_mlp.batchnorm1.reset_parameters()
# self.outputs.linear1.reset_parameters()
# self.outputs.linear2.reset_parameters()
# for p in self.rnn.parameters():
# p.data.uniform_(*self.init_param_range)
def forward(self, s_t, messages, hidden, prev_action, agent_index):
    """One policy step: embed observations, fuse them with the flattened
    messages, run the recurrent core, and return
    ``(hidden_state, (fn_logits, spatial_logits))``.

    NOTE(review): ``hidden``, ``prev_action`` and ``agent_index`` are
    accepted but unused by the active code path -- they were consumed by
    the commented-out DIAL-style RNN block below; confirm before removing.
    """
    opt = self.opt  # NOTE(review): unused in the active path
    # --- legacy DIAL-style implementation kept for reference ---
    # s_t = Variable(s_t)
    # hidden = Variable(hidden)
    # prev_message = None
    # if opt.model_dial:
    # 	if opt.model_action_aware:
    # 		prev_action = Variable(prev_action)
    # else:
    # 	if opt.model_action_aware:
    # 		prev_action, prev_message = prev_action
    # 		prev_action = Variable(prev_action)
    # 		prev_message = Variable(prev_message)
    # 	messages = Variable(messages)
    # agent_index = Variable(agent_index)
    # z_a, z_o, z_u, z_m = [0]*4
    # z_a = self.agent_lookup(agent_index)
    # z_o = self.state_lookup(s_t)
    # if opt.model_action_aware:
    # 	z_u = self.prev_action_lookup(prev_action)
    # 	if prev_message is not None:
    # 		z_u += self.prev_message_lookup(prev_message)
    # z_m = self.messages_mlp(messages.view(-1, self.comm_size))
    # z = z_a + z_o + z_u + z_m
    # z = z.unsqueeze(1)
    # rnn_out, h_out = self.rnn(z, hidden)
    # outputs = self.outputs(rnn_out[:, -1, :].squeeze())
    # Non-spatial input: flatten messages to (batch, -1).
    flat_input = messages.flatten(1)
    # Stack each element's "feature_screen" numpy array into a batch tensor;
    # assumes s_t is an iterable of dict-like observations -- TODO confirm.
    screen_input = self.to_nhwc(torch.stack([torch.from_numpy(x["feature_screen"]) for x in s_t]))
    screen_emb = self.screen_embed_obs(screen_input)
    flat_emb = self.flat_embed_obs(flat_input)
    # screen_emb = self.layer_norm(screen_emb)
    screen_out = self.screen_input_conv(self.from_nhwc(screen_emb))
    # Tile the flat embedding over the 2D map and fuse along channels.
    broadcast_out = self.broadcast_along_channels(flat_emb, self.size2d)
    state_out = self.concat2d([screen_out, broadcast_out])
    # state_out = screen_out
    # The recurrent core consumes a time dimension; feed a length-1 sequence.
    state_out, h = self.convgru(state_out.unsqueeze(dim=1))
    state_out = state_out[0].squeeze(1)
    x = self.to_nhwc(state_out)
    fc = self.fc1(x)
    # Two policy heads: non-spatial function id and spatial (world) output.
    fn_out = self.fn_non_spatial_output(fc)
    world_out = self.world_output(state_out)
    policy = (fn_out, world_out)
    h_out = h[0][0]
    return h_out, policy
| StarcoderdataPython |
9731825 | <reponame>tdiprima/code<filename>recipes/Python/576823_Prints_full_name_all_occurrences_given_filename_/recipe-576823.py
"""Prints full name of all occurrences of given filename in your PATH.
Usage: findinpath.py filename"""
import os
import sys
def main():
    """Print every PATH entry that contains sys.argv[1].

    Returns a shell-style exit status: 0 if at least one match was found,
    1 if none, 2 on missing argument (after printing the usage docstring).
    """
    if len(sys.argv) < 2:
        print(__doc__)
        return 2
    filename = sys.argv[1]
    status = 1
    # os.pathsep is the platform's PATH separator (';' on Windows, ':'
    # elsewhere) -- more robust than switching on sys.platform by hand.
    for path in os.environ['PATH'].split(os.pathsep):
        fullname = os.path.join(path, filename)
        if os.path.exists(fullname):
            print(fullname)
            status = 0
    return status
# Standard script entry point: exit with main()'s status code.
if __name__ == '__main__':
    sys.exit(main())
| StarcoderdataPython |
151375 | <reponame>bossjones/docker-compose-prometheus<filename>contrib/grok-to-regex.py<gh_stars>0
#!/usr/bin/env python
import argparse
import re
from os import walk
from os.path import join
def get_patterns(patterns_dir):
    """Load grok pattern definitions from every file under *patterns_dir*.

    Each non-comment, non-blank line has the form ``NAME regex``; the
    result maps NAME -> regex with the trailing newline stripped.
    """
    patterns = {}
    for dirpath, _, filenames in walk(patterns_dir):
        for filename in filenames:
            with open(join(dirpath, filename)) as handle:
                for raw_line in handle.readlines():
                    if raw_line.startswith('#') or raw_line.strip() == "":
                        continue
                    key, value = raw_line.split(' ', 1)
                    patterns[key] = value.rstrip('\n')
    return patterns
def convert(expression, patterns):
    """Expand ``%{PATTERN}`` / ``%{PATTERN:name}`` groks inside *expression*.

    Repeatedly substitutes grok references with their regex from *patterns*
    (named ones become ``(?<name>...)`` groups) until a pass makes no
    progress, then prints the resulting regex.  Unknown pattern names are
    collected and reported at the end.

    Fixes: ``not patt in`` -> ``patt not in``; dropped the redundant
    ``list(patterns.keys())`` materialization (membership tests work on
    the dict directly).
    """
    groks = re.compile('%{[^}]*}')
    failed_matches = set()
    matches_prev_len = 0
    while True:
        matches = groks.findall(expression)
        matches_cur_len = len(matches)
        # Stop when nothing is left to expand, or when a whole pass made no
        # progress (every remaining grok is unknown).
        if matches_cur_len == 0 or matches_cur_len == matches_prev_len:
            break
        for m in matches:
            inner = m.strip('%{}')
            if ':' in inner:
                patt, name = inner.split(':')
                replacement = '(?<{}>{{}})'.format(name)
            else:
                patt = inner
                replacement = '{}'
            if patt not in patterns:
                failed_matches.add(patt)
                continue
            expression = expression.replace(m, replacement.format(patterns[patt]))
        matches_prev_len = matches_cur_len
    print(expression)
    if failed_matches:
        # NOTE(review): relies on the module-level ``args`` set by the CLI
        # entry point; calling convert() programmatically with unknown
        # patterns would raise NameError here.
        global args
        print('\nWarning! Unable to match the following expressions:')
        print(' {}'.format(', '.join(failed_matches)))
        print('This could be a typo or a missing grok pattern file. Double check your grok patterns directory: {}'.format(
            args.patterns_dir
        ))
if __name__ == '__main__':
    # CLI: grok-to-regex.py EXPR [-d PATTERNS_DIR]
    parser = argparse.ArgumentParser()
    parser.add_argument('expression', metavar='expr', help='A grok expression.')
    parser.add_argument('-d', '--patterns-dir', dest='patterns_dir', default='patterns',
                        help='Directory to find grok patterns.')
    args = parser.parse_args()
    patterns = get_patterns(args.patterns_dir)
    convert(args.expression, patterns)
4881592 | <reponame>vitormiura/django-escola<gh_stars>0
from django.contrib import admin
from home.models import Curso, Aluno
@admin.register(Curso)
class detCurso(admin.ModelAdmin):
    """Admin list configuration for Curso: show only the id column."""
    list_display = ('id',)
@admin.register(Aluno)
class detAluno(admin.ModelAdmin):
    """Admin list configuration for Aluno: show only the id column."""
    list_display = ('id',)
| StarcoderdataPython |
6452538 | from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status
from rest_framework import viewsets
# Token Authentication: is the type of authentication we use for users to authenticate themselves with our API.
# It works by generating a random token string when the user logs in, and then every request we make to that API
# we add this token string to the request, and that's effectively a password to check every request made is authenticated correctly.
from rest_framework.authentication import TokenAuthentication
from rest_framework import filters
from profiles_api import serializers
from profiles_api import models
from profiles_api import permissions
from rest_framework.authtoken.views import ObtainAuthToken
from rest_framework.settings import api_settings
class HelloApiView(APIView):
    """Demonstration APIView exposing one handler per HTTP verb."""

    serializer_class = serializers.HelloSerializer

    def get(self, request, format=None):
        """Returns a list of APIView features"""
        an_apiview = [
            'Uses HTTP methods as function (get, post, patch, put, delete)',
            'Is similar to a traditional Django View',
            'Gives you the most control over your application logic',
            'Is mapped manually to URLs',
        ]
        return Response({'message': 'hello!', 'an_apiview': an_apiview})

    def post(self, request):
        """Create a hello message with our name"""
        serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads up front.
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        name = serializer.validated_data.get('name')
        return Response({'message': f"Hello, {name}"})

    def put(self, request, pk=None):
        """Handle updating an object"""
        return Response({'method': 'PUT'})

    def patch(self, request, pk=None):
        """Handle a partial update of an object"""
        return Response({'method': 'PATCH'})

    def delete(self, request, pk=None):
        """Delete an object"""
        return Response({'method': 'DELETE'})
class HelloViewSet(viewsets.ViewSet):
    """Demonstration ViewSet mirroring HelloApiView with ViewSet actions."""

    serializer_class = serializers.HelloSerializer

    def list(self, request):
        """Return a hello message"""
        a_viewset = [
            'Uses actions (list, create, retrieve, update, partial_update)',
            'Automatically maps to URLs using Routers',
            'Provides more functionality with less code',
        ]
        return Response({'message': 'Hello!', 'a_viewset': a_viewset})

    def create(self, request):
        """Create a new hello message"""
        serializer = self.serializer_class(data=request.data)
        # Guard clause: reject invalid payloads up front.
        if not serializer.is_valid():
            return Response(serializer.errors,
                            status=status.HTTP_400_BAD_REQUEST)
        name = serializer.validated_data.get('name')
        return Response({'message': f'Hello {name}!'})

    def retrieve(self, request, pk=None):
        """Handle getting an object by its ID"""
        return Response({'http_method': 'GET'})

    def update(self, request, pk=None):
        """Handle updating an object"""
        return Response({'http_method': 'PUT'})

    def partial_update(self, request, pk=None):
        """Handle updating part of an object"""
        return Response({'http_method': 'PATCH'})

    def destroy(self, request, pk=None):
        """Handle removing an object"""
        return Response({'http_method': 'DELETE'})
class UserProfileViewSet(viewsets.ModelViewSet):
    """Handle creating and updating profiles"""
    serializer_class = serializers.UserProfileSerializer
    queryset = models.UserProfile.objects.all()
    # Clients authenticate by passing their token with every request.
    authentication_classes = (TokenAuthentication,)
    # Permission Class: how the user gets permission to do certain things.
    # So, you may have an authenticated user who has permission to do certain things or use certain APIs,
    # but not the other APIs.
    # You can control those fine grained permissions by using Permission classes.
    permission_classes = (permissions.UpdateOwnProfile,)
    # Enable ?search= filtering on the listed fields.
    filter_backends = (filters.SearchFilter, )
    search_fields = ('name', 'email', )  # search by name or email field.
class UserLoginApiView(ObtainAuthToken):
    """Handle creating user authentication tokens"""
    # Reuse DRF's default renderers -- presumably so this endpoint also
    # appears in the browsable API; confirm against project settings.
    renderer_classes = api_settings.DEFAULT_RENDERER_CLASSES
| StarcoderdataPython |
4827955 | <filename>runme.py
#!/usr/bin/env python
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import sys
import argparse
import json
import ast
import psutil
import algorithms
from metrics import get_metrics
from datasets import prepare_dataset
def get_number_processors(args):
    """Return args.cpus, falling back to the physical core count when 0."""
    return args.cpus or psutil.cpu_count(logical=False)
def print_sys_info(args):
    """Log interpreter/library versions and the worker count in use."""
    # Imported lazily so the module can load even when the GBM libraries
    # are not needed yet.
    import xgboost
    import lightgbm
    import catboost
    print(f"System : {sys.version}")
    print(f"Xgboost : {xgboost.__version__}")
    print(f"LightGBM: {lightgbm.__version__}")
    print(f"CatBoost: {catboost.__version__}")
    print(f"#jobs : {args.cpus:d}")
def parse_args():
    """Parse the benchmark CLI options and return the argparse namespace.

    Also derives a default ``-output`` filename from ``-dataset`` when the
    user did not supply one.
    """
    parser = argparse.ArgumentParser(
        description="Benchmark xgboost/lightgbm/catboost on real datasets")
    parser.add_argument("-dataset", default="all", type=str,
                        help="The dataset to be used for benchmarking. 'all' for all datasets.")
    parser.add_argument("-root", default="/opt/gbm-datasets",
                        type=str, help="The root datasets folder")
    parser.add_argument("-algorithm", default="all", type=str,
                        help=("Comma-separated list of algorithms to run; "
                              "'all' run all"))
    parser.add_argument("-gpus", default=-1, type=int,
                        help=("#GPUs to use for the benchmarks; "
                              "ignored when not supported. Default is to use all."))
    parser.add_argument("-cpus", default=0, type=int,
                        help=("#CPUs to use for the benchmarks; "
                              "0 means psutil.cpu_count(logical=False)"))
    parser.add_argument("-output", default=None, type=str,
                        help="Output json file with runtime/accuracy stats")
    parser.add_argument("-ntrees", default=500, type=int,
                        help=("Number of trees. Default is as specified in "
                              "the respective dataset configuration"))
    parser.add_argument("-nrows", default=None, type=int,
                        help=(
                            "Subset of rows in the datasets to use. Useful for test running "
                            "benchmarks on small amounts of data. WARNING: Some datasets will "
                            "give incorrect accuracy results if nrows is specified as they have "
                            "predefined train/test splits."))
    parser.add_argument("-warmup", action="store_true",
                        help=("Whether to run a small benchmark (fraud) as a warmup"))
    parser.add_argument("-verbose", action="store_true", help="Produce verbose output")
    parser.add_argument("-extra", default='{}', help="Extra arguments as a python dictionary")
    args = parser.parse_args()
    # default value for output json file
    if not args.output:
        args.output = "%s.json" % args.dataset
    return args
# benchmarks a single dataset
def benchmark(args, dataset_folder, dataset):
    """Run every requested algorithm on one dataset.

    Returns a dict mapping algorithm name -> {"train_time", "accuracy"}.

    Fix: the original rewrote ``args.algorithm`` in place when it was
    "all", mutating the shared namespace as a side effect; the expansion
    is kept in a local variable now (results are unchanged).
    """
    data = prepare_dataset(dataset_folder, dataset, args.nrows)
    results = {}
    # "all" runs all algorithms
    algos = args.algorithm
    if algos == "all":
        algos = "xgb-gpu,xgb-cpu,xgb-gpu-dask,lgbm-cpu,lgbm-gpu,cat-cpu,cat-gpu"
    for alg in algos.split(","):
        print("Running '%s' ..." % alg)
        runner = algorithms.Algorithm.create(alg)
        with runner:
            train_time = runner.fit(data, args)
            pred = runner.test(data)
        results[alg] = {
            "train_time": train_time,
            "accuracy": get_metrics(data, pred),
        }
    return results
def main():
    """CLI entry point: parse options, optionally warm up, benchmark every
    requested dataset, and write the aggregated results as JSON.

    Fix: the output file is now opened with a context manager so it is
    closed even if the write raises.
    """
    args = parse_args()
    args.cpus = get_number_processors(args)
    # -extra is passed as a Python-dict literal string; literal_eval is the
    # safe way to parse it (no arbitrary code execution).
    args.extra = ast.literal_eval(args.extra)
    print_sys_info(args)
    if args.warmup:
        # Small dataset run to warm caches/GPU before the timed benchmarks.
        benchmark(args, os.path.join(args.root, "fraud"), "fraud")
    if args.dataset == 'all':
        args.dataset = 'airline,bosch,fraud,higgs,year,epsilon,covtype'
    results = {}
    for dataset in args.dataset.split(","):
        folder = os.path.join(args.root, dataset)
        results.update({dataset: benchmark(args, folder, dataset)})
        print(json.dumps({dataset: results[dataset]}, indent=2, sort_keys=True))
    output = json.dumps(results, indent=2, sort_keys=True)
    with open(args.output, "w") as output_file:
        output_file.write(output + "\n")
    print("Results written to file '%s'" % args.output)
# Standard script entry point.
if __name__ == "__main__":
    main()
| StarcoderdataPython |
125733 | <filename>forum/urls.py
from django.urls import path
from forum.views import (
new_post_view,
)
# Namespace for reversing, e.g. reverse('forum:new_post').
app_name = 'forum'
urlpatterns = [
    path('new-post/', new_post_view, name='new_post'),
]
| StarcoderdataPython |
363888 | <reponame>codezero00/codeGenerate
import json
from jinja2 import Template, Environment, FileSystemLoader
import os
from utils import str2Hump, str2BigHump, openapiType2pydanticType
with open('../dlop_dp.json', 'r', encoding='utf-8') as f:
json_str = f.read()
struct = json.loads(json_str)
info = struct['info']
tags = struct['tags']
paths = struct['paths']
components = struct['components']
print(info)
env = Environment(loader=FileSystemLoader('../template'))
env.filters['str2BigHump'] = str2BigHump
template = env.get_template('main.template')
genmodel = template.render({"tags": tags,"info": info})
path = '../out/openapi_server/'
if not os.path.exists(path):
os.makedirs(path)
with open(os.path.join(path, 'main.py'), 'w', encoding='utf8') as f:
f.write(genmodel) | StarcoderdataPython |
3230445 | from zope.interface import implementer
from twisted.python.components import registerAdapter
from nevow import loaders, rend, inevow, tags as T
from formless import annotate, webform
class Tree(dict):
def __init__(self, name, description, *children):
self.name = name
self.description = description
for child in children:
self.add(child)
def add(self, child):
self[child.name] = child
def __bool__(self):
return True
class ITreeEdit(annotate.TypedInterface):
def setDescription(description=annotate.String()):
pass
setDescription = annotate.autocallable(setDescription)
def deleteChild(name=annotate.String(required=True)):
pass
deleteChild = annotate.autocallable(deleteChild, invisible=True)
def addChild(name=annotate.String(required=True),
description=annotate.String()):
pass
addChild = annotate.autocallable(addChild)
@implementer(ITreeEdit)
class TreeRenderer(rend.Page):
addSlash = True
docFactory = loaders.htmlstr("""
<html>
<head><title>Tree Editor</title></head>
<body><h1><span nevow:data="description"
nevow:render="string">Tree Description</span></h1>
<span nevow:render="descriptionForm"/>
<ol nevow:data="children" nevow:render="sequence">
<li nevow:pattern="item"><span nevow:render="childLink"/>
<span nevow:render="childDel"/>
</li>
</ol>
<a href="..">Up</a>
</body>
</html>
""")
def setDescription(self, description):
self.original.description = description
def addChild(self, name, description):
self.original.add(Tree(name, description))
def deleteChild(self, name):
del self.original[name]
def data_description(self, context, data):
return self.original.description
def data_children(self, context, data):
return list(self.original.items())
def render_childLink(self, context, data):
return T.a(href='subtree_%s/'%data[0])[data[1].description]
def childFactory(self, ctx, name):
if name.startswith('subtree_'):
return self.original[name[len('subtree_'):]]
def render_descriptionForm(self, context, data):
return webform.renderForms()
def render_childDel(self, context, xxx_todo_changeme):
(name, _) = xxx_todo_changeme
ret = T.form(action="./freeform_post!!deleteChild",
enctype="multipart/form-data", method="POST")[
T.input(type="hidden", name="name", value=name),
T.input(type="submit", value="Delete")]
return ret
registerAdapter(TreeRenderer, Tree, inevow.IResource)
| StarcoderdataPython |
6589134 | from django.contrib import admin
from django.urls import path,re_path
from django.conf.urls import url
from cronjob import views
from django.contrib.auth.views import LoginView,LogoutView
from django.contrib.auth import views as auth_views
app_name = 'cronjob'
urlpatterns = [
url(r'^report/$', views.ProjectDashboardeEmail.as_view(), name="share-dashboard-view"),
url(r'^five-minutes-cron/$', views.FiveMinutesCron.as_view(), name="five-minutes-cron"),
url(r'^one-day-cron/$', views.OneDayCron.as_view(), name="one-day-cron"),
url(r'^sub-df-update/',views.ProjectEndPointSubDfView.as_view(),name='sub-df-update'),
] | StarcoderdataPython |
85827 | from xyw_eyes.spider.spider import Spider, Request
from lxml import etree
| StarcoderdataPython |
def printno(upper):
    """Print integers from min(upper, 0) through ``upper``, one per line.

    Iterative equivalent of the original recursion: for a positive
    ``upper`` it prints 0..upper inclusive; for upper <= 0 it prints just
    ``upper`` itself.
    """
    for value in range(min(upper, 0), upper + 1):
        print(value)
# Read the inclusive upper bound from the user and print the sequence.
upper=int(input("Enter upper limit: "))
printno(upper)
5071883 | # Copyright (c) 2021 PaddlePaddle Authors. All rights reserved.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Create masked LM/next sentence masked_lm examples for BERT."""
import argparse
import os
import subprocess
from paddlenlp.utils.log import logger
from text_formatting.bookcorpus import BookscorpusTextFormatter
from text_formatting.wikicorpus import WikicorpusTextFormatter
from text_sharding import Sharding, EnglishSegmenter, ChineseSegmenter
from create_pretraining_data import create_instances_from_document, write_instance_to_example_file
# Command-line options for the pretraining-data pipeline (text formatting,
# sharding, HDF5 creation).  Parsed at import time into the module-level
# ``args`` namespace that the functions below read.
# yapf: disable
parser = argparse.ArgumentParser()
parser.add_argument("--formatted_file", default=None, type=str,
                    help="The input train corpus which should be already formatted as one article one line."
                    "It can be directory with .txt files or a path to a single file")
# NOTE(review): type=eval turns the CLI string into a Python expression;
# fine for True/False flags but would execute arbitrary input.
parser.add_argument('--skip_formatting', type=eval, default=True, required=True,
                    help="If the input file already have forrmatted as formatted as one article one line, "
                    "you can skip text formatting precoess.")
parser.add_argument("--output_dir", default=None, type=str, required=True,
                    help="The output directory where the pretrained data will be written.")
parser.add_argument("--model_name", choices=['bert-base-uncased', 'bert-base-chinese', 'bert-wwm-chinese','ernie-1.0'],
                    default="bert-base-chinese", required=True,
                    help="Select which model to pretrain, defaults to bert-base-chinese.")
parser.add_argument("--max_seq_length", default=128, type=int,
                    help="The maximum total input sequence length after WordPiece tokenization. \n"
                    "Sequences longer than this will be truncated, and sequences shorter \n"
                    "than this will be padded.")
parser.add_argument("--max_word_length", default=4, type=int,
                    help="The maximum total chinese characters in a chinese word after chinese word segmentation tokenization.")
parser.add_argument("--dupe_factor", default=10, type=int,
                    help="Number of times to duplicate the input data (with different masks).")
# NOTE(review): help text looks copy-pasted -- this is the max number of
# masked-LM predictions per sequence, not a sequence length.
parser.add_argument("--max_predictions_per_seq", default=20, type=int, help="Maximum sequence length.")
parser.add_argument("--masked_lm_prob", default=0.15, type=float, help="Masked LM probability.")
parser.add_argument("--short_seq_prob", default=0.1, type=float,
                    help="Probability to create a sequence shorter than maximum sequence length")
parser.add_argument("--do_lower_case", action="store_true", default=True,
                    help="Whether to lower case the input text. True for uncased models, False for cased models.")
parser.add_argument('--random_seed', type=int, default=10000, help="random seed for initialization")
parser.add_argument('--n_train_shards', type=int, default=256, help='Specify the number of train shards to generate')
parser.add_argument('--n_test_shards', type=int, default=1, help='Specify the number of test shards to generate')
parser.add_argument('--fraction_test_set', type=float, default=0.1,
                    help='Specify the fraction (0.1) of the data to withhold for the test data split (based on number of sequences)')
args = parser.parse_args()
# yapf: enable
def create_record_worker(shardingfile_prefix,
                         outputfile_prefix,
                         shard_id,
                         do_lower_case,
                         model_name,
                         max_seq_length,
                         masked_lm_prob,
                         max_predictions_per_seq,
                         random_seed=10000,
                         dupe_factor=10):
    """Launch one create_pretraining_data.py subprocess for shard ``shard_id``.

    Assembles the CLI invocation (input ``<prefix>_<id>.txt`` ->
    output ``<prefix>_<id>.hdf5``) and starts it asynchronously with
    ``subprocess.Popen``.  Returns the Popen handle so callers can wait
    on the last worker.

    NOTE(review): the command runs with shell=True on a string built from
    arguments; safe while all inputs come from our own argparse values,
    but do not pass untrusted paths through here.
    """
    bert_preprocessing_command = 'python create_pretraining_data.py'
    bert_preprocessing_command += ' --input_file=' + shardingfile_prefix \
        + '_' + str(shard_id) + '.txt'
    bert_preprocessing_command += ' --output_file=' + outputfile_prefix \
        + '_' + str(shard_id) + '.hdf5'
    bert_preprocessing_command += ' --do_lower_case' if do_lower_case else ''
    bert_preprocessing_command += ' --max_seq_length=' + str(max_seq_length)
    bert_preprocessing_command += ' --max_predictions_per_seq=' + str(
        max_predictions_per_seq)
    bert_preprocessing_command += ' --masked_lm_prob=' + str(masked_lm_prob)
    bert_preprocessing_command += ' --random_seed=' + str(random_seed)
    bert_preprocessing_command += ' --dupe_factor=' + str(dupe_factor)
    bert_preprocessing_command += ' --model_name=' + str(model_name)
    bert_preprocessing_process = subprocess.Popen(
        bert_preprocessing_command, shell=True)
    last_process = bert_preprocessing_process
    # This could be better optimized (fine if all take equal time)
    # Throttle: every 10th shard, block until that worker finishes before
    # spawning more processes.
    if shard_id % 10 == 0 and shard_id > 0:
        bert_preprocessing_process.wait()
    return last_process
def do_text_formatting(model_name):
    """Convert the raw corpora into one-article-per-line text files.

    Returns the formatted file(s): a list for the English BERT corpus
    (wiki + books), otherwise whatever WikicorpusTextFormatter exposes.

    Fix: the original ``logger.error`` message contained a ``%s``
    placeholder but passed no argument (so a literal "%s" was logged) and
    the concatenated string parts were missing separating spaces.
    """
    if model_name not in [
            "bert-base-uncased", "bert-base-chinese", "bert-wwm-chinese"
    ]:
        logger.error(
            "The implemented text formatting process only fits "
            "bert-base-uncased, bert-base-chinese and bert-wwm-chinese. "
            "For pretraining model %s you should format the corpus "
            "firstly by your own.", model_name)
    logger.info("=" * 50)
    logger.info("Start to text formatting.")
    if model_name == "bert-base-uncased":
        wiki_formatter = WikicorpusTextFormatter('en', args.output_dir)
        formatted_files = [wiki_formatter.formatted_file]
        book_formatter = BookscorpusTextFormatter(args.output_dir)
        formatted_files.append(book_formatter.formatted_file)
    else:
        # NOTE(review): unlike the English branch this is not wrapped in a
        # list -- presumably formatted_file is already a list for 'zh';
        # confirm against WikicorpusTextFormatter.
        wiki_formatter = WikicorpusTextFormatter('zh', args.output_dir)
        formatted_files = wiki_formatter.formatted_file
    logger.info("End to text formatting")
    return formatted_files
def do_text_sharding(model_name, formatted_files, output_dir, n_train_shards,
                     n_test_shards, fraction_test_set):
    """Segment formatted articles into sentences and distribute them over
    train/test shard files on disk.

    Returns the common path prefix of the written shard files.
    """
    logger.info("=" * 50)
    logger.info("Start to text Sharding. Formated files: {}".format(
        formatted_files))
    # The output folder name encodes the sharding configuration so reruns
    # with different settings do not collide.
    sharding_path = os.path.join(output_dir,
                                 'sharded_train_shards_' + str(n_train_shards) \
                                 + "_test_shards_" + str(n_test_shards)) \
                                 + "_fraction_" + str(fraction_test_set)
    if not os.path.exists(sharding_path):
        os.makedirs(sharding_path)
    # Segmentation is here because all datasets look the same in one article/book/whatever per line format, and
    # it seemed unnecessarily complicated to add an additional preprocessing step to call just for this.
    # For english, we use EnglishSegmenter. For chinese, we use ChineseSegmenter.
    if model_name == "bert-base-uncased":
        segmenter = EnglishSegmenter()
    else:
        segmenter = ChineseSegmenter()
    sharding_output_name_prefix = os.path.join(sharding_path, "sharding")
    sharding = Sharding(formatted_files, sharding_output_name_prefix,
                        n_train_shards, n_test_shards, fraction_test_set)
    sharding.load_articles()
    logger.info("Splitting the articles into sentences.")
    sharding.segment_articles_into_sentences(segmenter)
    sharding.distribute_articles_over_shards()
    sharding.write_shards_to_disk()
    logger.info("End to text sharding. Sharding files save as {}".format(
        sharding_path))
    return sharding_output_name_prefix
def create_data(do_lower_case, max_seq_length, max_predictions_per_seq,
                masked_lm_prob, random_seed, dupe_factor, output_dir,
                n_train_shards, n_test_shards, sharding_output_name_prefix):
    """Spawn create_pretraining_data.py workers that write HDF5 shards.

    Consistency fix: the original accepted these parameters but then read
    the equivalent values from the module-level ``args`` namespace; the
    parameters are now used.  ``args.model_name`` is still read because
    the signature has no model_name parameter (kept for interface
    compatibility).
    """
    logger.info("=" * 50)
    logger.info("Start to create pretrainging data and save it to hdf5 files.")
    # The output folder name records every knob that affects the data.
    hdf5_folder = "hdf5_lower_case_" + str(do_lower_case) + "_seq_len_" + str(max_seq_length) \
        + "_max_pred_" + str(max_predictions_per_seq) + "_masked_lm_prob_" + str(masked_lm_prob) \
        + "_random_seed_" + str(random_seed) + "_dupe_factor_" + str(dupe_factor)
    if not os.path.exists(os.path.join(output_dir, hdf5_folder)):
        os.makedirs(os.path.join(output_dir, hdf5_folder))
    hdf5_folder_prefix = os.path.join(output_dir, hdf5_folder, "pretraing")
    for i in range(n_train_shards):
        last_process = create_record_worker(
            sharding_output_name_prefix + "_train",
            hdf5_folder_prefix + "_train", i, do_lower_case,
            args.model_name, max_seq_length, masked_lm_prob,
            max_predictions_per_seq, random_seed, dupe_factor)
    last_process.wait()
    for i in range(n_test_shards):
        last_process = create_record_worker(
            sharding_output_name_prefix + '_test', hdf5_folder_prefix + "_test",
            i, do_lower_case, args.model_name, max_seq_length,
            masked_lm_prob, max_predictions_per_seq, random_seed,
            dupe_factor)
    last_process.wait()
    logger.info(
        f"End to create pretrainging data and save it to hdf5 files "
        f"{sharding_output_name_prefix}_train and {sharding_output_name_prefix}_test ."
    )
if __name__ == "__main__":
    # Pipeline: (optional) text formatting -> sentence sharding -> HDF5
    # pretraining-data creation.
    if not args.skip_formatting:
        formatted_files = do_text_formatting(args.model_name)
    else:
        logger.info("=" * 50)
        logger.info("Skip text formatting, formatted file: %s" %
                    args.formatted_file)
        formatted_files = [args.formatted_file]
    sharding_output_name_prefix = do_text_sharding(
        args.model_name, formatted_files, args.output_dir, args.n_train_shards,
        args.n_test_shards, args.fraction_test_set)
    create_data(args.do_lower_case, args.max_seq_length,
                args.max_predictions_per_seq, args.masked_lm_prob,
                args.random_seed, args.dupe_factor, args.output_dir,
                args.n_train_shards, args.n_test_shards,
                sharding_output_name_prefix)
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.