seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
14186262586 | import json
from sksurv.functions import StepFunction
from sksurv.linear_model import CoxPHSurvivalAnalysis
from sksurv.metrics import concordance_index_censored
from sksurv.nonparametric import nelson_aalen_estimator, kaplan_meier_estimator
from core.cox_wrapper import CoxFairBaseline
from core.drawing import draw_points_tsne
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sksurv.ensemble import RandomSurvivalForest
from exp_config import CONFIG, RES_DIR
from core.cox_generator import CoxGenerator
from survshap import SurvivalModelExplainer, ModelSurvSHAP
from survbex.estimators import BeranModel
from survbex.explainers import SurvBexExplainer
########################################################################################################################
# ------------------------------------------------ PREPARE DATA --------------------------------------------------------
########################################################################################################################
def get_cox_data(coefs: np.ndarray):
    """Generate a synthetic Cox dataset for *coefs* and split it 70/30.

    Returns ([x_train, y_train], [x_test, y_test]) where the feature
    matrices are DataFrames whose columns are named f1..fN.
    """
    generator = CoxGenerator(coefs=coefs)
    split = train_test_split(
        *generator.generate_data(size=CONFIG['TRAIN_SIZE'], censored_part=0.2),
        train_size=0.7
    )
    x_train, x_test, y_train, y_test = split
    feature_names = [f'f{idx + 1}' for idx in range(len(coefs))]
    x_train = pd.DataFrame(x_train, columns=feature_names)
    x_test = pd.DataFrame(x_test, columns=feature_names)
    return [x_train, y_train], [x_test, y_test]
# np.random.seed(42)
# train, test = get_veterans_data()
cox_clusters = [get_cox_data(coefs=cox_coefs) for cox_coefs in CONFIG['COX_COEFS_CLS']]
cox_clusters = [
(
[cox_cluster[0][0] + 2.0 / len(cox_clusters) * cl_i, cox_cluster[0][1]],
[cox_cluster[1][0] + 2.0 / len(cox_clusters) * cl_i, cox_cluster[1][1]]
# [cox_cluster[0][0] + 1. * cl_i, cox_cluster[0][1]],
# [cox_cluster[1][0] + 1. * cl_i, cox_cluster[1][1]]
)
for cl_i, cox_cluster in enumerate(cox_clusters)
]
all_train = [
pd.concat([cox_cluster[0][0] for cox_cluster in cox_clusters]),
np.hstack([cox_cluster[0][1] for cox_cluster in cox_clusters])
]
all_test = [
pd.concat([cox_cluster[1][0] for cox_cluster in cox_clusters]),
np.hstack([cox_cluster[1][1] for cox_cluster in cox_clusters])
]
# Use SurvLimeExplainer class to find the feature importance
training_events = np.array([event for event, _ in all_train[1]])
training_times = np.array([time for _, time in all_train[1]])
training_features = all_train[0]
test_events = np.array([event for event, _ in all_test[1]])
test_times = np.array([time for _, time in all_test[1]])
test_features = all_test[0]
# Persist the generated dataset so the experiment can be reproduced later.
# NOTE: to_dict(orient='raw') only ever worked because old pandas matched
# orient strings by their first letter ('r' -> 'records'); that abbreviation
# matching was removed in modern pandas, so spell the orient out in full.
with open(f'{RES_DIR}/dataset.json', 'w+') as fp:
    json.dump(fp=fp, obj=dict(
        training_features=training_features.to_dict(orient='records'),
        training_events=training_events.tolist(),
        training_times=training_times.tolist(),
        test_features=test_features.to_dict(orient='records'),
        test_events=test_events.tolist(),
        test_times=test_times.tolist()
    ))
########################################################################################################################
# ------------------------------------------------ BUILD BBOX ----------------------------------------------------------
########################################################################################################################
if CONFIG['BBOX'] == 'rf':
model = RandomSurvivalForest(n_estimators=100, max_samples=min(500, len(all_train[0])), max_depth=8)
model.fit(all_train[0], all_train[1])
pred_surv_fn = model.predict_survival_function
pred_hazard_fn = model.predict_cumulative_hazard_function
pred_risk_fn = model.predict
elif CONFIG['BBOX'] == 'beran':
assert len(CONFIG['COX_COEFS_CLS']) == 1
model = BeranModel(kernel_width=250, kernel_name='gaussian')
model.fit(X=all_train[0].to_numpy(), b=CONFIG['COX_COEFS_CLS'][0],
y_events=training_events, y_event_times=training_times)
def surv_np_to_step_surv(surv_arr: np.ndarray):
return np.array([StepFunction(x=model.unique_times_, y=sample) for sample in surv_arr])
pred_surv_fn = lambda X: surv_np_to_step_surv(model.predict_survival_torch_optimized(X))
pred_hazard_fn = lambda X: -np.log(model.predict_survival_torch_optimized(X))
pred_risk_fn = lambda X: np.sum(pred_hazard_fn(X), axis=1)
elif 'cox' in CONFIG['BBOX']:
model = CoxPHSurvivalAnalysis(alpha=1)
model.fit(all_train[0], all_train[1])
pred_surv_fn = model.predict_survival_function
pred_hazard_fn = model.predict_cumulative_hazard_function
pred_risk_fn = model.predict
if CONFIG['BBOX'] in ['cox_na', 'cox_km']:
if CONFIG['BBOX'] == 'cox_na':
cox_fair_baseline = CoxFairBaseline(
training_events=training_events,
training_times=training_times,
baseline_estimator_f=nelson_aalen_estimator
)
elif CONFIG['BBOX'] == 'cox_km':
cox_fair_baseline = CoxFairBaseline(
training_events=training_events,
training_times=training_times,
baseline_estimator_f=kaplan_meier_estimator
)
else:
raise Exception(f'Undefined cox model = {CONFIG["BBOX"]}')
model.coef_ /= np.abs(model.coef_).sum()
pred_surv_fn = lambda X: cox_fair_baseline.predict_survival_function(X, cox_coefs=model.coef_)
pred_hazard_fn = lambda X: cox_fair_baseline.predict_cum_hazard_from_surv_np(X, cox_coefs=model.coef_)
pred_risk_fn = lambda X: np.dot(X, model.coef_)
elif CONFIG['BBOX'] != 'cox':
raise Exception(f'Undefined cox model = {CONFIG["BBOX"]}')
else:
raise Exception(f"Undefined bbox = {CONFIG['BBOX']}")
cindex_train = concordance_index_censored(
event_indicator=training_events, event_time=training_times, estimate=pred_risk_fn(training_features))[0]
print(f'cindex train = {cindex_train}')
cindex_test = concordance_index_censored(
event_indicator=test_events, event_time=test_times, estimate=pred_risk_fn(test_features))[0]
print(f'cindex test = {cindex_test}')
########################################################################################################################
# ------------------------------------------------ SELECT POINTS TO EXPLAIN --------------------------------------------
########################################################################################################################
# draw_comparison(ex_i=random.randint(0, len(test)))
cluster_centroids = [
cox_cluster[0][0].mean() + all_test[0].std() * CONFIG['DATA_POINT_DEV']
for cox_cluster in cox_clusters
]
cl_distances = [
[sum((cl_centroid - fs) ** 2) for fs in all_test[0].to_numpy()]
for cl_centroid in cluster_centroids
]
exp_test_ids = [np.argmin(distances) for distances in cl_distances]
draw_points_tsne(
pt_groups=[
*[cox_cluster[0][0].to_numpy() for cox_cluster in cox_clusters],
*list(all_test[0].to_numpy()[exp_test_ids])
],
names=[
*[f'cl{i}' for i, _ in enumerate(cox_clusters)],
*[f'ex for cl {i}' for i, _ in enumerate(exp_test_ids)]
],
colors=[None] * len(cox_clusters) * 2,
path=f'{RES_DIR}/clusters.png'
# path=f'clusters.png'
)
with open(RES_DIR.joinpath("y_true.json"), 'w+') as fp:
json.dump(
fp=fp,
obj=[
dict(event=bool(all_test[1][ex_i][0]), event_time=all_test[1][ex_i][1])
for ex_i in exp_test_ids
]
)
########################################################################################################################
# ------------------------------------------------ SurvSHAP ------------------------------------------------------------
########################################################################################################################
surv_shap = SurvivalModelExplainer(model, all_test[0].iloc[exp_test_ids], all_test[1][exp_test_ids],
predict_survival_function=lambda model, X: pred_surv_fn(X))
exp_survshap = ModelSurvSHAP(random_state=42)
exp_survshap.fit(surv_shap)
shap_explanations = np.array(
[
[
imp[1]
for imp in pt_exp.simplified_result.values
]
for pt_exp in exp_survshap.individual_explanations
]
)
with open(RES_DIR.joinpath("explanation_shap.json"), 'w+') as fp:
json.dump(fp=fp, obj=shap_explanations.tolist())
########################################################################################################################
# ------------------------------------------------ SurvLIME ------------------------------------------------------------
########################################################################################################################
explainer = SurvBexExplainer(
training_features=training_features,
training_events=list(training_events),
training_times=list(training_times),
model_output_times=model.event_times_,
kernel_width=CONFIG['KERNEL_WIDTH']
)
cox_explanations = np.array(
[
explainer.explain_instance(
data_row=all_test[0].iloc[ex_i],
predict_fn=pred_surv_fn,
num_samples=CONFIG['NEIGH_SIZE'],
type_fn='survival',
optimizer='convex'
)
for ex_i in exp_test_ids
]
)
with open(RES_DIR.joinpath("explanation_cox.json"), 'w+') as fp:
json.dump(fp=fp, obj=cox_explanations.tolist())
########################################################################################################################
# ------------------------------------------------ SurvBeX -------------------------------------------------------------
########################################################################################################################
beran_explanations = []
for cl_i, ex_i in enumerate(exp_test_ids):
beran_explanations.append(
explainer.explain_instance(
data_row=all_test[0].iloc[ex_i],
predict_fn=pred_surv_fn,
num_samples=CONFIG['NEIGH_SIZE'],
num_val_samples=CONFIG['NEIGH_VAL_SIZE'],
type_fn='survival',
optimizer='gradient',
grid_info_file=f"{RES_DIR}/optimization_cl={cl_i}.csv",
max_iter=CONFIG['MAX_ITER']
)
)
with open(RES_DIR.joinpath("explanation_beran.json"), 'w+') as fp:
json.dump(
fp=fp,
obj=np.array(beran_explanations).tolist()
)
| DanilaEremenko/SurvBeX | main_run_synth_data_explainers.py | main_run_synth_data_explainers.py | py | 10,714 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "core.cox_generator.CoxGenerator",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 26,
"usage_type": "call"
}... |
34197446896 | #!/bin/python
import sys
import os
import time
import datetime
import hashlib
from os import walk
import mysql.connector
from sys import argv
import json
import boto3
from botocore.exceptions import ClientError
import requests
from requests.exceptions import HTTPError
# Positional CLI arguments (supplied by the CI deployment job — presumably
# Jenkins, given database_conf below; confirm against the job definition):
game_client = argv[1]    # game name as stored in core_game
target_dir = argv[2]     # destination dir on the CDN: ags/<target_dir>/...
backoffice_url = argv[3]  # backoffice host to update and cache-clear
enable_forcing = argv[4]  # flag written into game-config.json (string!)
version = argv[5].split("/")[1]     # argv[5] has the form "<source_dir>/<version>"
source_dir = argv[5].split("/")[0]
environment = argv[6]    # environment name recorded in the devops DB
build_numer = argv[7]    # CI build number recorded in the devops DB
performer = argv[8]      # user who triggered the deployment
bucket_name = "cdn.project.com"
database_conf = "/var/lib/jenkins/mysql_engine.cnf"
def get_db_data():
    """Look up the game's short code and id in the core database.

    Populates the module-level globals and returns
    (client_s3_name, short_code, game_id).

    The original annotated the return as List[str], but `typing` was never
    imported, so the `def` line itself raised NameError (and the function
    actually returns a tuple) — the annotation is dropped here.
    """
    global client_s3_name
    global short_code
    global game_id
    cnx = None
    cursor = None
    try:
        cnx = mysql.connector.connect(option_files=database_conf,
                                      option_groups="client")
        cursor = cnx.cursor()
        print("*** Collecting information about Game")
        # Parameterized query: never interpolate runtime values into SQL
        # (the original used str.format, which is injectable and breaks
        # on names containing quotes).
        query = "select short_code, game_id from core_game where game_name=%s"
        cursor.execute(query, (game_client,))
        results = cursor.fetchall()
        for code in results:
            short_code = code[0].replace("_", "")
            game_id = code[1]
        # NOTE(review): assumes the query matched at least one row;
        # short_code is otherwise unbound — confirm against the DB contents.
        client_s3_name = short_code.replace("social", "")
        print("*** Data was successfully collected")
        return (client_s3_name, short_code, game_id)
    except mysql.connector.Error as e:
        print("*** ERROR: {}".format(e.msg))
        exit()
    finally:
        # Guard: the original referenced cnx/cursor unconditionally here and
        # raised NameError from the finally block when connect() failed.
        if cnx is not None and cnx.is_connected():
            cnx.close()
            cursor.close()
            print("*** MySQL connection is closed")
def ensure_dir(dir_name: str):
    """Create directory *dir_name* (including parents) if it is missing."""
    try:
        # exist_ok avoids the check-then-create race of the original
        # os.path.exists() + os.makedirs() pair.
        os.makedirs(dir_name, exist_ok=True)
    except OSError as e:
        print("*** ERROR: {}".format(sys.exc_info()[1]))
        exit()
def cleanup(item: str):
    """Remove *item* (a file or a directory tree) from the workspace."""
    import shutil  # local import keeps this fix self-contained
    try:
        # Use the stdlib instead of shelling out to `rm -rf`: os.system with
        # an interpolated path is shell-injectable and non-portable.
        # Like `rm -rf`, a missing path is not an error.
        if os.path.isdir(item) and not os.path.islink(item):
            shutil.rmtree(item)
        elif os.path.exists(item) or os.path.islink(item):
            os.remove(item)
        print("*** {} was successfully removed from workspace".format(item))
    except OSError as e:
        print("*** Error occurs: {}".format(sys.exc_info()[1]))
        exit()
def download_from_s3():
    """Fetch the client build for *version* from the CDN bucket into ./<short_code>."""
    ensure_dir(short_code)
    try:
        # NOTE(review): shells out to the aws CLI with interpolated values;
        # consider boto3 (already imported) so failures raise instead of
        # silently returning a non-zero exit status that is ignored here.
        os.system("aws s3 cp s3://cdn.project.com/ags/{0}/{1}/{2}/ ./{3} --recursive".format(source_dir, client_s3_name, version, short_code))
    except OSError as e:
        print("*** Error during downloading from s3: {}".format(sys.exc_info()[1]))
        cleanup(short_code)
        exit()
def get_sha1sum(sha1sum_target: str) -> str:
    """Return the SHA-1 hex digest of *sha1sum_target* inside the client dir."""
    try:
        # "with" guarantees the handle is closed (the original leaked it).
        with open("{0}/{1}".format(client_s3_name, sha1sum_target), "rb") as artifact:
            return hashlib.sha1(artifact.read()).hexdigest()
    except OSError as e:
        print("*** ERROR: {}".format(sys.exc_info()[1]))
        exit()
def update_devops_data(client_artifact: str):
    """Record a deployment of *client_artifact* in the devops database."""
    cnx = None
    cursor = None
    try:
        cnx = mysql.connector.connect(option_files=database_conf, option_groups="devops")
        cursor = cnx.cursor()
        print("*** Working with devops database")
        artifact_data = datetime.datetime.now()
        sha1sum_data = get_sha1sum(client_artifact)
        # Parameterized INSERT: the original interpolated values straight
        # into the SQL string, which breaks on quotes and is injectable.
        # NOTE(review): the column is named MD5sum but stores a SHA-1
        # digest — confirm schema intent.
        update_sql = ("INSERT INTO deployments "
                      "(Product, Date, Environment, Version, BuildNumber, Artifact, MD5sum, Performer) "
                      "VALUES (%s, %s, %s, %s, %s, %s, %s, %s)")
        cursor.execute(update_sql, ("{} client".format(game_client), artifact_data,
                                    environment, version, build_numer,
                                    client_artifact, sha1sum_data, performer))
        cnx.commit()
        print("*** Updating devops database with {} artifact".format(client_artifact))
        print("*** record(s) affected: ", cursor.rowcount)
    except mysql.connector.Error as e:
        print("*** ERROR: {}".format(e.msg))
        exit()
    finally:
        # Guard against connect() having failed (the original referenced
        # cnx unconditionally and could raise NameError here).
        if cnx is not None and cnx.is_connected():
            cnx.close()
            cursor.close()
            print("*** MySQL connection is closed")
def modify_json():
    """Set enableForcing in the client's game-config.json.

    enable_forcing arrives from argv as a *string*; bool() of any non-empty
    string is True, so the original could never disable forcing. Parse the
    common truthy spellings instead.
    """
    forcing = str(enable_forcing).strip().lower() in ("1", "true", "yes", "on")
    with open("{}/game-config.json".format(short_code), "r") as json_file:
        data = json.load(json_file)
    data["enableForcing"] = forcing
    with open("{}/game-config.json".format(short_code), "w") as json_file:
        json.dump(data, json_file, sort_keys=True, indent=2)
def upload_to_s3() -> bool:
    """Upload every file of the downloaded client build to the CDN bucket.

    Walks the local *short_code* directory, logs the total count/size, then
    uploads each file under ags/<target_dir>/<short_code>/<version>/.
    Returns True on success, False if any upload fails.
    """
    print("*** Uploading {0} version:{1} to S3".format(game_client, version))
    s3 = boto3.resource('s3')
    try:
        engine_files = []
        total_file_count = 0
        total_file_size = 0
        # Collect bucket-relative file names plus aggregate stats for logging.
        for path, dirs, files in os.walk(short_code):
            for file in files:
                file_name = (os.path.join(path, file)).replace("{}/".format(short_code), "")
                size_file = os.path.getsize("{0}/{1}".format(short_code, file_name))
                engine_files.append(file_name)
                total_file_size += size_file
                total_file_count += 1
        print(" START TIME: {}".format(time.asctime()))
        print(" - Files to upload: {}".format(str(total_file_count)))
        print(" - Total size to upload: {}MB".format(int(total_file_size/1024/1024)))
        for f in engine_files:
            if f == "index.html":
                # index.html must be served as text/html, so set ContentType
                # explicitly; every other file keeps the S3 default.
                s3.meta.client.upload_file(
                    Filename="{0}/{1}".format(short_code, f),
                    Bucket=bucket_name,
                    Key="ags/{0}/{1}/{2}/{3}".format(target_dir, short_code, version, f),
                    ExtraArgs={"ContentType": "text/html"}
                )
            else:
                s3.meta.client.upload_file(
                    Filename="{0}/{1}".format(short_code, f),
                    Bucket=bucket_name,
                    Key="ags/{0}/{1}/{2}/{3}".format(target_dir, short_code, version, f)
                )
        print(" FINISH TIME: {}".format(time.asctime()))
        return True
    except ClientError as err:
        print("*** Error during uploading to s3: {}".format(err))
        return False
def invalidate_s3() -> bool:
    """Invalidate the deployed version's path in the CloudFront distribution.

    Returns True when the invalidation request was accepted, False otherwise.
    """
    client = boto3.client('cloudfront')
    try:
        client.create_invalidation(
            DistributionId="E30T6SVV8C",
            InvalidationBatch={
                "Paths": {
                    "Quantity": 1,
                    "Items": [
                        "/ags/{0}/{1}/{2}/*".format(target_dir, short_code, version),
                    ]
                },
                # CallerReference must be unique per request; time.asctime()
                # only has one-second resolution, so use the float timestamp.
                "CallerReference": str(time.time())
            }
        )
        # Only report success when the API call actually succeeded — the
        # original printed this from a finally block, even after a failure.
        print("*** Data {0}/{1}/{2}/* was invalidated on s3.".format(target_dir, short_code, version))
        return True
    except ClientError as err:
        print("*** Error during invalidation: {}".format(err))
        return False
def get_url(action: str) -> str:
    """Build a backoffice URL: the clearCache endpoint for "clearCache",
    otherwise the game-resource endpoint for the current game_id."""
    if action == "clearCache":
        return "https://{0}/backoffice/{1}".format(backoffice_url, action)
    return "https://{0}/backoffice/games/{1}/".format(backoffice_url, game_id)
def request_data():
    """Point the backoffice at the freshly uploaded client and clear its cache."""
    headers = {"Authorization": "Basic 123asdluczo", # jenkins user pass from BO
               "Content-type": "application/json"
    }
    launch_address = "https://cdn.project.com/ags/{0}/{1}/{2}/index.html".format(target_dir, short_code, version)
    try:
        response_get = requests.get(get_url(game_id), headers=headers, verify=False) # verify=False, issue with ssl on NJ
        game_json = response_get.json()
        print("*** Changing Launch Adresses")
        # launch_address is already a text (str) object; the original wrapped
        # it in unicode(), which only exists on Python 2 and raises NameError
        # under Python 3.
        game_json["desktopLaunchAddress"] = launch_address
        game_json["mobileLaunchAddress"] = launch_address
        print(" - DesktopLaunchAddress: {}".format(game_json["desktopLaunchAddress"]))
        print(" - MobileLaunchAddress: {}".format(game_json["mobileLaunchAddress"]))
        response_put = requests.put(get_url(game_id), headers=headers, verify=False, data=json.dumps(game_json)) # verify=False, issue with ssl on NJ
        response_post = requests.post(get_url("clearCache"), headers=headers, verify=False) # verify=False, issue with ssl on NJ
        print("*** Clean Cache: status {}".format(response_post.status_code))
    except HTTPError as http_err:
        print("*** HTTP error occurred: {}".format(http_err))
    except Exception as err:
        print("*** Other error occurred: {}".format(err))
def main():
    """Run the full deployment pipeline in order:
    DB lookup -> download build -> record artifacts in devops DB ->
    patch game-config.json -> upload to CDN -> update backoffice ->
    invalidate CDN cache -> remove the local working copy."""
    get_db_data()
    download_from_s3()
    update_devops_data("app-{}.js".format(version))
    update_devops_data("index.html")
    modify_json()
    upload_to_s3()
    request_data()
    invalidate_s3()
    cleanup(short_code)
if __name__ == '__main__':
    main()
| vlad-solomai/viam_automation | automation_gambling/deploy_game_client/deploy_game_client.py | deploy_game_client.py | py | 8,482 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 20,
"usa... |
40696737073 | import asyncio
import importlib
from abc import ABC, abstractmethod
from functools import partial
from typing import Any, Awaitable, Callable, Dict, List, Union
# A single command parameter value: a scalar or a flat list of scalars.
ParamValueT = Union[str, int, float, bool, List[Union[str, int, float, bool]]]
# An executor coroutine: takes the parameter dict, returns the result dict.
ExecutorFuncT = Callable[[Dict[str, ParamValueT]], Awaitable[Dict[str, Any]]]
class CommandExecutionException(Exception):
    """Raised when a command is unknown or its parameters are malformed."""
    pass
class CommandExecutor(ABC):
    """
    Abstract class for command executors.

    Subclasses supply a dispatch table (command name -> coroutine) via
    get_command_dispatch(); execute_command() looks commands up there.
    """
    def __init__(
        self,
        config: Dict[str, Any],
        loop: asyncio.AbstractEventLoop,
    ) -> None:
        self._config = config
        self._loop = loop
    async def execute_command(
        self,
        command: str,
        params: Dict[str, ParamValueT],
    ) -> Dict[str, Any]:
        """
        Run the command from the dispatch table with params.

        Raises:
            CommandExecutionException: if *command* has no dispatch entry,
                or if the command takes parameters and *params* is not
                exactly {"shell_params": ...}.
        """
        cmd = self.get_command_dispatch().get(command)
        if not cmd:
            raise CommandExecutionException(f"no config for {command}")
        # A functools.partial whose last positional arg is truthy marks a
        # command that accepts caller-supplied shell parameters.
        # NOTE(review): this convention is set by whoever builds the dispatch
        # table — confirm against the concrete executor implementations.
        allow_params = isinstance(cmd, partial) and cmd.args[-1]
        if allow_params and list(params.keys()) != ["shell_params"]:
            raise CommandExecutionException("the parameters must be JSON with one key, 'shell_params'")
        result = await cmd(params)
        return result
    @abstractmethod
    def get_command_dispatch(self) -> Dict[str, ExecutorFuncT]:
        """
        Returns the command dispatch table for this command executor
        """
        pass
def get_command_executor_impl(service):
    """
    Instantiate the CommandExecutor subclass named in the service's
    generic_command_config (keys: 'module' and 'class').
    """
    config = service.config.get('generic_command_config', None)
    assert config is not None, 'generic_command_config not found'
    module_name = config.get('module', None)
    class_name = config.get('class', None)
    assert module_name is not None, 'generic command module not found'
    assert class_name is not None, 'generic command class not found'
    executor_cls = getattr(importlib.import_module(module_name), class_name)
    executor = executor_cls(service.config, service.loop)
    assert isinstance(executor, CommandExecutor), \
        'command_executor is not an instance of CommandExecutor'
    return executor
| magma/magma | orc8r/gateway/python/magma/magmad/generic_command/command_executor.py | command_executor.py | py | 2,311 | python | en | code | 1,605 | github-code | 6 | [
{
"api_name": "typing.Union",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number"... |
5759883314 | # -*- coding: utf-8 -*-
"""
Editor de Spyder
Este es un archivo temporal
"""
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm
#%%
# Two Gaussian blobs (20 points each) shifted to (-2,-2) and (+2,+2),
# labeled 0 and 1 -> a linearly separable toy dataset.
np.random.seed(5)
X = np.r_[np.random.randn(20,2)-[2,2],np.random.randn(20,2)+[2,2]]
Y = [0]*20+[1]*20
plt.scatter(X[:,0],X[:,1],c=Y)
plt.show()
#%% Classification model.
modelo = svm.SVC(kernel= 'linear')
#modelo = svm.SVC(kernel= 'poly', degree=2)
#modelo = svm.SVC(kernel= 'rbf')
modelo.fit(X,Y)
Yhat = modelo.predict(X)
#%% Draw the separating line and support vectors (only applies to the linear
# model; with the polynomial or Gaussian kernels coef_ is unavailable, so the
# boundary cannot be plotted this way).
W = modelo.coef_[0]
# Slope and intercept of the decision boundary w0*x + w1*y + b = 0.
m = -W[0]/W[1]
xx = np.linspace(-4,4)
yy = m*xx-(modelo.intercept_[0]/W[1])
VS = modelo.support_vectors_
plt.plot(xx,yy, 'k--')
plt.scatter(X[:,0],X[:,1],c=Y)
plt.scatter(VS[:,0],VS[:,1],s=80,facecolors='k')
plt.show()
| OscarFlores-IFi/CDINP19 | code/p18.py | p18.py | py | 902 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.randn"... |
8927043584 | from collections import OrderedDict
from concurrent import futures
import six
from nose import tools
from tornado import gen
from tornado import testing as tt
import tornado.concurrent
from flowz.artifacts import (ExtantArtifact, DerivedArtifact, ThreadedDerivedArtifact,
WrappedArtifact, TransformedArtifact, KeyedArtifact,
maybe_artifact)
from ..channels.util import raises_channel_done
class ArtifactsTest(tt.AsyncTestCase):
NAME = "Fooble"
NUM_ARR = [1, 2, 3, 4, 5]
NUM_DICT = {1: "one", 2: "two", 3: "three", 4: "four", 5: "five"}
@classmethod
def setUpClass(cls):
cls.NUM_ORDERED_DICT = OrderedDict([(i, cls.NUM_DICT[i]) for i in cls.NUM_ARR])
cls.NUM_REVERSED_DICT = OrderedDict([(i, cls.NUM_DICT[i]) for i in reversed(cls.NUM_ARR)])
# Possible getter/deriver/transform functions
@staticmethod
@gen.coroutine
def get_ordered_dict():
raise gen.Return(ArtifactsTest.NUM_ORDERED_DICT)
@staticmethod
def derive_ordered_dict(num_arr, num_dict):
return OrderedDict([(i, num_dict[i]) for i in num_arr])
@staticmethod
def transform_reversed_dict(orig_dict):
return OrderedDict([(i, orig_dict[i]) for i in reversed(orig_dict.keys())])
@staticmethod
def derive_value(key, dict_):
return dict_[key]
@staticmethod
def derive_key(dict_, value):
for (k, v) in six.iteritems(dict_):
if v == value:
return k
return None
@staticmethod
@gen.coroutine
def battery(artifact_maker, exp_value, exists_pre_get):
"""
A batter of tests to run against a particular artifact type
@param artifact_maker: a callable to build the artifact
@param exp_value: the expected value of getting the artifact
@param exists_pre_get: the expect value of calling exists() before calling get()
"""
artifact = artifact_maker()
tools.assert_true(ArtifactsTest.NAME in str(artifact))
tools.assert_equal(artifact.exists(), exists_pre_get)
tools.assert_true(artifact.ensure())
value = yield artifact.get()
tools.assert_equal(value, exp_value)
tools.assert_true(artifact.exists())
tools.assert_true(artifact.ensure())
@gen.coroutine
def check_channel(channel, exp_value):
"""
Validate a channel with one artifact in it
@param channel: the channel
@param exp_value: the expected value of the entry in the channel
"""
result = yield channel.start()
tools.assert_true(result)
obj = yield channel.next()
# the object might be an artifact or a direct value
val = yield maybe_artifact(obj)
tools.assert_equal(val, exp_value)
yield raises_channel_done(channel)
raise gen.Return(True)
yield check_channel(artifact_maker().as_channel(), exp_value)
yield check_channel(artifact_maker().value_channel(), exp_value)
yield check_channel(artifact_maker().ensure_channel(), True)
raise gen.Return(True)
@tt.gen_test
def test_extant_artifact(self):
maker = lambda: ExtantArtifact(self.get_ordered_dict, name=self.NAME)
yield self.battery(maker, self.NUM_ORDERED_DICT, True)
@tt.gen_test
def test_derived_artifact(self):
maker = lambda: DerivedArtifact(self.derive_ordered_dict, self.NUM_ARR,
self.NUM_DICT, name=self.NAME)
yield self.battery(maker, self.NUM_ORDERED_DICT, False)
@tt.gen_test
def test_threaded_derived_artifact(self):
executor = futures.ThreadPoolExecutor(1)
maker = lambda: ThreadedDerivedArtifact(executor, self.derive_ordered_dict,
self.NUM_ARR, self.NUM_DICT, name=self.NAME)
result = yield self.battery(maker, self.NUM_ORDERED_DICT, False)
@tt.gen_test
def test_wrapped_artifact(self):
maker = lambda: WrappedArtifact(DerivedArtifact(self.derive_ordered_dict,
self.NUM_ARR, self.NUM_DICT),
name=self.NAME)
yield self.battery(maker, self.NUM_ORDERED_DICT, False)
@tt.gen_test
def test_wrapped_artifact_getattr(self):
artifact = WrappedArtifact(DerivedArtifact(self.derive_ordered_dict,
self.NUM_ARR, self.NUM_DICT),
name=self.NAME)
# in a normal situation, getting attributes should work fine, passing the call
# onto the underlying value...
tools.assert_equal(self.derive_ordered_dict, getattr(artifact, 'deriver'))
# ...and throwing AttributeError if it didn't have the attribute
tools.assert_raises(AttributeError, getattr, artifact, 'phamble')
# If you had not yet set a value attribute on the artifact, though...
delattr(artifact, 'value')
# ...this used to infinitely recurse until Python complained.
# But now it should return a proper AttributeError
tools.assert_raises(AttributeError, getattr, artifact, 'deriver')
@tt.gen_test
def test_transformed_artifact(self):
# Try with an ExtantArtifact
maker = lambda: TransformedArtifact(ExtantArtifact(self.get_ordered_dict),
self.transform_reversed_dict, name=self.NAME)
yield self.battery(maker, self.NUM_REVERSED_DICT, True)
# Try with a DerivedArtifact
maker = lambda: TransformedArtifact(DerivedArtifact(self.derive_ordered_dict,
self.NUM_ARR, self.NUM_DICT),
self.transform_reversed_dict, name=self.NAME)
yield self.battery(maker, self.NUM_REVERSED_DICT, False)
@tt.gen_test
def test_keyed_artifact(self):
key = 1
maker = lambda: KeyedArtifact(key,
DerivedArtifact(self.derive_value, key, self.NUM_DICT),
name=self.NAME)
yield self.battery(maker, 'one', False)
artifact = maker()
tools.assert_equal(artifact[0], key)
tools.assert_equal(artifact[1], artifact)
tools.assert_equal(artifact['key'], key)
tools.assert_raises(KeyError, artifact.__getitem__, 'spaz')
for (a,b) in zip((key, artifact), iter(artifact)):
tools.assert_equal(a, b)
@tt.gen_test
def test_keyed_artifact_transform(self):
key = 1
artifact = KeyedArtifact(key, DerivedArtifact(self.derive_value, key, self.NUM_DICT))
artifact2 = artifact.transform(self.derive_key, self.NUM_DICT)
key2 = yield artifact2.get()
tools.assert_equal(key, key2)
tools.assert_is_instance(artifact2, KeyedArtifact)
@tt.gen_test
def test_keyed_artifact_threaded_transform(self):
executor = futures.ThreadPoolExecutor(1)
key = 1
artifact = KeyedArtifact(key, DerivedArtifact(self.derive_value, key, self.NUM_DICT))
artifact2 = artifact.threaded_transform(executor, self.derive_key, self.NUM_DICT)
key2 = yield artifact2.get()
tools.assert_equal(key, key2)
tools.assert_is_instance(artifact2, KeyedArtifact)
@tt.gen_test
def test_maybe_artifact(self):
# prove that both artifacts and non-artifacts result in futures
key = 1
artifact = DerivedArtifact(self.derive_value, key, self.NUM_DICT)
future1 = maybe_artifact(artifact)
tools.assert_is_instance(future1, tornado.concurrent.Future)
future2 = maybe_artifact('one')
tools.assert_is_instance(future2, tornado.concurrent.Future)
val1 = yield future1
val2 = yield future2
tools.assert_equal(val1, val2)
# Make sure that just having a "get" function isn't enough to be an artifact!
dict_ = {1: 'one'}
tools.assert_true(hasattr(dict_, 'get'))
future3 = maybe_artifact(dict_)
val3 = yield future3
tools.assert_equal(val3, dict_)
| ethanrowe/flowz | flowz/test/artifacts/artifacts_test.py | artifacts_test.py | py | 8,314 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "tornado.testing.AsyncTestCase",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "tornado.testing",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 25,
"usage_type": "call"
},
{
"api_na... |
37588584638 | from sqlalchemy import TypeDecorator
from sqlalchemy.types import VARCHAR
from sqlalchemy import dialects
from sqlalchemy.dialects import postgresql, mysql
import json
from typing import Union, Optional
# Column-type descriptors load_dialect_impl can hand back.
# NOTE(review): postgresql.UUID looks copied from a UUID type decorator —
# load_dialect_impl below actually returns JSON/VARCHAR descriptors; confirm.
DialectType = Union[postgresql.UUID, VARCHAR]
# Python-side values this column type accepts: a dict, a pre-serialized
# string, or None.
ValueType = Optional[Union[dict, str]]
class JSON(TypeDecorator):
    """Portable JSON column type.

    Uses the dialect's native JSON type on PostgreSQL and on MySQL builds
    that support it, and falls back to a large VARCHAR holding the
    serialized JSON text everywhere else.
    """
    impl = VARCHAR
    # Size of the VARCHAR fallback column.
    _MAX_VARCHAR_LIMIT = 100000
    def load_dialect_impl(self, dialect: dialects) -> DialectType:
        """Pick the most capable storage type the dialect offers."""
        if dialect.name == 'postgresql':
            return dialect.type_descriptor(postgresql.JSON())
        elif dialect.name == 'mysql':
            # Older MySQL/MariaDB versions lack a JSON type; detect it via
            # the dialect's schema-type registry.
            if 'JSON' in dialect.ischema_names:
                return dialect.type_descriptor(mysql.JSON())
            else:
                return dialect.type_descriptor(
                    VARCHAR(self._MAX_VARCHAR_LIMIT)
                )
        else:
            return dialect.type_descriptor(VARCHAR(self._MAX_VARCHAR_LIMIT))
    def process_bind_param(self, value: ValueType, dialect: dialects) -> Optional[str]:
        """Serialize the Python value to JSON text on the way in (None passes through)."""
        if value is None:
            return value
        else:
            return json.dumps(value)
    def process_result_value(self, value: Optional[str], dialect: dialects) -> Optional[dict]:
        """Deserialize stored JSON text on the way out (None passes through)."""
        if value is None:
            return value
        else:
            return json.loads(value)
    def copy(self, *args, **kwargs) -> 'JSON':
        return JSON(*args, **kwargs)
| infrascloudy/gandalf | gandalf/database/json_type.py | json_type.py | py | 1,390 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Union",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.dialects.postgresql.UUID",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sqlalchemy.dialects.postgresql",
"line_number": 8,
"usage_type": "name"
},
{
... |
20600597111 | import functools
import os
import google.protobuf.json_format
from synthtool.protos.preconfig_pb2 import Preconfig
PRECONFIG_ENVIRONMENT_VARIABLE = "SYNTHTOOL_PRECONFIG_FILE"
PRECONFIG_HELP = """
A json file containing a description of prefetch sources that this synth.py may
us. See preconfig.proto for detail about the format.
"""
@functools.lru_cache(maxsize=None)
def load():
    """Loads the preconfig file specified in an environment variable.

    The result is cached, so the file is read at most once per process.

    Returns:
        An instance of Preconfig
    """
    preconfig_file_path = os.environ.get(PRECONFIG_ENVIRONMENT_VARIABLE)
    if not preconfig_file_path:
        return Preconfig()
    # Protobuf JSON is UTF-8 by spec; don't rely on the platform default
    # encoding when reading the file.
    with open(preconfig_file_path, "rt", encoding="utf-8") as json_file:
        return google.protobuf.json_format.Parse(json_file.read(), Preconfig())
| googleapis/google-cloud-java | owl-bot-postprocessor/synthtool/preconfig.py | preconfig.py | py | 777 | python | en | code | 1,781 | github-code | 6 | [
{
"api_name": "os.environ.get",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "synthtool.protos.preconfig_pb2.Preconfig",
"line_number": 25,
"usage_type": "call"
},
{
"api_name"... |
4369034891 | # coding: utf-8
import pandas as pd
import xgboost as xgb
from sklearn.preprocessing import LabelEncoder
import numpy as np
train_df = pd.read_csv('../data/train.csv')
test_df = pd.read_csv('../data/test.csv')
# Fill missing values: use the median for numeric columns and the mode for string columns
from sklearn.base import TransformerMixin
class DataFrameImputer(TransformerMixin):
    """Impute NaNs column-wise: the most frequent value for object-dtype
    (string) columns, the median for everything else."""
    def fit(self, X, y=None):
        # value_counts() sorts descending, so index[0] is the column's mode.
        self.fill = pd.Series([X[c].value_counts().index[0]
            if X[c].dtype == np.dtype('O') else X[c].median() for c in X],
            index=X.columns)
        return self
    def transform(self, X, y=None):
        # fillna with a Series fills each column from its matching entry.
        return X.fillna(self.fill)
# Engineered feature: family size = parents/children + siblings/spouses aboard.
train_df['Family'] = train_df['Parch'] + train_df['SibSp']
test_df['Family'] = test_df['Parch'] + test_df['SibSp']
feature_columns_to_use = ['Pclass', 'Age', 'Sex', 'Fare', 'Family', 'Embarked']
nonnumeric_columns = ['Sex', 'Embarked']
# DataFrame.append was removed in pandas 2.0; pd.concat is the equivalent.
big_X = pd.concat([train_df[feature_columns_to_use], test_df[feature_columns_to_use]])
big_X_Imputed = DataFrameImputer().fit_transform(big_X)
# Encode the categorical columns as integers for XGBoost.
le = LabelEncoder()
for feature in nonnumeric_columns:
    big_X_Imputed[feature] = le.fit_transform(big_X_Imputed[feature])
# .as_matrix() was removed in pandas 1.0; .to_numpy() is the replacement.
X_train = big_X_Imputed[0:train_df.shape[0]].to_numpy()
Y_train = train_df['Survived']
X_test = big_X_Imputed[train_df.shape[0]:].to_numpy()
# Train a gradient-boosted classifier and score it on the training set.
gbm = xgb.XGBClassifier(max_depth=3, n_estimators=300, learning_rate=0.05)
gbm.fit(X_train, Y_train)
Y_pred = gbm.predict(X_test)
print(gbm.score(X_train, Y_train))
submission = pd.DataFrame({
    'PassengerId': test_df['PassengerId'],
    "Survived": Y_pred
})
submission.to_csv('../submission/submission_7.csv', index=False)
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pandas.... |
12998412388 | import uuid
from django.db import models
from django.conf import settings
# Reference the configured user model indirectly, per Django convention.
User = settings.AUTH_USER_MODEL
# Create your models here.
class PlanCharge(models.Model):
    """Billing charge record linking a pricing tier and charge id to a user."""
    id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
    tier = models.IntegerField()
    charge_id = models.CharField(max_length=100)
    created_at = models.DateTimeField(auto_now_add=True)
    modified_at = models.DateTimeField(auto_now=True)
    user = models.ForeignKey(User, related_name='plans', on_delete=models.CASCADE)

    def __str__(self):
        # On Python 3 Django calls __str__; the original __unicode__ is a
        # Python-2-only hook and was never invoked.
        return str(self.charge_id)

    # Retained alias for any legacy Python 2 callers.
    __unicode__ = __str__
{
"api_name": "django.conf.settings.AUTH_USER_MODEL",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
... |
42124061830 | import requests
import os
from django.http import HttpResponse
from django.conf import settings
class ProductClient:
    """Thin HTTP client for the product-management service.

    The base URL is resolved once in __init__ and stored in the module-level
    ``host`` (kept global for backward compatibility with existing readers).
    """
    global host

    def __init__(self):
        global host
        print("came inside product constructor")
        env_host = os.getenv("PRODUCT_HOST")
        # Bug fix: os.getenv returns None when the variable is unset, and the
        # old check `!= ""` treated None as a valid host. Truthiness covers
        # both the unset (None) and empty-string cases.
        if env_host:
            host = env_host
        elif settings.PRODUCT_HOST == "":
            host = "http://google.com"
        else:
            host = settings.PRODUCT_HOST

    def getAllProducts(self):
        """Fetch all products; returns the raw requests.Response."""
        global host
        print("Call all products api")
        fullUrl = host + "/productmanagement/v1/products/all"
        print("url is:" + fullUrl)
        response = requests.get(fullUrl)
        print(response.content)
        return response
| Robinrrr10/storeorderui | client/productClient.py | productClient.py | py | 738 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.getenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.PRODUCT_HOST",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "django.con... |
29778129362 | from flask import Flask, render_template, request, url_for
import y_u_so_stupid as fle
import json
app = Flask(__name__)
# Mutable module-level game state, shared across all requests.
correctAnswer = ''
score = 0
highscore = 0
@app.route('/')
def play():
    """Fetch a random question and render the quiz page with the current score."""
    global correctAnswer
    payload = json.loads(fle.getRandomQuestion())
    # remember the right answer for the next POST
    correctAnswer = payload['answer']
    return render_template('index.html',
                           question=payload['question'],
                           choices=payload['choices'],
                           score=score)
@app.route('/', methods=['POST'])
def game():
    """Check the submitted answer; advance the score or end the round."""
    global score, highscore
    submitted = request.form['answer']
    if submitted == correctAnswer:
        score += 10
        return play()
    # wrong answer: record a new high score if reached, then show the fail page
    highscore = max(highscore, score)
    return fail()
def fail():
    """Show the game-over page and reset the running score.

    Note: this view previously carried a second @app.route('/') decorator,
    duplicating play()'s rule; it was never reachable through routing and
    fail() is only ever called directly from game(), so the decorator was
    removed.
    """
    global score
    currScore = score
    score = 0
    return render_template('fail.html',
                           currScore=currScore,
                           highscore=highscore,
                           correctAnswer=correctAnswer)
if __name__ == '__main__':
    # Run the Flask development server when executed directly.
    app.run()
| asav13/PRLA-Verk5 | part2/y_u_so_stupid_SERVER.py | y_u_so_stupid_SERVER.py | py | 1,208 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "y_u_so_stupid.getRandomQuestion",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "flask.render_te... |
49613121 | from collections import deque, defaultdict
import random
class RandomizedSet:
    """Set supporting insert, remove and getRandom in average O(1).

    Values live in a list (``vec``) for O(1) random access, while
    ``hash_map`` maps each value to its index so removal can swap the
    target with the last element.
    """

    def __init__(self):
        # A list, not a deque: indexing a CPython deque is O(n), which
        # defeated the O(1) purpose of getRandom in the original.
        self.vec = []
        self.hash_map = {}

    def insert(self, val: int) -> bool:
        """Add val; return False when it is already present."""
        if val in self.hash_map:
            return False
        self.vec.append(val)
        self.hash_map[val] = len(self.vec) - 1
        return True

    def remove(self, val: int) -> bool:
        """Remove val; return False when it is absent."""
        if val not in self.hash_map:
            return False
        idx = self.hash_map[val]
        last_val = self.vec[-1]
        # Move the last element into the vacated slot, then shrink.
        self.vec[idx] = last_val
        self.vec.pop()
        # NOTE: update last_val's index before deleting val's entry —
        # they are the same key when val is the last element.
        self.hash_map[last_val] = idx
        del self.hash_map[val]
        return True

    def getRandom(self) -> int:
        """Return a uniformly random stored value (assumes non-empty set)."""
        return random.choice(self.vec)
if __name__ == "__main__":
    # Smoke test exercising insert/remove/getRandom.
    obj = RandomizedSet()
    assert obj.insert(1)
    assert not obj.remove(2)
    assert obj.insert(2)
    print(obj.getRandom())
    assert obj.remove(1)
    assert not obj.insert(2)
    assert obj.getRandom() == 2
{
"api_name": "collections.deque",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 35,
"usage_type": "call"
}
] |
3954866418 | # -*- coding: utf-8 -*-
"""View module for the prilog application.

* Defines the Flask view functions and runs the Flask app.
"""
from glob import glob
from flask import Flask, render_template, request, session, redirect, jsonify
import os
import re
import json
import urllib.parse
import subprocess
import time as tm
import analyze as al
import common as cm
import state_list as state
import configparser
config = configparser.ConfigParser()
config.read("config.ini")
# Raw strings from config.ini; the views below rely on their truthiness
# (non-empty string == enabled). DL_INTERVAL is the download spacing in use.
SERVER_ERROR_STATE = config.get("general", "error_state")
SERVER_TOKEN_AUTH = config.get("general", "token_auth")
MULTI_SERVER = config.get("rest", "multi_server")
DL_INTERVAL = int(config.get("rest", "interval"))
# movie download directory
stream_dir = "tmp/"
# analyze results are cached here
cache_dir = "cache/"
# root for download bookkeeping files
download_dir = "download/"
# ids waiting to download
dl_queue_dir = "download/queue/"
# ids currently downloading
dl_ongoing_dir = "download/ongoing/"
# ids with a pending download slot
dl_pending_dir = "download/pending/"
# per-server download markers
dl_server_dir = "download/server/"
# ids waiting for analysis
queue_dir = "queue/"
# ids currently being analyzed
pending_dir = "pending/"
# api token files
token_dir = "token/"
# Create every working directory up front. makedirs(exist_ok=True) replaces
# the repetitive os.path.exists()/os.mkdir() pairs and avoids their
# check-then-create race condition.
for _work_dir in (stream_dir, cache_dir, download_dir, dl_queue_dir,
                  dl_ongoing_dir, dl_pending_dir, dl_server_dir,
                  queue_dir, pending_dir, token_dir):
    os.makedirs(_work_dir, exist_ok=True)
def get_web_txt(youtube_id, title, time_line, debuff_value, total_damage):
    """Build the share text/URL pieces for the web result page.

    Returns (debuff_dict, data_txt, data_url, total_damage) where
    data_txt uses %0a as the URL-encoded newline separator.
    """
    debuff_dict = dict(zip(time_line, debuff_value)) if debuff_value else None
    data_url = "https://prilog.jp/?v=" + youtube_id
    parts = ["@PriLog_Rより", title]
    if total_damage:
        # fold the damage digits into a single display string
        total_damage = "総ダメージ " + "".join(total_damage)
        parts.append(total_damage)
    data_txt = "%0a".join(parts) + "%0a"
    return debuff_dict, data_txt, data_url, total_damage
def get_rest_result(title, time_line, time_line_enemy, time_data, total_damage, debuff_value):
    """Assemble the REST response payload.

    The *_txt fields are CRLF-joined display strings, or False when the
    corresponding data is missing.
    """
    rest_result = {
        "title": title,
        "timeline": time_line,
        "timeline_enemy": time_line_enemy,
        "process_time": time_data,
        "total_damage": total_damage,
        "debuff_value": debuff_value,
    }
    if not time_line:
        rest_result["timeline_txt"] = False
        rest_result["timeline_txt_enemy"] = False
        rest_result["timeline_txt_debuff"] = False
        return rest_result
    rest_result["timeline_txt"] = "\r\n".join(time_line)
    rest_result["timeline_txt_enemy"] = "\r\n".join(time_line_enemy) if time_line_enemy else False
    if debuff_value:
        # prefix each timeline entry with its right-aligned debuff value
        debuff_lines = [
            "↓{} {}".format(str(debuff_value[idx][0:]).rjust(3, " "), entry)
            for idx, entry in enumerate(time_line)
        ]
        rest_result["timeline_txt_debuff"] = "\r\n".join(debuff_lines)
    else:
        rest_result["timeline_txt_debuff"] = False
    return rest_result
app = Flask(__name__)
app.config.from_object(__name__)
# NOTE(review): the session secret key is hardcoded in source; consider
# loading it from the environment or config.ini instead.
app.config["SECRET_KEY"] = "zJe09C5c3tMf5FnNL09C5e6SAzZuY"
app.config["JSON_AS_ASCII"] = False
@app.route("/", methods=["GET", "POST"])
def index():
    """Top page.

    POST: accept a YouTube URL, serve the cached result if one exists,
    otherwise queue the download and show the analyze page.
    GET with ?v=<id>: same flow keyed by the raw video id.
    Plain GET: clean up any leftover session state and show the form.
    """
    if request.method == "POST":
        url = (request.form["Url"])
        # extract the video id part from the url
        youtube_id = al.get_youtube_id(url)
        if youtube_id is False:
            error = state.get_error_message(state.ERR_BAD_URL)
            return render_template("index.html", error=error)
        cache = cm.cache_check(youtube_id)
        if cache:
            title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = cache
            # status % 100 // 10 == 0 marks a successful past analysis
            if past_status % 100 // 10 == 0:
                debuff_dict, data_txt, data_url, total_damage = get_web_txt(youtube_id, title,
                                                                            time_line, debuff_value, total_damage)
                return render_template("result.html", title=title, timeLine=time_line, timeLineEnemy=time_line_enemy,
                                       timeData=time_data, totalDamage=total_damage, debuffDict=debuff_dict,
                                       data_txt=data_txt, data_url=data_url)
            else:
                error = state.get_error_message(past_status)
                return render_template("index.html", error=error)
        if SERVER_ERROR_STATE:
            error = state.get_error_message(state.ERR_SERVICE_UNAVAILABLE)
            return render_template("index.html", error=error)
        # start download
        dl_queue_path = dl_queue_dir + str(youtube_id)
        dl_ongoing_path = dl_ongoing_dir + str(youtube_id)
        # check whether this id is already registered in the download queue
        queued = os.path.exists(dl_queue_path)
        if not queued:  # not waiting yet: register in the download queue
            cm.queue_append(dl_queue_path)
            # poll until our queue entry is due, then run the download
            while True:
                if not cm.is_path_exists(dl_ongoing_path) and cm.is_path_due(dl_queue_path):
                    if cm.is_pending_download(DL_INTERVAL):  # check pending download
                        break
                timeout = cm.watchdog_download(youtube_id, 300)  # 5-minute timeout watchdog
                if timeout:
                    cm.clear_path(dl_queue_path)
                    error = "動画の解析待ちでタイムアウトが発生しました。再実行をお願いします。"
                    return render_template("index.html", error=error)
                tm.sleep(1)
        else:  # already downloading: show an error message
            cm.clear_path(dl_queue_path)
            error = "同一の動画が解析中です。時間を置いて再実行をお願いします。"
            return render_template("index.html", error=error)
        path, title, length, thumbnail, url_result = al.search(youtube_id)
        cm.clear_path(dl_queue_path)
        # status class 2 (url_result % 100 // 10 == 2) means the lookup failed
        if url_result % 100 // 10 == 2:
            error = state.get_error_message(url_result)
            cm.save_cache(youtube_id, title, False, False, False, False, False, url_result)
            return render_template("index.html", error=error)
        # stash the downloaded movie info for /analyze
        session["path"] = path
        session["title"] = title
        session["youtube_id"] = youtube_id
        # rough progress-bar duration estimate from the movie length
        length = int(int(length) / 8) + 3
        return render_template("analyze.html", title=title, length=length, thumbnail=thumbnail)
    elif request.method == "GET":
        if "v" in request.args:  # GET of the form ?v=YoutubeID: return the result page
            youtube_id = request.args.get("v")
            if re.fullmatch(r"^([a-zA-Z0-9_-]{11})$", youtube_id):
                cache = cm.cache_check(youtube_id)
                if cache:
                    title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = cache
                    if past_status % 100 // 10 == 0:
                        debuff_dict, data_txt, data_url, total_damage = get_web_txt(youtube_id, title,
                                                                                    time_line, debuff_value,
                                                                                    total_damage)
                        return render_template("result.html", title=title, timeLine=time_line,
                                               timeLineEnemy=time_line_enemy,
                                               timeData=time_data, totalDamage=total_damage, debuffDict=debuff_dict,
                                               data_txt=data_txt, data_url=data_url)
                    else:
                        error = state.get_error_message(past_status)
                        return render_template("index.html", error=error)
                else:  # no cache exists: run the analysis (mirrors the POST branch)
                    if SERVER_ERROR_STATE:
                        error = state.get_error_message(state.ERR_SERVICE_UNAVAILABLE)
                        return render_template("index.html", error=error)
                    # start download
                    dl_queue_path = dl_queue_dir + str(youtube_id)
                    dl_ongoing_path = dl_ongoing_dir + str(youtube_id)
                    # check whether this id is already registered in the download queue
                    queued = os.path.exists(dl_queue_path)
                    if not queued:  # not waiting yet: register in the download queue
                        cm.queue_append(dl_queue_path)
                        # poll until our queue entry is due, then run the download
                        while True:
                            if not cm.is_path_exists(dl_ongoing_path) and cm.is_path_due(dl_queue_path):
                                if cm.is_pending_download(DL_INTERVAL):  # check pending download
                                    break
                            timeout = cm.watchdog_download(youtube_id, 300)  # 5-minute timeout watchdog
                            if timeout:
                                cm.clear_path(dl_queue_path)
                                error = "動画の解析待ちでタイムアウトが発生しました。再実行をお願いします。"
                                return render_template("index.html", error=error)
                            tm.sleep(1)
                    else:  # already downloading: show an error message
                        cm.clear_path(dl_queue_path)
                        error = "同一の動画が解析中です。時間を置いて再実行をお願いします。"
                        return render_template("index.html", error=error)
                    path, title, length, thumbnail, url_result = al.search(youtube_id)
                    cm.clear_path(dl_queue_path)
                    if url_result % 100 // 10 == 2:
                        error = state.get_error_message(url_result)
                        cm.save_cache(youtube_id, title, False, False, False, False, False, url_result)
                        return render_template("index.html", error=error)
                    session["path"] = path
                    session["title"] = title
                    session["youtube_id"] = youtube_id
                    length = int(int(length) / 8) + 3
                    return render_template("analyze.html", title=title, length=length, thumbnail=thumbnail)
            else:  # request does not match prilog.jp/(YoutubeID)
                error = "不正なリクエストです"
                return render_template("index.html", error=error)
        else:
            # plain GET: clear any leftover session state from an aborted run
            path = session.get("path")
            session.pop("path", None)
            session.pop("title", None)
            session.pop("youtube_id", None)
            error = None
            # a purely numeric "path" is actually a status code from /analyze
            if str(path).isdecimal():
                error = state.get_error_message(path)
            elif path is not None:
                cm.clear_path(path)
            return render_template("index.html", error=error)
@app.route("/analyze", methods=["GET", "POST"])
def analyze():
    """Analyze the movie recorded in the session and stash the results.

    On success the timeline data is stored in the session for /result;
    on failure the error status is carried back through session["path"].
    """
    path = session.get("path")
    title = session.get("title")
    youtube_id = session.get("youtube_id")
    session.pop("path", None)
    if request.method != "GET" or path is None:
        return redirect("/")
    # run the timeline analysis
    time_line, time_line_enemy, time_data, total_damage, debuff_value, status = al.analyze_movie(path)
    # persist the result cache
    status = cm.save_cache(youtube_id, title, time_line, time_line_enemy, False, total_damage, debuff_value, status)
    if status % 100 // 10 == 0:
        # analysis finished normally: store the results for /result
        session["time_line"] = time_line
        session["time_line_enemy"] = time_line_enemy
        session["time_data"] = time_data
        session["total_damage"] = total_damage
        session["debuff_value"] = debuff_value
    else:
        # failure: hand the status code back through "path"
        session["path"] = status
    return render_template("analyze.html")
@app.route("/result", methods=["GET", "POST"])
def result():
    """Render the analysis results stashed in the session by /analyze."""
    keys = ("title", "time_line", "time_line_enemy", "time_data",
            "total_damage", "debuff_value", "youtube_id")
    values = {key: session.get(key) for key in keys}
    # one-shot: consume the session entries whether or not we can render
    for key in keys:
        session.pop(key, None)
    if request.method != "GET" or values["time_line"] is None:
        return redirect("/")
    debuff_dict, data_txt, data_url, total_damage = get_web_txt(
        values["youtube_id"], values["title"], values["time_line"],
        values["debuff_value"], values["total_damage"])
    return render_template("result.html", title=values["title"], timeLine=values["time_line"],
                           timeLineEnemy=values["time_line_enemy"],
                           timeData=values["time_data"], totalDamage=total_damage, debuffDict=debuff_dict,
                           data_txt=data_txt, data_url=data_url)
@app.route("/download", methods=["GET", "POST"])
def download():
    """Serve the standalone download page; non-GET requests go home."""
    if request.method != "GET":
        return redirect("/")
    return render_template("download.html")
@app.route("/rest", methods=["GET", "POST"])
def rest():
    """Serve the REST API documentation page; non-GET requests go home."""
    if request.method != "GET":
        return redirect("/")
    return render_template("rest.html")
@app.route("/standalone/version", methods=["GET"])
def standalone_version():
    """Report the newest standalone release and whether the client is stale.

    The route only accepts GET, so the original `if request.method == "GET"`
    guard and its unreachable else branch were removed.
    """
    ret = {"version": "", "update": False}
    fl = glob("./static/release/*")
    if not fl:
        return jsonify(ret)
    # newest release file by creation time; max() avoids sorting the whole list
    latest = max(fl, key=os.path.getctime)
    version = os.path.basename(latest)
    ret["version"] = version
    # NOTE(review): this is a lexicographic string comparison of version
    # names — confirm release filenames sort correctly as strings.
    if "Version" in request.args and request.args.get("Version") < version:
        ret["update"] = True
    return jsonify(ret)
@app.route("/rest/analyze", methods=["POST", "GET"])
def rest_analyze():
    """REST endpoint: analyze a YouTube movie and return the timeline as JSON.

    Accepts Url (and, when token auth is enabled, Token) via POST form or
    GET query args. Serves cached results when available; otherwise
    coordinates with other requests through queue/pending marker files and
    spawns a child process to run the analysis.
    """
    status = state.ERR_REQ_UNEXPECTED
    is_parent = False
    rest_result = {}
    ret = {}
    url = ""
    raw_url = ""
    token = ""
    # clear old movie if passed 2 hours
    cm.tmp_movie_clear()
    if request.method == "POST":
        if "Url" not in request.form:
            status = state.ERR_BAD_REQ
            ret["result"] = rest_result
            ret["msg"] = state.get_error_message(status)
            ret["status"] = status
            return jsonify(ret)
        else:
            raw_url = request.form["Url"]
        if SERVER_TOKEN_AUTH and "Token" not in request.form:
            status = state.ERR_BAD_REQ
            ret["result"] = rest_result
            ret["msg"] = state.get_error_message(status)
            ret["status"] = status
            return jsonify(ret)
        else:
            # NOTE(review): when SERVER_TOKEN_AUTH is disabled this else still
            # runs and raises KeyError if "Token" is absent — confirm intent.
            token = request.form["Token"]
    elif request.method == "GET":
        if "Url" not in request.args:
            status = state.ERR_BAD_REQ
            ret["result"] = rest_result
            ret["msg"] = state.get_error_message(status)
            ret["status"] = status
            return jsonify(ret)
        else:
            raw_url = request.args.get("Url")
        if SERVER_TOKEN_AUTH and "Token" not in request.args:
            status = state.ERR_BAD_REQ
            ret["result"] = rest_result
            ret["msg"] = state.get_error_message(status)
            ret["status"] = status
            return jsonify(ret)
        else:
            token = request.args.get("Token")
    try:
        # verify and load the token file
        if SERVER_TOKEN_AUTH:
            json.load(open(token_dir + urllib.parse.quote(token) + ".json"))
    except FileNotFoundError:
        status = state.ERR_BAD_TOKEN
        ret["result"] = rest_result
        ret["msg"] = state.get_error_message(status)
        ret["status"] = status
        return jsonify(ret)
    # extract the host part of the URL and accept only YouTube hosts
    tmp_group = re.search('(?:https?://)?(?P<host>.*?)(?:[:#?/@]|$)', raw_url)
    if tmp_group:
        host = tmp_group.group('host')
        if host == "www.youtube.com" or host == "youtu.be":
            url = raw_url
    # check the cache
    youtube_id = al.get_youtube_id(url)
    queue_path = queue_dir + str(youtube_id)
    pending_path = pending_dir + str(youtube_id)
    dl_queue_path = dl_queue_dir + str(youtube_id)
    if youtube_id is False:
        # invalid url
        status = state.ERR_BAD_URL
    else:
        # valid url
        cache = cm.cache_check(youtube_id)
        if cache:
            # cache hit
            # return the cached result
            title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = cache
            if past_status % 100 // 10 == 0:
                rest_result = get_rest_result(title, time_line, time_line_enemy, time_data, total_damage, debuff_value)
                ret["result"] = rest_result
                ret["msg"] = state.get_error_message(past_status)
                ret["status"] = past_status
                return jsonify(ret)
            else:
                ret["result"] = rest_result
                ret["msg"] = state.get_error_message(past_status)
                ret["status"] = past_status
                return jsonify(ret)
        if SERVER_ERROR_STATE:
            ret["result"] = rest_result
            ret["msg"] = state.get_error_message(state.ERR_SERVICE_UNAVAILABLE)
            ret["status"] = state.ERR_SERVICE_UNAVAILABLE
            return jsonify(ret)
        # start analyze
        # check whether this id is already registered in the analyze queue
        queued = os.path.exists(queue_path)
        if not queued:  # not being analyzed yet: register in the analyze queue
            cm.queue_append(queue_path)
            # wait for our turn, then launch the analysis subprocess
            while True:
                cm.watchdog(youtube_id, is_parent, 1800, state.TMP_QUEUE_TIMEOUT)
                rest_pending = cm.is_path_exists(pending_path)
                rest_queue = cm.is_path_due(queue_path)
                web_download = cm.is_path_exists(dl_queue_path)
                if not rest_pending and rest_queue and not web_download:
                    if cm.is_pending_download(DL_INTERVAL):  # check pending download
                        if not MULTI_SERVER:
                            analyzer_path = f'python exec_analyze.py {url}'
                            cm.pending_append(pending_path)
                            subprocess.Popen(analyzer_path.split())
                            is_parent = True
                        else:
                            analyzer_path = f'python multi_exec_analyze.py {url}'
                            cm.pending_append(pending_path)
                            subprocess.Popen(analyzer_path.split())
                            is_parent = True
                        break
                tm.sleep(1)
        while True:  # watch until the queue entry disappears
            queued = os.path.exists(queue_path)
            if queued:
                if is_parent:
                    # parent process: watch the pending marker
                    cm.watchdog(youtube_id, is_parent, 300, state.TMP_ANALYZE_TIMEOUT)
                else:
                    # child (non-spawning) request: watch the queue marker
                    cm.watchdog(youtube_id, is_parent, 2160, state.TMP_QUEUE_TIMEOUT)
                tm.sleep(1)
                continue
            else:  # analysis finished: return its cached JSON
                cache = cm.queue_cache_check(youtube_id)
                if cache:
                    title, time_line, time_line_enemy, time_data, total_damage, debuff_value, past_status = cache
                    rest_result = get_rest_result(title, time_line, time_line_enemy, time_data, total_damage,
                                                  debuff_value)
                    status = past_status
                    break
                else:  # cache was never produced
                    # should be impossible: the cache is written before the
                    # queue entry is removed
                    status = state.TMP_UNEXPECTED
                    break
    ret["result"] = rest_result
    ret["msg"] = state.get_error_message(status)
    ret["status"] = status
    return jsonify(ret)
if __name__ == "__main__":
    # Run the Flask development server when executed directly.
    app.run()
| Neilsaw/PriLog_web | app.py | app.py | py | 21,596 | python | en | code | 30 | github-code | 6 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 33,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
... |
75093396986 | from pubnub.callbacks import SubscribeCallback
from pubnub.enums import PNStatusCategory
from pubnub.pnconfiguration import PNConfiguration
from pubnub.pubnub import PubNub
from pprint import pprint
from dotenv import load_dotenv
import os
# Event name broadcast when a recording has finished uploading.
EVENT_UPLOADED_MESSAGE = "message_uploaded"
load_dotenv()
# Per-device identity read from the .env file.
UUID = os.getenv("uuid")
print("UUID desde dotenv es->")
print(UUID)
pnconfig = PNConfiguration()
# NOTE(review): publish/subscribe keys are hardcoded in source; consider
# moving them to the .env file alongside the uuid.
pnconfig.subscribe_key = "sub-c-5640dcb4-620c-11ea-9a99-f2f107c29c38"
pnconfig.publish_key = "pub-c-3c259a14-9e90-49f0-bf85-03615209e485"
pnconfig.uuid = UUID
class PubNubClient:
    """Subscribes to the shared PubNub channel and broadcasts upload events."""
    display_controller = None

    # PubNub configurations
    class NewMessageSubscribeCallback(SubscribeCallback):
        """Listener that refreshes the message counter when an upload is announced."""

        def __init__(self, firebase_client, drecorder, display_controller):
            self.firebase_client = firebase_client
            self.display_controller = display_controller

        def status(self, pubnub, status):
            pass

        def presence(self, pubnub, presence):
            pprint(presence.__dict__)

        def message(self, pubnub, message):
            print('\n')
            print('message from pubnub received')
            print('\n')
            # Compare against the module constant instead of duplicating the
            # "message_uploaded" literal (the original repeated it inline).
            if message.__dict__["message"]["content"] == EVENT_UPLOADED_MESSAGE:
                num_messages = self.firebase_client.num_relevant_recordings()
                self.display_controller.display_message_counter(num_messages)

    def __init__(self, firebase_client, drecorder, display_controller):
        self.pubnub = PubNub(pnconfig)
        self.pubnub.add_listener(
            self.NewMessageSubscribeCallback(firebase_client, drecorder, display_controller))
        self.pubnub.subscribe()\
            .channels("pubnub_onboarding_channel")\
            .with_presence()\
            .execute()
        self.drecorder = drecorder
        self.display_controller = display_controller

    def publish_callback(self, envelope, status):
        """Completion hook for async publishes; just logs."""
        print('\n')
        print('pubnub message published')
        print('\n')

    def broadcastUploadedMessage(self):
        """Announce on the shared channel that this client uploaded a recording."""
        self.pubnub.publish()\
            .channel("pubnub_onboarding_channel")\
            .message({"sender": pnconfig.uuid, "content": EVENT_UPLOADED_MESSAGE, "url": self.drecorder.firebase_filename})\
            .pn_async(self.publish_callback)
| deibid/radio-azar | my_modules/PubNubClient.py | PubNubClient.py | py | 2,682 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pubnub.pnconfiguration.PNConfiguration",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "p... |
42572073330 | import abc
import collections
from typing import List, Callable, Optional, OrderedDict, Tuple
import pandas as pd
class PreProcessingBase:
    """Base class for DataFrame pre-processing pipelines.

    Subclasses implement _get_actions() to supply an ordered mapping of
    callable -> args tuple; run() applies each callable to the DataFrame
    in order via DataFrame.apply.
    """

    def __init__(self,
                 df: pd.DataFrame,
                 actions: Optional[OrderedDict[Callable, Tuple]] = None):
        self._df = df
        # default to an empty ordered mapping when no actions are supplied
        self._actions = collections.OrderedDict() if actions is None else actions

    @abc.abstractmethod
    def _get_actions(self) -> OrderedDict[Callable, Tuple]:
        # NOTE(review): the class does not inherit abc.ABC, so this decorator
        # does not prevent instantiation; the NotImplementedError is the
        # effective guard.
        raise NotImplementedError

    def setup(self):
        """Populate the action table from the subclass; returns self for chaining."""
        self._actions = self._get_actions()
        return self

    def run(self) -> pd.DataFrame:
        """Apply every registered action in order and return the result."""
        for func, func_args in self._actions.items():
            self._df = self._df.apply(func, args=func_args)
        return self._df
| gilcaspi/COVID-19-Vaccinations | data_processing/preprocessing/pre_processing_base.py | pre_processing_base.py | py | 791 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.DataFrame",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "typing.Optional",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.OrderedDict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Calla... |
5500500071 | import time
import base64
from google.cloud import pubsub_v1
from google.oauth2 import service_account
# Placeholder GCP identifiers — replace before running.
project_id = "<gcp_project_id>"
topic_name = "<topic_name>"
# NOTE(review): requires a real service-account JSON key file path.
credentials = service_account.Credentials.from_service_account_file("<gcp_Service_account_file_path>")
print(credentials)
publisher = pubsub_v1.PublisherClient(credentials = credentials)
topic_path = publisher.topic_path(project_id, topic_name)
def callback(message_future):
    """Log the outcome of an asynchronous publish future."""
    print("1")
    # exception() blocks for up to 30 s and returns None on success.
    err = message_future.exception(timeout=30)
    if not err:
        print(message_future.result())
    else:
        print('Publishing message on {} threw an Exception {}.'.format(
            topic_name, message_future.exception()))
# Read and base64-encode the image payload. Renamed the variable from `str`,
# which shadowed the builtin str type.
with open("15.jpg", "rb") as image_file:
    encoded_image = base64.b64encode(image_file.read())
data = "sample data"
# Data must be a bytestring
data = data.encode('utf-8')
# When you publish a message, the client returns a Future.
message_future = publisher.publish(topic_path, data=encoded_image)
message_future.add_done_callback(callback)
print(data)
print('Published message IDs:')
##############################################################################################
subscriber = pubsub_v1.SubscriberClient(credentials = credentials)
subscription_path = subscriber.subscription_path(
    project_id, "subscribe")
def callback1(message):
    """Print and acknowledge an incoming Pub/Sub message."""
    print('Received message: {}'.format(message))
    message.ack()
subscriber.subscribe(subscription_path, callback=callback1)
# The subscriber is non-blocking. We must keep the main thread from
# exiting to allow it to process messages asynchronously in the background.
print('Listening for messages on {}'.format(subscription_path))
while True:
    time.sleep(60)
{
"api_name": "google.oauth2.service_account.Credentials.from_service_account_file",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "google.oauth2.service_account.Credentials",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "google.oauth2.service_account",
... |
7807070088 | import logging
from copy import deepcopy
from itertools import permutations
import numpy as np
from scipy.special import softmax
from scipy.stats import entropy
def true_entropy(team_generator, batch_predict, num_items: int, num_selections: int):
    """Exact entropy of the item-pick distribution by enumerating all teams.

    Enumerates every ordered team of each size up to num_selections, turns
    the model scores into per-slot pick probabilities, and accumulates the
    marginal probability of each item being picked at each slot.
    Assumes team objects support mark(), len() and integer indexing — the
    interface used by team_generator()'s return value (confirm in caller).
    Returns the negated entropy of the averaged pick distribution.
    """
    P_A = np.zeros((num_selections, num_items))  # basically P(A^i_j)
    past_probs = []
    for i in range(num_selections):
        # separately calculate P(A^i_j)
        # all possible permutations with team size upto i
        sets = list(permutations(range(num_items), i + 1))
        teams = [team_generator() for x in range(len(sets))]
        for j, s in enumerate(sets):
            for item in s:
                teams[j].mark(item)
        # put them together for a batch update
        vals = batch_predict(teams)
        # reshape them, so we can group by same prefix teams (so that p(last_element) sums to 1
        struct_vals = softmax(vals.reshape(-1, num_items - i), axis=1)
        vals = struct_vals.reshape(-1)
        # to add to past probabilities coz P(A^j| prod of A's < j)
        P = np.zeros((num_items,) * (i + 1))
        for j, team in enumerate(teams):
            prefix_p = 1
            for k in range(len(team)):
                pp = past_probs[k - 1][tuple(team[z] for z in range(k))] if k > 0 else 1  # to help find the prefix
                prefix_p *= pp
            P[tuple(team[z] for z in range(len(team)))] += vals[j]
            P_A[i, team[-1]] += prefix_p * vals[j]
            # print(team.pkms, P_A[i, team[-1]], prefix_p, vals[j])
        past_probs.append(P)  # somevariant of vals so that its easily indexible)
    # print(P_A, np.sum(P_A, axis=1))
    # print((np.sum(P_A, axis=0)))
    # P_A = np.sum(P_A, axis = 0)
    """
    P_X = np.zeros((num_items))
    for i in range(num_selections):
        accumulated_P = np.ones((num_items))
        for j in range(num_selections):
            if i != j:
                accumulated_P *= (np.ones((num_items)) - P_A[j])
        P_X += P_A[i] * accumulated_P
    """
    # average the per-slot marginals into a single pick distribution
    P_X = np.sum(P_A, axis=0) / num_selections
    entropy_loss = -entropy(P_X)
    logging.info("P_A=%s\tEntropy=%s\t", str(list(P_X)), str(entropy_loss))
    return entropy_loss
def sample_based_entropy(team_generator, batch_predict, num_items: int, num_selections: int, num_samples: int):
    """Monte-Carlo estimate of the item-pick-distribution entropy.

    Samples num_samples teams; for each of the num_selections slots it
    scores every candidate extension, masks already-picked items with -inf,
    samples the next pick from the softmax, and counts picks. Returns the
    negated entropy of the empirical pick distribution. Non-deterministic:
    uses np.random without seeding.
    """
    counts = np.zeros(num_items)
    for i in range(num_samples):
        team = team_generator()
        for j in range(num_selections):
            # one candidate copy of the team per possible next item
            tmp_teams = [deepcopy(team) for z in range(num_items)]
            items = [z for z in range(num_items)]
            for k, item in enumerate(items):
                tmp_teams[k].mark(item)
            vals = (batch_predict(tmp_teams))
            # forbid re-picking items already on the team
            for k in range(len(team) - 1):
                vals[team[k]] = float("-inf")
            p = softmax(vals)
            selection = np.random.choice(range(num_items), p=p)
            team.mark(selection)
            counts[selection] += 1
    # empirical pick distribution over all sampled selections
    P_A = counts / sum(counts)
    entropy_loss = -entropy(P_A)
    logging.info("P_A=%s\tEntropy=%s\t", str(list(P_A)), str(entropy_loss))
    return entropy_loss
def lower_bound_entropy(team_generator, batch_predict, num_items: int, num_selections: int):
    """Lower-bound entropy estimate using only single-item teams.

    Builds one team per item, scores the batch, and returns the negated
    entropy of the softmax over those scores. num_selections is kept for
    interface parity with the other estimators but is unused here.
    """
    teams = []
    for item in range(num_items):
        team = team_generator()
        team.mark(item)  # each candidate team holds exactly one item
        teams.append(team)
    probs = softmax(batch_predict(teams))
    entropy_loss = -entropy(probs)
    logging.info("P_A=%s\tEntropy=%s\t", str(list(probs)), str(entropy_loss))
    return entropy_loss
| nianticlabs/metagame-balance | src/metagame_balance/entropy_fns.py | entropy_fns.py | py | 3,539 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "numpy.zeros",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "scipy.special.softmax",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.zeros"... |
28398915179 | import datetime
import termux
from sync.misc.Config import config
from sync.misc.Logger import logger
class Notification:
    """Singleton holding the state shown in the persistent termux notification."""
    __instance__ = None

    def __init__(self):
        now = datetime.datetime.now()
        self.sync_all = {}
        self.watchers = {}
        self.global_status = "Active"
        self.last_start = f"Started: {now.strftime('%Y-%m-%d@%H:%M:%S')}"
        self.last_stop = f"Stopped: -"
        self.last_start_stop_time = now.strftime('%a. %H:%M:%S')
        self.last_full_sync = f"Fully synchronized: -"

    @staticmethod
    def get() -> "Notification":
        """Return the shared instance, creating it on first use."""
        if Notification.__instance__ is None:
            Notification.__instance__ = Notification()
        return Notification.__instance__

    def set_full_sync_status(self, sync_all):
        self.sync_all = sync_all
        self.update()

    def set_watchers(self, watchers):
        self.watchers = watchers
        self.update()

    def set_global_status(self, global_status):
        self.global_status = global_status

    def set_inactive(self):
        """Mark the service stopped and refresh the notification."""
        self.set_global_status("Inactive")
        now = datetime.datetime.now()
        self.last_stop = f"Stopped: {now.strftime('%Y-%m-%d@%H:%M:%S')}"
        self.last_start_stop_time = now.strftime('%a. %H:%M:%S')
        self.update()

    def set_active(self):
        """Mark the service started and refresh the notification."""
        self.set_global_status("Active")
        now = datetime.datetime.now()
        self.last_start = f"Started: {now.strftime('%Y-%m-%d@%H:%M:%S')}"
        self.last_start_stop_time = now.strftime('%a. %H:%M:%S')
        self.update()

    def full_sync_done(self):
        """Record the time of the last full synchronization."""
        stamp = datetime.datetime.now().strftime('%Y-%m-%d@%H:%M:%S')
        self.last_full_sync = f"Fully synchronized: {stamp}"
        self.update()

    def exiting(self):
        """Mark the service exited and refresh the notification."""
        self.set_global_status("Exited")
        self.last_start_stop_time = datetime.datetime.now().strftime('%a. %H:%M:%S')
        self.update()

    def _item_line(self, sync_info):
        """Render the one-line status for a single configured sync entry."""
        line = f"{sync_info.label} "
        if sync_info.id in self.sync_all:
            line += f"{self.sync_all[sync_info.id]} | "
        else:
            line += f"- | "
        if sync_info.id in self.watchers:
            watcher = self.watchers[sync_info.id]
            line += watcher.files_info.get_status()
            if watcher.last_sync_date is not None:
                last_sync_date = watcher.last_sync_date.strftime('%H:%M:%S')
                line += f" ({last_sync_date})"
        else:
            line += " [Not watching]"
        return line

    def update(self):
        """Push the current state to the termux notification bar."""
        title = f"Termux-sync [{self.global_status}] [{self.last_start_stop_time}]"
        body = ""
        if config.debug:
            body += self.last_stop + "\n"
        body += self.last_start + "\n"
        body += self.last_full_sync + "\n"
        body += "\n"
        for sync_info in config.sync_info_list:
            body += self._item_line(sync_info) + "\n"
        action = f"termux-open --content-type yaml {logger.log_file}"
        termux.Notification.notify(title,
                                   body,
                                   999,
                                   args=("alert-once", "ongoing"),
                                   kwargs={"button1": "See logs", "button1-action": action})
| dpjl/termux-sync | sync/misc/Notification.py | Notification.py | py | 3,522 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "da... |
11579230306 | import sklearn
import cv2
import pandas as pd
import numpy as np
import math
from sklearn.cluster import KMeans
from sklearn.metrics import silhouette_samples, silhouette_score
from collections import Counter
from scipy.spatial import distance_matrix
from scipy.sparse.csgraph import shortest_path
class ImageClassifier:
    """Cluster an image's pixels with K-means, sort each cluster, and merge
    the sorted clusters back into a single image.

    Pass ``n_clusters=-1`` to choose the cluster count automatically via the
    silhouette score.
    """

    def __init__(self, n_clusters, target):
        self._n_clusters = n_clusters
        # Maps each forward colorspace conversion to its inverse so clusters
        # can be converted back to BGR after sorting.
        self._colorspaces = {
            cv2.COLOR_BGR2HSV: cv2.COLOR_HSV2BGR,
            cv2.COLOR_BGR2LAB: cv2.COLOR_LAB2BGR,
            cv2.COLOR_BGR2HLS: cv2.COLOR_HLS2BGR,
        }
        self._img = cv2.imread(target)
        self._rows, self._cols, _ = self._img.shape

    def run(self, dst):
        """Full pipeline: cluster on the hue channel, sort clusters in LAB,
        merge them, and write the result to ``dst``."""
        df = self.get_dataframe(colorspace=cv2.COLOR_BGR2HSV)
        cluster_map = self.run_kmeans(df, [0])
        clusters = self.get_clusters(cluster_map)
        cmp = lambda pixel: int(pixel[0])
        clusters = self.sort_clusters(clusters, cmp, color_sort=cv2.COLOR_BGR2LAB)
        res = self.merge_clusters(clusters, lambda cluster: sum(cluster[0][0]))
        cv2.imwrite(dst, res)

    def get_dataframe(self, colorspace=None):
        """
        Function to get a dataframe from an image's data.

        Return value (pandas.DataFrame):
            dataframe with every pixel's information (3 channels).
            pixels are extracted left to right, top to bottom.

        Parameters:
            colorspace (cv2.COLOR_BGR2*): used if you want to form the
                dataframe from another colorspace than BGR
        """
        data = {'val1': [], 'val2': [], 'val3': []}
        img = self._img.copy()
        # Convert image to desired colorspace
        if colorspace is not None:
            img = cv2.cvtColor(img, colorspace).astype(np.uint8)
        for i in range(self._rows):
            for j in range(self._cols):
                data['val1'].append(img[i][j][0])
                data['val2'].append(img[i][j][1])
                data['val3'].append(img[i][j][2])
        df = pd.DataFrame(data=data)
        return df

    def get_optimal_n_clusters(self, dataframe, keys):
        """Return the n in [2, 10] with the best silhouette score."""
        max_n = 0
        max_score = 0
        x = dataframe.iloc[:, keys].values
        print("Finding optimal cluster count...")
        for n_clusters in range(2, 11):
            # Fix: the n_jobs argument was removed from KMeans in
            # scikit-learn 1.0 and raised TypeError on current versions.
            kmeans = KMeans(n_clusters=n_clusters, n_init=10, max_iter=300)
            preds = kmeans.fit_predict(x)
            print("start silhouette")
            score = silhouette_score(x, preds)
            print("end silhouette")
            if score > max_score:
                max_n = n_clusters
                max_score = score
            print("For n_clusters = {}, silhouette score is {})".format(n_clusters, score))
        print("Optimal cluster count is {}".format(max_n))
        return max_n

    def run_kmeans(self, dataframe, keys):
        """
        Run kmeans from dataframe and return clustering information.

        Return value (list):
            cluster id for each entry in the dataframe

        Parameters:
            dataframe (pandas.DataFrame): dataframe to run kmeans on
            keys (list): indexes of the dataframe's columns used to run kmeans
        """
        if self._n_clusters == -1:
            self._n_clusters = self.get_optimal_n_clusters(dataframe, keys)
        # Fix: n_jobs removed (see get_optimal_n_clusters).
        kmeans = KMeans(n_clusters=self._n_clusters, n_init=10, max_iter=300)
        x = dataframe.iloc[:, keys].values
        y = kmeans.fit_predict(x)
        return y

    def get_clusters(self, cluster_map):
        """
        Extract clusters from the image.

        Return value (list):
            List containing each cluster as a list of pixels.

        Parameters:
            cluster_map (list): cluster id for each pixel of the image
                (left to right, top to bottom)
        """
        groups = [[] for i in range(self._n_clusters)]
        for i in range(self._rows):
            for j in range(self._cols):
                group_id = cluster_map[i * self._cols + j]
                groups[group_id].append(self._img[i][j])
        return groups

    def sort_clusters(self, clusters, comparator, color_sort=None):
        """
        Sort each cluster with a custom comparator.

        Return value (list):
            list of sorted np.arrays of shape (1, n, 3)

        Parameters:
            clusters (list): list of clusters to sort
            comparator (lambda x): key function used to sort pixels
            color_sort: colorspace in which to perform the sort
        """
        for i in range(len(clusters)):
            cluster = clusters[i]
            # Reshape to (1, n, 3) so the cluster fits the cv2.Mat format,
            # allowing its colorspace to be changed.
            cluster = np.reshape(cluster, (1, len(cluster), 3))
            if color_sort is not None:  # convert cluster to desired colorspace
                cluster = cv2.cvtColor(cluster, color_sort).astype(np.uint8)
            cluster[0] = np.array(sorted(cluster[0], key=comparator)).astype(np.uint8)
            if color_sort is not None:  # convert cluster back to BGR
                cluster = cv2.cvtColor(cluster, self._colorspaces[color_sort]).astype(np.uint8)
            clusters[i] = cluster
        return clusters

    def merge_clusters(self, clusters, comparator):
        """
        Merge all clusters into one image, left to right, top to bottom.

        Return value (cv2.Mat):
            cv2 image with merged clusters

        Parameters:
            clusters (list): list of clusters (np.arrays of shape (1, x, 3))
            comparator: key function used to order the clusters
        """
        res = np.zeros((self._rows * self._cols, 3), dtype=np.uint8)
        merge_index = 0
        clusters = sorted(clusters, key=comparator)
        for cluster in clusters:
            res[merge_index:merge_index + len(cluster[0])] = cluster[0]
            merge_index = merge_index + len(cluster[0])
        res = np.reshape(res, (self._rows, self._cols, 3))
        return res
| elraffray/pyImage | imageclassifier.py | imageclassifier.py | py | 6,442 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.COLOR_BGR2HSV",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2LAB",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "cv2.COLOR_BGR2HLS",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "... |
24102936874 | from helpers import ReadLines
from typing import Tuple, List
class DayFive(ReadLines):
    """Advent of Code 2020, day 5: decode boarding-pass seat references."""

    def __init__(
        self, file_path="/home/jonathan/projects/2020-advent-of-code/five/input.txt"
    ):
        super().__init__(file_input=file_path)
        # Seat ids are kept sorted ascending.
        self.seat_ids = sorted(
            DayFive.identify_seat(code)[2] for code in self.inputs
        )

    @staticmethod
    def _process_code(code: List[str], _range: Tuple[int, int]) -> int:
        """
        I'm leaving this method in, because it's quite neat - but it has been
        rendered useless by the more practical _binary_count method below.
        """
        # Iterative binary partition: consume letters until one remains,
        # halving the range toward the front (F/L) or the back (B/R).
        while len(code) > 1:
            letter = code.pop(0)
            half = int((_range[1] + 1 - _range[0]) / 2)
            if letter == "F" or letter == "L":
                _range = _range[0], _range[0] + half - 1
            elif letter == "B" or letter == "R":
                _range = _range[0] + half, _range[1]
        return _range[{"L": 0, "F": 0, "R": 1, "B": 1}[code[0]]]

    @staticmethod
    def _binary_count(seat_code: str):
        """Interpret F/L as 0 and B/R as 1, reading the code as binary."""
        bits = seat_code.translate(str.maketrans("FLBR", "0011"))
        return int(bits, 2)

    @staticmethod
    def identify_seat(seat_reference: str) -> Tuple[int, int, int]:
        """Return (row, column, seat_id) for a ten-letter seat reference."""
        row = DayFive._binary_count(seat_reference[:7])
        column = DayFive._binary_count(seat_reference[-3:])
        return row, column, row * 8 + column

    def highest_id(self):
        # seat_ids is sorted ascending, so the last entry is the maximum.
        return max(self.seat_ids)

    def find_missing_id(self) -> int:
        """Return the single id absent from the contiguous range of seat ids."""
        lo, hi = min(self.seat_ids), max(self.seat_ids)
        return (set(range(lo, hi + 1)) - set(self.seat_ids)).pop()
| jonodrew/2020-advent-of-code | five/five.py | five.py | py | 1,952 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "helpers.ReadLines",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_nu... |
70945120828 | # 형태소 분석
# Morphological analysis (KoNLPy Okt) of a Korean Wikipedia article,
# followed by simple word-frequency statistics.
from konlpy.tag import Okt
from docutils.parsers.rst.directives import encoding

okt = Okt()
# Sample analyzer calls kept for reference (pos / morphs / nouns on one sentence):
#result = okt.pos('고추 등 매운음식을 오랫동안 너무 많이 먹었을 경우 인지능력과 기억력을 저하시킬 위험이 높다는 연구결과가 나왔다.')
#result = okt.morphs('고추 등 매운음식을 오랫동안 너무 많이 먹었을 경우 인지능력과 기억력을 저하시킬 위험이 높다는 연구결과가 나왔다.')
#result = okt.nouns('고추 등 매운음식을 오랫동안 너무 많이 먹었을 경우 인지능력과 기억력을 저하시킬 위험이 높다는 연구결과가 나왔다.')
#print(result)

import urllib
from bs4 import BeautifulSoup
from urllib import parse

# Percent-encode the article title ("이순신", Admiral Yi Sun-sin) for the URL.
para = parse.quote("이순신")
print(para)
url = "https://ko.wikipedia.org/wiki/" + para
page = urllib.request.urlopen(url)
soup = BeautifulSoup(page.read(), 'lxml')
print(soup)

# Collect the nouns from every paragraph of the article body.
wordlist = []
for item in soup.select("#mw-content-text > div > p"):
    if item.string != None:
        #print(item.string)
        ss = item.string
        wordlist += okt.nouns(ss)
print('wordlist 출력')  # "printing wordlist"
print(wordlist)
print('단어 수 : ' + str(len(wordlist)))  # "word count"

# Count occurrences of each noun.
word_dict = {}
for i in wordlist:
    if i in word_dict:
        word_dict[i] += 1
    else:
        word_dict[i] = 1
print('\n\n word_dict 출력')  # "printing word_dict"
print(word_dict)
print('중복 단어 제거')  # "removing duplicate words"
setdata = set(wordlist)
print(setdata)
print('발견된 단어 수 (중복x) : ' + str(len(setdata)))  # distinct word count

# Save as a CSV file.
import csv
import pandas as pd
try:
    f = csv.writer(open('ws1.csv', 'w', encoding='utf-8'))
    # NOTE(review): writerow(word_dict) writes only the dict's keys,
    # not the counts — confirm whether the counts were meant to be saved.
    f.writerow(word_dict)
except Exception as e:
    print('err : ', e)

# df1 = pd.read_csv('ws1.csv', encoding='utf-8')
# print(df1)
with open('ws1.csv', 'r', encoding='utf-8')as f:
    print(f.read())
print()

from pandas import Series, DataFrame
li_data = Series(wordlist)
#print(li_data)
print(li_data.value_counts()[:5])  # five most frequent nouns
print()
li_data = Series(word_dict)
# NOTE(review): value_counts() on the counts Series tallies how often each
# frequency occurs, not the most frequent words — presumably unintended.
print(li_data.value_counts()[:5])
print('-----------------')
df = DataFrame(wordlist, columns = ['단어'])  # column name means "word"
print(df.head())
###############################################################
| kangmihee/EX_python | py_morpheme/pack/morp1.py | morp1.py | py | 2,358 | python | ko | code | 0 | github-code | 6 | [
{
"api_name": "konlpy.tag.Okt",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urllib.parse.quote",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "urllib.request.urlopen"... |
27094908089 | import pandas as pd
import random
from tqdm.auto import tqdm
tqdm.pandas()
import re
from tqdm import tqdm
import numpy as np
import cv2
from albumentations import (
Compose, OneOf, Normalize, Resize, HorizontalFlip, VerticalFlip, Rotate, RandomRotate90, CenterCrop
)
from albumentations.pytorch import ToTensorV2
from InChI_extra_image_gen import add_noise
def split_form(text):
    """Tokenize a chemical formula / InChI layer into space-separated tokens.

    Tokens are digit runs, element symbols (an uppercase letter plus an
    optional lowercase letter), single non-alphanumeric characters, or a
    '/' followed by a lowercase layer prefix.
    """
    # Fix: the pattern is now a raw string — the original non-raw literal
    # relied on Python passing unknown escapes like '\d' through, which is
    # deprecated (invalid escape sequence) and will become a SyntaxError.
    PATTERN = re.compile(r'\d+|[A-Z][a-z]?|[^A-Za-z\d\/]|\/[a-z]')
    return ' '.join(re.findall(PATTERN, text))
def get_atom_counts(df):
    """Parse df['Formula'] and add one integer count column per target atom.

    Each formula is split into element+count tokens (e.g. 'C13' -> C: 13,
    'O' -> O: 1). A target element absent from every formula yields a column
    of zeros. Returns the mutated ``df``.
    """
    TARGETS = [
        'B', 'Br', 'C', 'Cl',
        'F', 'H', 'I', 'N',
        'O', 'P', 'S', 'Si']
    formula_regex = re.compile(r'[A-Z][a-z]?[0-9]*')
    element_regex = re.compile(r'[A-Z][a-z]?')
    number_regex = re.compile(r'[0-9]*')
    atom_dict_list = []
    for formula in tqdm(df['Formula'].values):
        atom_dict = dict()
        for token in formula_regex.findall(formula):
            atom = number_regex.sub("", token)   # strip digits -> element symbol
            dgts = element_regex.sub("", token)  # strip letters -> count digits
            atom_cnt = int(dgts) if len(dgts) > 0 else 1
            atom_dict[atom] = atom_cnt
        atom_dict_list.append(atom_dict)
    atom_df = pd.DataFrame(atom_dict_list).fillna(0).astype(int)
    atom_df = atom_df.sort_index(axis=1)
    for atom in TARGETS:
        # Fix: a target element missing from every formula used to raise
        # KeyError here; default the column to 0 instead.
        df[atom] = atom_df[atom] if atom in atom_df.columns else 0
    return df
def train_file_path(image_id):
    """Build the on-disk path of a training image from its id.

    Images are sharded by the first three characters of the id. Pay attention
    to the directory before /train — it may need changing per environment.
    """
    prefix = "./bms-molecular-translation/train"
    return f"{prefix}/{image_id[0]}/{image_id[1]}/{image_id[2]}/{image_id}.png"
#Two ways to treat the input images. 1.crop and pad to fit the images' size to be constant. 2.resize images to certain w and h. Here is the crop function.
def crop_image(img,
               contour_min_pixel = 2,
               small_stuff_size = 2,
               small_stuff_dist = 5,
               pad_pixels = 5):
    """Crop a grayscale structure image to the bounding box of its contours.

    NOTE(review): the returned crop is color-inverted (the flip back to the
    original polarity is commented out at the bottom) — confirm downstream
    code expects white-on-black.
    """
    # idea: pad with contour_min_pixels just in case we cut off
    # a small part of the structure that is separated by a missing pixel
    # findContours only finds white objects on a black background, so invert.
    img = 255 - img
    # BINARY + OTSU thresholding makes every non-background pixel white so
    # no part of the object is missed.
    _, thresh = cv2.threshold(img, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)
    # RETR_LIST: all contours without nesting hierarchy. CHAIN_APPROX_SIMPLE:
    # only the key points of each contour (e.g. 4 points for a rectangle).
    # The [-2:] slice keeps compatibility across cv2 return signatures.
    contours, _ = cv2.findContours(thresh, cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)[-2:]
    # Collect the very small contours separately.
    small_stuff = []
    x_min0, y_min0, x_max0, y_max0 = np.inf, np.inf, 0, 0
    for i in contours:
        if len(i) < contour_min_pixel:  # too few pixels: ignore this contour
            continue
        # x, y: top-left corner of the bounding rectangle; w, h: its size.
        x, y, w, h = cv2.boundingRect(i)
        if w <= small_stuff_size and h <= small_stuff_size:
            # remember contours smaller than small_stuff_size for later
            small_stuff.append([x, y, x+w, y+h])
            continue
        # grow the overall bounding rectangle to include this contour
        x_min0 = min(x_min0, x)
        y_min0 = min(y_min0, y)
        x_max0 = max(x_max0, x + w)
        y_max0 = max(y_max0, y + h)
    x_min, y_min, x_max, y_max = x_min0, y_min0, x_max0, y_max0
    # enlarge the found crop box if it cuts out small stuff that is very close by
    for i in range(len(small_stuff)):
        # if a small fragment lies within small_stuff_dist of a box edge,
        # extend that edge of the box to include the fragment.
        if small_stuff[i][0] < x_min0 and small_stuff[i][0] + small_stuff_dist >= x_min0:
            x_min = small_stuff[i][0]
        if small_stuff[i][1] < y_min0 and small_stuff[i][1] + small_stuff_dist >= y_min0:
            y_min = small_stuff[i][1]
        if small_stuff[i][2] > x_max0 and small_stuff[i][2] - small_stuff_dist <= x_max0:
            x_max = small_stuff[i][2]
        if small_stuff[i][3] > y_max0 and small_stuff[i][3] - small_stuff_dist <= y_max0:
            y_max = small_stuff[i][3]
    if pad_pixels > 0:  # pad the crop, clamped to the valid image range
        y_min = max(0, y_min-pad_pixels)
        y_max = min(img.shape[0], y_max+pad_pixels)
        x_min = max(0, x_min-pad_pixels)
        x_max = min(img.shape[1], x_max+pad_pixels)
    img_cropped = img[y_min:y_max, x_min:x_max]
    # flip the black/white colors back (currently disabled):
    # img_cropped = 255 - img_cropped
    return img_cropped
def pad_image(image, desired_size):
h, w = image.shape[0], image.shape[1]
delta_h = desired_size - h
delta_w = desired_size - w
top, bottom = delta_h//2, delta_h - (delta_h//2)
left,right = delta_w//2, delta_w - (delta_w//2)
img_padded = cv2.copyMakeBorder(image, top, bottom, left, right, cv2.BORDER_CONSTANT,
value = [255, 255, 255])
return img_padded
def preprocess_train_images(train_df, transform, CFG):
    # Goal: make all images a constant size for the transformer model (crop
    # and resize), and add an 'image' column holding the original image data
    # plus — when CFG.trans_type is 'rotate90 or verticalflip' — one randomly
    # transformed copy per image. Only one transformation is prepared because
    # the dataset scale already seemed sufficient.
    assert set(['InChI_text', 'file_path', 'text_length']).issubset(train_df.columns), 'make sure the df has been preprocessed and certain columns are created.'
    trans_img = []
    ori_img = []
    transform_type = ['rotate90', 'verticalflip']
    df = train_df.copy()
    resize = Compose([Resize(CFG.image_size, CFG.image_size)])
    for i in tqdm(range(len(train_df))):
        img_path = train_df.loc[i, 'file_path']
        image = cv2.imread(img_path, cv2.IMREAD_GRAYSCALE)
        if CFG.crop == True:
            # Crop to the molecule's bounding box before resizing.
            image = crop_image(image,
                               contour_min_pixel = 2,
                               small_stuff_size = 2,
                               small_stuff_dist = 5,
                               pad_pixels = 10)
        image = resize(image = image)['image']
        image = add_noise(image)
        # np.expand_dims: the model expects 3-D input with a trailing channel
        # dimension of 1, but imread(cv2.IMREAD_GRAYSCALE) yields a 2-D array.
        image = np.expand_dims(image, axis = -1)
        ori_img.append(image)
        if CFG.trans_type == 'rotate90 or verticalflip':
            # Randomly pick one of the two augmentations for the extra copy.
            trans_image = transform(transform_type[random.randint(0, 1)])(image = image)['image']
            trans_img.append(trans_image)
    df.insert(3, 'image', ori_img)
    if CFG.trans_type == 'rotate90 or verticalflip':
        # Append the augmented copies and shuffle the combined frame.
        train_df['image'] = trans_img
        temp = pd.concat([df, train_df]).sample(frac = 1).reset_index(drop = True)
        return temp
    else:
        return df
def get_transform(trans_type):
    """Return the augmentation pipeline matching ``trans_type``.

    Supported flags are 'rotate90' (random +/-90 degree rotation) and
    'verticalflip'; any other value yields None.
    """
    builders = {
        'rotate90': lambda: Compose([
            OneOf([
                Rotate([90, 90], p = 0.5),
                Rotate([-90, -90], p = 0.5),
            ], p = 1.0),
        ]),
        'verticalflip': lambda: Compose([
            OneOf([
                VerticalFlip()
            ], p = 1.0),
        ]),
    }
    builder = builders.get(trans_type)
    return builder() if builder is not None else None
def get_aug(CFG):
    """Normalize pixel values and convert the numpy image to a torch tensor,
    the final step before feeding images to the model."""
    normalize = Normalize(mean = CFG.pixels_mean, std = CFG.pixels_std)
    return Compose([normalize, ToTensorV2()])
| phelchegs/bms-molecular-translation | InChI/InChI_preprocessing.py | InChI_preprocessing.py | py | 7,948 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "tqdm.auto.tqdm.pandas",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tqdm.auto.tqdm",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "re.compile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_... |
36040675316 | import typing
from datetime import datetime, timedelta
import arrow
from ParadoxTrading.Utils.DataStruct import DataStruct
DATETIME_TYPE = typing.Union[str, datetime]
class SplitAbstract:
    """Groups a stream of tick data into bars.

    Subclasses define the bar boundaries by implementing
    ``_get_begin_end_time``; ticks are fed in via ``addOne``/``addMany``.
    """

    def __init__(self):
        self.cur_bar: DataStruct = None            # bar currently being filled
        self.cur_bar_begin_time: DATETIME_TYPE = None
        self.cur_bar_end_time: DATETIME_TYPE = None
        self.bar_list: typing.List[DataStruct] = []
        self.bar_begin_time_list: typing.List[DATETIME_TYPE] = []
        self.bar_end_time_list: typing.List[DATETIME_TYPE] = []

    def __len__(self) -> int:
        # Fix: the return annotation was the builtin ``len`` function
        # instead of ``int``.
        return len(self.getBarList())

    def getLastData(self) -> DataStruct:
        """Return the most recent tick added to the current bar."""
        return self.cur_bar.iloc[-1]

    def getCurBar(self) -> DataStruct:
        """Return the bar currently being filled."""
        return self.cur_bar

    def getCurBarBeginTime(self) -> DATETIME_TYPE:
        return self.cur_bar_begin_time

    def getCurBarEndTime(self) -> DATETIME_TYPE:
        return self.cur_bar_end_time

    def getBarList(self) -> typing.List[DataStruct]:
        return self.bar_list

    def getBarBeginTimeList(self) -> typing.List[DATETIME_TYPE]:
        return self.bar_begin_time_list

    def getBarEndTimeList(self) -> typing.List[DATETIME_TYPE]:
        return self.bar_end_time_list

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        """Return the (begin, end) boundaries of the bar containing _cur_time."""
        raise NotImplementedError('You need to implement _get_begin_end_time!')

    def _create_new_bar(self, _data: DataStruct, _cur_time: DATETIME_TYPE):
        # Start a fresh bar seeded with a copy of the incoming tick, and
        # record its boundaries in the parallel time lists.
        self.cur_bar = _data.clone()
        self.cur_bar_begin_time, self.cur_bar_end_time = \
            self._get_begin_end_time(_cur_time)
        self.bar_list.append(self.cur_bar)
        self.bar_begin_time_list.append(self.cur_bar_begin_time)
        self.bar_end_time_list.append(self.cur_bar_end_time)

    def addOne(self, _data: DataStruct) -> bool:
        """
        add one tick data into spliter

        Args:
            _data (DataStruct): one tick

        Returns:
            bool : whether created a new bar
        """
        assert len(_data) == 1
        cur_time = _data.index()[0]
        if self.cur_bar is None:
            self._create_new_bar(_data, cur_time)
            return True
        else:
            if cur_time < self.cur_bar_end_time:
                # Still inside the current bar: append the tick.
                self.cur_bar.addDict(_data.toDict())
                return False
            else:
                self._create_new_bar(_data, cur_time)
                return True

    def addMany(self, _data: DataStruct):
        """
        add continuous data into spliter

        Args:
            _data (DataStruct): continuous data
        """
        for d in _data:
            self.addOne(d)
        return self
class SplitIntoSecond(SplitAbstract):
    """Splits ticks into bars that each span ``_second`` seconds."""

    def __init__(self, _second: int = 1):
        super().__init__()
        self.skip_s = _second

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        # Floor the seconds field to the nearest bar boundary.
        floored = _cur_time.second - _cur_time.second % self.skip_s
        start = _cur_time.replace(second=floored, microsecond=0)
        return start, start + timedelta(seconds=self.skip_s)
class SplitIntoMinute(SplitAbstract):
    """Splits ticks into bars that each span ``_minute`` minutes."""

    def __init__(self, _minute: int = 1):
        super().__init__()
        self.skip_m = _minute

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        # Floor the minutes field to the nearest bar boundary.
        floored = _cur_time.minute - _cur_time.minute % self.skip_m
        start = _cur_time.replace(minute=floored, second=0, microsecond=0)
        return start, start + timedelta(minutes=self.skip_m)
class SplitIntoHour(SplitAbstract):
    """Splits ticks into bars that each span ``_hour`` hours."""

    def __init__(self, _hour: int = 1):
        super().__init__()
        self.skip_h = _hour

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        # Floor the hours field to the nearest bar boundary.
        floored = _cur_time.hour - _cur_time.hour % self.skip_h
        start = _cur_time.replace(hour=floored, minute=0, second=0,
                                  microsecond=0)
        return start, start + timedelta(hours=self.skip_h)
class SplitIntoWeek(SplitAbstract):
    """Splits 'YYYYMMDD' date strings into calendar-week bars (Monday start)."""

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        day = datetime.strptime(_cur_time, '%Y%m%d')
        monday: datetime = day - timedelta(days=day.weekday())
        next_monday: datetime = monday + timedelta(weeks=1)
        return monday.strftime('%Y%m%d'), next_monday.strftime('%Y%m%d')
class SplitIntoMonth(SplitAbstract):
    """Splits 'YYYYMMDD' date strings into calendar-month bars."""

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        first_day = arrow.get(_cur_time, 'YYYYMMDD').replace(day=1)
        next_month = first_day.shift(months=1)
        return first_day.format('YYYYMMDD'), next_month.format('YYYYMMDD')
class SplitIntoYear(SplitAbstract):
    """Splits 'YYYYMMDD' date strings into year-length bars.

    NOTE(review): like the original, the bar starts at the first day of the
    current *month* (day is reset, month is not) and ends one year later.
    """

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        first_day = arrow.get(_cur_time, 'YYYYMMDD').replace(day=1)
        next_year = first_day.shift(years=1)
        return first_day.format('YYYYMMDD'), next_year.format('YYYYMMDD')
class SplitVolumeBars(SplitAbstract):
    """Groups ticks into bars that each accumulate roughly ``_volume_size``
    units of the ``_use_key`` column."""

    def __init__(
            self, _use_key='volume', _volume_size: int = 1,
    ):
        """
        :param _use_key: use which index to split volume
        :param _volume_size: volume accumulated before a new bar is opened
        """
        super().__init__()
        self.use_key = _use_key
        self.volume_size = _volume_size
        self.total_volume = 0  # volume accumulated into the current bar

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        # Volume bars have no fixed duration: both boundaries start at the
        # opening tick's timestamp; addOne extends the end time per tick.
        return _cur_time, _cur_time

    def addOne(self, _data: DataStruct):
        """Feed one tick; return True when it opened a new bar.

        NOTE(review): a new bar opens only once total_volume strictly
        exceeds volume_size, so bars can overshoot the target volume.
        """
        assert len(_data) == 1
        cur_time = _data.index()[0]
        cur_volume = _data[self.use_key][0]
        if self.cur_bar is None:  # the first tick
            self._create_new_bar(_data, cur_time)
            self.total_volume = cur_volume
            return True
        if self.total_volume > self.volume_size:
            # Current bar is full: open a new one seeded with this tick.
            self._create_new_bar(_data, cur_time)
            self.total_volume = cur_volume
            return True
        self.cur_bar.addDict(_data.toDict())
        self.cur_bar_end_time = cur_time  # override end time
        self.bar_end_time_list[-1] = cur_time
        self.total_volume += cur_volume
        return False
class SplitTickImbalance(SplitAbstract):
    """Tick-imbalance bars.

    <Advances in Financial Machine Learning> - 2.3.2.1: a bar closes when the
    accumulated tick-direction imbalance |sum(b)| exceeds an EMA-estimated
    threshold T * |2P - 1|.
    """

    def __init__(
            self, _use_key='lastprice',
            _period=7, _init_T=1000
    ):
        """
        _use_key: use which index to calc bt
        _init_T: the length of first bar
        _period: period of EMA
        """
        super().__init__()
        self.use_key = _use_key
        self.last_value = None
        self.last_b = 1
        self.sum_b = 0   # sum of b
        self.num_b = 0   # total number of b
        self.T = _init_T  # len of Bar
        self.P = None    # probability of b == 1
        self.period = _period
        self.threshold = None

    def _get_begin_end_time(
            self, _cur_time: DATETIME_TYPE
    ) -> (DATETIME_TYPE, DATETIME_TYPE):
        # Imbalance bars have no fixed duration; both boundaries start at
        # the opening tick's timestamp.
        return _cur_time, _cur_time

    def _update_b(self, _value):
        """Update the tick direction b (+1/-1; carried forward on no change)."""
        if _value > self.last_value:
            self.last_b = 1
        elif _value < self.last_value:
            self.last_b = -1
        else:
            pass  # unchanged price keeps the previous direction
        self.last_value = _value
        self.sum_b += self.last_b
        self.num_b += 1

    def _reset_b(self):
        """Clear the per-bar accumulators."""
        self.sum_b = 0
        self.num_b = 0

    def _update_threshold(self):
        """EMA-update expected bar length T and P(b == 1), then the threshold."""
        new_T = self.num_b
        new_P = (self.sum_b + self.num_b) / 2. / self.num_b
        self.T += (new_T - self.T) / self.period
        if self.P is None:  # init p
            self.P = new_P
        else:
            self.P += (new_P - self.P) / self.period
        self.threshold = self.T * abs(2 * self.P - 1)

    def addOne(self, _data: DataStruct) -> bool:
        """Feed one tick; return True when a new bar was started.

        Fix: removed leftover debug print() calls and a blocking input()
        that halted processing, waiting for a key press, on every
        completed bar.
        """
        # check data
        assert len(_data) == 1
        value = _data[self.use_key][0]
        cur_time = _data.index()[0]
        if self.cur_bar is None:  # init the first bar
            self.last_value = value
            self._create_new_bar(_data, cur_time)
            return True
        self._update_b(value)
        flag = False
        if self.P is None:  # currently filling the very first bar
            if self.num_b >= self.T:  # finish the first bar
                flag = True
        elif abs(self.sum_b) >= self.threshold:  # create new bar
            flag = True
        if flag:
            self._update_threshold()
            self._reset_b()
            self._create_new_bar(_data, cur_time)
            return True
        else:
            self.cur_bar.addDict(_data.toDict())
            self.cur_bar_end_time = cur_time  # override end time
            self.bar_end_time_list[-1] = cur_time
            return False
| ppaanngggg/ParadoxTrading | ParadoxTrading/Utils/Split.py | Split.py | py | 9,614 | python | en | code | 51 | github-code | 6 | [
{
"api_name": "typing.Union",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "ParadoxTrading.Utils.DataStruct.DataStruct",
"line_number": 13,
"usage_type": "name"
},
{
"api_... |
70488593467 | import csv
import functools
import json
import math
import random
def cycle_call_parametrized(string_q: int, left_b: int, right_b: int):
    """Decorator factory: regenerate info.csv with random coefficients, then
    apply the decorated solver to every coefficient row, collecting the
    stringified results in a dict."""
    def cycle_call(func):
        # print(f'LALA')
        def wrapper_(*args, **kwargs):
            # creating a csv-file:
            generate_csv(string_q, left_b, right_b)
            roots = dict()
            with open('info.csv', 'r', encoding='utf-8') as f:
                reader = csv.reader(f, dialect='excel')
                for i, row in enumerate(reader):
                    # Skip the blank lines the csv writer may emit between rows.
                    if row:
                        a, b, c = row
                        a, b, c, = int(a), int(b), int(c)
                        # NOTE(review): keying by i // 2 assumes every data row
                        # is followed by exactly one blank row; on platforms
                        # where no blank rows are written this collapses pairs
                        # of rows onto the same key — confirm the intended
                        # CSV layout.
                        roots[i // 2] = str(func(a, b, c))
            return roots
        return wrapper_
    return cycle_call
def jsonize(func):
    """Decorator: call ``func`` and dump its returned mapping to info.json.

    The wrapped callable returns None; its result is persisted to disk only.
    """
    @functools.wraps(func)
    def wrapper_(*args, **kwargs):
        # Fix: the original called func(args, kwargs), passing the argument
        # tuple and keyword dict as two positional arguments instead of
        # forwarding them with func(*args, **kwargs).
        roots = func(*args, **kwargs)
        with open('info.json', 'w', encoding='utf-8') as f:
            json.dump(roots, f, indent='\n')
    return wrapper_
@jsonize
@cycle_call_parametrized(100, 100, 1000)
def solve_quadratic_equation(a: int, b: int, c: int):
    """solves a * x^2 + b * x + c = 0 equation..."""
    # For a negative discriminant, ** .5 yields a complex square root.
    sqrt_d = (b ** 2 - 4 * a * c) ** .5
    x1, x2 = (-b + sqrt_d) / (2 * a), (-b - sqrt_d) / (2 * a)
    # NOTE(review): due to conditional-expression precedence this always
    # returns a 2-tuple — (x1, x1) when the roots coincide. If a single root
    # was intended for that case, the ternary needs parentheses: 
    # (x1, x2) if x1 != x2 else x1.
    return x1, x2 if x1 != x2 else x1
def generate_csv(string_q: int, left_b: int, right_b: int):
    """Write ``string_q + 1`` rows of three random integer coefficients,
    each in [left_b, right_b], to info.csv.

    NOTE(review): range(string_q + 1) emits one row more than ``string_q``;
    kept as-is since downstream code depends on the current row count.
    """
    with open('info.csv', 'w', encoding='utf-8') as f:
        writer = csv.writer(f, dialect='excel', quotechar='|', quoting=csv.QUOTE_MINIMAL)
        for ind in range(string_q + 1):
            # Fix: random.randint is inclusive on both ends, so the original
            # right_b + 1 could emit values above the requested upper bound.
            k = [random.randint(left_b, right_b) for _ in [0, 1, 2]]
            # print(f'k: {k}')
            writer.writerow(k)
# generate_csv(100, 100, 1000)
# Run the full pipeline twice: each call regenerates info.csv, solves every
# equation in it, and rewrites info.json with the stringified roots.
solve_quadratic_equation()
solve_quadratic_equation()
| LocusLontrime/Python | Dive_into_python/HomeWork9/Decorators.py | Decorators.py | py | 1,807 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "csv.reader",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dump",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "csv.QUOTE_MINIMAL",
"line_number"... |
39426129134 | ''' Strategy to be backtested. '''
import backtrader as bt
# Create a Stratey
class TestStrategy(bt.Strategy):
    ''' Base class to be subclassed for user defined strategies.

    Implements a simple SMA-crossover system: buy when the fast SMA crosses
    above the slow SMA, sell when it crosses below.
    '''

    # Moving average parameters: fast and slow SMA periods for the crossover.
    params = (('pfast', 2), ('pslow', 184),)

    def __init__(self):
        # Keep direct references to the primary data feed's price lines.
        self.dataclose = self.datas[0].close
        self.datahigh = self.datas[0].high
        self.datalow = self.datas[0].low
        # Order variable will contain ongoing order details/status
        self.order = None
        # Instantiate moving averages
        self.slow_sma = bt.indicators.MovingAverageSimple(self.datas[0],
                        period=self.params.pslow)
        self.fast_sma = bt.indicators.MovingAverageSimple(self.datas[0],
                        period=self.params.pfast)
        # Bar index at which the last order executed (set in notify_order).
        self.bar_executed = 0

    def log(self, txt, dt=None):
        ''' Logging function for this strategy. Prefixes the current bar's
        date (or the supplied ``dt``) to the message. '''
        dt = dt or self.datas[0].datetime.date(0)
        print(f'{dt.isoformat()}, {txt}')

    def next(self):
        '''
        This method will be called for all remaining data points when
        the minimum period for all datas/indicators have been meet.
        '''
        # Check for open orders; only one order may be in flight at a time.
        if self.order:
            return

        # Check if we are in the market
        if not self.position:
            # We are not in the market, look for a signal to OPEN trades
            if self.fast_sma[0] > self.slow_sma[0]:
                self.log(f'BUY CREATED: {self.dataclose[0]:2f}')
                # Keep track of the created order to avoid a 2nd order
                self.order = self.buy()
            elif self.fast_sma[0] < self.slow_sma[0]:
                self.log(f'SELL CREATED: {self.dataclose[0]}')
                # Keep track of the created order to avoid a 2nd order
                self.order = self.sell()

    def notify_order(self, order):
        ''' Receives an order whenever there has been a change in one. '''
        if order.status in [order.Submitted, order.Accepted]:
            # An active Buy/Sell order has been submitted/accepted - Nothing to do
            return

        # Check if an order has been completed
        # Attention: broker could reject order if not enough cash
        if order.status in [order.Completed]:
            if order.isbuy():
                self.log(f'BUY EXECUTED: {order.executed.price}')
            elif order.issell():
                self.log(f'SELL EXECUTED: {order.executed.price}')
            self.bar_executed = len(self)
        elif order.status in [order.Canceled, order.Margin, order.Rejected]:
            self.log('Order Canceled/Margin/Rejected')

        # Reset orders so next() may submit a new one.
        self.order = None
| Kyle-sn/PaperStreet | python/backtest/strategy.py | strategy.py | py | 2,714 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "backtrader.Strategy",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "backtrader.indicators.MovingAverageSimple",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "backtrader.indicators",
"line_number": 22,
"usage_type": "attribute"
... |
37612660256 | from django.shortcuts import render
from .models import *
import cv2
import numpy as np
from pytesseract import *
pytesseract.tesseract_cmd="C:/Program Files/Tesseract-OCR/tesseract.exe"
def main(request):
    """Render the landing page."""
    return render(request, 'main.html')
def maintest(request):
    """Render the test variant of the landing page."""
    return render(request, 'maintest.html')
def kakaomap(request):
    """Render the Kakao-map page with every Hospital row for the markers."""
    hospitals = Hospital.objects.all()
    return render(request, 'kakaomap.html', {'hospital': hospitals})
def camera(request):
    """Render the camera-capture page."""
    return render(request, 'camera.html')
def history(request):
    """Render the page listing previously uploaded prescription images."""
    uploads = image.objects.all()
    return render(request, 'history.html', {'img': uploads})
def result(request):
    """Handle an uploaded prescription photo: OCR the medicine-code region
    with Tesseract and look each code up in the Medicine table."""
    prescription = image.objects.create(
        sample=request.FILES.get('camera'),
    )
    pic = prescription.sample
    pic = "./media/" + str(pic)
    # Fix: read the image the user just uploaded; the original read a
    # hard-coded "test4.jpg" and ignored the submitted file.
    img = cv2.imread(pic)
    # Region of the prescription that contains the medicine codes.
    rect_img = img[355:660, 60:317]
    # Digits-only Tesseract configuration.
    custom_config = 'outputbase nobatch digits'
    number = pytesseract.image_to_string(rect_img, config=custom_config)
    dist = ""
    db = []
    for num in number:
        dist += num
        if num == "\n":
            try:
                db.append(Medicine.objects.get(m_Code=int(dist)))
            except Exception:
                # Unparseable code or no matching Medicine row: skip it.
                pass
            # Fix: reset the accumulator so each line is parsed on its own;
            # previously every line after the first failed int() conversion
            # because the earlier lines stayed in `dist`.
            dist = ""
    count = len(db)
    return render(request, 'result.html', {'db': db, 'count': count})
| YounngR/Graduation-work | DB/views.py | views.py | py | 1,680 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pytesseract.tesseract_cmd",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.shortcuts.render",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api... |
37585700958 | import tkinter as tk
from tkinter import *
from tkinter import ttk
from tkinter.messagebox import showinfo
import tkinter.font as tkFont
import sqlite3, time, datetime, random
name_of_db = 'inventory_master.db'
my_conn = sqlite3.connect(name_of_db)
cdb = my_conn.cursor()
def create_table():
    """Create the customer_master table on first run (no-op if it exists)."""
    ddl = (
        'CREATE TABLE IF NOT EXISTS customer_master('
        'idno INTEGER PRIMARY KEY,'
        'datestamp TEXT, '
        'customer_name TEXT, '
        'address TEXT, '
        'town TEXT, '
        'post_code TEXT, '
        'contact TEXT)'
    )
    cdb.execute(ddl)
def show_ID():
    """Look a customer up by exact ID and show the rows in a new window."""
    frmList = tk.Tk()
    frmList.title("List of customer")
    width = 665
    height = 500
    screenwidth = frmList.winfo_screenwidth()
    screenheight = frmList.winfo_screenheight()
    # Center the result window on the screen.
    alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
    frmList.geometry(alignstr)
    frmList.resizable(width=False, height=False)
    customerID = txtID.get()
    # Bug fix: removed leftover debug code that focused the name entry and
    # inserted the literal text "Hello" into it on every ID search.
    data_set = my_conn.execute("SELECT * FROM customer_master WHERE idno=?", (customerID,))
    output_data(data_set, frmList)
    clear_form()
def show_Name():
    """Search customers whose name matches the entered pattern and list them."""
    win = tk.Tk()
    win.title("List of customer")
    w, h = 665, 500
    # Center the result window on the screen.
    x_off = (win.winfo_screenwidth() - w) / 2
    y_off = (win.winfo_screenheight() - h) / 2
    win.geometry('%dx%d+%d+%d' % (w, h, x_off, y_off))
    win.resizable(width=False, height=False)
    pattern = txtName.get()
    rows = my_conn.execute("SELECT * FROM customer_master WHERE customer_name like?", (pattern,))
    output_data(rows, win)
    clear_form()
def show_Contact():
    """Search customers whose contact number matches and list them."""
    win = tk.Tk()
    win.title("List of customer")
    w, h = 665, 500
    # Center the result window on the screen.
    x_off = (win.winfo_screenwidth() - w) / 2
    y_off = (win.winfo_screenheight() - h) / 2
    win.geometry('%dx%d+%d+%d' % (w, h, x_off, y_off))
    win.resizable(width=False, height=False)
    phone = txtContact.get()
    rows = my_conn.execute("SELECT * FROM customer_master WHERE contact like?", (phone,))
    output_data(rows, win)
    clear_form()
def update_record():
    """Write the form fields back to the row whose idno matches the ID box."""
    with my_conn:
        params = (
            txtName.get(),      # customer_name
            txtAddress.get(),   # address
            txtTown.get(),      # town
            txtPostCode.get(),  # post_code
            txtContact.get(),   # contact
            txtID.get(),        # idno (WHERE clause)
        )
        cdb.execute("UPDATE customer_master SET customer_name=?, address=?, town=?, post_code=?, contact=? WHERE idno=?",
                    params)
        my_conn.commit()
        showinfo(title='Information', message='Record Successfully Saved!')
        clear_form()
def delete_record():
    """Delete the row whose idno matches the ID box, then blank the form."""
    with my_conn:
        cdb.execute("DELETE FROM customer_master WHERE idno=?", (txtID.get(),))
        my_conn.commit()
        clear_form()
def output_data(data_set, frmList):
    """Fill frmList with one Entry widget per cell of the result set.

    Returns the populated window.
    """
    for row_idx, person in enumerate(data_set):
        for col_idx in range(len(person)):
            cell = Entry(frmList, width=15, fg='black')
            cell.grid(row=row_idx, column=col_idx)
            cell.insert(END, person[col_idx])
    return frmList
def clear_form():
    """Blank out every entry widget on the update form."""
    for entry in (txtID, txtName, txtAddress, txtTown, txtContact, txtPostCode):
        entry.delete(0, END)
def btnClose_Command():
    """Clear the form, then terminate the program."""
    clear_form()
    # exit() ends the whole interpreter, closing the window with it.
    exit()
# --- Application start-up: ensure the table exists, then build the form ---
create_table()
frmCustomerUpdate = tk.Tk()
frmCustomerUpdate.title("Customer Update")
# Fixed window size, centered on the screen.
width = 513
height = 364
screenwidth = frmCustomerUpdate.winfo_screenwidth()
screenheight = frmCustomerUpdate.winfo_screenheight()
alignstr = '%dx%d+%d+%d' % (width, height, (screenwidth - width) / 2, (screenheight - height) / 2)
frmCustomerUpdate.geometry(alignstr)
frmCustomerUpdate.resizable(width=False, height=False)
# --- Entry: customer ID ---
txtID = tk.Entry(frmCustomerUpdate)
txtID["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtID["font"] = ft
txtID["fg"] = "#333333"
txtID["justify"] = "center"
txtID["text"] = "Customer ID"
txtID.place(x=100, y=60, width=251, height=30)
# --- Entry: customer name ---
txtName = tk.Entry(frmCustomerUpdate)
txtName["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtName["font"] = ft
txtName["fg"] = "#333333"
txtName["justify"] = "left"
txtName["text"] = "Customer Name"
txtName.place(x=100, y=110, width=251, height=30)
# --- Entry: address ---
txtAddress = tk.Entry(frmCustomerUpdate)
txtAddress["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtAddress["font"] = ft
txtAddress["fg"] = "#333333"
txtAddress["justify"] = "left"
txtAddress["text"] = "Address"
txtAddress.place(x=100, y=160, width=250, height=30)
# --- Entry: town ---
txtTown = tk.Entry(frmCustomerUpdate)
txtTown["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtTown["font"] = ft
txtTown["fg"] = "#333333"
txtTown["justify"] = "left"
txtTown["text"] = "Town"
txtTown.place(x=100, y=210, width=248, height=30)
# --- Entry: post code ---
txtPostCode = tk.Entry(frmCustomerUpdate)
txtPostCode["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtPostCode["font"] = ft
txtPostCode["fg"] = "#333333"
txtPostCode["justify"] = "left"
txtPostCode["text"] = "Post Code"
txtPostCode.place(x=100, y=260, width=248, height=30)
# --- Entry: contact / mobile number ---
txtContact = tk.Entry(frmCustomerUpdate)
txtContact["borderwidth"] = "1px"
ft = tkFont.Font(family='Times', size=10)
txtContact["font"] = ft
txtContact["fg"] = "#333333"
txtContact["justify"] = "left"
txtContact["text"] = "Contact"
txtContact.place(x=100, y=310, width=247, height=30)
# --- Field labels (left column) ---
lblID = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblID["font"] = ft
lblID["fg"] = "#333333"
lblID["justify"] = "left"
lblID["text"] = "Customer ID"
lblID.place(x=10, y=60, width=89, height=30)
lblName = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblName["font"] = ft
lblName["fg"] = "#333333"
lblName["justify"] = "left"
lblName["text"] = "Customer Name"
lblName.place(x=10, y=110, width=91, height=30)
lblAddress = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblAddress["font"] = ft
lblAddress["fg"] = "#333333"
lblAddress["justify"] = "left"
lblAddress["text"] = "Address"
lblAddress.place(x=10, y=160, width=91, height=30)
lblTown = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblTown["font"] = ft
lblTown["fg"] = "#333333"
lblTown["justify"] = "left"
lblTown["text"] = "Town"
lblTown.place(x=10, y=210, width=92, height=30)
lblPostCode = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblPostCode["font"] = ft
lblPostCode["fg"] = "#333333"
lblPostCode["justify"] = "left"
lblPostCode["text"] = "Post Code"
lblPostCode.place(x=10, y=260, width=91, height=30)
lblContact = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=10)
lblContact["font"] = ft
lblContact["fg"] = "#333333"
lblContact["justify"] = "left"
lblContact["text"] = "Mobile No."
lblContact.place(x=10, y=310, width=91, height=30)
# --- Form title ---
lblTitle = tk.Label(frmCustomerUpdate)
ft = tkFont.Font(family='Times', size=22)
lblTitle["font"] = ft
lblTitle["fg"] = "#333333"
lblTitle["justify"] = "center"
lblTitle["text"] = "CUSTOMER UPDATE"
lblTitle.place(x=10, y=10, width=488, height=37)
# --- Action buttons (right column), each wired to a handler above ---
btncustomerID = tk.Button(frmCustomerUpdate)
btncustomerID["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btncustomerID["font"] = ft
btncustomerID["fg"] = "#000000"
btncustomerID["justify"] = "center"
btncustomerID["text"] = "Search Customer ID"
btncustomerID.place(x=370, y=60, width=130, height=30)
btncustomerID["command"] = show_ID
btncustomerName = tk.Button(frmCustomerUpdate)
btncustomerName["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btncustomerName["font"] = ft
btncustomerName["fg"] = "#000000"
btncustomerName["justify"] = "center"
btncustomerName["text"] = "Search Customer Name"
btncustomerName.place(x=370, y=110, width=130, height=30)
btncustomerName["command"] = show_Name
btnMobile = tk.Button(frmCustomerUpdate)
btnMobile["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btnMobile["font"] = ft
btnMobile["fg"] = "#000000"
btnMobile["justify"] = "center"
btnMobile["text"] = "Search Mobile No."
btnMobile.place(x=370, y=160, width=129, height=30)
btnMobile["command"] = show_Contact
btnUpdate = tk.Button(frmCustomerUpdate)
btnUpdate["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btnUpdate["font"] = ft
btnUpdate["fg"] = "#000000"
btnUpdate["justify"] = "center"
btnUpdate["text"] = "Update"
btnUpdate.place(x=370, y=210, width=128, height=30)
btnUpdate["command"] = update_record
btnDelete = tk.Button(frmCustomerUpdate)
btnDelete["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btnDelete["font"] = ft
btnDelete["fg"] = "#000000"
btnDelete["justify"] = "center"
btnDelete["text"] = "Delete"
btnDelete.place(x=370, y=260, width=126, height=30)
btnDelete["command"] = delete_record
btnClose = tk.Button(frmCustomerUpdate)
btnClose["bg"] = "#efefef"
ft = tkFont.Font(family='Times', size=10)
btnClose["font"] = ft
btnClose["fg"] = "#000000"
btnClose["justify"] = "center"
btnClose["text"] = "Close"
btnClose.place(x=370, y=310, width=126, height=30)
btnClose["command"] = btnClose_Command
frmCustomerUpdate.mainloop() # run form by default
| InfoSoftBD/Python | CustomerUpdate.py | CustomerUpdate.py | py | 9,946 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "sqlite3.connect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "tkinter.Tk",
"line_number": ... |
# --- Pong game: window, assets and initial state ---
import pygame
pygame.init()
pygame.display.set_caption("WannabePong")
size = 800, 600
screen = pygame.display.set_mode(size)
width, height = size
# Ball velocity in pixels per frame: [dx, dy].
speed = [1, 1]
# Background color (white).
bgc = 255, 255, 255
fontControls = pygame.font.SysFont("monospace", 16)
font = pygame.font.SysFont("monospace", 26)
fontCount = pygame.font.SysFont("monospace", 42)
# Sprites: ball, red paddle, blue paddle and the center divider line.
pelota = pygame.image.load("pelota.png")
pelotaRect = pelota.get_rect()
palaRoja = pygame.image.load("palaRoja.png")
palaRojaRect = palaRoja.get_rect()
palaAzul = pygame.image.load("palaAzul.png")
palaAzulRect = palaAzul.get_rect()
divisor = pygame.image.load("divisor.png")
divisorRect = divisor.get_rect()
# Score (strike) counters; a player loses at 3 strikes.
strikesRojo = 0
strikesAzul = 0
countdown = 10
run = True
# Place the divider in the middle and the paddles at the left/right edges.
divisorRect.move_ip(400, 0)
palaRojaRect.move_ip(1, 300)
palaAzulRect.move_ip(773, 300)
# Pre-game screen: show the controls and a 10-second countdown.
while countdown > 0:
    count = fontCount.render("{0}".format(countdown), 1, (0,0,0))
    redControls = fontControls.render("Moves with W and S keys", 1, (0,0,0))
    blueControls = fontControls.render("Moves with UP and DOWN arrows", 1, (0,0,0))
    screen.fill(bgc)
    screen.blit(redControls, (5, 50))
    screen.blit(blueControls, (505, 50))
    screen.blit(count, (388, 250))
    pygame.display.flip()
    pygame.time.wait(1000)
    countdown -= 1
# Main game loop: move the ball, read input, bounce, score and redraw.
while run:
    pygame.time.delay(2)
    pelotaRect = pelotaRect.move(speed)
    keys = pygame.key.get_pressed()
    strikesRojoDisplay = font.render("Strikes: {0}".format(strikesRojo), 1, (0,0,0))
    strikesAzulDisplay = font.render("Strikes: {0}".format(strikesAzul), 1, (0,0,0))
    winnerRojo = font.render("RED WINS!", 1, (0,0,0))
    winnerAzul = font.render("BLUE WINS!", 1, (0,0,0))
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            run = False
    # Red paddle: W/S, clamped to the top/bottom of the window.
    if keys[pygame.K_w] and palaRojaRect.top <= 0:
        palaRojaRect = palaRojaRect.move(0, 0)
    elif keys[pygame.K_w]:
        palaRojaRect = palaRojaRect.move(0, -1)
    if keys[pygame.K_s] and palaRojaRect.bottom >= height:
        palaRojaRect = palaRojaRect.move(0, 0)
    elif keys[pygame.K_s]:
        palaRojaRect = palaRojaRect.move(0, 1)
    # Blue paddle: UP/DOWN arrows, clamped the same way.
    if keys[pygame.K_UP] and palaAzulRect.top <= 0:
        palaAzulRect = palaAzulRect.move(0, 0)
    elif keys[pygame.K_UP]:
        palaAzulRect = palaAzulRect.move(0, -1)
    if keys[pygame.K_DOWN] and palaAzulRect.bottom >= height:
        palaAzulRect = palaAzulRect.move(0, 0)
    elif keys[pygame.K_DOWN]:
        palaAzulRect = palaAzulRect.move(0, 1)
    # Paddle hits reverse the ball horizontally.
    if palaRojaRect.colliderect(pelotaRect):
        speed[0] = -speed[0]
    if palaAzulRect.colliderect(pelotaRect):
        speed[0] = -speed[0]
    # Side walls: bounce and count a strike for the wall that was hit.
    if pelotaRect.left <= 0 or pelotaRect.right >= width:
        speed[0] = -speed[0]
        if pelotaRect.left <= 0:
            strikesRojo += 1
        elif pelotaRect.right >= width:
            strikesAzul += 1
    # Top/bottom walls reverse the ball vertically.
    if pelotaRect.top <= 0 or pelotaRect.bottom >= height:
        speed[1] = -speed[1]
    # Game over at 3 strikes for either side.
    if strikesRojo == 3 or strikesAzul == 3:
        run = False
    screen.fill(bgc)
    screen.blit(divisor, divisorRect)
    screen.blit(pelota, pelotaRect)
    screen.blit(palaRoja, palaRojaRect)
    screen.blit(palaAzul, palaAzulRect)
    screen.blit(strikesRojoDisplay, (5, 10))
    screen.blit(strikesAzulDisplay, (633, 10))
    pygame.display.flip()
# End-of-game screen: show the winner banner for 5 seconds, then quit.
screen.fill(bgc)
if strikesRojo == 3:
    screen.blit(winnerAzul, (333, 250))
    pygame.display.flip()
elif strikesAzul == 3:
    screen.blit(winnerRojo, (333, 250))
    pygame.display.flip()
pygame.time.wait(5000)
# Bug fix: pygame.QUIT is an int event-type constant, so calling it raised
# TypeError on exit. pygame.quit() is the shutdown function.
pygame.quit()
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_caption",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "pygame.displa... |
27923745620 | from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense
from keras.utils import np_utils
from tensorflow.keras.models import Sequential
import matplotlib.pyplot as plt
from scipy.io import loadmat
import numpy as np
def display(i):
    """Show sample i with its true label and the model's prediction."""
    title = 'Example' + str(i) + 'Label:' + str(Y[i]) + 'Predicted:' + str(ypred[i])
    plt.title(title)
    plt.imshow(X[i].reshape((28, 28)), cmap=plt.cm.gray_r)
    plt.show()
def plot_accuracy(history):
    """Plot training vs. validation accuracy over epochs."""
    for key in ('accuracy', 'val_accuracy'):
        plt.plot(history.history[key])
    plt.title('Model accuracy')
    plt.ylabel('Accuracy')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
def plot_loss(history):
    """Plot training vs. validation loss over epochs."""
    for key in ('loss', 'val_loss'):
        plt.plot(history.history[key])
    plt.title('Model loss')
    plt.ylabel('Loss')
    plt.xlabel('Epoch')
    plt.legend(['Train', 'Test'], loc='upper left')
    plt.show()
# Load MNIST from the MATLAB dump; 'data' is 784xN, 'label' is 1xN.
mnist = loadmat('mnist-original')
X , Y = mnist['data'] , mnist['label']
# Transpose so rows are samples.
X= X.T
Y = Y.T
# 90/10 train/test split, then carve 20% of train off as validation.
X_train , X_test , Y_train , Y_test = train_test_split(X,Y,test_size=0.1,shuffle = True)
X_train , X_val , Y_train , Y_val = train_test_split(X_train,Y_train,test_size=0.2,shuffle = True)
# Scale pixel values to [0, 1].
X_train = X_train/255
X_test = X_test/255
X_val = X_val/255
# One-hot encode the digit labels.
Ytrain = np_utils.to_categorical(Y_train)
Ytest = np_utils.to_categorical(Y_test)
Yval = np_utils.to_categorical(Y_val)
# Simple MLP: one 784-unit ReLU hidden layer, 10-way softmax output.
model = Sequential()
model.add(Dense(784,input_shape=(784,),activation='relu',kernel_initializer='normal'))
model.add(Dense(10, activation = 'softmax',kernel_initializer='normal'))
model.compile(loss='categorical_crossentropy' , optimizer = 'adam' , metrics = ['accuracy'])
history = model.fit(X_train ,Ytrain,batch_size = 512 ,epochs=30,verbose=2, validation_data=(X_val,Yval))
test_accuracy = model.evaluate(x=X_test,y=Ytest,batch_size=200,verbose=2)
print("Test results : ", test_accuracy)
# Predict over the whole dataset and take the argmax class per sample
# (used by display() for per-example inspection).
Ypred = model.predict(X)
ypred = []
for i in Ypred:
    ypred.append(np.argmax(i))
plot_accuracy(history)
plot_loss(history)
| ankitlohiya212/basic-ml-problems | Basic ML problems/Mnist.py | Mnist.py | py | 2,107 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.title",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "ma... |
26057428953 | import re
import requests
from bs4 import BeautifulSoup
# Scrape the ViewVC directory listing and download every *.rec file
# (revision 3, raw "checkout" view) into the local rec/ directory.
URL = "https://sourcesup.renater.fr/scm/viewvc.php/rec/2019-CONVECS/REC/"
page = requests.get(URL)
soup = BeautifulSoup(page.content, "html.parser")
for link in soup.find_all('a', href=True):
    print(link['href'])
    if 'name' in link:
        print(link['name'])
    m = re.search(r"/(\w+)\.rec", link["href"])
    if m is not None:
        name = m.group(1)
        print(name)
        # Separate name from the listing URL so the loop no longer clobbers
        # the module-level URL variable.
        file_url = f"https://sourcesup.renater.fr/scm/viewvc.php/rec/2019-CONVECS/REC/{name}.rec?revision=3&view=co"
        page = requests.get(file_url)
        print(page.content)
        # Bug fix: use a context manager so the file handle is closed even
        # if the write raises (the old open/close pair leaked on errors).
        with open(f"rec/{name}.rec", "wb") as f:
            f.write(page.content)
| philzook58/egglog-rec | scraper.py | scraper.py | py | 711 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_numbe... |
9224589864 | from prediction.M2I.predictor import M2IPredictor
import numpy as np
import math
import logging
import copy
import random
import time
import interactive_sim.envs.util as utils
import plan.helper as plan_helper
import agents.car as car
# --- Planner tuning constants ---
S0 = 2  # presumably the IDM minimum gap (m) — TODO confirm
T = 0.25 #1.5 # reaction time when following
DELTA = 4 # the power term in IDM
PLANNING_HORIZON = 5 # in frames
PREDICTION_HTZ = 10 # prediction_htz
T_HEADWAY = 0.2  # desired time headway (s); presumably IDM's T — verify
A_SPEEDUP_DESIRE = 0.3 # A: comfortable acceleration
A_SLOWDOWN_DESIRE = 1.5 # B: comfortable deceleration
XPT_SHRESHOLD = 0.7
MINIMAL_DISTANCE_PER_STEP = 0.05
MINIMAL_DISTANCE_TO_TRAVEL = 4
# MINIMAL_DISTANCE_TO_RESCALE = -999 #0.1
REACTION_AFTER = 200 # in frames
MINIMAL_SCALE = 0.3
MAX_DEVIATION_FOR_PREDICTION = 4
TRAFFIC_LIGHT_COLLISION_SIZE = 2
MINIMAL_SPEED_TO_TRACK_ORG_GOAL = 5
MINIMAL_DISTANCE_TO_GOAL = 15
OFF_ROAD_DIST = 30
PRINT_TIMER = False  # enable to print per-section timing
DRAW_CBC_PTS = False  # enable to draw collision-based-check debug points
def get_angle(x, y):
    """Return the polar angle, in radians, of the vector (x, y)."""
    return math.atan2(y, x)
def euclidean_distance(pt1, pt2):
    """Return the straight-line distance between two 2-D points."""
    dx = pt1[0] - pt2[0]
    dy = pt1[1] - pt2[1]
    return math.sqrt(dx ** 2 + dy ** 2)
def get_angle_of_a_line(pt1, pt2):
    """Angle of the ray from pt1 to pt2, counter-clockwise from the +x axis."""
    (x1, y1), (x2, y2) = pt1, pt2
    return math.atan2(y2 - y1, x2 - x1)
def calculate_yaw_from_states(trajectory, default_yaw):
    """Derive a per-frame heading array from consecutive xy displacements.

    A frame keeps the most recent confidently-observed heading (initially
    default_yaw) whenever its displacement to the next frame is 1 unit or
    less; larger displacements update the heading via atan2. The final
    frame is never assigned and stays 0, matching the original behavior.
    """
    time_frames, _ = trajectory.shape
    pred_yaw = np.zeros([time_frames])
    last_confident_yaw = default_yaw
    for i in range(time_frames - 1):
        dx = trajectory[i + 1][0] - trajectory[i][0]
        dy = trajectory[i + 1][1] - trajectory[i][1]
        if np.sqrt(dx * dx + dy * dy) > 1:
            last_confident_yaw = math.atan2(dy, dx)
        pred_yaw[i] = last_confident_yaw
    return pred_yaw
def change_axis(yaw):
    """Map a heading into the mirrored, quarter-turn-shifted axis convention."""
    return -(yaw + math.pi / 2)
def get_current_pose_and_v(current_state, agent_id, current_frame_idx):
    """Return an agent's pose one frame ago and its per-step speed.

    Speed is the distance covered over the previous 5 frames divided by 5;
    if either endpoint of that window is invalid (x == -1) the speed falls
    back to 0.
    """
    poses = current_state['predicting']['original_trajectory'][agent_id]['pose']
    my_current_pose = poses[current_frame_idx - 1]
    invalid_window = (poses[current_frame_idx - 1, 0] == -1
                      or poses[current_frame_idx - 6, 0] == -1)
    if invalid_window:
        my_current_v_per_step = 0
        print("Past invalid for ", agent_id, " and setting v to 0")
    else:
        my_current_v_per_step = euclidean_distance(poses[current_frame_idx - 1, :2],
                                                   poses[current_frame_idx - 6, :2]) / 5
    return my_current_pose, my_current_v_per_step
class EnvPlanner:
"""
EnvPlanner is capable of using as much information as it can to satisfy its loss like avoiding collisions.
EnvPlanner can assume it's controlling all agents around if it does not exacerbate the sim-2-real gap.
While the baseline planner or any planner controlling the ego vehicle can only use the prediction or past data
"""
    def __init__(self, env_config, predictor, dataset='Waymo', map_api=None):
        """Configure the planner from env_config and attach the predictor.

        dataset selects Waymo vs. NuPlan conventions (lane/vehicle type
        codes); map_api is only used for NuPlan routing.
        """
        # Planning schedule: plan from frame planning_from to planning_to,
        # re-planning every planning_interval frames over planning_horizon.
        self.planning_from = env_config.env.planning_from
        self.planning_interval = env_config.env.planning_interval
        self.planning_horizon = env_config.env.planning_horizon
        self.planning_to = env_config.env.planning_to
        self.scenario_frame_number = 0
        self.online_predictor = predictor
        self.method_testing = env_config.env.testing_method  # 0=densetnt with dropout, 1=0+post-processing, 2=1+relation
        self.test_task = env_config.env.test_task
        self.all_relevant = env_config.env.all_relevant
        self.follow_loaded_relation = env_config.env.follow_loaded_relation
        self.follow_prediction_traj = env_config.env.follow_prediction
        self.target_lanes = [0, 0]  # lane_index, point_index
        self.routed_traj = {}
        self.follow_gt_first = env_config.env.follow_gt_first
        self.predict_env_for_ego_collisions = env_config.env.predict_env_for_ego_collisions
        self.predict_relations_for_ego = env_config.env.predict_relations_for_ego
        self.predict_with_rules = env_config.env.predict_with_rules
        self.frame_rate = env_config.env.frame_rate
        self.current_on_road = True
        self.dataset = dataset
        self.online_predictor.dataset = dataset
        # Dataset-specific type codes used when filtering lanes/agents.
        self.valid_lane_types = [1, 2] if self.dataset == 'Waymo' else [0, 11]
        self.vehicle_types = [1] if self.dataset == 'Waymo' else [0, 7]  # Waymo: Unset=0, Vehicle=1, Pedestrian=2, Cyclist=3, Other=4
        self.map_api = map_api  # NuPlan only
        self.past_lanes = {}
    def reset(self, *args, **kwargs):
        """Re-initialize the predictor for a new scenario.

        Expects keyword arguments: new_data, model_path, time_horizon,
        predict_device, ego_planner and predictor_list. Also re-seeds the
        goal points and resets the on-road flag.
        """
        time1 = time.perf_counter()
        # Trajectory prediction is only needed when either the env follows
        # predicted trajectories or ego collision checks use predictions.
        self.online_predictor(new_data=kwargs['new_data'], model_path=kwargs['model_path'],
                              time_horizon=kwargs['time_horizon'], predict_device=kwargs['predict_device'],
                              use_prediction=(self.follow_prediction_traj or self.predict_env_for_ego_collisions) and kwargs['ego_planner'],
                              predictor_list=kwargs['predictor_list'])
        time2 = time.perf_counter()
        self.online_predictor.setting_goal_points(current_data=kwargs['new_data'])
        self.current_on_road = True
        print(f"predictor reset with {time2-time1:04f}s")
        # self.data = self.online_predictor.data
def is_planning(self, current_frame_idx):
self.scenario_frame_number = current_frame_idx
frame_diff = self.scenario_frame_number - self.planning_from
if frame_diff >= 0 and frame_diff % self.planning_interval == 0:
return True
return False
def is_first_planning(self, current_frame_idx):
self.scenario_frame_number = current_frame_idx
frame_diff = self.scenario_frame_number - self.planning_from
if frame_diff >= 0 and frame_diff == 0: # frame_diff % self.planning_interval == 0:
return True
return False
    def collision_based_relevant_detection(self, current_frame_idx, current_state, predict_ego=True):
        """Grow the set of relevant agents by propagating future collisions.

        Starting from the ego (plus any previously relevant agents), scans
        each relevant agent's future poses for collisions with not-yet
        relevant agents; a colliding agent is added (and itself expanded)
        when it should yield per the relation predictor, or unconditionally
        for testing methods < 2. Results are written back into
        current_state['predicting'] as 'relevant_agents' and
        'colliding_pairs'. NOTE(review): predict_ego is currently unused.
        """
        ego_agent = current_state['predicting']['ego_id'][1]
        # Seed the worklist with the ego and any already-known relevant agents.
        if not current_state['predicting']['relevant_agents']:
            relevant_agents = [ego_agent]
            undetected_piles = [ego_agent]
        else:
            relevant_agents = current_state['predicting']['relevant_agents'].copy()
            if ego_agent not in relevant_agents:
                relevant_agents += [ego_agent]
            undetected_piles = relevant_agents.copy()
        colliding_pairs = []
        while len(undetected_piles) > 0:
            if self.all_relevant:
                # hard force all agents as relevant
                current_agent = undetected_piles.pop()
                for each_agent_id in current_state['agent']:
                    if each_agent_id != current_agent:
                        relevant_agents.append(each_agent_id)
                break
            current_agent = undetected_piles.pop()
            ego_poses = current_state['agent'][current_agent]['pose']
            ego_shape = current_state['agent'][current_agent]['shape'][0]
            detected_pairs = []
            ego_agent_0 = None
            # Scan every future frame of the current agent.
            for idx, each_pose in enumerate(ego_poses):
                if idx <= current_frame_idx:
                    continue
                ego_agent_packed =Agent(x=each_pose[0],
                                        y=each_pose[1],
                                        yaw=each_pose[3],
                                        length=max(1, ego_shape[1]),
                                        width=max(1, ego_shape[0]),
                                        agent_id=current_agent)
                # Remember the first future pose for the forward check below.
                if ego_agent_0 is None:
                    ego_agent_0 = ego_agent_packed
                for each_agent_id in current_state['agent']:
                    if [current_agent, each_agent_id] in detected_pairs:
                        continue
                    if each_agent_id == current_agent or each_agent_id in relevant_agents:
                        continue
                    each_agent_frame_num = current_state['agent'][each_agent_id]['pose'].shape[0]
                    if idx >= each_agent_frame_num:
                        continue
                    target_agent_packed =Agent(x=current_state['agent'][each_agent_id]['pose'][idx, 0],
                                               y=current_state['agent'][each_agent_id]['pose'][idx, 1],
                                               yaw=current_state['agent'][each_agent_id]['pose'][idx, 3],
                                               length=current_state['agent'][each_agent_id]['shape'][0][1],
                                               width=current_state['agent'][each_agent_id]['shape'][0][0],
                                               agent_id=each_agent_id)
                    # Skip frames where either pose is the -1 invalid marker.
                    if each_pose[0] == -1 or each_pose[1] == -1 or current_state['agent'][each_agent_id]['pose'][idx, 0] == -1 or current_state['agent'][each_agent_id]['pose'][idx, 1] == -1:
                        continue
                    collision = utils.check_collision(ego_agent_packed, target_agent_packed)
                    if collision:
                        detected_pairs.append([current_agent, each_agent_id])
                        yield_ego = True
                        # FORWARD COLLISION CHECKINGS: if the target already
                        # overlaps the current agent's first future pose, treat
                        # it as an immediate conflict without a relation query.
                        collision_0 = utils.check_collision(ego_agent_0, target_agent_packed)
                        if collision_0:
                            detected_relation = [[ego_agent_0, target_agent_packed]]
                        else:
                            # check relation with the learned relation predictor
                            self.online_predictor.relation_pred_onetime(each_pair=[current_agent, each_agent_id],
                                                                        current_frame=current_frame_idx,
                                                                        clear_history=True,
                                                                        current_data=current_state)
                            detected_relation = current_state['predicting']['relation']
                        if [each_agent_id, current_agent] in detected_relation:
                            if [current_agent, each_agent_id] in detected_relation:
                                # bi-directional relations, still yield
                                pass
                            else:
                                yield_ego = False
                        if yield_ego or self.method_testing < 2:
                            relevant_agents.append(each_agent_id)
                            undetected_piles.append(each_agent_id)
                            if [current_agent, each_agent_id] not in colliding_pairs and [each_agent_id, current_agent] not in colliding_pairs:
                                colliding_pairs.append([current_agent, each_agent_id])
        if self.test_task != 1:
            # don't predict ego
            relevant_agents.remove(ego_agent)
        current_state['predicting']['relevant_agents'] = relevant_agents
        current_state['predicting']['colliding_pairs'] = colliding_pairs
def clear_markers_per_step(self, current_state, current_frame_idx):
if self.is_planning(current_frame_idx):
current_state['predicting']['relation'] = []
current_state['predicting']['points_to_mark'] = []
current_state['predicting']['trajectory_to_mark'] = []
    def get_prediction_trajectories(self, current_frame_idx, current_state=None, time_horizon=80):
        """On planning frames, refresh relations and predict trajectories.

        Re-detects relevant agents, re-runs the pairwise relation predictor
        for every colliding pair, then (if configured to follow predictions
        and any relevant agents exist) runs variety or marginal prediction.
        Returns True when predictions were produced, False when skipped,
        and implicitly None on non-planning frames.
        NOTE(review): time_horizon is currently unused here.
        """
        if self.is_planning(current_frame_idx):
            frame_diff = self.scenario_frame_number - self.planning_from
            self.collision_based_relevant_detection(current_frame_idx, current_state)
            current_state['predicting']['relation'] = []
            for each_pair in current_state['predicting']['colliding_pairs']:
                self.online_predictor.relation_pred_onetime(each_pair=each_pair, current_data=current_state,
                                                            current_frame=current_frame_idx)
            if self.follow_prediction_traj and len(current_state['predicting']['relevant_agents']) > 0:
                # method_testing < 0 selects the multi-modal variety predictor.
                if self.method_testing < 0:
                    self.online_predictor.variety_predict(frame_diff)
                else:
                    self.online_predictor.marginal_predict(frame_diff)
                self.online_predictor.last_predict_frame = frame_diff + 5
                return True
            else:
                return False
    # def update_env_trajectory_speed_only(self, current_frame_idx, relevant_only=True, current_state=None):
    def update_env_trajectory_for_sudo_base_planner(self, current_frame_idx, current_state=None):
        """
        the sudo base planner for the ego vehicle

        On the first planning frame, projects the ego onto its original
        (ground-truth) path and re-times the remaining trajectory with a
        constant-deceleration speed profile (A_SLOWDOWN_DESIRE), capping each
        step by the original step speed. On every planning frame the ego's
        future trajectory is pushed to 'trajectory_to_mark' for drawing.
        Returns current_state unchanged when the ego is being predicted
        (test_task 1 or 2).
        """
        if self.test_task in [1, 2]:
            # predict ego
            return current_state
        # self.scenario_frame_number = current_frame_idx
        ego_id = current_state['predicting']['ego_id'][1]
        # for each_agent in current_state['agent']:
        #     if each_agent in [748, 781, 735]:
        #         current_state['predicting']['trajectory_to_mark'].append(
        #             current_state['predicting']['original_trajectory'][each_agent]['pose'][:, :])
        if self.is_first_planning(current_frame_idx):
            my_current_pose = current_state['agent'][ego_id]['pose'][current_frame_idx - 1]
            # Speed estimated from the last two frames (per-step distance).
            my_current_v_per_step = euclidean_distance(
                current_state['agent'][ego_id]['pose'][current_frame_idx - 1, :2],
                current_state['agent'][ego_id]['pose'][current_frame_idx - 2, :2])
            org_pose = current_state['predicting']['original_trajectory'][ego_id]['pose'].copy()
            # Project the current pose onto the closest point of the
            # original trajectory to anchor the interpolator.
            projected_pose_on_original = my_current_pose
            closest_distance = 999999
            closest_index = 0
            for idx, each_pose in enumerate(org_pose):
                dist = euclidean_distance(each_pose[:2], my_current_pose[:2])
                if dist < closest_distance:
                    closest_distance = dist
                    projected_pose_on_original = each_pose
                    closest_index = idx
            my_interpolator = SudoInterpolator(org_pose[closest_index:, :2], projected_pose_on_original)
            # my_current_pose = projected_pose_on_original
            total_frames = current_state['agent'][ego_id]['pose'].shape[0]
            total_distance_traveled = 0
            for i in range(total_frames - current_frame_idx):
                # Decelerate, but never exceed the original per-step speed
                # at this frame and never go backwards.
                my_current_v_per_step -= A_SLOWDOWN_DESIRE/self.frame_rate/self.frame_rate
                step_speed = euclidean_distance(
                    current_state['agent'][ego_id]['pose'][current_frame_idx+i - 1, :2],
                    current_state['agent'][ego_id]['pose'][current_frame_idx+i - 2, :2])
                my_current_v_per_step = max(0, min(my_current_v_per_step, step_speed))
                current_state['agent'][ego_id]['pose'][current_frame_idx+i, :] = my_interpolator.interpolate(total_distance_traveled + my_current_v_per_step)
                total_distance_traveled += my_current_v_per_step
        if self.is_planning(self.scenario_frame_number):
            current_state['predicting']['trajectory_to_mark'].append(current_state['agent'][ego_id]['pose'][current_frame_idx:, :])
        return current_state
    def find_closes_lane(self, current_state, agent_id, my_current_v_per_step, my_current_pose, no_unparallel=False,
                         return_list=False, current_route=[]):
        """Find the closest traceable lane(s) to an agent's pose.

        Prefers lanes whose direction is within 20 degrees of the agent's
        yaw and within 5 m; otherwise falls back to the closest lane of any
        direction unless no_unparallel is set. Lanes 0.5-3.2 m away in the
        same direction are collected as lane-change candidates and returned
        instead when return_list is True and any exist.
        Returns (lane_id, point_index, dist_to_lane, distance_threshold),
        with lists in the first two slots when return_list is True.
        NOTE(review): current_route=[] is a mutable default; it is only
        read, never mutated, so it is benign here.
        """
        # find a closest lane to trace
        closest_dist = 999999
        closest_dist_no_yaw = 999999
        closest_dist_threshold = 5
        closest_lane = None
        closest_lane_no_yaw = None
        closest_lane_pt_no_yaw_idx = None
        closest_lane_pt_idx = None
        current_lane = None
        current_closest_pt_idx = None
        dist_to_lane = None
        distance_threshold = None
        closest_lanes_same_dir = []
        closest_lanes_idx_same_dir = []
        for each_lane in current_state['road']:
            # Restrict the search to the given route, when one is provided.
            if len(current_route) > 0 and each_lane not in current_route:
                continue
            # Filter by lane type (scalar or sequence encoding).
            if isinstance(current_state['road'][each_lane]['type'], int):
                if current_state['road'][each_lane]['type'] not in self.valid_lane_types:
                    continue
            else:
                if current_state['road'][each_lane]['type'][0] not in self.valid_lane_types:
                    continue
            road_xy = current_state['road'][each_lane]['xyz'][:, :2]
            if road_xy.shape[0] < 3:
                continue
            current_lane_closest_dist = 999999
            current_lane_closest_idx = None
            for j, each_xy in enumerate(road_xy):
                road_yaw = current_state['road'][each_lane]['dir'][j]
                dist = euclidean_distance(each_xy, my_current_pose[:2])
                yaw_diff = abs(utils.normalize_angle(my_current_pose[3] - road_yaw))
                if dist < closest_dist_no_yaw:
                    closest_lane_no_yaw = each_lane
                    closest_dist_no_yaw = dist
                    closest_lane_pt_no_yaw_idx = j
                # Direction-aware candidate: within 20 degrees and 5 m.
                if yaw_diff < math.pi / 180 * 20 and dist < closest_dist_threshold:
                    if dist < closest_dist:
                        closest_lane = each_lane
                        closest_dist = dist
                        closest_lane_pt_idx = j
                    if dist < current_lane_closest_dist:
                        current_lane_closest_dist = dist
                        current_lane_closest_idx = j
            # classify current agent as a lane changer or not:
            if my_current_v_per_step > 0.1 and 0.5 < current_lane_closest_dist < 3.2 and each_lane not in closest_lanes_same_dir and current_state['road'][each_lane]['turning'] == 0:
                closest_lanes_same_dir.append(each_lane)
                closest_lanes_idx_same_dir.append(current_lane_closest_idx)
        # Only keep lane-change candidates when the best lane itself sits in
        # the 0.5-3.2 m lane-change band.
        if closest_lane is not None and not 0.5 < closest_dist < 3.2:
            closest_lanes_same_dir = []
            closest_lanes_idx_same_dir = []
        if closest_lane is not None:
            current_lane = closest_lane
            current_closest_pt_idx = closest_lane_pt_idx
            dist_to_lane = closest_dist
            distance_threshold = max(7, max(7 * my_current_v_per_step, dist_to_lane))
        elif closest_lane_no_yaw is not None and not no_unparallel:
            current_lane = closest_lane_no_yaw
            current_closest_pt_idx = closest_lane_pt_no_yaw_idx
            dist_to_lane = closest_dist_no_yaw
            distance_threshold = max(10, dist_to_lane)
        else:
            logging.warning(f'No current lane founded: {agent_id}')
            # return
        if return_list:
            if len(closest_lanes_same_dir) > 0:
                return closest_lanes_same_dir, closest_lanes_idx_same_dir, dist_to_lane, distance_threshold
            else:
                return [current_lane], [current_closest_pt_idx], dist_to_lane, distance_threshold
        else:
            return current_lane, current_closest_pt_idx, dist_to_lane, distance_threshold
def set_route(self, goal_pt, road_dic, current_pose=None, previous_routes=None, max_number_of_routes=50, route_roadblock_check=None, agent_id=None):
    """Build candidate lane-level routes from current_pose towards goal_pt via BFS.

    Finds the lanes nearest to the current pose and to the goal on the map, then
    runs a breadth-first search over lane connectivity in ``road_dic`` collecting
    up to ``max_number_of_routes`` routes (each a list of lane ids).

    Args:
        goal_pt: goal point; only indices 0 and 1 (x, y) are read.
        road_dic: road dictionary keyed by lane/roadblock id.
        current_pose: current pose; only indices 0 and 1 (x, y) are read.
        previous_routes: optional previously computed routes; any route that
            still contains the closest lane is truncated at that lane and reused.
        max_number_of_routes: stop searching once this many routes are collected.
        route_roadblock_check: optional roadblock ids of the original route; only
            used when agent_id == 'ego' to keep the start lane on that route.
        agent_id: id of the agent being routed.

    Returns:
        A list of routes, each a list of lane ids starting at the closest lane.
    """
    from nuplan.common.actor_state.state_representation import Point2D
    from nuplan.common.maps.maps_datatypes import SemanticMapLayer
    # Nearest lane to the current pose and to the goal (with distance).
    closest_lane_id, dist_to_lane = self.map_api.get_distance_to_nearest_map_object(point=Point2D(current_pose[0], current_pose[1]),
                                                                                    layer=SemanticMapLayer.LANE)
    target_lane_id, dist_to_lane = self.map_api.get_distance_to_nearest_map_object(point=Point2D(goal_pt[0], goal_pt[1]),
                                                                                   layer=SemanticMapLayer.LANE)
    if route_roadblock_check is not None and agent_id == 'ego':
        # For ego, force the starting lane onto the original route's roadblocks.
        route_lanes = []
        for each_roadbloack in route_roadblock_check:
            if each_roadbloack not in road_dic:
                continue
            route_lanes += road_dic[each_roadbloack]['lower_level']
        if closest_lane_id not in route_lanes:
            # Retry with lane connectors before falling back to any route lane.
            closest_lane_id, dist_to_lane = self.map_api.get_distance_to_nearest_map_object(
                point=Point2D(current_pose[0], current_pose[1]),
                layer=SemanticMapLayer.LANE_CONNECTOR)
            if closest_lane_id not in route_lanes:
                for each_lane in route_lanes:
                    # NOTE(review): self.past_lanes is used as a dict keyed by
                    # agent_id elsewhere; this membership test checks lane ids
                    # against its keys — confirm this is intended.
                    if each_lane not in self.past_lanes:
                        print("[env planner] WARNING: closest lane/connector in original route not found with closest lanes for ego")
                        closest_lane_id = each_lane
                        dist_to_lane = 1
                        break
    if not isinstance(dist_to_lane, int) or dist_to_lane > 30:
        # Goal is far from any lane: try lane connectors for the target instead.
        target_lane_id, dist_to_lane = self.map_api.get_distance_to_nearest_map_object(
            point=Point2D(goal_pt[0], goal_pt[1]),
            layer=SemanticMapLayer.LANE_CONNECTOR)
    closest_lane_id = int(closest_lane_id)
    target_lane_id = int(target_lane_id)
    available_routes = []
    checking_pile = [[closest_lane_id]]  # BFS frontier; each entry is a partial route
    lanes_visited = []
    if previous_routes is not None:
        # Reuse previous routes that still pass through the closest lane.
        for each_route in previous_routes:
            if closest_lane_id in each_route:
                closest_lane_idx = each_route.index(closest_lane_id)
                available_routes.append(each_route[closest_lane_idx:])
    while len(checking_pile) > 0 and len(available_routes) < max_number_of_routes:
        # BFS
        next_pile = []
        for each_route in checking_pile:
            latest_lane = each_route[-1]
            if latest_lane not in road_dic:
                continue
            if latest_lane == target_lane_id:
                # Target reached: record the route, then restart the search from
                # the start lane to look for alternative routes.
                available_routes.append(each_route+[target_lane_id])
                next_pile = [[closest_lane_id]]
                lanes_visited = []
            else:
                all_next_lanes = road_dic[latest_lane]['next_lanes']
                uppder_roadblock = road_dic[latest_lane]['upper_level'][0]
                ENVCHANGE_LANE = False  # lane changing during routing is disabled
                if uppder_roadblock in road_dic and ENVCHANGE_LANE:
                    parallel_lanes = road_dic[uppder_roadblock]['lower_level']
                else:
                    parallel_lanes = []
                # NOTE(review): `+=` would mutate road_dic's 'next_lanes' list in
                # place if parallel_lanes were non-empty; currently a no-op since
                # ENVCHANGE_LANE is False.
                all_next_lanes += parallel_lanes
                # all_next_lanes += self.road_dic[latest_lane]['upper_level']
                # if len(all_next_lanes) == 0 and len(each_route) == 1:
                #     # starting from a dead end, turn around
                #     all_next_lanes = road_dic[latest_lane]['previous_lanes']
                for each_next_lane in all_next_lanes:
                    if each_next_lane in each_route:
                        # avoid circles
                        continue
                    if each_next_lane not in lanes_visited:
                        next_pile.append(each_route+[each_next_lane])
                        lanes_visited.append(each_next_lane)
                    else:
                        # Lane already expanded elsewhere: splice this partial
                        # route onto a known route through that lane.
                        for each_available_route in available_routes:
                            if each_next_lane in each_available_route:
                                idx = each_available_route.index(each_next_lane)
                                if idx != 0:
                                    route_to_add = each_route + [each_next_lane] + each_available_route[idx:]
                                    if route_to_add not in available_routes:
                                        available_routes.append(route_to_add)
                                break
        checking_pile = next_pile
    return available_routes
def get_reroute_traj(self, current_state, agent_id, current_frame_idx,
                     follow_org_route=False, dynamic_turnings=True, current_route=None, is_ego=False):
    """
    Return a marginal planned trajectory with a simple lane follower.

    For NuPlan (ego), routes are derived from route roadblocks (a list of road
    block ids in current_state['route']); for Waymo, from ``current_route`` (a
    list of lane ids) plus a prior list of lane ids detected from the original
    ground-truth trajectory.

    Args:
        current_state: scenario dict with 'road', 'route', 'predicting', ... sub-dicts.
        agent_id: id of the agent to plan for.
        current_frame_idx: index of the current frame.
        follow_org_route: if True, stick to lanes detected from the original trajectory.
        dynamic_turnings: if True, skip U-turn / short turning lanes while tracing.
        current_route: optional route(s); defaults to an empty list.
        is_ego: whether this agent is the ego vehicle.

    Returns:
        (trajectory, route): for ego, a list of trajectories and the route(s);
        otherwise a single trajectory array and the route list.
    """
    # FIX: the default used to be a shared mutable list (`current_route=[]`);
    # normalize a None default here instead — same effective behavior.
    if current_route is None:
        current_route = []
    assert self.routed_traj is not None, self.routed_traj
    # generate a trajectory based on the route
    # 1. get the route for relevant agents
    # find the closest lane to trace
    my_current_pose, my_current_v_per_step = plan_helper.get_current_pose_and_v(current_state=current_state,
                                                                                agent_id=agent_id,
                                                                                current_frame_idx=current_frame_idx)
    my_current_v_per_step = np.clip(my_current_v_per_step, a_min=0, a_max=7)
    goal_pt, goal_yaw = self.online_predictor.goal_setter.get_goal(current_data=current_state,
                                                                   agent_id=agent_id,
                                                                   dataset=self.dataset)
    if PRINT_TIMER:
        last_tic = time.perf_counter()
    if agent_id not in self.past_lanes:
        self.past_lanes[agent_id] = []
    if self.dataset == 'NuPlan' and is_ego:
        goal_lane, _, _ = plan_helper.find_closest_lane(
            current_state=current_state,
            my_current_pose=[goal_pt[0], goal_pt[1], -1, goal_yaw],
            valid_lane_types=self.valid_lane_types,
        )
        # current_route is a list of multiple routes to choose
        if len(current_route) == 0:
            # No route yet: restrict the search to lanes on the route roadblocks.
            lanes_in_route = []
            route_roadblocks = current_state['route'] if 'route' in current_state else None
            for each_block in route_roadblocks:
                if each_block not in current_state['road']:
                    continue
                lanes_in_route += current_state['road'][each_block]['lower_level']
            current_lanes, current_closest_pt_indices, dist_to_lane = plan_helper.find_closest_lane(
                current_state=current_state,
                my_current_pose=my_current_pose,
                selected_lanes=lanes_in_route,
                valid_lane_types=self.valid_lane_types,
                excluded_lanes=self.past_lanes[agent_id]
            )
        else:
            # Routes exist: restrict the search to lanes on any candidate route.
            selected_lanes = []
            for each_route in current_route:
                selected_lanes += each_route
            current_lanes, current_closest_pt_indices, dist_to_lane = plan_helper.find_closest_lane(
                current_state=current_state,
                my_current_pose=my_current_pose,
                selected_lanes=selected_lanes,
                valid_lane_types=self.valid_lane_types,
                excluded_lanes=self.past_lanes[agent_id]
            )
    else:
        if len(current_route) > 0:
            current_route = current_route[0]
        current_lanes, current_closest_pt_indices, dist_to_lane = plan_helper.find_closest_lane(
            current_state=current_state,
            my_current_pose=my_current_pose,
            selected_lanes=current_route,
            valid_lane_types=self.valid_lane_types,
            excluded_lanes=self.past_lanes[agent_id]
        )
    if dist_to_lane is not None:
        distance_threshold = max(self.frame_rate, max(self.frame_rate * my_current_v_per_step, dist_to_lane))
    else:
        # NOTE(review): distance_threshold stays unset on this path; downstream
        # code assumes current_lane is also None here — confirm with find_closest_lane.
        dist_to_lane = 999
    self.current_on_road = not (dist_to_lane > OFF_ROAD_DIST)
    if self.dataset == 'NuPlan' and len(current_route) == 0 and is_ego:
        # Route recomputation via set_route is currently disabled.
        pass
    else:
        if current_lanes in current_route and not isinstance(current_lanes, list):
            # Mark lanes already driven past so they are excluded next time.
            for each_past_lane in current_route[:current_route.index(current_lanes)]:
                if each_past_lane not in self.past_lanes[agent_id]:
                    self.past_lanes[agent_id].append(each_past_lane)
    if isinstance(current_lanes, list):
        # deprecated
        lane_found_in_route = False
        for each_lane in current_lanes:
            if each_lane in current_route:
                current_lane = each_lane
                lane_found_in_route = True
                break
        if not lane_found_in_route:
            current_lane = random.choice(current_lanes)
        idx = current_lanes.index(current_lane)
        current_closest_pt_idx = current_closest_pt_indices[idx]
    else:
        current_lane = current_lanes
        current_closest_pt_idx = current_closest_pt_indices
    if PRINT_TIMER:
        print(f"Time spent on first lane search: {time.perf_counter() - last_tic:04f}s")
        last_tic = time.perf_counter()
    if self.dataset == 'NuPlan' and is_ego:
        # use route_roadblocks
        prior_lanes = []
        if current_lane is None:
            print("WARNING: Ego Current Lane not found")
        elif len(current_route) == 0:
            # Sample the original trajectory every 10 frames (up to frame 90) to
            # detect prior lanes; this list needs to be neither accurate nor connected.
            prior_lanes = []
            org_closest_pt_idx = []
            for i in range(50):
                if i + current_frame_idx > 90:
                    break
                if i == 0:
                    continue
                if i % 10 != 0:
                    continue
                # FIX: call through plan_helper for consistency with the other
                # call sites in this method (was a bare get_current_pose_and_v).
                looping_pose, looping_v = plan_helper.get_current_pose_and_v(current_state=current_state,
                                                                             agent_id=agent_id,
                                                                             current_frame_idx=current_frame_idx + i)
                looping_lane, looping_closest_idx, dist_to_lane = plan_helper.find_closest_lane(
                    current_state=current_state,
                    my_current_pose=looping_pose,
                    include_unparallel=False,
                    valid_lane_types=self.valid_lane_types,
                    excluded_lanes=self.past_lanes[agent_id]
                )
                if looping_lane is not None and looping_lane not in prior_lanes and dist_to_lane < 5:
                    prior_lanes.append(looping_lane)
                    org_closest_pt_idx.append(looping_closest_idx)
            if PRINT_TIMER:
                print(f"Time spent on loop lane search: {time.perf_counter() - last_tic:04f}s")
                last_tic = time.perf_counter()
    else:
        prior_lanes = current_route
    # 2. find a spot to enter
    # Make connection with BC (cubic Bezier curve)
    accum_dist = -0.0001
    p4 = None
    cuttin_lane_id = None
    cuttin_lane_idx = None
    first_lane = True

    def search_lanes(current_lane, route_roadblocks):
        # Enumerate lane sequences (up to 3 lanes deep, no lane changes) that stay
        # on the given route roadblocks, starting from current_lane.
        result_lanes = []
        if goal_lane not in self.past_lanes['ego']:
            goal_roadblock = current_state['road'][goal_lane]['upper_level'][0]
            current_roadblock = current_state['road'][current_lane]['upper_level'][0]
            if goal_roadblock == current_roadblock:
                # already on the goal's roadblock: start from the goal lane itself
                current_lane = goal_lane
        lanes_to_loop = [[current_lane]]
        visited_lanes = [current_lane]
        while len(lanes_to_loop) > 0:
            looping_lanes = lanes_to_loop.pop()
            if len(looping_lanes) >= 3:
                result_lanes.append(looping_lanes)
                continue
            looping_lane = looping_lanes[-1]
            looping_roadblock = current_state['road'][looping_lane]['upper_level'][0]
            if looping_roadblock not in route_roadblocks:
                continue
            next_lanes = current_state['road'][looping_lane]['next_lanes']
            for each_lane in next_lanes:
                if each_lane not in visited_lanes:
                    visited_lanes.append(each_lane)
                    if each_lane not in current_state['road']:
                        result_lanes.append(looping_lanes)
                        continue
                    each_block = current_state['road'][each_lane]['upper_level'][0]
                    if each_block not in route_roadblocks:
                        continue
                    lanes_to_loop.append(looping_lanes+[each_lane])
            if len(lanes_to_loop) == 0 and len(looping_lanes) > 0:
                result_lanes.append(looping_lanes)
        return result_lanes
    if self.dataset == 'NuPlan' and is_ego and current_lane is not None:
        route_roadblocks = current_state['route'] if 'route' in current_state else None
        current_upper_roadblock = current_state['road'][current_lane]['upper_level'][0]
        if current_upper_roadblock not in route_roadblocks:
            route_roadblocks.insert(0, current_upper_roadblock)
        # Extend the route to at least 3 roadblocks when possible.
        # NOTE(review): reads 'next_lanes' of a roadblock entry — confirm roadblock
        # entries expose their successors under this key.
        while len(route_roadblocks) < 3 and route_roadblocks[-1] in current_state['road']:
            next_roadblocks = current_state['road'][route_roadblocks[-1]]['next_lanes']
            if len(next_roadblocks) == 0 or next_roadblocks[0] not in current_state['road']:
                break
            route_roadblocks.append(current_state['road'][route_roadblocks[-1]]['next_lanes'][0])
        # assumption: not far from current lane
        result_lanes = search_lanes(current_lane, route_roadblocks)
        if len(result_lanes) == 0:
            print("WARNING: No available route found")
            assert False, 'No Available Route Found for ego'
        result_traj = []
        for each_route in result_lanes:
            current_trajectory = None
            reference_trajectory = None
            reference_yaw = None
            for each_lane in each_route:
                if each_lane not in current_state['road']:
                    break
                if reference_trajectory is None:
                    reference_trajectory = current_state['road'][each_lane]['xyz'][current_closest_pt_idx:, :2].copy()
                    reference_yaw = current_state['road'][each_lane]['dir'][current_closest_pt_idx:].copy()
                else:
                    reference_trajectory = np.concatenate((reference_trajectory,
                                                           current_state['road'][each_lane]['xyz'][:, :2].copy()))
                    reference_yaw = np.concatenate((reference_yaw,
                                                    current_state['road'][each_lane]['dir'].copy()))
            # get CBC
            # NOTE(review): reference_trajectory is None if the first lane is
            # missing from the road dict — confirm this cannot happen here.
            if reference_trajectory.shape[0] < 2:
                # Degenerate reference: extrapolate a straight line forward.
                p1 = my_current_pose[:2]
                yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2)
                delta = self.planning_horizon
                x, y = -math.sin(yaw) * delta + my_current_pose[0], -math.cos(yaw) * delta + \
                       my_current_pose[1]
                p2 = [x, y]
                p3 = p2
                x, y = -math.sin(yaw) * delta + p2[0], -math.cos(yaw) * delta + p2[1]
                p4 = [x, y]
                # 4. generate a curve with cubic BC
                if my_current_v_per_step < 1:
                    proper_v_for_cbc = (my_current_v_per_step + 1) / 2
                else:
                    proper_v_for_cbc = my_current_v_per_step
                if euclidean_distance(p4, p1) > 1:
                    print(f"No lanes found for route of {agent_id} {proper_v_for_cbc} {my_current_pose}")
                    connection_traj = self.trajectory_from_cubic_BC(p1=p1, p2=p2, p3=p3, p4=p4, v=proper_v_for_cbc)
                else:
                    assert False, f"Error: P4, P1 overlapping {p4, p1}"
                assert connection_traj.shape[0] > 0, connection_traj.shape
                result_traj.append(connection_traj)
                current_state['predicting']['trajectory_to_mark'].append(current_trajectory)
            else:
                # Enter the reference about 2 seconds ahead at the current speed.
                starting_index = int(my_current_v_per_step * self.frame_rate * 2)
                starting_index = min(starting_index, reference_trajectory.shape[0] - 1)
                p4 = reference_trajectory[starting_index, :2]
                starting_yaw = -utils.normalize_angle(reference_yaw[starting_index] + math.pi / 2)
                delta = euclidean_distance(p4, my_current_pose[:2]) / 4
                x, y = math.sin(starting_yaw) * delta + p4[0], math.cos(starting_yaw) * delta + p4[1]
                p3 = [x, y]
                p1 = my_current_pose[:2]
                yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2)
                delta = min(70/self.frame_rate, euclidean_distance(p4, my_current_pose[:2]) / 2)
                x, y = -math.sin(yaw) * delta + my_current_pose[0], -math.cos(yaw) * delta + my_current_pose[1]
                p2 = [x, y]
                if euclidean_distance(p4, p1) > 2:
                    if my_current_v_per_step < 1:
                        proper_v_for_cbc = (my_current_v_per_step + 1) / 2
                    else:
                        proper_v_for_cbc = my_current_v_per_step
                    connection_traj = self.trajectory_from_cubic_BC(p1=p1, p2=p2, p3=p3, p4=p4, v=proper_v_for_cbc)
                    current_trajectory = np.concatenate((connection_traj, reference_trajectory[starting_index:, :2]))
                else:
                    current_trajectory = reference_trajectory[starting_index:, :2]
                result_traj.append(current_trajectory)
                current_state['predicting']['trajectory_to_mark'].append(current_trajectory)
        assert len(result_traj) == len(result_lanes), f'unmatched shape {len(result_traj)} {len(result_lanes)}'
        self.routed_traj[agent_id] = result_traj
        return self.routed_traj[agent_id], result_lanes
    if current_lane is not None:
        current_looping_lane = current_lane
        while_counter = 0
        if distance_threshold > 100:
            print("Closest lane detection failded: ", agent_id, current_looping_lane, distance_threshold, my_current_v_per_step, dist_to_lane, current_route)
        else:
            distance_threshold = max(distance_threshold, self.frame_rate * my_current_v_per_step)
        while accum_dist < distance_threshold and distance_threshold <= 100:
            if while_counter > 100:
                print("ERROR: Infinite looping lanes")
                break
            while_counter += 1
            # turning: 1=left turn, 2=right turn, 3=UTurn
            # UTurn -> Skip
            # Left/Right check distance, if < 15 then skip, else not skip
            if current_looping_lane not in current_state['road']:
                break
            current_looping_lane_turning = current_state['road'][current_looping_lane]['turning']
            if dynamic_turnings and current_looping_lane_turning == 3 or (current_looping_lane_turning in [1, 2] and euclidean_distance(current_state['road'][current_looping_lane]['xyz'][-1, :2], my_current_pose[:2]) < 15):
                # skip turning lanes
                pass
            elif while_counter > 50:
                print("Inifinite looping lanes (agent_id/current_lane): ", agent_id, current_looping_lane)
                accum_dist = distance_threshold - 0.1
            else:
                if first_lane:
                    road_xy = current_state['road'][current_looping_lane]['xyz'][current_closest_pt_idx:, :2].copy()
                else:
                    road_xy = current_state['road'][current_looping_lane]['xyz'][:, :2].copy()
                # Walk the lane centerline until the entry distance is covered.
                for j, each_xy in enumerate(road_xy):
                    if j == 0:
                        continue
                    accum_dist += euclidean_distance(each_xy, road_xy[j - 1])
                    if accum_dist >= distance_threshold:
                        p4 = each_xy
                        if first_lane:
                            yaw = - utils.normalize_angle(
                                current_state['road'][current_looping_lane]['dir'][j + current_closest_pt_idx] + math.pi / 2)
                        else:
                            yaw = - utils.normalize_angle(
                                current_state['road'][current_looping_lane]['dir'][j] + math.pi / 2)
                        delta = euclidean_distance(p4, my_current_pose[:2]) / 4
                        x, y = math.sin(yaw) * delta + p4[0], math.cos(yaw) * delta + p4[1]
                        p3 = [x, y]
                        cuttin_lane_id = current_looping_lane
                        if first_lane:
                            cuttin_lane_idx = j + current_closest_pt_idx
                        else:
                            cuttin_lane_idx = j
                        break
            if p4 is None:
                if current_looping_lane in prior_lanes and current_looping_lane != prior_lanes[-1]:
                    # if already has route, then use previous route
                    current_lane_route_idx = prior_lanes.index(current_looping_lane)
                    current_looping_lane = prior_lanes[current_lane_route_idx+1]
                else:
                    # if not, try to loop a new route
                    next_lanes = current_state['road'][current_looping_lane]['next_lanes']
                    next_lane_found = False
                    if follow_org_route:
                        if current_looping_lane in prior_lanes:
                            # follow original lanes
                            current_idx = prior_lanes.index(current_looping_lane)
                            if current_idx < len(prior_lanes) - 1:
                                next_lane = prior_lanes[current_idx + 1]
                                next_lane_found = True
                                if next_lane in next_lanes:
                                    # next lane connected, loop this next lane and continue next loop
                                    current_looping_lane = next_lane
                                else:
                                    # next lane not connected
                                    # 1. find closest point
                                    road_xy = current_state['road'][current_looping_lane]['xyz'][:, :2].copy()
                                    closest_dist = 999999
                                    closest_lane_idx = None
                                    turning_yaw = None
                                    for j, each_xy in enumerate(road_xy):
                                        dist = euclidean_distance(each_xy[:2], my_current_pose[:2])
                                        if dist < closest_dist:
                                            closest_lane_idx = j
                                            closest_dist = dist
                                            turning_yaw = utils.normalize_angle(my_current_pose[3] - current_state['road'][current_looping_lane]['dir'][j])
                                    if closest_lane_idx is None:
                                        # follow no next lane logic below
                                        next_lane_found = False
                                    else:
                                        max_turning_dist = 120 / math.pi
                                        if closest_dist >= max_turning_dist:
                                            # too far for max turning speed 15m/s
                                            if turning_yaw > math.pi / 2:
                                                # turn towards target lane first on the right
                                                # FIX: was `math / 2` (the math module divided by 2 — a
                                                # guaranteed TypeError); the intended 90-degree offset
                                                # is `math.pi / 2`.
                                                yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2) + math.pi / 2
                                                delta = 180 / math.pi
                                                x, y = math.sin(yaw) * delta + my_current_pose[0], math.cos(yaw) * delta + my_current_pose[1]
                                                p4 = [x, y]
                                                yaw = yaw - math.pi / 2  # FIX: was `math / 2`
                                                delta = delta / 2
                                                x, y = math.sin(yaw) * delta + my_current_pose[0], math.cos(yaw) * delta + my_current_pose[1]
                                                p3 = [x, y]
                                                break
                                            if turning_yaw <= math.pi / 2:
                                                # turn towards target lane first on the right
                                                yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2) - math.pi / 2  # FIX: was `math / 2`
                                                delta = 180 / math.pi
                                                x, y = math.sin(yaw) * delta + my_current_pose[0], math.cos(yaw) * delta + my_current_pose[1]
                                                p4 = [x, y]
                                                yaw = yaw + math.pi / 2  # FIX: was `math / 2`
                                                delta = delta / 2
                                                x, y = math.sin(yaw) * delta + my_current_pose[0], math.cos(yaw) * delta + my_current_pose[1]
                                                p3 = [x, y]
                                                break
                    else:
                        accum_dist = distance_threshold - 0.1
                    if not next_lane_found:
                        # follow prior or choose a random one as the next
                        if len(next_lanes) > 0:
                            current_looping_lane_changes = False
                            for each_lane in next_lanes:
                                if each_lane in prior_lanes:
                                    current_looping_lane = each_lane
                                    current_looping_lane_changes = True
                            if not current_looping_lane_changes:
                                # random choose one lane as route
                                current_looping_lane = random.choice(next_lanes)
                        else:
                            print("warning: no next lane found with breaking the lane finding loop")
                            break
            else:
                break
            first_lane = False
    if PRINT_TIMER:
        print(f"Time spent on while loop: {time.perf_counter() - last_tic:04f}s")
        last_tic = time.perf_counter()
    if p4 is None:
        # not found any lane at all, generate a linear line forward
        # 3. gennerate p1 and p2
        p1 = my_current_pose[:2]
        yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2)
        delta = self.planning_horizon
        x, y = -math.sin(yaw) * delta + my_current_pose[0], -math.cos(yaw) * delta + \
               my_current_pose[1]
        p2 = [x, y]
        p3 = p2
        x, y = -math.sin(yaw) * delta + p2[0], -math.cos(yaw) * delta + p2[1]
        p4 = [x, y]
        # 4. generate a curve with cubic BC
        if my_current_v_per_step < 1:
            proper_v_for_cbc = (my_current_v_per_step + 1) / 2
        else:
            proper_v_for_cbc = my_current_v_per_step
        if euclidean_distance(p4, p1) > 1:
            print(f"No lanes found for route of {agent_id} {proper_v_for_cbc} {my_current_pose}")
            connection_traj = self.trajectory_from_cubic_BC(p1=p1, p2=p2, p3=p3, p4=p4, v=proper_v_for_cbc)
        else:
            assert False, f"Error: P4, P1 overlapping {p4, p1}"
        assert connection_traj.shape[0] > 0, connection_traj.shape
        self.routed_traj[agent_id] = connection_traj
    else:
        assert cuttin_lane_id is not None
        # 3. gennerate p1 and p2
        p1 = my_current_pose[:2]
        yaw = - utils.normalize_angle(my_current_pose[3] + math.pi / 2)
        delta = min(7, euclidean_distance(p4, my_current_pose[:2]) / 2)
        x, y = -math.sin(yaw) * delta + my_current_pose[0], -math.cos(yaw) * delta + \
               my_current_pose[1]
        p2 = [x, y]
        if my_current_v_per_step < 1:
            proper_v_for_cbc = (my_current_v_per_step + 1) / 2
        else:
            proper_v_for_cbc = my_current_v_per_step
        connection_traj = self.trajectory_from_cubic_BC(p1=p1, p2=p2, p3=p3, p4=p4, v=proper_v_for_cbc)
        # loop out a route
        current_looping_lane = cuttin_lane_id
        lanes_in_a_route = [current_looping_lane]
        route_traj_left = np.array(current_state['road'][current_looping_lane]['xyz'][cuttin_lane_idx:, :2], ndmin=2)
        next_lanes = current_state['road'][current_looping_lane]['next_lanes']
        while len(next_lanes) > 0 and len(lanes_in_a_route) < 10:
            any_lane_in_route = False
            if len(prior_lanes) > 0:
                for each_next_lane in next_lanes:
                    if each_next_lane in prior_lanes:
                        any_lane_in_route = True
                        current_looping_lane = each_next_lane
                        break
            if not any_lane_in_route:
                # try to follow original route
                current_lane_changed = False
                lanes_to_choose = []
                for each_next_lane in next_lanes:
                    if each_next_lane in prior_lanes:
                        current_looping_lane = each_next_lane
                        current_lane_changed = True
                        break
                    if each_next_lane in current_state['road']:
                        lanes_to_choose.append(each_next_lane)
                if current_lane_changed:
                    pass
                elif len(lanes_to_choose) == 0:
                    print("NO VALID NEXT LANE TO CHOOSE from env_planner for ", agent_id)
                    break
                else:
                    # random choose one lane as route
                    current_looping_lane = random.choice(lanes_to_choose)
            if current_looping_lane not in current_state['road']:
                print("selected lane not found in road dic")
                break
            lanes_in_a_route.append(current_looping_lane)
            next_lanes = current_state['road'][current_looping_lane]['next_lanes']
            route_traj_left = np.concatenate(
                (route_traj_left, current_state['road'][current_looping_lane]['xyz'][10:, :2]))  # start with a margin to avoid overlapping ends and starts
        if len(current_route) == 0:
            # initiation the route and return
            current_route = lanes_in_a_route
            if is_ego:
                goal_pt, goal_yaw = self.online_predictor.goal_setter.get_goal(current_data=current_state,
                                                                               agent_id=agent_id,
                                                                               dataset=self.dataset)
                assert goal_pt is not None and goal_yaw is not None, goal_pt
                ending_lane, ending_lane_idx, dist_to_ending_lane = plan_helper.find_closest_lane(
                    current_state=current_state,
                    my_current_pose=[goal_pt[0], goal_pt[1], 0, goal_yaw],
                    valid_lane_types=self.valid_lane_types
                )
                if ending_lane is not None:
                    if dist_to_ending_lane > 30:
                        logging.warning('Goal Point Off Road')
                    self.target_lanes = [ending_lane, ending_lane_idx]
                    if ending_lane not in lanes_in_a_route:
                        # Walk backwards from the goal lane (up to 10 hops) until a
                        # predecessor lane close to the current pose is found.
                        back_looping_counter = 0
                        back_to_loop_lanes = [ending_lane]
                        target_lane = ending_lane
                        while back_looping_counter < 10:
                            back_looping_counter += 1
                            current_back_looping_lane = back_to_loop_lanes.pop()
                            _, _, distance_to_ending_lane = plan_helper.find_closest_lane(
                                current_state=current_state,
                                my_current_pose=my_current_pose,
                                selected_lanes=[current_back_looping_lane],
                                valid_lane_types=self.valid_lane_types
                            )
                            if distance_to_ending_lane < OFF_ROAD_DIST:
                                target_lane = current_back_looping_lane
                                break
                            else:
                                if current_back_looping_lane not in current_state['road']:
                                    break
                                prev_lanes = current_state['road'][current_back_looping_lane]['previous_lanes']
                                if not isinstance(prev_lanes, list):
                                    prev_lanes = prev_lanes.tolist()
                                if len(prev_lanes) == 0:
                                    break
                                back_to_loop_lanes += prev_lanes
                        current_route = [target_lane]
                else:
                    logging.warning('No Lane Found for Goal Point at all')
        route_traj_left = np.array(route_traj_left, ndmin=2)
        # 4. generate a curve with cubic BC
        if euclidean_distance(p4, p1) > 2:
            if len(route_traj_left.shape) < 2:
                print(route_traj_left.shape, route_traj_left)
                self.routed_traj[agent_id] = connection_traj
            else:
                if euclidean_distance(p4, p1) > 1 and len(connection_traj.shape) > 0 and connection_traj.shape[0] > 1:
                    # concatenate org_traj, connection_traj, route_traj_left
                    self.routed_traj[agent_id] = np.concatenate(
                        (connection_traj, route_traj_left))
                else:
                    self.routed_traj[agent_id] = route_traj_left
        else:
            self.routed_traj[agent_id] = route_traj_left
    if PRINT_TIMER:
        print(f"Time spent on CBC: {time.perf_counter() - last_tic:04f}s")
        last_tic = time.perf_counter()
    if DRAW_CBC_PTS:
        current_state['predicting']['mark_pts'] = [p4, p3, p2, p1]
    if is_ego:
        if self.dataset == 'NuPlan':
            return [self.routed_traj[agent_id]], current_route
        else:
            return [self.routed_traj[agent_id]], [current_route]
    else:
        return self.routed_traj[agent_id], current_route
def adjust_speed_for_collision(self, interpolator, distance_to_end, current_v, end_point_v, reschedule_speed_profile=False):
    """Build a trajectory that decelerates along the interpolator's path.

    Computes a per-frame speed profile that reaches ``end_point_v`` before
    travelling ``distance_to_end``, then samples the interpolator at the
    accumulated travelled distance each frame.

    Args:
        interpolator: path interpolator exposing interpolate(dist),
            get_speed_with_index(i) and a ``trajectory`` array.
        distance_to_end: distance to the predicted collision/end point.
        current_v: current speed per frame.
        end_point_v: desired speed per frame at the end point.
        reschedule_speed_profile: if True, ignore the interpolator's recorded
            per-index speeds and keep the current speed while cruising.

    Returns:
        np.ndarray of interpolated points, padded to at least the planning horizon.
    """
    # Frames until reaching the end point at the average of both speeds,
    # capped by the planning horizon (constant-deceleration assumption).
    frames_to_impact = min(self.planning_horizon, distance_to_end / (current_v + end_point_v + 0.0001) * 2)
    # Frames needed to change speed at the nominal rate of 0.1/frame_rate.
    frames_to_slow = abs(current_v - end_point_v) / (0.1 / self.frame_rate)
    comfort_decel = 0.2 / self.frame_rate
    points = []
    speed = current_v
    travelled = 0
    if frames_to_impact < frames_to_slow:
        # Not enough room for a comfortable slowdown: brake harder (20% margin).
        hard_decel = (end_point_v - current_v) / frames_to_impact
        for _ in range(int(frames_to_impact)):
            speed = max(0, speed + hard_decel * 1.2)
            travelled += speed
            points.append(interpolator.interpolate(travelled))
        # Keep coasting at the final speed until 100 samples exist.
        while len(points) < 100:
            travelled += speed
            points.append(interpolator.interpolate(travelled))
    else:
        # Cruise first (leaving a 3m buffer plus the slowdown distance), then
        # decelerate at the comfortable rate.
        cruise_frames = np.clip(
            ((distance_to_end - 3 - (current_v + end_point_v) / 2 * frames_to_slow) / (current_v + 0.0001)),
            0, self.frame_rate * self.frame_rate)
        if cruise_frames > 1:
            for i in range(int(cruise_frames)):
                if reschedule_speed_profile or i == 0 or i >= interpolator.trajectory.shape[0]:
                    travelled += speed
                else:
                    # Track the interpolator's recorded speed at this index.
                    recorded_speed = interpolator.get_speed_with_index(i)
                    if abs(recorded_speed - speed) > 2 / self.frame_rate:
                        print("WARNING: sharp speed changing", speed, recorded_speed)
                    speed = recorded_speed
                    travelled += speed
                points.append(interpolator.interpolate(travelled))
        for _ in range(int(frames_to_slow)):
            speed = max(0, speed - comfort_decel)
            travelled += speed
            points.append(interpolator.interpolate(travelled))
        while len(points) < 100:
            travelled += speed
            points.append(interpolator.interpolate(travelled))
    # Pad (or fill from a standstill) up to the planning horizon.
    if points:
        points.extend(points[-1:] * (self.planning_horizon - len(points)))
    else:
        points = [interpolator.interpolate(0) for _ in range(self.planning_horizon)]
    return np.array(points, ndmin=2)
def get_traffic_light_collision_pts(self, current_state, current_frame_idx,
                                    continue_time_threshold=5):
    """Collect stop points for lanes controlled by a steadily red traffic light.

    A lane contributes its first centerline point when its light shows a stop
    state now and for the next ``continue_time_threshold`` recorded frames.

    Args:
        current_state: scenario dict with 'traffic_light' and 'road' sub-dicts.
        current_frame_idx: frame index at which to read the light state.
        continue_time_threshold: number of future frames that must all stay red.

    Returns:
        list of [x, y] lane start points to treat as collision (stop) points.
    """
    tl_dics = current_state['traffic_light']
    road_dics = current_state['road']
    traffic_light_ending_pts = []
    for lane_id, tl in tl_dics.items():
        if lane_id == -1:
            continue
        # States: Unknown=0, Arrow_Stop=1, Arrow_Caution=2, Arrow_Go=3,
        # Stop=4, Caution=5, Go=6, Flashing_Stop=7, Flashing_Caution=8
        try:
            tl_state = tl["state"][current_frame_idx]
        except (IndexError, KeyError):
            # FIX: was a bare `except:` that swallowed every exception; only a
            # frame index outside the recorded range should fall back to frame 0.
            tl_state = tl["state"][0]
        if tl_state not in [1, 4, 7]:
            continue
        # Require the light to stay red for the next few recorded frames.
        end_of_tf_checking = min(len(tl["state"]), current_frame_idx + continue_time_threshold)
        all_red = all(tl["state"][k] in [1, 4, 7]
                      for k in range(current_frame_idx, end_of_tf_checking))
        if not all_red:
            continue
        # FIX: replaced an O(n) scan of road_dics.keys() for an exact key match
        # with a direct dict lookup (behavior unchanged: keys are unique).
        if lane_id not in road_dics:
            continue
        road_seg = road_dics[lane_id]
        if self.dataset == 'Waymo':
            if road_seg["type"] in [1, 2, 3]:
                if len(road_seg["dir"].shape) < 1:
                    continue
                if road_seg['turning'] == 1 and tl_state in [4, 7]:
                    # can do right turn with red light
                    continue
                traffic_light_ending_pts.append(road_seg["xyz"][0][:2])
        elif self.dataset == 'NuPlan':
            traffic_light_ending_pts.append(road_seg["xyz"][0][:2])
        else:
            assert False, f'Unknown dataset in env planner - {self.dataset}'
    return traffic_light_ending_pts
def check_past_goal(self, traj, current_idx, current_state, agent_id):
    """Decide whether the planned trajectory is already past the original goal.

    Looks ahead along ``traj`` and flags the goal as passed when the goal lies
    behind the look-ahead point, when the look-ahead point is already close to
    the goal, or when the agent moves too slowly to keep tracking the goal.
    Also records the decision in current_state['predicting']['follow_goal'].

    Returns:
        bool: True when the agent should be treated as past its goal; False
        otherwise (including when the trajectory is too short to check).
    """
    org_poses = current_state['predicting']['original_trajectory'][agent_id]['pose']
    # Walk back from the end to the last valid pose (invalid poses have x == -1).
    back_idx = 1
    while abs(org_poses[-back_idx, :2][0] + 1) <= 0.01:
        back_idx += 1
    original_goal = org_poses[-back_idx, :2]
    total_frame = traj.shape[0]
    far_idx = current_idx + self.planning_interval * 2
    near_idx = current_idx + self.planning_interval + self.frame_rate
    # Too few future frames to evaluate -> keep following the goal.
    if far_idx > total_frame - 1 or near_idx > total_frame - 1:
        return False
    next_checking_pt = traj[far_idx, :2]
    goal_yaw = org_poses[-1, 3]
    # Angle from the look-ahead point towards the goal, relative to the goal heading;
    # more than 90 degrees off means the goal is behind us.
    heading_offset = utils.normalize_angle(get_angle_of_a_line(next_checking_pt, original_goal) - goal_yaw)
    goal_behind = heading_offset > math.pi / 2 or heading_offset < -math.pi / 2
    # Distance check catches low-speed trajectories that are already near the goal.
    goal_reached = euclidean_distance(original_goal, next_checking_pt) < MINIMAL_DISTANCE_TO_GOAL
    # Displacement over one second of frames; too slow means stop tracking the goal.
    too_slow = euclidean_distance(traj[current_idx + self.planning_interval, :2],
                                  traj[near_idx, :2]) < MINIMAL_SPEED_TO_TRACK_ORG_GOAL
    past_goal = goal_behind or goal_reached or too_slow
    current_state['predicting']['follow_goal'][agent_id] = not past_goal
    return past_goal
def get_trajectory_from_interpolator(self, my_interpolator, my_current_speed, a_per_step=None,
                                     check_turning_dynamics=True, desired_speed=7,
                                     emergency_stop=False, hold_still=False,
                                     agent_id=None, a_scale_turning=0.7, a_scale_not_turning=0.9):
    """Roll a speed profile along `my_interpolator` and sample one pose per frame.

    my_current_speed is in distance-per-frame units. Returns an array of
    shape (planning_horizon, 4) of [x, y, _, yaw] poses, initialised to -1.
    `hold_still` pins every frame to the current pose; `emergency_stop`
    decelerates at A_SLOWDOWN_DESIRE; otherwise the profile slows for the
    sharpest detected turn and accelerates toward `desired_speed`.
    """
    total_frames = self.planning_horizon
    total_pts_in_interpolator = my_interpolator.trajectory.shape[0]
    trajectory = np.ones((total_frames, 4)) * -1
    # Scan the path for the sharpest plausible turn to derive a safe speed.
    largest_yaw_change = -1
    largest_yaw_change_idx = None
    if check_turning_dynamics and not emergency_stop:
        for i in range(min(200, total_pts_in_interpolator - 2)):
            # Skip invalid (-1 filled) points.
            if my_interpolator.trajectory[i, 0] == -1.0 or my_interpolator.trajectory[i+1, 0] == -1.0 or my_interpolator.trajectory[i+2, 0] == -1.0:
                continue
            current_yaw = utils.normalize_angle(get_angle_of_a_line(pt1=my_interpolator.trajectory[i, :2], pt2=my_interpolator.trajectory[i+1, :2]))
            next_yaw = utils.normalize_angle(get_angle_of_a_line(pt1=my_interpolator.trajectory[i+1, :2], pt2=my_interpolator.trajectory[i+2, :2]))
            dist = utils.euclidean_distance(pt1=my_interpolator.trajectory[i, :2], pt2=my_interpolator.trajectory[i+1, :2])
            yaw_diff = abs(utils.normalize_angle(next_yaw - current_yaw))
            # Only consider realistic curvature/segment-length combinations.
            if yaw_diff > largest_yaw_change and 0.04 < yaw_diff < math.pi / 2 * 0.9 and 100 > dist > 0.3:
                largest_yaw_change = yaw_diff
                largest_yaw_change_idx = i
        # Calculate based on 20m/s turning for 12s a whole round with a 10hz data, in m/s.
        proper_speed_minimal = max(5, math.pi / 3 / largest_yaw_change)
        proper_speed_minimal_per_frame = proper_speed_minimal / self.frame_rate
        if largest_yaw_change_idx is not None:
            # Frames left before we must start braking to hit the turn speed.
            deceleration_frames = max(0, largest_yaw_change_idx - abs(my_current_speed - proper_speed_minimal_per_frame) / (A_SLOWDOWN_DESIRE / self.frame_rate / self.frame_rate / 2))
        else:
            deceleration_frames = 99999
    if agent_id is not None:
        pass
    dist_past = 0
    current_speed = my_current_speed
    for i in range(total_frames):
        # Boost acceleration when (nearly) stopped so the agent gets moving.
        if current_speed < 0.1:
            low_speed_a_scale = 1 * self.frame_rate
        else:
            low_speed_a_scale = 0.1 * self.frame_rate
        if hold_still:
            trajectory[i] = my_interpolator.interpolate(0)
            continue
        elif emergency_stop:
            current_speed -= A_SLOWDOWN_DESIRE / self.frame_rate
        elif largest_yaw_change_idx is not None:
            proper_speed_minimal_per_frame = max(0.5, min(proper_speed_minimal_per_frame, 5))
            if largest_yaw_change_idx >= i >= deceleration_frames:
                # In the braking window before the turn.
                if current_speed > proper_speed_minimal_per_frame:
                    current_speed -= A_SLOWDOWN_DESIRE / self.frame_rate / 2
                else:
                    current_speed += A_SPEEDUP_DESIRE / self.frame_rate * a_scale_not_turning * low_speed_a_scale
            elif i < deceleration_frames:
                if current_speed < desired_speed / 4.7:
                    # If far away from the turnings and current speed is smaller than
                    # 15m/s, then speed up; else keep current speed.
                    if a_per_step is not None:
                        current_speed += max(-A_SLOWDOWN_DESIRE / self.frame_rate, min(A_SPEEDUP_DESIRE / self.frame_rate * low_speed_a_scale, a_per_step))
                    else:
                        current_speed += A_SPEEDUP_DESIRE / self.frame_rate * a_scale_turning * low_speed_a_scale
            elif i > largest_yaw_change_idx:
                # Past the turn: settle back toward the turning-safe speed.
                if current_speed > proper_speed_minimal_per_frame:
                    current_speed -= A_SLOWDOWN_DESIRE / self.frame_rate
                else:
                    if a_per_step is not None:
                        current_speed += max(-A_SLOWDOWN_DESIRE / self.frame_rate, min(A_SPEEDUP_DESIRE / self.frame_rate * low_speed_a_scale, a_per_step))
                    else:
                        current_speed += A_SPEEDUP_DESIRE / self.frame_rate * a_scale_turning * low_speed_a_scale
        else:
            if current_speed < desired_speed:
                if a_per_step is not None:
                    current_speed += max(-A_SLOWDOWN_DESIRE / self.frame_rate, min(A_SPEEDUP_DESIRE / self.frame_rate * low_speed_a_scale, a_per_step))
                else:
                    # Accelerate with a fraction of the desired acceleration.
                    current_speed += A_SPEEDUP_DESIRE / self.frame_rate * a_scale_not_turning * low_speed_a_scale
        current_speed = max(0, current_speed)
        dist_past += current_speed
        trajectory[i] = my_interpolator.interpolate(dist_past)
    return trajectory
def update_env_trajectory_reguild(self, current_frame_idx, relevant_only=True,
                                  current_state=None, plan_for_ego=False, dynamic_env=True):
    """
    Plan and update trajectory to commit for relevant environment agents.
    current_frame_idx: 1,2,3,...,11 (first frame to plan)

    For every (relevant) vehicle agent this: reroutes or adopts the marginal
    prediction, rolls a speed profile, checks collisions against traffic
    lights and all other agents, resolves yield/pass relations, and writes
    the committed trajectory back into current_state['agent'].
    """
    if not dynamic_env:
        return current_state
    if self.is_planning(current_frame_idx):
        if current_state is None:
            return
        agents = current_state['agent']
        relevant_agents = current_state['predicting']['relevant_agents']
        edges = current_state['predicting']['relation']
        ego_id = current_state['predicting']['ego_id'][1]
        # Work on a copy; commit to current_state['agent'] at the end.
        agents_dic_copy = copy.deepcopy(current_state['agent'])
        for agent_id in agents:
            # Loop each relevant agent.
            if relevant_only and agent_id not in relevant_agents:
                continue
            current_state['agent'][agent_id]['action'] = None
            total_time_frame = current_state['agent'][agent_id]['pose'].shape[0]
            goal_point = current_state['predicting']['goal_pts'][agent_id]
            my_current_pose = current_state['agent'][agent_id]['pose'][current_frame_idx - 1]
            # Speed in distance-per-frame, averaged over the last 5 frames.
            my_current_v_per_step = euclidean_distance(current_state['agent'][agent_id]['pose'][current_frame_idx - 1, :2],
                                                       current_state['agent'][agent_id]['pose'][current_frame_idx - 6, :2])/5
            my_target_speed = 70 / self.frame_rate
            # Clamp implausible speeds (likely caused by invalid poses).
            if my_current_v_per_step > 100 / self.frame_rate:
                my_current_v_per_step = 10 / self.frame_rate
            org_pose = current_state['predicting']['original_trajectory'][agent_id]['pose'].copy()
            # For non-vehicle type agents, skip.
            if int(current_state['agent'][agent_id]['type']) not in self.vehicle_types:
                continue
            # info: always use rules for env agents unless following predictions.
            use_rules = not self.follow_prediction_traj
            if use_rules:
                my_traj, _ = self.get_reroute_traj(current_state=current_state,
                                                   agent_id=agent_id,
                                                   current_frame_idx=current_frame_idx)
            else:
                routed_traj, _ = self.get_reroute_traj(current_state=current_state,
                                                       agent_id=agent_id,
                                                       current_frame_idx=current_frame_idx)
                marginal_trajs = current_state['predicting']['marginal_trajectory'][agent_id]['rst'][0]
                # Hausdorff-like distance between routed and predicted paths
                # (computed but currently unused -- the `if True` below always
                # picks the marginal prediction).
                x_dist = []
                for r_p in routed_traj[:50, :2]:
                    line_dist = []
                    for m_p in marginal_trajs[:50, :2]:
                        dist = euclidean_distance(r_p, m_p)
                        line_dist.append(dist)
                    x_dist.append(min(line_dist))
                minimal_distance = max(x_dist)
                if True:
                    my_traj = marginal_trajs
                else:
                    my_traj = routed_traj
            # Detect trajectory collisions. After collision detection we have
            # earliest_collision_idx, earliest_target_agent and the collision point.
            my_interpolator = SudoInterpolator(my_traj.copy(), my_current_pose)
            interpolated_trajectory = self.get_trajectory_from_interpolator(my_interpolator=my_interpolator,
                                                                            my_current_speed=my_current_v_per_step,
                                                                            agent_id=agent_id)
            # Re-wrap the speed-profiled trajectory for distance queries below.
            my_interpolator = SudoInterpolator(interpolated_trajectory.copy(), my_current_pose)
            earliest_collision_idx = None
            earliest_target_agent = None
            collision_point = None
            traffic_light_ending_pts = self.get_traffic_light_collision_pts(current_state=current_state,
                                                                            current_frame_idx=current_frame_idx)
            tl_checked = False
            running_red_light = False
            if self.method_testing < 1:
                continue
            # Check collisions from frame 1 of the planned trajectory.
            ego_index_checking = 1
            collision_detected_now = False
            latest_collision_id = None
            end_checking_frame = np.clip(current_frame_idx + REACTION_AFTER, 0, total_time_frame)
            end_checking_frame = min(end_checking_frame, current_frame_idx+self.planning_horizon)
            my_reactors = []
            for i in range(current_frame_idx, end_checking_frame):
                ego_index_checking = i - current_frame_idx
                ego_pose2_valid = False
                if i - current_frame_idx > 0:
                    # Build a "sweep" agent spanning the previous and current
                    # poses to catch tunnelling between frames.
                    # NOTE(review): ego_pose here is the value left over from the
                    # previous loop iteration -- confirm this ordering is intended.
                    ego_pose2 = interpolated_trajectory[ego_index_checking - 1]
                    if abs(ego_pose2[0]) < 1.1 and abs(ego_pose2[1]) < 1.1:
                        pass
                    else:
                        ego_agent2 = Agent(x=(ego_pose2[0] + ego_pose[0]) / 2,
                                           y=(ego_pose2[1] + ego_pose[1]) / 2,
                                           yaw=get_angle_of_a_line(ego_pose2[:2], ego_pose[:2]),
                                           length=euclidean_distance(ego_pose2[:2], ego_pose[:2]),
                                           width=max(1, current_state['agent'][agent_id]['shape'][0][0]),
                                           agent_id=agent_id)
                        ego_pose2_valid = True
                for each_other_agent in agents:
                    if each_other_agent == agent_id:
                        continue
                    # Agents already decided to react to us are skipped.
                    if each_other_agent in my_reactors:
                        continue
                    # Skip agents without a valid shape.
                    if current_state['agent'][each_other_agent]['shape'][0][1] == -1:
                        continue
                    if ego_index_checking >= interpolated_trajectory.shape[0]:
                        continue
                    ego_pose = interpolated_trajectory[ego_index_checking, :]
                    if abs(ego_pose[0]) < 1.1 and abs(ego_pose[1]) < 1.1:
                        # Invalid pose for collision detection.
                        continue
                    ego_agent = Agent(x=ego_pose[0],
                                      y=ego_pose[1],
                                      yaw=ego_pose[3],
                                      length=max(1, current_state['agent'][agent_id]['shape'][0][1]),
                                      width=max(1, current_state['agent'][agent_id]['shape'][0][0]),
                                      agent_id=agent_id)
                    # Check traffic light violation against dummy stop boxes.
                    for tl_pt in traffic_light_ending_pts:
                        dummy_tf_agent = Agent(x=tl_pt[0], y=tl_pt[1], yaw=0,
                                               length=TRAFFIC_LIGHT_COLLISION_SIZE,
                                               width=TRAFFIC_LIGHT_COLLISION_SIZE, agent_id=99999)
                        running = utils.check_collision(
                            checking_agent=ego_agent,
                            target_agent=dummy_tf_agent)
                        if ego_pose2_valid:
                            running |= utils.check_collision(
                                checking_agent=ego_agent2,
                                target_agent=dummy_tf_agent)
                        if running:
                            running_red_light = True
                            earliest_collision_idx = ego_index_checking
                            collision_point = [ego_pose[0], ego_pose[1]]
                            earliest_target_agent = 99999
                            target_speed = 0
                            # Break collision detection.
                            break
                    if running_red_light:
                        to_yield = True
                        break
                    each_other_agent_pose_array = current_state['agent'][each_other_agent]['pose']
                    target_current_pose = each_other_agent_pose_array[i]
                    target_agent = Agent(x=target_current_pose[0],
                                         y=target_current_pose[1],
                                         yaw=target_current_pose[3],
                                         length=max(1, current_state['agent'][each_other_agent]['shape'][0][1]),
                                         width=max(1, current_state['agent'][each_other_agent]['shape'][0][0]),
                                         agent_id=each_other_agent)
                    has_collision = utils.check_collision(checking_agent=ego_agent,
                                                          target_agent=target_agent)
                    if ego_pose2_valid:
                        has_collision |= utils.check_collision(checking_agent=ego_agent2,
                                                               target_agent=target_agent)
                    to_yield = False
                    if has_collision:
                        to_yield = True
                        # Solve this conflict: decide who yields.
                        found_in_loaded = False
                        if self.follow_loaded_relation:
                            detected_relation = []
                            for edge in current_state['edges']:
                                if agent_id == edge[0] and each_other_agent == edge[1]:
                                    to_yield = False
                                    found_in_loaded = True
                                    break
                            # NOTE(review): this extends the list with two bare ids
                            # rather than one [agent_id, each_other_agent] pair --
                            # looks like a bug; confirm intended structure.
                            current_state['predicting']['relation'] += [agent_id, each_other_agent]
                        if not found_in_loaded:
                            # FORWARD COLLISION CHECKINGS: would we hit the target
                            # where it stands right now? Then we must yield.
                            target_pose_0 = each_other_agent_pose_array[current_frame_idx]
                            target_agent_0 = Agent(x=target_pose_0[0],
                                                   y=target_pose_0[1],
                                                   yaw=target_pose_0[3],
                                                   length=max(1, current_state['agent'][each_other_agent]['shape'][0][1]),
                                                   width=max(1, current_state['agent'][each_other_agent]['shape'][0][0]),
                                                   agent_id=each_other_agent)
                            collision_0 = utils.check_collision(ego_agent, target_agent_0)
                            if ego_pose2_valid:
                                collision_0 |= utils.check_collision(ego_agent2, target_agent_0)
                            if collision_0:
                                # Yield: target has priority.
                                detected_relation = [[each_other_agent, agent_id]]
                            else:
                                # FCC backwards: would the target hit us where we are now?
                                ego_agent_0 = Agent(
                                    x=interpolated_trajectory[0, 0],
                                    y=interpolated_trajectory[0, 1],
                                    yaw=interpolated_trajectory[0, 3],
                                    length=max(1, current_state['agent'][agent_id]['shape'][0][1]),
                                    width=max(1, current_state['agent'][agent_id]['shape'][0][0]),
                                    agent_id=agent_id)
                                collision_back = utils.check_collision(ego_agent_0, target_agent)
                                if collision_back:
                                    # Not yield: we have priority.
                                    detected_relation = [[agent_id, each_other_agent]]
                                else:
                                    # Ambiguous: ask the learned relation predictor.
                                    self.online_predictor.relation_pred_onetime(each_pair=[agent_id, each_other_agent],
                                                                                current_frame=current_frame_idx,
                                                                                clear_history=True,
                                                                                current_data=current_state)
                                    detected_relation = current_state['predicting']['relation']
                            # Data to save: log relations for each planned frame.
                            if 'relations_per_frame_env' not in current_state['predicting']:
                                current_state['predicting']['relations_per_frame_env'] = {}
                            for dt in range(self.planning_interval):
                                if (current_frame_idx + dt) not in current_state['predicting']['relations_per_frame_env']:
                                    current_state['predicting']['relations_per_frame_env'][current_frame_idx + dt] = []
                                current_state['predicting']['relations_per_frame_env'][current_frame_idx + dt] += detected_relation
                            if [agent_id, each_other_agent] in detected_relation:
                                if [each_other_agent, agent_id] in detected_relation:
                                    # Bi-directional relations, still yield.
                                    pass
                                else:
                                    # We pass; the other agent reacts to us.
                                    my_reactors.append(each_other_agent)
                                    to_yield = False
                        if to_yield:
                            earliest_collision_idx = ego_index_checking
                            collision_point = [ego_pose[0], ego_pose[1]]
                            earliest_target_agent = each_other_agent
                            # Target speed from its last 5 frames (0 if poses invalid).
                            if abs(each_other_agent_pose_array[i, 0] + 1) < 0.1 or abs(each_other_agent_pose_array[i-5, 0] + 1) < 0.1:
                                target_speed = 0
                            else:
                                target_speed = euclidean_distance(each_other_agent_pose_array[i, :2], each_other_agent_pose_array[i-5, :2]) / 5
                            break
                if earliest_collision_idx is not None:
                    break
            if earliest_collision_idx is not None or self.method_testing < 2:
                # Conflict found: stop short of the collision point.
                distance_to_travel = my_interpolator.get_distance_with_index(earliest_collision_idx) - S0
                stopping_point = my_interpolator.interpolate(max(0, distance_to_travel - S0))[:2]
                if euclidean_distance(interpolated_trajectory[0, :2],
                                      stopping_point) < MINIMAL_DISTANCE_TO_TRAVEL or distance_to_travel < MINIMAL_DISTANCE_TO_TRAVEL or my_current_v_per_step < 0.1:
                    planed_traj = self.get_trajectory_from_interpolator(my_interpolator=my_interpolator,
                                                                        my_current_speed=my_current_v_per_step,
                                                                        desired_speed=my_target_speed,
                                                                        emergency_stop=True)
                    agents_dic_copy[agent_id]['action'] = 'stop'
                else:
                    planed_traj = self.adjust_speed_for_collision(interpolator=my_interpolator,
                                                                  distance_to_end=distance_to_travel,
                                                                  current_v=my_current_v_per_step,
                                                                  end_point_v=min(my_current_v_per_step * 0.8,
                                                                                  target_speed))
                    assert len(planed_traj.shape) > 1, planed_traj.shape
                    agents_dic_copy[agent_id]['action'] = 'yield'
            else:
                # No conflicts to yield.
                if euclidean_distance(interpolated_trajectory[0, :2], interpolated_trajectory[-1, :2]) < MINIMAL_DISTANCE_TO_TRAVEL:
                    planed_traj = self.get_trajectory_from_interpolator(my_interpolator=my_interpolator,
                                                                        my_current_speed=my_current_v_per_step,
                                                                        desired_speed=my_target_speed,
                                                                        hold_still=True)
                else:
                    planed_traj = interpolated_trajectory
                agents_dic_copy[agent_id]['action'] = 'controlled'
            if self.test_task == 1:
                plan_for_ego = True
            if not plan_for_ego and ego_id == agent_id:
                # The ego is controlled elsewhere; do not commit a plan for it.
                agents_dic_copy[agent_id]['action'] = None
            else:
                if self.test_task != 2:
                    if collision_point is not None:
                        current_state['predicting']['points_to_mark'].append(collision_point)
                    current_state['predicting']['trajectory_to_mark'].append(planed_traj)
                # Replace the trajectory in the committed copy.
                planning_horizon, _ = planed_traj.shape
                agents_dic_copy[agent_id]['pose'][current_frame_idx:planning_horizon+current_frame_idx, :] = planed_traj[:total_time_frame - current_frame_idx, :]
        current_state['agent'] = agents_dic_copy
    return current_state
def trajectory_from_cubic_BC(self, p1, p2, p3, p4, v):
    """Sample a cubic Bezier curve with control points p1..p4.

    The number of samples is derived from the chord length p1->p4 divided
    by the speed v (at least 1), capped at 92 points. Returns an (N, 2)
    numpy array of (x, y) samples, excluding t=0.
    """
    chord = utils.euclidean_distance(p4, p1)
    steps = min(93, int(chord / max(1, v)))
    samples = []
    for step in range(steps):
        if step >= 92:
            break
        t = (step + 1) / steps
        u = 1 - t
        # Bernstein basis coefficients for a cubic Bezier.
        b0 = pow(u, 3)
        b1 = 3 * pow(u, 2) * t
        b2 = 3 * u * pow(t, 2)
        b3 = pow(t, 3)
        x = b0 * p1[0] + b1 * p2[0] + b2 * p3[0] + b3 * p4[0]
        y = b0 * p1[1] + b1 * p2[1] + b2 * p3[1] + b3 * p4[1]
        samples.append((x, y))
    return np.array(samples, ndmin=2)
def select_trajectory_from_prediction(self, prediction_dic, agent_id, goal_point, original_trajectory,
                                      remaining_frames, follow_goal=False, follow_original_as_default=True):
    """Pick one predicted trajectory for `agent_id`.

    Returns (trajectory, follow_goal) or None when the agent has no
    prediction. When following the original path, candidates are scored by
    softmax score divided by their deviation from `original_trajectory`;
    otherwise the highest-scoring candidate wins and follow_goal is False.
    """
    if agent_id not in prediction_dic:
        return None
    # Whether to bias selection toward the original recorded path.
    follow_original = bool(follow_original_as_default)
    entry = prediction_dic[agent_id]
    candidates = entry['rst']
    weights = np.exp(entry['score'])
    weights = weights / np.sum(weights)
    if isinstance(candidates, list):
        n_candidates = len(candidates)
    else:
        n_candidates = candidates.shape[0]
    if self.method_testing < 0:
        # SimNet variety does not follow the original path.
        return candidates[0], False
    if follow_original:
        # Select the candidate closest to the original path, weighted by score.
        deviation = np.zeros_like(weights)
        for k in range(n_candidates):
            deviation[k] = self.get_l2_regulate_distance_for_two_trajectories(
                original_trajectory, candidates[k], remaining_frames)
        chosen = np.argmax(weights / deviation)
    else:
        chosen = np.argmax(weights)
        follow_goal = False
    return candidates[chosen], follow_goal
# if follow_goal:
# distance = np.zeros_like(score)
# for i in range(total_rst):
# distance[i] = self.get_l2_regulate_distance_for_two_trajectories(original_trajectory, rst[i], remaining_frames)
# if min(distance) > MAX_DEVIATION_FOR_PREDICTION and remaining_frames > 5:
# follow_original = True
# best_idx = np.argmax(score/distance)
# else:
# best_idx = np.argmax(score)
#
# distance_from_current_pose = self.get_l2_regulate_distance_for_two_trajectories(original_trajectory, [rst[best_idx, 0, :]], remaining_frames)
# current_v = euclidean_distance(rst[best_idx, 0, :2], rst[best_idx, 1, :2])
# if distance_from_current_pose > current_v:
# # too far to project back
# follow_original = False
# yaw_diff = utils.normalize_angle(original_trajectory[0, 3] - original_trajectory[-1, 3])
# if abs(yaw_diff) < math.pi/180*45:
# if current_v < MINIMAL_SPEED_TO_TRACK_ORG_GOAL:
# follow_original = False
# elif follow_goal:
# follow_original = True
#
# return rst[best_idx], follow_original
def get_l2_regulate_distance_for_two_trajectories(self, original_trajectory, compared_trajectory, comparing_frames):
    """Directed Hausdorff-style distance between two trajectories.

    For each pose of `compared_trajectory` (up to `comparing_frames`), find
    its nearest point on `original_trajectory`; return the worst (maximum)
    of those nearest distances.
    """
    per_frame_minima = []
    for frame_idx, pose in enumerate(compared_trajectory):
        if frame_idx > comparing_frames:
            break
        nearest = min(euclidean_distance(pose[:2], ref[:2]) for ref in original_trajectory)
        per_frame_minima.append(nearest)
    return max(per_frame_minima)
def get_rescale_trajectory(self, reactor_current_pose, reactor_traj, reactor_interpolator, scale, debug=False,
                           current_v_per_step=None, constant_speed=False, current_a_per_step=None, target_speed=7,
                           follow_lanes=False):
    """Re-time `reactor_traj` by scaling per-frame travel distance.

    Per-frame distances are either held constant (optionally integrating
    current_a_per_step) or taken from the original trajectory times `scale`,
    clamped by the acceleration limits A_SPEEDUP_DESIRE / A_SLOWDOWN_DESIRE.
    Returns a (min(150, len(traj)), 4) pose array sampled from
    `reactor_interpolator` at the cumulative distances.
    """
    total_time = min(150, reactor_traj.shape[0])
    traj_to_return = np.zeros([total_time, 4])
    total_distance_traveled = []
    if current_v_per_step is not None:
        current_v = current_v_per_step
    else:
        # Infer speed from the gap between the current pose and frame 0.
        current_v = euclidean_distance(reactor_current_pose[:2], reactor_traj[0, :2])
    for i in range(total_time):
        if constant_speed:
            if current_a_per_step is None:
                dist = current_v
            else:
                # Integrate the commanded acceleration, clamped to limits.
                current_v += max(-A_SLOWDOWN_DESIRE/self.frame_rate, min(A_SPEEDUP_DESIRE/self.frame_rate, current_a_per_step))
                current_v = max(0, current_v)
                dist = current_v
        else:
            if i == 0:
                dist = utils.euclidean_distance(reactor_current_pose[:2], reactor_traj[i, :2])*scale
            else:
                dist = utils.euclidean_distance(reactor_traj[i-1, :2], reactor_traj[i, :2])*scale
            # Clamp the implied speed change to the comfort limits.
            if dist > current_v + A_SPEEDUP_DESIRE/self.frame_rate:
                current_v += A_SPEEDUP_DESIRE/self.frame_rate
                current_v = min(target_speed, current_v)
                dist = current_v
            elif dist < current_v - A_SLOWDOWN_DESIRE/self.frame_rate:
                current_v -= A_SLOWDOWN_DESIRE/self.frame_rate
                current_v = max(0, current_v)
                dist = current_v
        total_distance_traveled.append(dist)
    total_distance_traveled = np.cumsum(total_distance_traveled)
    for i in range(len(total_distance_traveled)):
        traj_to_return[i, :] = reactor_interpolator.interpolate(total_distance_traveled[i], debug=debug)
    return traj_to_return
def filter_trajectory_after_goal_point(self, traj, goal_point):
    """Freeze the trajectory once it stops approaching `goal_point`.

    Walks the trajectory; while the distance to the goal keeps shrinking
    (with a small tolerance), poses pass through unchanged. From the first
    pose where the distance grows, every subsequent pose is replaced with
    that pose, effectively parking the agent at the goal.
    """
    result = traj.copy()
    frozen_pose = None
    best_distance = 999999
    for idx, pose in enumerate(traj):
        if frozen_pose is not None:
            result[idx, :] = frozen_pose
            continue
        dist = euclidean_distance(pose[:2], goal_point)
        if dist < best_distance + 0.001:
            best_distance = dist
        else:
            frozen_pose = pose
    return result
def get_action(self):
    """Placeholder action getter; always reports the no-op action (0)."""
    no_action = 0
    return no_action
def assert_traj(self, traj):
    """Sanity-check a trajectory for frame-to-frame speed jumps.

    Compares the average speed of two overlapping 5-frame windows at each
    step; returns the first index where they differ by more than
    5.0/frame_rate (a "frame jump"), or -1 if the trajectory is smooth or
    too short (< 30 frames) to check. Only the first ~20 frames are tested.
    """
    total_time, _ = traj.shape
    if total_time < 30:
        return -1
    for i in range(total_time):
        if i == 0:
            continue
        if i >= total_time - 3 or i >= 20:
            break
        # Speeds over [1+i, 6+i] and [i, 5+i] windows, per frame.
        dist_1 = euclidean_distance(traj[6+i, :2], traj[1+i, :2]) / 5
        dist_2 = euclidean_distance(traj[5+i, :2], traj[i, :2]) / 5
        if abs(dist_1 - dist_2) > 5.0/self.frame_rate:
            print("Warning: frame jumping at: ", i, abs(dist_1 - dist_2))
            return i
    return -1
class SudoInterpolator:
    """Arc-length walker over a pose trajectory.

    Treats `trajectory` (N x >=2 array of poses) plus `current_pose` as a
    polyline and answers "what pose is `distance` metres along the path?"
    queries, skipping degenerate (shorter than MINIMAL_DISTANCE_PER_STEP)
    segments.
    """
    def __init__(self, trajectory, current_pose):
        self.trajectory = trajectory
        self.current_pose = current_pose

    def interpolate(self, distance: float, starting_from=None, debug=False):
        """Return [x, y, 0, yaw] at `distance` along the path from current_pose.

        Falls back to current_pose for non-positive distances, malformed
        trajectories, or when the path never actually moves.
        """
        if starting_from is not None:
            assert False, 'not implemented'
        else:
            pose = self.trajectory.copy()
        if distance <= MINIMAL_DISTANCE_PER_STEP:
            return self.current_pose
        if pose.shape is None or len(pose.shape) < 2:
            return self.current_pose
        total_frame, _ = pose.shape
        distance_input = distance
        for i in range(total_frame):
            # Segment i runs from the previous pose (or current_pose) to pose[i].
            if i == 0:
                pose1 = self.current_pose[:2]
                pose2 = pose[0, :2]
            else:
                pose1 = pose[i - 1, :2]
                pose2 = pose[i, :2]
            next_step = euclidean_distance(pose1, pose2)
            if debug:
                print(f"{i} {next_step} {distance} {total_frame} {self.current_pose}")
            if next_step >= MINIMAL_DISTANCE_PER_STEP:
                if distance > next_step and i != total_frame - 1:
                    # Consume this segment and keep walking.
                    distance -= next_step
                    continue
                else:
                    # Target lies on (or beyond) this segment: project onto it.
                    return self.get_state_from_poses(pose1, pose2, distance, next_step)
        if distance_input - 2 > distance:
            # We travelled some of the path but ran out: extrapolate along the
            # last segment (hides a mild overshoot).
            return self.get_state_from_poses(pose1, pose2, distance, next_step)
        else:
            # Return current pose if trajectory did not move at all.
            return self.current_pose

    def get_state_from_poses(self, pose1, pose2, mul, divider):
        """Linearly interpolate between pose1 and pose2 by ratio mul/divider.

        The +0.0001 guards against division by zero on degenerate segments.
        """
        x = (pose2[0] - pose1[0]) * mul / (divider + 0.0001) + pose1[0]
        y = (pose2[1] - pose1[1]) * mul / (divider + 0.0001) + pose1[1]
        yaw = utils.normalize_angle(get_angle_of_a_line(pt1=pose1, pt2=pose2))
        return [x, y, 0, yaw]

    def get_distance_with_index(self, index: int):
        """Arc length from current_pose up to trajectory[index].

        Pass -1 to sum over the whole trajectory; degenerate steps (shorter
        than MINIMAL_DISTANCE_PER_STEP) are ignored. Returns 0 for index 0.
        """
        distance = 0
        if index != 0:
            pose = self.trajectory.copy()
            total_frame, _ = pose.shape
            for i in range(total_frame):
                if i >= index != -1:
                    # Pass -1 to travel through all indices.
                    break
                elif i == 0:
                    step = euclidean_distance(self.current_pose[:2], pose[i, :2])
                else:
                    step = euclidean_distance(pose[i, :2], pose[i-1, :2])
                if step > MINIMAL_DISTANCE_PER_STEP:
                    distance += step
        return distance

    def get_speed_with_index(self, index: int):
        """Per-frame speed at `index` (distance from the previous pose).

        Returns None for index 0, where no previous pose exists.
        """
        if index != 0:
            p_t = self.trajectory[index, :2]
            p_t1 = self.trajectory[index - 1, :2]
            speed_per_step = utils.euclidean_distance(p_t, p_t1)
            return speed_per_step
        else:
            return None
class Agent(car.Agent):
    # Thin wrapper over car.Agent with this module's yaw convention.
    def yaw_changer(self, yaw):
        # Negate then remap axes; presumably converts between the map frame
        # and the renderer frame -- confirm against change_axis/car.Agent.
        return change_axis(-yaw)
| Tsinghua-MARS-Lab/InterSim | simulator/plan/env_planner.py | env_planner.py | py | 107,809 | python | en | code | 119 | github-code | 6 | [
{
"api_name": "math.atan2",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "math.atan2",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 56,
... |
14839954104 | from PyQt5 import QtCore, QtGui, QtWidgets, uic
import sys
from AssignmentCategoryDict import AssignmentCategoryDict
from Assignment import Assignment
import uuid
class EditCategories(object):
def __init__(self, course, reload_gradesheet):
    """Build and show the Edit Categories dialog for `course`.

    reload_gradesheet: zero-argument callback that refreshes the parent
    grade sheet after categories change.
    """
    col_headers = ['Category Name', 'Drop Count']
    self.ECategories = QtWidgets.QDialog()
    self.ui = uic.loadUi('../assets/ui/EditCategories.ui', self.ECategories)
    self.ECategories.categoryTable.setHorizontalHeaderLabels(col_headers)
    self.course = course
    self.ECategories.show()
    # Row index -> category uuid, for categories that already existed.
    self.category_uuids = []
    self.setup_display()
    self.reload_gradesheet = reload_gradesheet
    # Rows below this count existed before the dialog opened; rows added
    # later are treated as new categories on save.
    self.original_row_count = self.ECategories.categoryTable.rowCount()
    self.ECategories.removeSelectedCategoryButton.clicked.connect(self.remove_category)
    self.ECategories.addCategoryButton.clicked.connect(self.add_category)
    self.ECategories.saveCategoriesButton.clicked.connect(self.save_table_data)
def setup_display(self):
    """Populate the table with the course's existing assignment categories."""
    for category in self.course.assignment_category_dict.assignment_categories.values():
        row_insert = self.ECategories.categoryTable.rowCount()
        self.add_category()
        self.ECategories.categoryTable.setItem(row_insert, 0, QtWidgets.QTableWidgetItem(category.categoryName))
        # NOTE(review): QTableWidgetItem expects a string -- assumes drop_count
        # is stored as str; confirm upstream type.
        self.ECategories.categoryTable.setItem(row_insert, 1, QtWidgets.QTableWidgetItem(category.drop_count))
        self.category_uuids.append(category.category_uuid)
def add_category(self):
    """Append an empty, editable row to the category table."""
    table = self.ECategories.categoryTable
    new_row = table.rowCount()
    table.insertRow(table.rowCount())
    for column in (0, 1):
        table.setItem(new_row, column, QtWidgets.QTableWidgetItem(""))
def remove_category(self):
    """Delete the selected row; confirm before deleting a pre-existing category."""
    if self.ECategories.categoryTable.rowCount() <= 0:
        return
    row = self.ECategories.categoryTable.currentRow()
    # NOTE(review): rows are 0-indexed, so a session-added row has index
    # >= original_row_count; the strict `>` here looks like an off-by-one
    # (row == original_row_count is treated as original) -- confirm.
    if row > self.original_row_count:
        self.ECategories.categoryTable.removeRow(row)
    else:
        choice = QtWidgets.QMessageBox.question(self.ECategories, "Warning",
                                                "You are about to delete one of your original categories. Continue?",
                                                QtWidgets.QMessageBox.Yes, QtWidgets.QMessageBox.No)
        if choice == QtWidgets.QMessageBox.Yes:
            cat_to_delete_uuid = self.category_uuids[row]
            self.course.assignment_category_dict.delete_category(self.course, cat_to_delete_uuid)
            # Keep the bookkeeping in sync with the removed original row.
            self.original_row_count = self.original_row_count - 1
            del self.category_uuids[row]
            self.ECategories.categoryTable.removeRow(row)
            self.reload_gradesheet()
def save_table_data(self):
    """Validate the table contents and persist them to the course."""
    row_count = self.ECategories.categoryTable.rowCount()
    output = []
    for row in range(0, row_count):
        cat_name = self.ECategories.categoryTable.item(row, 0).text()
        cat_drop_count = self.ECategories.categoryTable.item(row, 1).text()
        output.append([cat_name, cat_drop_count])
    valid = self.error_checking(output)
    if valid:
        self.course.assignment_category_dict.reload_categories()
        for i in range(len(output)):
            if i < self.original_row_count:
                # Pre-existing category: update in place by its stored uuid.
                self.course.assignment_category_dict.save_category_info(output[i][0], output[i][1], self.category_uuids[i])
                # Add the database update function
            else:
                # Row added in this session: create with a fresh uuid.
                self.course.assignment_category_dict.add_category(str(uuid.uuid4()), output[i][0], output[i][1], self.course.student_list)
                # Add the database create function
        self.reload_gradesheet()
def error_checking(self, user_input):
    """Validate category rows of [name, drop_count] pairs.

    Returns True when every row has a non-empty name and a non-negative
    integer drop count. Shows an error dialog for empty names and
    non-integer drop counts; decimal ("." containing) and negative counts
    silently return False, preserving the original behavior.
    """
    category_names = [row[0] for row in user_input]
    category_drop_counts = [row[1] for row in user_input]
    for name in category_names:
        if name == "":
            self.bad_input('Error', 'Please enter a category name for all categories')
            return False
    for count in category_drop_counts:
        # Reject decimal values outright (e.g. "1.5").
        if "." in count:
            return False
        try:
            if int(count.strip()) < 0:
                return False
        except ValueError:
            # BUG FIX: message previously read "that is a nonnegative integer",
            # the opposite of the actual problem.
            self.bad_input('Error', 'You have a drop count that is not a nonnegative integer. Please try again.')
            return False
    return True
"""
Function for telling the user they entered bad input
Parameters:
window_text: (string) the name of the window
error_message: (string) the error message that is displayed to the user
"""
def bad_input(self, window_text, error_message):
    """Show a modal error dialog.

    window_text: (string) title of the dialog window.
    error_message: (string) message displayed to the user.
    """
    choice = QtWidgets.QMessageBox.question(self.ECategories, window_text, error_message,
                                            QtWidgets.QMessageBox.Cancel)
    if choice:
pass | meeksjt/SuperTeacherGradebook499 | src/EditCategories.py | EditCategories.py | py | 5,236 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "PyQt5.QtWidgets.QDialog",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic",
... |
40327690661 | from pyecharts import options as opts
from typing import Any,Optional
from pyecharts.charts import Radar
import os
import sys
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))
from easy_pyechart import constants,baseParams,radar_base_config,round_radar_base_config
class eRadar():
    """Convenience wrappers that build pyecharts Radar charts from shared opts."""
    def __init__(
            self,
            lableList: Optional[list] = [],
            valueList: Optional[list] = [],
    ):
        # NOTE(review): mutable default arguments -- harmless here because the
        # lists are only stored in the dict, but None sentinels would be safer.
        self.opts: dict = {
            "lengend": Radar,
            "lableList": lableList,
            "valueList": valueList,
        }

    # Basic radar chart.
    def basic_radar_chart(self, baseParams):
        self.opts.update(baseParams.opts)
        return radar_base_config(self)

    # Radar chart whose legend is in single-select mode.
    def radar_selected_mode(self, baseParams):
        self.opts.update(baseParams.opts)
        c = radar_base_config(self)
        c.set_global_opts(
            legend_opts=opts.LegendOpts(selected_mode="single"),
            title_opts=opts.TitleOpts(title=self.opts['title'], subtitle=self.opts['subTitle'],))
        return c

    # Air-quality styled radar (same base config as the basic chart).
    def radar_air_quality(self, baseParams):
        self.opts.update(baseParams.opts)
        return radar_base_config(self)

    # Radar with angle/radius axes and a shaded area (round base config).
    def radar_angle_radius_axis(self, baseParams):
        self.opts.update(baseParams.opts)
return round_radar_base_config(self) | jayz2017/easy_pyechart.py | easy_pyechart/easy_radar.py | easy_radar.py | py | 1,470 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
23775563757 | import flask
import grpc
import search_pb2_grpc as pb2_grpc
import search_pb2 as pb2
import redis
import json
from google.protobuf.json_format import MessageToJson
from flask import request, jsonify
app = flask.Flask(__name__)
app.config["DEBUG"] = True  # development setting; disable in production
class SearchClient(object):
    """
    Client for gRPC functionality
    """
    def __init__(self):
        # Hard-coded local gRPC endpoint.
        self.host = 'localhost'
        self.server_port = 50051
        # Insecure (plaintext) channel; acceptable for local development only.
        self.channel = grpc.insecure_channel(
            '{}:{}'.format(self.host, self.server_port))
        self.stub = pb2_grpc.SearchStub(self.channel)

    def get_results(self, message):
        """
        Client function to call the rpc for GetServerResponse
        """
        message = pb2.Message(message=message)
        print(f'{message}')
        return self.stub.GetServerResponse(message)
@app.route('/inventory/search', methods=['GET'])
def busqueda():
    """Inventory search endpoint.

    Serves cached results from Redis when available; otherwise queries the
    gRPC search service, caches the JSON-serialized response, and returns it.
    Requires a 'q' query parameter.
    """
    # Guard clause: missing query parameter (message kept verbatim).
    if 'q' not in request.args:
        return "Error, porfavor especifique la busqueda a realizar"
    # Renamed local (was `busqueda`, shadowing the view function itself).
    query = request.args['q']
    r = redis.Redis(host='localhost', port=6379, db=0)
    resultado = r.get(query)
    # Idiomatic None check (was `resultado != None`).
    if resultado is not None:
        # Cache hit: stored value is the JSON-serialized protobuf response.
        products = json.loads(resultado)
        return jsonify(products)
    # Cache miss: ask the gRPC search service and memoize the answer.
    client = SearchClient()
    result = client.get_results(query)
    print(result.product[0].name + "*******")
    serialized = MessageToJson(result)
    r.set(query, serialized)
    return serialized
app.run() | manfruta/Sistemas-Tarea1 | cliente_app.py | cliente_app.py | py | 1,587 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "grpc.insecure_channel",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "search_pb2_grpc.SearchStub",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "search_p... |
44407917630 | '''
Created on 16/ago/2011
@author: Marco
'''
from reportlab.pdfgen import canvas
from reportlab.lib.units import cm
from math import sqrt
import ModelsCache
import Configuration
class PdfStructure(object):
    '''
    Static registry of digital markers plus persistence of marker layouts
    ("models") and rendering of the registered markers into a PDF.
    All state is class-level; every method is a @staticmethod.
    '''
    # Markers queued for the next GeneratePDF() call.
    __markerList = []
    # Disk-backed cache of saved marker-layout models.
    __modelsCache = ModelsCache.ModelsCache()
    @staticmethod
    def AddMarker(digitalMarker):
        # Register a marker for inclusion in the generated PDF.
        PdfStructure.__markerList.append(digitalMarker)
    @staticmethod
    def RemoveMarker(tagName):
        # Remove markers whose name matches tagName.
        # NOTE(review): mutates the list while iterating it — skips the
        # element following each removal; verify single-match usage.
        for tag in PdfStructure.__markerList:
            if tag.name == tagName:
                PdfStructure.__markerList.remove(tag)
    @staticmethod
    def GeneratePDF(fileName):
        """Render every registered marker into a PDF named fileName.

        Each marker's tag file (<TAG_DIR>/<name>.txt) lists ellipses, ten
        space-separated fields per line; each is drawn as a filled circle,
        scaled from the marker's defaultSize to its requested size.
        NOTE(review): inputFile is never closed — consider a `with` block.
        """
        c = canvas.Canvas(fileName);
        for digitalMarker in PdfStructure.__markerList:
            inputFile = open(Configuration.TAG_DIR()+digitalMarker.name+".txt","r")
            tagDefinition = inputFile.read()
            lines = tagDefinition.split("\n")
            # Marker centre mapped from a 424x600 source space onto a
            # 21 x 29.7 cm page (A4).
            (x,y) = digitalMarker.GetCenter();
            tX = (float(x)/424)*21
            tY = (float(y)/600)*29.7
            for line in lines:
                ellipse = line.split(" ")
                if len(ellipse) == 10:
                    # Fields 3 and 6 hold the (negated) ellipse centre;
                    # field 9 enters the radius formula below.
                    xCenter = -1*float(ellipse[3])
                    xCenter = (float(xCenter)/digitalMarker.defaultSize)*digitalMarker.size
                    yCenter = -1*float(ellipse[6])
                    yCenter = (float(yCenter)/digitalMarker.defaultSize)*digitalMarker.size
                    radius = ((0.5*sqrt((float(ellipse[3])*2)*(float(ellipse[3])*2)+(float(ellipse[6])*2)*(float(ellipse[6])*2)-4*float(ellipse[9])))/224)*digitalMarker.size
                    # Positions are in mm (/10 converts to cm) offset by the
                    # marker centre translation (tX, tY).
                    c.circle(xCenter/10*cm+tX*cm, yCenter/10*cm+tY*cm, radius/10*cm, fill=True)
        c.save()
    @staticmethod
    def SaveModel(modelName):
        """Persist the current marker list as a named model (file + cache).

        Raises Exception on empty name, empty marker list, or duplicate name.
        NOTE(review): out_file is opened (and thus created) *before*
        validation — a failed validation leaks the handle and leaves an
        empty .model file behind.
        """
        out_file = open(Configuration.MODEL_DIR()+modelName+".model","a")
        if not modelName:
            raise Exception("ERROR: name is empty")
        if not PdfStructure.__markerList:
            raise Exception("ERROR: nothing to save as model")
        for model in PdfStructure.__modelsCache.models:
            if modelName == model.name:
                raise Exception("ERROR: duplicated name")
        # Append the model name to the global ModelNames index file.
        model_names_file = open("ModelNames","a")
        model_names_file.write(modelName+"\n")
        model_names_file.close()
        runeNames = []
        runePositions = []
        runeSizes = []
        runeDefaultSizes = []
        for rune in PdfStructure.__markerList:
            runeNames.append(rune.name)
            runePositions.append((rune.x, rune.y))
            runeSizes.append(rune.size)
            runeDefaultSizes.append(rune.defaultSize)
            # One whitespace-separated record per marker.
            out_file.write(rune.name+" "+str(rune.x)+" "+str(rune.y)+" "+str(rune.size)+" "+str(rune.defaultSize)+"\n")
        out_file.close()
        PdfStructure.__modelsCache.AddModel(modelName, runeNames, runePositions, runeSizes, runeDefaultSizes)
    @staticmethod
    def GetModelNames():
        # Names of all cached models.
        modelNames = []
        for model in PdfStructure.__modelsCache.models:
            modelNames.append(model.name)
        return modelNames
    @staticmethod
    def GetModel(modelName):
        # Delegate lookup to the models cache.
        return PdfStructure.__modelsCache.GetModel(modelName)
    @staticmethod
    def DeleteModel(name):
        """Remove a model from the ModelNames index file and the cache.

        NOTE(review): str.replace removes *every* occurrence of `name`,
        so it can corrupt other entries that contain it as a substring,
        and it leaves a blank line behind in the index file.
        """
        model_names_file = open("ModelNames","r")
        modelNames = model_names_file.read()
        model_names_file.close()
        model_names_file = open("ModelNames","w")
        modelNames = modelNames.replace(name, "")
        model_names_file.write(modelNames)
        model_names_file.close()
        for model in PdfStructure.__modelsCache.models:
            if model.name == name:
                PdfStructure.__modelsCache.models.remove(model)
| mziccard/RuneTagDrawer | PdfStructure.py | PdfStructure.py | py | 3,883 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "ModelsCache.ModelsCache",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfgen.canvas.Canvas",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "reportlab.pdfgen.canvas",
"line_number": 32,
"usage_type": "name"
},
{
"ap... |
32802770666 | from elasticsearch import Elasticsearch, helpers
import csv
import json
import time
mvar = "clara"
matching_query = { "query_string": {
"query": mvar
}
}
def main():
    """Bulk-index the BX-Books CSV into the local Elasticsearch node."""
    # Connect to the local Elasticsearch node.
    es = Elasticsearch(host="localhost", port=9200)
    # Stream CSV rows straight into the bulk indexer. The context manager
    # guarantees the file handle is closed even if indexing fails (the
    # original opened the file and never closed it).
    with open('BX-Books.csv', "r", encoding="utf8") as f:
        reader = csv.DictReader(f)
        # Creates the index if it does not already exist.
        helpers.bulk(es, reader, index="bx_books_2")
# Script entry point.
if __name__ == "__main__":
    main()
| d4g10ur0s/InformationRetrieval_21_22 | save_books.py | save_books.py | py | 627 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "csv.DictReader",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "elasticsearch.helpers.bulk",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": ... |
21546390354 | import re
from collections import Counter, defaultdict
from itertools import combinations
from typing import Dict, List, Tuple, Set
import numpy as np
from helper import load_input
def create_input():
    '''Extract puzzle input and transform.

    Returns a (replacements_dict, molecule) pair: replacements_dict maps
    each source element to the list of strings it may be replaced by, and
    molecule is the medicine molecule string.
    '''
    # creates pattern for extracting replacements of the form "<from> => <to>"
    pattern = r"(\w+) => (\w+)"
    # splits puzzle input into replacements and molecule (blank-line separated)
    replacements, molecule = load_input(day=19).read().strip("\n").split("\n\n")
    # regex and init empty dict of lists
    matches = re.findall(pattern, replacements)
    replacements_dict = defaultdict(list)
    # converts the replacements into dictionary of lists
    for match in matches:
        replacements_dict[match[0]].append(match[1])
    return replacements_dict, molecule
def insert_replacements(start: str, end: str, replacements: List[str]) -> List[str]:
    '''
    Build one candidate molecule per replacement by sandwiching each
    replacement string between *start* and *end*; returns them as a list.
    '''
    candidates = []
    for middle in replacements:
        candidates.append(start + middle + end)
    return candidates
def generate_molecules(replacements_dict: Dict[str, List[str]], molecule: str) -> Set[str]:
    '''
    Given the replacements and starting molecule, generates all the possible
    molecules after a single replacement and returns them as a set.

    Fix: the original checked a 1-character and a 2-character symbol at each
    position but applied only one of them (`replacement1 or replacement2`),
    silently dropping the 2-character match whenever both were present in
    the replacement table. Both widths are now applied independently.
    '''
    generated_molecules = set()
    for i in range(len(molecule)):
        # Element symbols are one or two characters long; try both widths.
        for width in (1, 2):
            symbol = molecule[i:i + width]
            if len(symbol) != width:
                # Past the end of the string (only possible for width 2).
                continue
            targets = replacements_dict.get(symbol)
            if not targets:
                continue
            prefix = molecule[:i]
            suffix = molecule[i + width:]
            # One candidate molecule per replacement target.
            generated_molecules.update(prefix + t + suffix for t in targets)
    return generated_molecules
def part1():
    '''
    How many distinct molecules can be created
    after all the different ways you can do one replacement on the medicine molecule
    '''
    # Parse input, enumerate all single-replacement products, count them.
    replacements_dict, molecule = create_input()
    return len(generate_molecules(replacements_dict, molecule))
def part2():
    # Not implemented yet (fewest replacement steps to build the molecule).
    ...
# Print both puzzle answers when run as a script.
if __name__ == '__main__':
    print(part1())
    print(part2()) | rick-62/advent-of-code | advent_of_code_2015/solutions/day19.py | day19.py | py | 2,647 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "helper.load_input",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "typing.List",
... |
38649682103 | import http
import requests
import telegram
from flask import Blueprint, Response, request
from sqlalchemy_utils import create_database, database_exists
from config import BUILD_NUMBER, DATABASE_URL, REBASE_URL, VERSION
from .bot import dispatcher
from .db import db, test_db
from .utils import log
routes = Blueprint('routes', __name__, url_prefix='/')
@routes.get('/health')
def health_check() -> Response:
    """Liveness probe: bootstrap the database if needed and report status.

    Returns JSON with bot availability, app version, and DB reachability,
    always with HTTP 200 (component states are in the payload).
    """
    try:
        # Create the database and tables on first request; failures are
        # logged but deliberately non-fatal so the probe still responds.
        if not database_exists(DATABASE_URL):
            create_database(DATABASE_URL)
        db.create_all()
    except Exception as exc:
        log.exception('Health checking database... %s: %s', 'ERR', exc)
    return {
        'bot': 'up' if dispatcher is not None else 'down',
        'version': f'{VERSION}-{BUILD_NUMBER}',
        'db': 'up' if test_db() else 'down',
    }, http.HTTPStatus.OK
@routes.get('/rebase')
def reset() -> Response:
    """Re-register the bot's Telegram webhook at REBASE_URL.

    Returns the raw Telegram API response body, or a JSON error with
    HTTP 500 when no rebase URL is configured.
    """
    if REBASE_URL is None:
        return { 'error': 'No rebase URL provided' }, http.HTTPStatus.INTERNAL_SERVER_ERROR
    # Timeout added: requests.get without one can hang this request handler
    # indefinitely if the Telegram API is unreachable.
    return requests.get(
        f'https://api.telegram.org/bot{dispatcher.bot.token}/setWebhook?url={REBASE_URL}',
        timeout=10,
    ).content
@routes.post('/')
def index() -> Response:
    """Telegram webhook entry point.

    Decodes the incoming update JSON and hands it to the dispatcher;
    returns 204 on success, 500 when the bot is not initialized.
    """
    if dispatcher is None:
        return 'Bot is inactive', http.HTTPStatus.INTERNAL_SERVER_ERROR
    # force=True: accept the body as JSON regardless of Content-Type header.
    update = telegram.Update.de_json(request.get_json(force=True), dispatcher.bot)
    dispatcher.process_update(update)
    return '', http.HTTPStatus.NO_CONTENT
| andrewscwei/python-telegram-bot-starter-kit | app/routes.py | routes.py | py | 1,370 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "flask.Blueprint",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sqlalchemy_utils.database_exists",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "config.DATABASE_URL",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name... |
16098965612 | from django.urls import path
from card import views
# Flash-card CRUD routes.
urlpatterns = [
    path('create/', views.CreateFlashCardView.as_view(), name="create-flash-card"),
    path('update/<id>/', views.UpdateFlashCardView.as_view(), name="update-flash-card"),
    # Fixed route typo: was 'dalete/<id>/' — the URL name ("delete-flash-card")
    # already said "delete".
    path('delete/<id>/', views.DeleteFlashCardView.as_view(), name="delete-flash-card"),
    path('list/<user_id>/', views.ListFlashCardView.as_view(), name="list-user-flash-card"),
]
| leonardo0231/flash-card | card/urls.py | urls.py | py | 428 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "card.views.CreateFlashCardView.as_view",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "card.views.CreateFlashCardView",
"line_number": 6,
"usage_type": "attribute"
},
... |
39697340199 | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits import mplot3d
# plots intensity time series for MDRE model
# plots intensity time series for MDRE model
def plotIntensity ():
    """Plot the MDRE intensity time series with population inversions
    (panel a) and intensity vs. inversion phase projections (panel b).

    Reads 'time_series.txt' (space-separated columns) from the working
    directory and shows the figure; returns nothing.
    """
    # index boundaries for time series plot (panel a)
    nStart = 0
    nEnd = 10000

    with open("time_series.txt", "r") as file:
        lines = file.readlines()

    time = []
    intensity = []
    rho_GS_e_act = []
    rho_GS_h_act = []
    rho_GS_e_inact = []
    rho_GS_h_inact = []
    rho_ES_e = []
    rho_ES_h = []
    E_real = []
    E_imag = []

    for line in lines:
        # Split each line once instead of once per column (the original
        # re-split every line up to ten times).
        cols = line.split(' ')
        time.append(float(cols[0]))
        intensity.append(float(cols[1]))
        E_real.append(float(cols[2]))
        E_imag.append(float(cols[3]))
        rho_GS_e_act.append(float(cols[6]))
        rho_GS_h_act.append(float(cols[7]))
        rho_GS_e_inact.append(float(cols[8]))
        rho_GS_h_inact.append(float(cols[9]))
        rho_ES_e.append(float(cols[10]))
        rho_ES_h.append(float(cols[11]))

    time = np.array(time)
    intensity = np.array(intensity)
    E_real = np.array(E_real)
    E_imag = np.array(E_imag)
    rho_GS_e_act = np.array(rho_GS_e_act)
    rho_GS_h_act = np.array(rho_GS_h_act)
    rho_GS_e_inact = np.array(rho_GS_e_inact)
    rho_GS_h_inact = np.array(rho_GS_h_inact)
    rho_ES_e = np.array(rho_ES_e)
    rho_ES_h = np.array(rho_ES_h)

    # calculation of inversion: rho_e + rho_h - 1 per carrier reservoir
    inversion_GS_act = rho_GS_e_act + rho_GS_h_act - 1.0
    inversion_GS_inact = rho_GS_e_inact + rho_GS_h_inact - 1.0
    inversion_ES = rho_ES_e + rho_ES_h - 1.0

    fig, (ax1, ax2) = plt.subplots(1, 2)  # sharey=True
    ax12 = ax1.twinx()
    fig.set_size_inches(5.9, 3.2)
    plt.rcParams.update({"font.size": 9})
    fig.subplots_adjust(wspace=0.7, top=0.99, bottom=0.22, left=0.08, right=0.99)
    fig.text(0.005, 0.93, "a)")

    # Panel a: intensity trace (left axis) and inversions (right axis).
    ax1.plot(time[nStart:nEnd], intensity[nStart:nEnd], color="crimson")
    ax1.set_xlabel(r"time $t$ / ns", size=9.0)
    ax1.set_ylabel(r"intensity $|E|^2$", color="crimson", size=9.0)
    ax1.set_ylim(np.min(intensity) - 0.1, np.max(intensity) + 0.3)
    ax1.set_xticks([0.0, 5.0, 10.0])
    ax1.set_yticks([0.0, 1.0, 2.0, 3.0])
    ax1.tick_params(axis="x", labelsize=9.0)
    ax1.tick_params(axis="y", colors="crimson", labelsize=9.0)
    ax1.set_zorder(1)
    ax1.set_facecolor("none")

    ax12.plot(time[nStart:nEnd], inversion_GS_act[nStart:nEnd], color="orange", label="GS act")
    ax12.plot(time[nStart:nEnd], inversion_GS_inact[nStart:nEnd], color="gray", linestyle="--", label="GS inact")
    ax12.plot(time[nStart:nEnd], inversion_ES[nStart:nEnd], color="cornflowerblue", label="ES")
    ax12.set_ylabel(r"population inversion" + "\n" + r"$\rho_{m,e}^{(in)act} + \rho_{m,h}^{(in)act} - 1$", size=9.0)
    ax12.set_ylim(-1.075, 1.075)
    ax12.set_yticks([-1.0, 0.0, 1.0])
    ax12.tick_params(axis="y", labelsize=9.0)
    ax12.set_zorder(2)
    ax12.legend(bbox_to_anchor=(0.44, 0.33))

    # ~ fig, ax = plt.subplots()
    # ~ fig.set_size_inches(5.9, 4.8)
    # ~ fig.subplots_adjust(top=0.99, bottom=0.15, left=0.10, right=0.99)
    fig.text(0.575, 0.93, "b)")

    # Panel b: phase projections intensity vs. inversion for each reservoir.
    ax2.plot(inversion_GS_act, intensity, color="orange", label="GS act")
    ax2.plot(inversion_GS_inact, intensity, color="gray", linestyle="--", label="GS inact")
    ax2.plot(inversion_ES, intensity, color="cornflowerblue", label="ES")
    ax2.set_xlabel(r"population inversion" + "\n" + r"$\rho_{m,e}^{(in)act} + \rho_{m,h}^{(in)act} - 1$", size=9.0)
    ax2.set_ylabel(r"intensity $|E|^2$", color="crimson", size=9.0)
    ax2.set_xlim(-1.075, 1.075)
    ax2.set_ylim(-0.15, 3.15)
    ax2.set_xticks([-1.0, 0.0, 1.0])
    ax2.set_yticks([0.0, 1.0, 2.0, 3.0])
    ax2.tick_params(axis="x", labelsize=9.0)
    ax2.tick_params(axis="y", colors="crimson", labelsize=9.0)
    ax2.grid(color="lightgray")
    ax2.legend(loc="upper left")

    plt.show()
plotIntensity()
| sir-aak/microscopically-derived-rate-equations | plotscripts/mdre_plotscript_intensity_inversion.py | mdre_plotscript_intensity_inversion.py | py | 3,810 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.array",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
17324365412 | from motor import motor_asyncio
from .model import Guild
import os
class Database:
    """Async MongoDB accessor for per-guild configuration documents."""
    def __init__(self, *, letty):
        self.letty = letty
        # DB_URL / DB_NAME must be present in the environment.
        self.connection = motor_asyncio.AsyncIOMotorClient(os.environ['DB_URL'])
        self.db = db = self.connection[os.environ['DB_NAME']]
        # 'guilds' collection holds one document per guild.
        self.guild = db.guilds
    async def get_guild(self, guild_id):
        # Return the Guild wrapper for guild_id, creating a default
        # document on first access.
        data = await self.guild.find_one({"_id": guild_id})
        if data != None:
            return Guild(data, self.guild)
        else:
            return await self.register_guild(guild_id)
    async def register_guild(self, guild_id):
        # Insert a default configuration document (prefix "lt.", pt_BR
        # language, nothing disabled) and return its Guild wrapper.
        data = {
            "_id": guild_id,
            "config":{"prefix":"lt.","language":"pt_BR"},
            "disable":{"command":[],"channel":[],"role":[],"member":[]}
        }
        await self.guild.insert_one(data)
        return Guild(data, self.guild) | WhyNoLetty/Letty | database/base.py | base.py | py | 890 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "motor.motor_asyncio.AsyncIOMotorClient",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "motor.motor_asyncio",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name"... |
15152787587 | # -*- coding: utf-8 -*
#该程序用于模型测试
import os
import torch
import numpy as np
import torch.nn as nn
from evaluation import HKOEvaluation
from ium_data.bj_iterator import BJIterator
if __name__ == "__main__":
    # Load the best trained model checkpoint and switch to eval mode.
    test_model = torch.load('./checkpoints/trained_model_12000.pkl' )
    test_model.eval()
    test_bj_iter = BJIterator(datetime_set="bj_test_set.txt",sample_mode="sequent",
                              seq_len=15,width=600,height=600,
                              begin_idx=None, end_idx=None)
    for i in range(10):
        frame_data, mask_dat, datetime_batch, _ = test_bj_iter.sample(batch_size=2)
        frame_data = torch.from_numpy(frame_data)
        # Reorder axes before slicing along dim 2 — assumes dim 2 is the
        # 15-frame sequence axis after the permute; TODO confirm layout.
        frame_data = frame_data.permute(1, 2, 0, 3, 4).contiguous()
        test_input = frame_data[:, :, 0:5, :, :].cuda()
        test_label = frame_data[:, :, 5:15, :, :].cuda()
        # Predict the following 10 frames (i.e. the next hour) from 5 input
        # frames by applying the model twice, 5 frames per application.
        output1 = test_model(test_input)
        output2 = test_model(output1)
        output = torch.cat((output1,output2),2)
        # Scale factor 80 — presumably maps normalized values back to the
        # physical radar-echo range; verify against training normalization.
        test_label = test_label * 80
        output = output * 80
        print('testing dataset {}'.format(i))
        # Compute evaluation metrics over the 10 predicted frames.
        evaluation = HKOEvaluation(seq_len=10, use_central=False)
        test_label = test_label.cpu().detach().numpy().transpose(2, 0, 1, 3, 4)
        output = output.cpu().detach().numpy().transpose(2, 0, 1, 3, 4)
        evaluation.update(test_label, output, mask=None)
        POD, CSI, FAR = evaluation.calculate_stat()
        # Write the results to a text file.
        evaluation.print_stat_readable()
        evaluation.save_txt_readable('./results/test_evaluation.txt')
| LiangHe77/UNet_v1 | test.py | test.py | py | 1,817 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "torch.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "ium_data.bj_iterator.BJIterator",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.from_numpy",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.cat... |
29821357591 | import docker
class MicroDockerClient:
    """Minimal wrapper around the Docker SDK for pulling, running and
    deleting a single container described by a configuration object."""
    def __init__(self, micro_configuration):
        # Connect using the environment's Docker settings (DOCKER_HOST etc.).
        self.client = docker.from_env()
        self.config = micro_configuration
    def pull(self):
        # Fetch the configured image from the registry.
        self.client.images.pull(self.config.image_name)
    def run(self):
        # Start the container detached, mapping the container port (tcp)
        # to the configured host port, under the configured name.
        self.client.containers.run(
            self.config.image_name,
            ports={F'{self.config.container_port}/tcp':str(self.config.exposed_port)},
            name=self.config.name,
            detach=True)
    def delete(self):
        # Best-effort teardown: kill and remove the first container whose
        # name matches the configuration.
        # NOTE(review): the broad `except Exception` also hides real API
        # errors, not just the "no such container" IndexError.
        try:
            ctr = self.client.containers.list(filters={'name':self.config.name})[0]
            ctr.kill()
            ctr.remove()
        except Exception :
            print("No ctr to delete") | alichamouda/micro-cd | micro_docker_client.py | micro_docker_client.py | py | 713 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "docker.from_env",
"line_number": 5,
"usage_type": "call"
}
] |
13020029275 | import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import ensemble
def data_accuracy(predictions, real):
    """
    Check the accuracy of the estimated prices.

    Computes per-sample relative errors abs(prediction - real) / real,
    discards values outside the (0th, 90th) percentile band, prints the
    remaining mean, and returns it.

    Fix: the original only printed the mean and implicitly returned None,
    so the caller's `print(data_accuracy(...))` printed "None".
    """
    # Relative error of each prediction against the true value.
    differences = [abs(p - r) / r for p, r in zip(predictions, real)]
    # Percentile band used to exclude outliers.
    f = 0
    t = 90
    low, high = np.percentile(differences, [f, t])
    # Keep only values strictly inside the band.
    differences_filter = [d for d in differences if low < d < high]
    mean_diff = np.average(differences_filter)
    print(f"Differences excluding outliers: {mean_diff}")
    return mean_diff
# Earlier hyper-parameter sets kept for reference:
# clf = ensemble.GradientBoostingRegressor(n_estimators = 1100, max_depth = 15, min_samples_split = 9,learning_rate = 0.5, loss = 'squared_error')
# clf = ensemble.GradientBoostingRegressor(n_estimators = 1000, max_depth = 15, min_samples_split = 9, learning_rate = 0.2, loss = 'squared_error')
clf = ensemble.GradientBoostingRegressor(n_estimators = 600, max_depth = 7, min_samples_split = 5, learning_rate = 0.7, loss = 'squared_error')
data = pd.read_csv("PROJECTS/house-prices/HousePriceDataTRAINING.csv")
data.columns = ["long", "lat", "date", "price", "bed"]
# conv_dates = [0 if ("2011" in values or "2012" in values or "2013" in values or "2014" in values or "2015" in values or "2016" in values) else 1 for values in data.date ]
# Convert each "d/m/y" date into a single integer offset (day + 31*month +
# 366*year, shifted by 737691) — approximate day count, not calendar-exact.
conv_dates = []
for i in range(data.date.size):
    conv_dates.append(abs(int(data.at[i, "date"].split("/")[0]) + int(data.at[i, "date"].split("/")[1])*31 + int(data.at[i, "date"].split("/")[2])*366 - 737691))
data['date'] = conv_dates
labels = data['price']
train1 = data.drop('price', axis=1)
# 90/10 train/test split.
x_train, x_test, y_train, y_test = train_test_split(
    train1, labels, test_size=0.10)
# y_train = list(map(lambda p: np.log2(p), y_train))
clf.fit(x_train, y_train)
# x_pred = list(map(lambda p: 2**p, clf.predict(x_test)))
x_pred = clf.predict(x_test)
# print(clf.get_params())
# NOTE(review): arguments look swapped relative to the signature
# data_accuracy(predictions, real) — here the true labels are passed as
# "predictions". Verify intended order.
print(data_accuracy(y_test, x_pred)) | V1K1NGbg/House-Price-Prediction-Project | testing.py | testing.py | py | 2,216 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "numpy.percentile",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.average",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.GradientBoostingRegressor",
"line_number": 29,
"usage_type": "call"
},
{
"api_nam... |
34652323206 | # Subgroup enumeration for cyclic, dicyclic, and tricyclic integer groups.
# PM Larsen, 2019
#
# The theory implemented here is described for two-dimensional groups in:
# Representing and counting the subgroups of the group Z_m x Z_n
# Mario Hampejs, Nicki Holighaus, László Tóth, and Christoph Wiesmeyr
# Journal of Numbers, vol. 2014, Article ID 491428
# http://dx.doi.org./10.1155/2014/491428
# https://arxiv.org/abs/1211.1797
#
# and for three-dimensional groups in:
# On the subgroups of finite Abelian groups of rank three
# Mario Hampejs and László Tóth
# Annales Univ. Sci. Budapest., Sect. Comp. 39 (2013), 111–124
# https://arxiv.org/abs/1304.2961
import itertools
import numpy as np
from math import gcd
def get_divisors(n):
    """Return all positive divisors of n in increasing order."""
    divisors = []
    for candidate in range(1, n + 1):
        if n % candidate == 0:
            divisors.append(candidate)
    return divisors
def get_subgroup_elements(orders, H):
    """Enumerate all elements of the subgroup spanned by the (lower
    triangular) basis H inside the group Z_orders[0] x ... x Z_orders[-1].

    Returns an integer array of shape (subgroup size, dimension), each row
    an element reduced modulo the group orders.
    """
    diagonal = np.diag(H)
    # Subgroup size: product of order // pivot over nonzero pivots.
    size = 1
    for pivot, order in zip(diagonal, orders):
        if pivot:
            size *= order // pivot
    dimension = len(orders)
    elements = np.zeros((size, dimension), dtype=int)
    # First coordinate steps through multiples of H[0, 0].
    elements[:, 0] = H[0, 0] * np.arange(size)
    for i in range(1, dimension):
        if H[i, i] == 0:
            continue
        # Each block of `repeat` consecutive rows shares the same multiple
        # of row i of the basis.
        repeat = np.prod(orders[:i]) // np.prod(diagonal[:i])
        multiplier = np.arange(size) // repeat
        for j in range(i + 1):
            elements[:, j] += H[i, j] * multiplier
    return elements % orders
def consistent_first_rows(dimension, dm, ffilter):
    """Yield each divisor in dm whose trial basis (the divisor placed at
    H[0, 0] of an otherwise-zero matrix) passes ffilter (or all of them
    when ffilter is None)."""
    for candidate in dm:
        trial = np.zeros((dimension, dimension), dtype=int)
        trial[0, 0] = candidate
        if ffilter is None or ffilter(trial):
            yield candidate
def solve_linear_congruence(r, a, b, c, s, v):
    """Return the smallest u in [0, a] with
    (r // c) * u = r * v * s // (b * c)  (mod a); raise if none exists."""
    target = (r * v * s // (b * c)) % a
    for u in range(a + 1):
        if (r // c * u) % a == target:
            return u
    raise Exception("u not found")
def enumerate_subgroup_bases(orders, ffilter=None,
                             min_index=1, max_index=float("inf")):
    """Get the subgroup bases of a cyclic/dicyclic/tricyclic integer group.

    Parameters:

    orders: list-like integer object
        Orders of the constituent groups.
        [m] if the group is a cyclic group Zm
        [m, n] if the group is a dicyclic group Zm x Zn
        [m, n, r] if the group is a tricyclic group Zm x Zn x Zr

    ffilter: function, optional
        A boolean filter function. Avoids generation of unwanted subgroups by
        rejecting partial bases.

    min_index: int, optional
        Smallest subgroup index (group order / subgroup order) to yield.

    max_index: number, optional
        Largest subgroup index to yield.

    Returns iterator object yielding:

    H: integer ndarray
        Subgroup basis (lower triangular; diagonal entries divide the
        corresponding group orders).
    """
    dimension = len(orders)
    assert dimension in [1, 2, 3]
    if dimension == 1:
        m = orders[0]
    elif dimension == 2:
        m, n = orders
    else:
        m, n, r = orders

    dm = get_divisors(m)
    if dimension == 1:
        # 1D: every subgroup of Zm is generated by a single divisor d of m.
        for d in consistent_first_rows(dimension, dm, ffilter):
            group_index = m // d
            if group_index >= min_index and group_index <= max_index:
                yield np.array([[d]])
    elif dimension == 2:
        dn = get_divisors(n)
        for a in consistent_first_rows(dimension, dm, ffilter):
            for b in dn:
                group_index = m * n // (a * b)
                if group_index < min_index or group_index > max_index:
                    continue
                # Distinct subgroups per diagonal (a, b) are parameterized
                # by the off-diagonal entry s = t*a / gcd(a, n/b)
                # (Hampejs, Holighaus, Tóth, Wiesmeyr 2014).
                for t in range(gcd(a, n // b)):
                    s = t * a // gcd(a, n // b)
                    H = np.array([[a, 0], [s, b]])
                    if ffilter is None or ffilter(H):
                        yield H
    elif dimension == 3:
        dn = get_divisors(n)
        dr = get_divisors(r)
        for a in consistent_first_rows(dimension, dm, ffilter):
            for b, c in itertools.product(dn, dr):
                group_index = m * n * r // (a * b * c)
                if group_index < min_index or group_index > max_index:
                    continue
                # Parameter counts from Hampejs & Tóth (2013): A, B, C bound
                # the sub-diagonal entries s, v, u respectively.
                A = gcd(a, n // b)
                B = gcd(b, r // c)
                C = gcd(a, r // c)
                ABC = A * B * C
                X = ABC // gcd(a * r // c, ABC)
                for t in range(A):
                    s = a * t // A
                    # Partial basis with only s fixed; lets ffilter prune
                    # early before the inner two loops run.
                    H = np.zeros((dimension, dimension), dtype=int)
                    H[0] = [a, 0, 0]
                    H[1] = [s, b, 0]
                    H[2, 2] = r
                    if ffilter is not None and not ffilter(H):
                        continue
                    for w in range(B * gcd(t, X) // X):
                        v = b * X * w // (B * gcd(t, X))
                        # u is determined modulo a/C by a linear congruence.
                        u0 = solve_linear_congruence(r, a, b, c, s, v)
                        for z in range(C):
                            u = u0 + a * z // C
                            H = np.array([[a, 0, 0], [s, b, 0], [u, v, c]])
                            if ffilter is None or ffilter(H):
                                yield H
def count_subgroups(orders):
    """Count the number of subgroups of a cyclic/dicyclic/tricyclic integer
    group.

    Parameters:

    orders: list-like integer object
        Orders of the constituent groups.
        [m] if the group is a cyclic group Zm
        [m, n] if the group is a dicyclic group Zm x Zn
        [m, n, r] if the group is a tricyclic group Zm x Zn x Zr

    Returns:

    n: integer
        Number of subgroups.
    """
    def P(n):
        # Pillai's arithmetical function: sum of gcd(k, n) for k = 1..n.
        return sum(gcd(k, n) for k in range(1, n + 1))

    dimension = len(orders)
    assert dimension in [1, 2, 3]

    # Divisor lists of each constituent order, in the same order.
    divisor_lists = [get_divisors(order) for order in orders]

    if dimension == 1:
        # One subgroup of Zm per divisor of m.
        return len(divisor_lists[0])

    if dimension == 2:
        # Hampejs et al. (2014): sum of gcd(a, b) over divisor pairs.
        return sum(gcd(a, b) for a in divisor_lists[0] for b in divisor_lists[1])

    # Tricyclic case (Hampejs & Tóth 2013).
    m, n, r = orders
    total = 0
    for a, b, c in itertools.product(*divisor_lists):
        A = gcd(a, n // b)
        B = gcd(b, r // c)
        C = gcd(a, r // c)
        ABC = A * B * C
        X = ABC // gcd(a * r // c, ABC)
        total += ABC // X ** 2 * P(X)
    return total
| pmla/evgraf | evgraf/subgroup_enumeration.py | subgroup_enumeration.py | py | 6,050 | python | en | code | 13 | github-code | 6 | [
{
"api_name": "numpy.diag",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.prod",
"line_number": 3... |
33225197622 | # -*- coding: utf-8 -*-
""" #+begin_org
* *[Summary]* :: A =CmndLib= for providing currents configuration to CS-s.
#+end_org """
####+BEGIN: b:py3:cs:file/dblockControls :classification "cs-u"
""" #+begin_org
* [[elisp:(org-cycle)][| /Control Parameters Of This File/ |]] :: dblk ctrls classifications=cs-u
#+BEGIN_SRC emacs-lisp
(setq-local b:dblockControls t) ; (setq-local b:dblockControls nil)
(put 'b:dblockControls 'py3:cs:Classification "cs-u") ; one of cs-mu, cs-u, cs-lib, bpf-lib, pyLibPure
#+END_SRC
#+RESULTS:
: cs-u
#+end_org """
####+END:
####+BEGIN: b:prog:file/proclamations :outLevel 1
""" #+begin_org
* *[[elisp:(org-cycle)][| Proclamations |]]* :: Libre-Halaal Software --- Part Of BISOS --- Poly-COMEEGA Format.
** This is Libre-Halaal Software. © Neda Communications, Inc. Subject to AGPL.
** It is part of BISOS (ByStar Internet Services OS)
** Best read and edited with Blee in Poly-COMEEGA (Polymode Colaborative Org-Mode Enhance Emacs Generalized Authorship)
#+end_org """
####+END:
####+BEGIN: b:prog:file/particulars :authors ("./inserts/authors-mb.org")
""" #+begin_org
* *[[elisp:(org-cycle)][| Particulars |]]* :: Authors, version
** This File: /bisos/git/auth/bxRepos/bisos-pip/currents/py3/bisos/currents/currentsConfig.py
** Authors: Mohsen BANAN, http://mohsen.banan.1.byname.net/contact
#+end_org """
####+END:
####+BEGIN: b:python:file/particulars-csInfo :status "inUse"
""" #+begin_org
* *[[elisp:(org-cycle)][| Particulars-csInfo |]]*
#+end_org """
import typing
csInfo: typing.Dict[str, typing.Any] = { 'moduleName': ['currentsConfig'], }
csInfo['version'] = '202209290819'
csInfo['status'] = 'inUse'
csInfo['panel'] = 'currentsConfig-Panel.org'
csInfo['groupingType'] = 'IcmGroupingType-pkged'
csInfo['cmndParts'] = 'IcmCmndParts[common] IcmCmndParts[param]'
####+END:
""" #+begin_org
* /[[elisp:(org-cycle)][| Description |]]/ :: [[file:/bisos/git/auth/bxRepos/blee-binders/bisos-core/COMEEGA/_nodeBase_/fullUsagePanel-en.org][BISOS COMEEGA Panel]]
Module description comes here.
** Relevant Panels:
** Status: In use with blee3
** /[[elisp:(org-cycle)][| Planned Improvements |]]/ :
*** TODO complete fileName in particulars.
#+end_org """
####+BEGIN: b:prog:file/orgTopControls :outLevel 1
""" #+begin_org
* [[elisp:(org-cycle)][| Controls |]] :: [[elisp:(delete-other-windows)][(1)]] | [[elisp:(show-all)][Show-All]] [[elisp:(org-shifttab)][Overview]] [[elisp:(progn (org-shifttab) (org-content))][Content]] | [[elisp:(blee:ppmm:org-mode-toggle)][Nat]] | [[elisp:(bx:org:run-me)][Run]] | [[elisp:(bx:org:run-me-eml)][RunEml]] | [[elisp:(progn (save-buffer) (kill-buffer))][S&Q]] [[elisp:(save-buffer)][Save]] [[elisp:(kill-buffer)][Quit]] [[elisp:(org-cycle)][| ]]
** /Version Control/ :: [[elisp:(call-interactively (quote cvs-update))][cvs-update]] [[elisp:(vc-update)][vc-update]] | [[elisp:(bx:org:agenda:this-file-otherWin)][Agenda-List]] [[elisp:(bx:org:todo:this-file-otherWin)][ToDo-List]]
#+end_org """
####+END:
####+BEGIN: b:python:file/workbench :outLevel 1
""" #+begin_org
* [[elisp:(org-cycle)][| Workbench |]] :: [[elisp:(python-check (format "/bisos/venv/py3/bisos3/bin/python -m pyclbr %s" (bx:buf-fname))))][pyclbr]] || [[elisp:(python-check (format "/bisos/venv/py3/bisos3/bin/python -m pydoc ./%s" (bx:buf-fname))))][pydoc]] || [[elisp:(python-check (format "/bisos/pipx/bin/pyflakes %s" (bx:buf-fname)))][pyflakes]] | [[elisp:(python-check (format "/bisos/pipx/bin/pychecker %s" (bx:buf-fname))))][pychecker (executes)]] | [[elisp:(python-check (format "/bisos/pipx/bin/pycodestyle %s" (bx:buf-fname))))][pycodestyle]] | [[elisp:(python-check (format "/bisos/pipx/bin/flake8 %s" (bx:buf-fname))))][flake8]] | [[elisp:(python-check (format "/bisos/pipx/bin/pylint %s" (bx:buf-fname))))][pylint]] [[elisp:(org-cycle)][| ]]
#+end_org """
####+END:
####+BEGIN: b:py3:cs:orgItem/basic :type "=PyImports= " :title "*Py Library IMPORTS*" :comment "-- with classification based framework/imports"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* =PyImports= [[elisp:(outline-show-subtree+toggle)][||]] *Py Library IMPORTS* -- with classification based framework/imports [[elisp:(org-cycle)][| ]]
#+end_org """
####+END:
####+BEGIN: b:py3:cs:framework/imports :basedOn "classification"
""" #+begin_org
** Imports Based On Classification=cs-u
#+end_org """
from bisos import b
from bisos.b import cs
from bisos.b import b_io
import collections
####+END:
import os
import collections
#import enum
import shutil
import sys
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "Obtain Package Bases" :extraInfo ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *Obtain Package Bases:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: b:py3:cs:func/typing :funcName "configBaseDir_obtain" :deco "track"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-T- [[elisp:(outline-show-subtree+toggle)][||]] /configBaseDir_obtain/ deco=track [[elisp:(org-cycle)][| ]]
#+end_org """
@cs.track(fnLoc=True, fnEntry=True, fnExit=True)
def configBaseDir_obtain(
####+END:
) -> str:
    """ #+begin_org
** [[elisp:(org-cycle)][| *DocStr | ]
Return the full-use usage-environment BxO path reported by usgBpos.sh.
Returns "" (after logging the bad outcome) when the sub-process fails.
    #+end_org """
    # Shell out to usgBpos.sh; its stdout (minus the trailing newline)
    # is the configuration base directory.
    outcome = b.subProc.WOpW(invedBy=None, log=0).bash(
        f"""usgBpos.sh -i usgBpos_usageEnvs_fullUse_bxoPath""")
    if outcome.isProblematic():
        b_io.eh.badOutcome(outcome)
        return ""
    retVal = outcome.stdout.rstrip('\n')
    return retVal
####+BEGIN: bx:cs:python:func :funcName "configUsgCursBaseDir_obtain" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /configUsgCursBaseDir_obtain/ retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def configUsgCursBaseDir_obtain(
        configBaseDir,
):
####+END:
    """Return the absolute path of <base>/control/currents.

    When ``configBaseDir`` is empty/None the base is looked up with
    configBaseDir_obtain().
    """
    base = configBaseDir or configBaseDir_obtain()
    return os.path.abspath(os.path.join(base, "control/currents"))
####+BEGIN: bx:cs:python:func :funcName "configUsgCursFpBaseDir_obtain" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /configUsgCursFpBaseDir_obtain/ retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def configUsgCursFpBaseDir_obtain(
        configBaseDir,
):
####+END:
    """Return the absolute path of <base>/control/currents/fp.

    When ``configBaseDir`` is empty/None the base is looked up with
    configBaseDir_obtain().
    """
    base = configBaseDir or configBaseDir_obtain()
    return os.path.abspath(os.path.join(base, "control/currents/fp"))
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "File Parameters Obtain"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *File Parameters Obtain:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: bx:cs:python:func :funcName "bxoId_fpObtain" :comment "Configuration Parameter" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /bxoId_fpObtain/ =Configuration Parameter= retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def bxoId_fpObtain(
        configBaseDir,
):
####+END:
    """Read the 'bxoId' file parameter from <base>/usgCurs/fp.

    NOTE(review): the path helpers above anchor at control/currents/fp while
    this reader uses usgCurs/fp -- confirm which base is intended.
    """
    base = configBaseDir or configBaseDir_obtain()
    return b.fp.FileParamValueReadFrom(
        parRoot=os.path.abspath("{}/usgCurs/fp".format(base)),
        parName="bxoId",
    )
####+BEGIN: bx:cs:python:func :funcName "sr_fpObtain" :comment "Configuration Parameter" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /sr_fpObtain/ =Configuration Parameter= retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def sr_fpObtain(
        configBaseDir,
):
####+END:
    """Read the 'sr' (service realm) file parameter from <base>/usgCurs/fp.

    NOTE(review): the path helpers above anchor at control/currents/fp while
    this reader uses usgCurs/fp -- confirm which base is intended.
    """
    base = configBaseDir or configBaseDir_obtain()
    return b.fp.FileParamValueReadFrom(
        parRoot=os.path.abspath("{}/usgCurs/fp".format(base)),
        parName="sr",
    )
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "Common Command Parameter Specification"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *Common Command Parameter Specification:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: bx:cs:python:func :funcName "commonParamsSpecify" :funcType "void" :retType "bool" :deco "" :argsList "csParams"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-void [[elisp:(outline-show-subtree+toggle)][||]] /commonParamsSpecify/ retType=bool argsList=(csParams) [[elisp:(org-cycle)][| ]]
#+end_org """
def commonParamsSpecify(
        csParams,
):
####+END:
    """Register the common optional CS parameters on ``csParams``.

    Adds the three parameters shared by the usgCurs commands:
    ``configBaseDir`` (root of the file-parameter tree), ``bxoId`` and ``sr``.
    """
    csParams.parDictAdd(
        parName='configBaseDir',
        parDescription="Root Of usgCurs/fp from which file parameters will be read",
        parDataType=None,
        parDefault=None,
        parChoices=["any"],
        # parScope=cs.CmndParamScope.TargetParam,
        argparseShortOpt=None,
        argparseLongOpt='--configBaseDir',
    )
    csParams.parDictAdd(
        parName='bxoId',
        # Fixed copy-pasted description ("BISOS Default UserName" described a
        # different parameter); example values are BxO ids such as "mcm".
        parDescription="BISOS BxO (ByStar Object) identifier",
        parDataType=None,
        parDefault=None,
        parChoices=["any"],
        # parScope=cs.CmndParamScope.TargetParam,
        argparseShortOpt=None,
        argparseLongOpt='--bxoId',
    )
    csParams.parDictAdd(
        parName='sr',
        # Fixed copy-pasted description ("BISOS Default GroupName"); example
        # values are service-realm paths such as "marme/dsnProc".
        parDescription="BISOS Service Realm (sr) relative path",
        parDataType=None,
        parDefault=None,
        parChoices=["any"],
        # parScope=cs.CmndParamScope.TargetParam,
        argparseShortOpt=None,
        argparseLongOpt='--sr',
    )
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "Common Command Examples Sections"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *Common Command Examples Sections:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: bx:cs:python:func :funcName "examples_usgCursParsFull" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "configBaseDir"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /examples_usgCursParsFull/ retType=bool argsList=(configBaseDir) [[elisp:(org-cycle)][| ]]
#+end_org """
def examples_usgCursParsFull(
        configBaseDir,
):
####+END:
    """
** Auxiliary examples to be commonly used.
    """
    # Local helpers.  NOTE: menuItem is a closure over cmndName, cps and
    # cmndArgs -- it reads whatever those names hold *at call time* (late
    # binding), so every "cmndName = ..." line below re-targets it.
    def cpsInit(): return collections.OrderedDict()
    def menuItem(verbosity): cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity=verbosity,
            comment='none', icmWrapper=None, icmName=None) # verbosity: 'little' 'basic' 'none'
    def execLineEx(cmndStr): cs.examples.execInsert(execLine=cmndStr)  # not used below

    # --- Delete-all examples -------------------------------------------------
    cs.examples.menuChapter(' =FP Values= *usgCurs Clear InfoBase --- Deletes All FPs*')

    cmndName = "usgCursParsDelete" ; cmndArgs = "" ;
    cps = collections.OrderedDict() ; cps['configBaseDir'] = configBaseDir
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')

    cmndName = "usgCursParsDelete" ; cmndArgs = "" ; cps=cpsInit(); menuItem(verbosity='none')

    # "echo"-wrapped example: prints the command line instead of running it.
    cmndName = "usgCursParsDelete" ; cmndArgs = "anyName" ;
    cps = collections.OrderedDict() ;
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, icmWrapper="echo", verbosity='little')

    # --- Get examples --------------------------------------------------------
    cs.examples.menuChapter(' =FP Values= *usgCurs Get Parameters*')

    cmndName = "usgCursParsGet" ; cmndArgs = "" ;
    cps = collections.OrderedDict() ; cps['configBaseDir'] = configBaseDir
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')

    cmndName = "usgCursParsGet" ; cmndArgs = "" ; cps=cpsInit(); menuItem(verbosity='none')

    # --- Defaults-set examples ----------------------------------------------
    cs.examples.menuChapter(' =FP Values= *UsgCurs Defaults ParsSet --*')

    # NOTE(review): "cpsInit();" on the next two lines discards its result, so
    # menuItem reuses whatever cps was left from the previous example --
    # "cps = cpsInit()" was probably intended.
    cmndName = "usgCursParsDefaultsSet" ; cmndArgs = "bxoPolicy /" ;
    cpsInit(); menuItem('none')

    cmndName = "usgCursParsDefaultsSet" ; cmndArgs = "bxoPolicy /tmp" ;
    cpsInit(); menuItem('none')

    # --- Explicit-set examples ----------------------------------------------
    cs.examples.menuChapter(' =FP Values= *UsgCurs ParsSet -- Set Parameters Explicitly*')

    cmndName = "usgCursParsSet" ; cmndArgs = "" ;
    cps = collections.OrderedDict() ; cps['bxoId'] = "mcm"
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')

    cmndName = "usgCursParsSet" ; cmndArgs = "" ;
    cps = collections.OrderedDict() ; cps['bxoId'] = "ea-59043"
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')

    cmndName = "usgCursParsSet" ; cmndArgs = "" ;
    cps = collections.OrderedDict() ; cps['sr'] = "marme/dsnProc"
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')

    cmndName = "usgCursParsSet" ; cmndArgs = "" ;
    cps = collections.OrderedDict() ; cps['sr'] = "apache2/plone3"
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')

    # cmndName = "usgCursParsSet" ; cmndArgs = "" ;
    # cps = collections.OrderedDict() ; cps['configBaseDir'] = configBaseDir ; cps['platformControlBaseDir'] = "${HOME}/bisosControl"
    # cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')

    cmndName = "usgCursParsSet" ; cmndArgs = "anyName=anyValue" ;
    cps = collections.OrderedDict() ;
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, verbosity='little')

    cmndName = "usgCursParsSet" ; cmndArgs = "anyName=anyValue" ;
    cps = collections.OrderedDict() ;
    cs.examples.cmndInsert(cmndName, cps, cmndArgs, icmWrapper="echo", verbosity='little')
####+BEGIN: blee:bxPanel:foldingSection :outLevel 1 :title "File Parameters Get/Set -- Commands"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* [[elisp:(outline-show-subtree+toggle)][| *File Parameters Get/Set -- Commands:* |]] [[elisp:(org-shifttab)][<)]] E|
#+end_org """
####+END:
####+BEGIN: bx:cs:python:func :funcName "FP_readTreeAtBaseDir_CmndOutput" :funcType "anyOrNone" :retType "bool" :deco "" :argsList "interactive fpBaseDir cmndOutcome"
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /FP_readTreeAtBaseDir_CmndOutput/ retType=bool argsList=(interactive fpBaseDir cmndOutcome) [[elisp:(org-cycle)][| ]]
#+end_org """
def FP_readTreeAtBaseDir_CmndOutput(
        interactive,
        fpBaseDir,
        cmndOutcome,
):
####+END:
    """Invokes FP_readTreeAtBaseDir.cmnd as interactive-output only."""
    #
    # Interactive-Output + Chained-Outcome Command Invokation
    #
    # NOTE(review): `icm` is not imported anywhere in this region of the file;
    # unless it is imported earlier, calling this function raises NameError.
    # It looks like a leftover of the legacy ICM framework (this file otherwise
    # uses cs/b); callers such as usgCursParsGetK2 have this call commented out.
    FP_readTreeAtBaseDir = icm.FP_readTreeAtBaseDir()
    # Route the chained outcome through the invoked command.
    FP_readTreeAtBaseDir.cmndLineInputOverRide = True
    FP_readTreeAtBaseDir.cmndOutcome = cmndOutcome
    return FP_readTreeAtBaseDir.cmnd(
        interactive=interactive,
        FPsDir=fpBaseDir,
    )
####+BEGIN: b:py3:cs:cmnd/classHead :cmndName "usgCursParsDelete" :comment "" :parsMand "" :parsOpt "configBaseDir" :argsMin 0 :argsMax 9999 :pyInv ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* CmndSvc- [[elisp:(outline-show-subtree+toggle)][||]] <<usgCursParsDelete>> =verify= parsOpt=configBaseDir argsMax=9999 ro=cli [[elisp:(org-cycle)][| ]]
#+end_org """
class usgCursParsDelete(cs.Cmnd):
    """Delete usgCurs file parameters: all of them (no args) or the named ones."""
    cmndParamsMandatory = [ ]
    cmndParamsOptional = [ 'configBaseDir', ]
    cmndArgsLen = {'Min': 0, 'Max': 9999,}

    @cs.track(fnLoc=True, fnEntry=True, fnExit=True)
    def cmnd(self,
             rtInv: cs.RtInvoker,
             cmndOutcome: b.op.Outcome,
             configBaseDir: typing.Optional[str]=None,  # Cs Optional Param
             argsList: typing.Optional[list[str]]=None,  # CsArgs
    ) -> b.op.Outcome:
        callParamsDict = {'configBaseDir': configBaseDir, }
        if self.invocationValidate(rtInv, cmndOutcome, callParamsDict, argsList).isProblematic():
            return b_io.eh.badOutcome(cmndOutcome)
        cmndArgsSpecDict = self.cmndArgsSpec()
####+END:
        self.cmndDocStr(f""" #+begin_org
** [[elisp:(org-cycle)][| *CmndDesc:* | ]] Remove The entire infoBaseDir
#+end_org """)

        # Default to the usgCurs file-parameter base when none was given.
        if not configBaseDir:
            configBaseDir = configUsgCursFpBaseDir_obtain(None)

        cmndArgs = self.cmndArgsGet("0&-1", cmndArgsSpecDict, argsList)

        if len(cmndArgs) == 0:
            # No args: wipe the whole FP base, then recreate it empty.
            try:
                shutil.rmtree(configBaseDir)
            except OSError as e:
                # Best-effort: report and continue so the base gets recreated.
                print(f"Error: {configBaseDir} : {e.strerror}")
            b.dir.createIfNotThere(configBaseDir)
        else:
            # Args given: remove only the named parameter directories.
            for each in cmndArgs:
                parNameFullPath = os.path.join(
                    configBaseDir,
                    each
                )
                try:
                    shutil.rmtree(parNameFullPath)
                except OSError as e:
                    print(f"Error: {parNameFullPath} : {e.strerror}")

        return cmndOutcome

####+BEGIN: b:py3:cs:method/args :methodName "cmndArgsSpec" :methodType "anyOrNone" :retType "bool" :deco "default" :argsList "self"
    """ #+begin_org
** _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* Mtd-T-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /cmndArgsSpec/ deco=default deco=default [[elisp:(org-cycle)][| ]]
#+end_org """
    @cs.track(fnLoc=True, fnEntry=True, fnExit=True)
    def cmndArgsSpec(self, ):
####+END:
        """
***** Cmnd Args Specification
        """
        # Positional args 0..N are free-form parameter names.
        cmndArgsSpecDict = cs.CmndArgsSpecDict()
        cmndArgsSpecDict.argsDictAdd(
            argPosition="0&-1",
            argName="cmndArgs",
            argDefault=None,
            argChoices='any',
            argDescription="A sequence of parNames"
        )
        return cmndArgsSpecDict
####+BEGIN: b:py3:cs:func/typing :funcName "curParsGetAsDictValue_wOp" :funcType "WOp" :retType "extTyped" :deco "" :argsList ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-T-WOp [[elisp:(outline-show-subtree+toggle)][||]] /curParsGetAsDictValue_wOp/ [[elisp:(org-cycle)][| ]]
#+end_org """
def curParsGetAsDictValue_wOp(
####+END:
        parNamesList: list,
        outcome: b.op.Outcome = None,
) -> b.op.Outcome:
    """ #+begin_org
** [[elisp:(org-cycle)][| *DocStr | ] A Wrapped Operation whose results are a dictionary of current values.
An empty ~parNamesList~ means: fetch all the values.
*** TODO --- NOTYET This needs to be moved to
#+end_org """
    # Same as FP_parsGetAsDictValue_wOp, anchored at the usgCurs fp base.
    fpBase = configUsgCursFpBaseDir_obtain(None)
    return FP_parsGetAsDictValue_wOp(parNamesList, fpBase, outcome)
####+BEGIN: b:py3:cs:func/typing :funcName "FP_parsGetAsDictValue_wOp" :funcType "wOp" :retType "OpOutcome" :deco "" :argsList ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* F-T-wOp [[elisp:(outline-show-subtree+toggle)][||]] /FP_parsGetAsDictValue_wOp/ [[elisp:(org-cycle)][| ]]
#+end_org """
def FP_parsGetAsDictValue_wOp(
####+END:
        parNamesList: list,
        configBaseDir,
        outcome: b.op.Outcome = None,
) -> b.op.Outcome:
    """ #+begin_org
** [[elisp:(org-cycle)][| *DocStr | ] A Wrapped Operation with results being a dictionary of values.
if not ~parNamesList~, get all the values.
*** TODO --- NOTYET This needs to be moved to
#+end_org """
    # Delegates to the b.fp implementation.  A previous inline implementation
    # (re-reading the FP tree via FP_readTreeAtBaseDir_CmndOutput and building
    # opResults by hand) sat unreachable behind this return and was removed.
    return b.fp.parsGetAsDictValue_wOp(parNamesList, configBaseDir, outcome=outcome)
####+BEGIN: b:py3:cs:cmnd/classHead :cmndName "usgCursParsGetK2" :comment "" :parsMand "" :parsOpt "configBaseDir" :argsMin 0 :argsMax 9999 :pyInv ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* CmndSvc- [[elisp:(outline-show-subtree+toggle)][||]] <<usgCursParsGetK2>> =verify= parsOpt=configBaseDir argsMax=9999 ro=cli [[elisp:(org-cycle)][| ]]
#+end_org """
class usgCursParsGetK2(cs.Cmnd):
    """Read and print usgCurs file parameters (K2 variant: direct b.fp tree read)."""
    cmndParamsMandatory = [ ]
    cmndParamsOptional = [ 'configBaseDir', ]
    cmndArgsLen = {'Min': 0, 'Max': 9999,}

    @cs.track(fnLoc=True, fnEntry=True, fnExit=True)
    def cmnd(self,
             rtInv: cs.RtInvoker,
             cmndOutcome: b.op.Outcome,
             configBaseDir: typing.Optional[str]=None,  # Cs Optional Param
             argsList: typing.Optional[list[str]]=None,  # CsArgs
    ) -> b.op.Outcome:
        callParamsDict = {'configBaseDir': configBaseDir, }
        if self.invocationValidate(rtInv, cmndOutcome, callParamsDict, argsList).isProblematic():
            return b_io.eh.badOutcome(cmndOutcome)
        # NOTE(review): this class defines no cmndArgsSpec of its own; the call
        # below must resolve through the cs.Cmnd base class -- confirm it
        # provides a usable default.
        cmndArgsSpecDict = self.cmndArgsSpec()
####+END:
        self.cmndDocStr(f""" #+begin_org
** [[elisp:(org-cycle)][| *CmndDesc:* | ]] it reads from ../usgCurs/fp.
#+end_org """)

        if not configBaseDir:
            configBaseDir = configUsgCursFpBaseDir_obtain(None)
        cmndArgs = self.cmndArgsGet("0&-1", cmndArgsSpecDict, argsList)

        # FP_readTreeAtBaseDir_CmndOutput(
        #     interactive=False,
        #     fpBaseDir=configBaseDir,
        #     cmndOutcome=cmndOutcome,
        # )
        # Read the whole file-parameter tree in one wrapped operation;
        # results land in cmndOutcome.results keyed by parName.
        b.fp.readTreeAtBaseDir_wOp(configBaseDir, cmndOutcome=cmndOutcome)
        results = cmndOutcome.results

        if len(cmndArgs) == 0:
            # No args: print every parameter.
            for eachFpName in results:
                eachFpValue = results[eachFpName].parValueGet()
                print(f"{eachFpName} {eachFpValue}")
        else:
            # Named args only.  NOTE(review): an unknown parName raises
            # KeyError here -- confirm that is acceptable for CLI use.
            for each in cmndArgs:
                eachFpValue = results[each].parValueGet()
                print(f"{each} {eachFpValue}")

        return cmndOutcome
####+BEGIN: b:py3:cs:cmnd/classHead :cmndName "usgCursParsGet" :comment "" :parsMand "" :parsOpt "configBaseDir" :argsMin 0 :argsMax 9999 :pyInv ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* CmndSvc- [[elisp:(outline-show-subtree+toggle)][||]] <<usgCursParsGet>> =verify= parsOpt=configBaseDir argsMax=9999 ro=cli [[elisp:(org-cycle)][| ]]
#+end_org """
class usgCursParsGet(cs.Cmnd):
    """Get usgCurs file parameters as a name->value dict and optionally print them."""
    cmndParamsMandatory = [ ]
    cmndParamsOptional = [ 'configBaseDir', ]
    cmndArgsLen = {'Min': 0, 'Max': 9999,}

    @cs.track(fnLoc=True, fnEntry=True, fnExit=True)
    def cmnd(self,
             rtInv: cs.RtInvoker,
             cmndOutcome: b.op.Outcome,
             configBaseDir: typing.Optional[str]=None,  # Cs Optional Param
             argsList: typing.Optional[list[str]]=None,  # CsArgs
    ) -> b.op.Outcome:
        callParamsDict = {'configBaseDir': configBaseDir, }
        if self.invocationValidate(rtInv, cmndOutcome, callParamsDict, argsList).isProblematic():
            return b_io.eh.badOutcome(cmndOutcome)
        cmndArgsSpecDict = self.cmndArgsSpec()
####+END:
        self.cmndDocStr(f""" #+begin_org
** [[elisp:(org-cycle)][| *CmndDesc:* | ]] it reads from ../usgCurs/fp.
#+end_org """)

        # NOTE(review): configBaseDir is resolved here but not forwarded --
        # curParsGetAsDictValue_wOp below re-derives its own base from
        # configUsgCursFpBaseDir_obtain(None); confirm the parameter is meant
        # to be honored.
        if not configBaseDir:
            configBaseDir = configUsgCursFpBaseDir_obtain(None)
        cmndArgs = self.cmndArgsGet("0&-1", cmndArgsSpecDict, argsList)
        # Populate cmndOutcome.results with {parName: value}; no args fetches all.
        curParsGetAsDictValue_wOp(cmndArgs, cmndOutcome)
        results = cmndOutcome.results
        if rtInv.outs:
            for eachKey in results:
                print(f"{eachKey}: {results[eachKey]}")
        return cmndOutcome

####+BEGIN: b:py3:cs:method/args :methodName "cmndArgsSpec" :methodType "anyOrNone" :retType "bool" :deco "default" :argsList "self"
    """ #+begin_org
** _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* Mtd-T-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /cmndArgsSpec/ deco=default deco=default [[elisp:(org-cycle)][| ]]
#+end_org """
    @cs.track(fnLoc=True, fnEntry=True, fnExit=True)
    def cmndArgsSpec(self, ):
####+END:
        """
***** Cmnd Args Specification
        """
        # Positional args 0..N are free-form parameter names.
        cmndArgsSpecDict = cs.CmndArgsSpecDict()
        cmndArgsSpecDict.argsDictAdd(
            argPosition="0&-1",
            argName="cmndArgs",
            argDefault=None,
            argChoices='any',
            argDescription="A sequence of parNames"
        )
        return cmndArgsSpecDict
####+BEGIN: b:py3:cs:cmnd/classHead :cmndName "usgCursParsSet" :comment "" :parsMand "" :parsOpt "configBaseDir bxoId sr" :argsMin 0 :argsMax 1000 :pyInv ""
""" #+begin_org
* _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* CmndSvc- [[elisp:(outline-show-subtree+toggle)][||]] <<usgCursParsSet>> =verify= parsOpt=configBaseDir bxoId sr argsMax=1000 ro=cli [[elisp:(org-cycle)][| ]]
#+end_org """
class usgCursParsSet(cs.Cmnd):
    """Write usgCurs file parameters: free-form Name=Value args plus the
    well-known ``bxoId`` and ``sr`` parameters."""
    cmndParamsMandatory = [ ]
    cmndParamsOptional = [ 'configBaseDir', 'bxoId', 'sr', ]
    cmndArgsLen = {'Min': 0, 'Max': 1000,}

    @cs.track(fnLoc=True, fnEntry=True, fnExit=True)
    def cmnd(self,
             rtInv: cs.RtInvoker,
             cmndOutcome: b.op.Outcome,
             configBaseDir: typing.Optional[str]=None,  # Cs Optional Param
             bxoId: typing.Optional[str]=None,  # Cs Optional Param
             sr: typing.Optional[str]=None,  # Cs Optional Param
             argsList: typing.Optional[list[str]]=None,  # CsArgs
    ) -> b.op.Outcome:
        callParamsDict = {'configBaseDir': configBaseDir, 'bxoId': bxoId, 'sr': sr, }
        if self.invocationValidate(rtInv, cmndOutcome, callParamsDict, argsList).isProblematic():
            return b_io.eh.badOutcome(cmndOutcome)
        cmndArgsSpecDict = self.cmndArgsSpec()
####+END:
        self.cmndDocStr(f""" #+begin_org
** [[elisp:(org-cycle)][| *CmndDesc:* | ]] Args are in the form of a list of varName=varValue. Well known pars can also be set.
=configBaseDir= defaults to ~configBaseDir_obtain()~
#+end_org """)

        if not configBaseDir:
            configBaseDir = configBaseDir_obtain()

        cmndArgs = self.cmndArgsGet("0&-1", cmndArgsSpecDict, argsList)

        # Fixes relative to the previous revision:
        #  - b.fp.b.fp.FileParamWriteToPath -> b.fp.FileParamWriteToPath (the
        #    doubled attribute chain was a search/replace artifact; the read
        #    side below already uses b.fp.FileParamValueReadFromPath).
        #  - split('=', 1) so values may themselves contain '=' characters
        #    (plain split('=') silently dropped everything past the 2nd '=').
        #  - removed the unused inner helper createPathAndFpWrite (dead code).

        # Loop-invariant: every parameter lands under the usgCurs fp base.
        fpBaseDir = configUsgCursFpBaseDir_obtain(configBaseDir=configBaseDir)
        parNameFullPath = ""

        # Any number of Name=Value can be passed as args.
        for each in cmndArgs:
            varNameValue = each.split('=', 1)
            parNameFullPath = os.path.join(
                fpBaseDir,
                varNameValue[0],
            )
            b.fp.FileParamWriteToPath(
                parNameFullPath=parNameFullPath,
                parValue=varNameValue[1],
            )

        # Well-known parameters, written only when given on the command line.
        if bxoId:
            parNameFullPath = b.fp.FileParamWriteToPath(
                parNameFullPath=os.path.join(fpBaseDir, "bxoId"),
                parValue=bxoId,
            )
        if sr:
            parNameFullPath = b.fp.FileParamWriteToPath(
                parNameFullPath=os.path.join(fpBaseDir, "sr"),
                parValue=sr,
            )

        if rtInv.outs:
            # Echo back the last parameter that was written.
            parValue = b.fp.FileParamValueReadFromPath(parNameFullPath)
            b_io.ann.here("usgCursParsSet: {parValue} at {parNameFullPath}".
                          format(parValue=parValue, parNameFullPath=parNameFullPath))

        return cmndOutcome.set(
            opError=b.OpError.Success,
            opResults=True,
        )

####+BEGIN: b:py3:cs:method/args :methodName "cmndArgsSpec" :methodType "anyOrNone" :retType "bool" :deco "default" :argsList "self"
    """ #+begin_org
** _[[elisp:(blee:menu-sel:outline:popupMenu)][±]]_ _[[elisp:(blee:menu-sel:navigation:popupMenu)][Ξ]]_ [[elisp:(outline-show-branches+toggle)][|=]] [[elisp:(bx:orgm:indirectBufOther)][|>]] *[[elisp:(blee:ppmm:org-mode-toggle)][|N]]* Mtd-T-anyOrNone [[elisp:(outline-show-subtree+toggle)][||]] /cmndArgsSpec/ deco=default deco=default [[elisp:(org-cycle)][| ]]
#+end_org """
    @cs.track(fnLoc=True, fnEntry=True, fnExit=True)
    def cmndArgsSpec(self, ):
####+END:
        """
***** Cmnd Args Specification
        """
        # Positional args 0..N are free-form varName=varValue assignments.
        cmndArgsSpecDict = cs.CmndArgsSpecDict()
        cmndArgsSpecDict.argsDictAdd(
            argPosition="0&-1",
            argName="cmndArgs",
            argDefault=None,
            argChoices='any',
            argDescription="A sequence of varName=varValue"
        )
        return cmndArgsSpecDict
####+BEGIN: b:prog:file/endOfFile :extraParams nil
""" #+begin_org
* *[[elisp:(org-cycle)][| END-OF-FILE |]]* :: emacs and org variables and control parameters
#+end_org """
### local variables:
### no-byte-compile: t
### end:
####+END:
| bisos-pip/currents | py3/bisos/currents/currentsConfig.py | currentsConfig.py | py | 33,875 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "typing.Any",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "bisos.b.subProc.WOpW",
"line_number": 116,
"usage_type": "call"
},
{
"api_name": "bisos.b.subPro... |
28153506484 | import json
import numpy as np
def load_json(file_path : str) -> dict:
    """
    Load a dictionary from a .json file.

    Parameters
    ----------
    file_path : string
        Path to the .json file on disk.

    Returns
    -------
    json file : dictionary
        Parsed contents of the file.

    See Also
    --------
    read_GMR_file
    save_json_dicts

    Notes
    -----
    Thin wrapper around json.load; the file handle is closed on return.
    Intended for .json files that store dictionaries.

    Examples
    --------
    my_dictionary = load_json(file_path="/Path/To/File")

    """
    with open(file_path, 'r') as json_file:
        return json.load(json_file)
def read_GMR_file(file_path):
    '''
    Load txt output from GMRX spectrometer. Return wavelength in nm.

    Tries a semicolon delimiter first and falls back to a comma delimiter
    (the instrument exports both variants).  The bare ``except:`` of the
    previous revision was narrowed to ``ValueError`` so that real I/O errors
    (e.g. a missing file) propagate instead of triggering a second failing
    read attempt.

    Args:
        file_path: <string> path to file
    Returns:
        wavelength: <array> wavelength array
        intensity: <array> intensity array
    '''
    try:
        wavelength, intensity = np.genfromtxt(
            fname=file_path,
            delimiter=';',
            unpack=True)
    except ValueError:
        # Wrong delimiter yields a single unparseable column; retry with ','.
        wavelength, intensity = np.genfromtxt(
            fname=file_path,
            delimiter=',',
            unpack=True)
    return wavelength, intensity
def convert(o):
    """
    json.dump ``default`` hook that unboxes numpy scalar types.

    Parameters
    ----------
    o : object
        Object that json could not serialise natively.

    Returns
    -------
    object
        The equivalent built-in Python scalar when ``o`` is a numpy
        scalar (np.generic).

    Raises
    ------
    TypeError
        For any non-numpy-scalar input, as the json ``default``
        protocol requires.

    See Also
    --------
    save_json_dicts

    Notes
    -----
    None.

    Examples
    --------
    None.
    """
    if not isinstance(o, np.generic):
        raise TypeError
    return o.item()
def save_json_dicts(out_path : str,
                    dictionary : dict) -> None:
    """
    Write a dictionary to disk as pretty-printed JSON.

    Parameters
    ----------
    out_path : string
        Path of the output file.
    dictionary : dictionary
        Dictionary to save.

    Returns
    -------
    None

    See Also
    --------
    load_json
    convert

    Notes
    -----
    Serialised with indent=2 and a trailing newline; numpy scalars are
    handled by the ``convert`` hook.

    Examples
    --------
    save_json_dicts(
        out_path="/Path/To/File",
        dictionary=my_dictionary)

    """
    with open(out_path, 'w') as out_file:
        json.dump(dictionary, out_file, indent=2, default=convert)
        out_file.write('\n')
def reflectometer_in(file_path : str) -> tuple:
    """
    Loads text file output from the Filmetrics spectroscopic reflectometer.

    Loads a 3 column, comma delimited, .fitnk file output from a Filmetrics F20
    spectroscopic reflectometer.  (Return annotation fixed: the function
    returns a 3-tuple of arrays, not a list.)

    Parameters
    ----------
    file_path: string
        Path to file.

    Returns
    -------
    col0, col1, col2: tuple of arrays
        Typically wavelength (nm), n, k.

    See Also
    --------
    numpy genfromtxt

    Notes
    -----
    The .fitnk file from the Filmetrics F20 contains 5 header rows and 6 footer
    rows that are seemingly not useful information. The function skips over the
    rows.

    Examples
    --------
    None
    """
    col0, col1, col2 = np.genfromtxt(
        fname=file_path,
        delimiter=',',
        skip_header=5,   # instrument header carries no useful data
        skip_footer=6,   # instrument footer likewise
        unpack=True)
    return col0, col1, col2
def ellipsometer_in(file_path : str) -> tuple:
    """
    Load text file output from the J.A. Woollam VASE.

    Loads a 5 column, comma delimited, .csv file output from a J.A. Woollam
    variable angle spectroscopic ellipsometer.  (Return annotation fixed: the
    function returns a 5-tuple of arrays, not a list.)

    Parameters
    ----------
    file_path: string
        Path to file.

    Returns
    -------
    col0, col1, col2, col3, col4: tuple of arrays
        Typically wavelength (nm), sample psi, sample delta, model psi, model
        delta.

    See Also
    --------
    numpy genfromtxt

    Notes
    -----
    The first 2 rows are header lines and are skipped; only the first 5
    columns are read.

    Example
    -------
    None
    """
    col0, col1, col2, col3, col4 = np.genfromtxt(
        fname=file_path,
        delimiter=',',
        skip_header=2,
        usecols=(0, 1, 2, 3, 4),
        unpack=True)
    return col0, col1, col2, col3, col4
| jm1261/PeakFinder | src/fileIO.py | fileIO.py | py | 4,274 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "json.load",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "numpy.genfromtxt",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "numpy.generic",
"line... |
12858137004 | """
We are given a directed graph. We are given also a set of pairs of vertices.
Find the shortest distance between each pair of vertices or -1 if there is no path connecting them.
On the first line, you will get N, the number of vertices in the graph.
On the second line, you will get P, the number of pairs between which to find the shortest distance.
On the next N lines will be the edges of the graph and on the next P lines, the pairs.
"""
from collections import deque
from typing import Dict, List, Union
def build_graph(nodes: int) -> Dict[int, List[int]]:
    """Read `nodes` adjacency lines of the form "node:child child ..." from
    stdin and return the directed graph as an adjacency dict."""
    graph: Dict[int, List[int]] = {}
    for _ in range(nodes):
        raw_node, raw_children = input().split(':')
        # An empty children string means the node has no outgoing edges.
        children = list(map(int, raw_children.split(' '))) if raw_children else []
        graph[int(raw_node)] = children
    return graph
def bfs(graph: Dict[int, List[int]], source: int, destination: int) -> Dict[int, Union[None, int]]:
    """Breadth-first search from `source`, stopping early once `destination`
    is dequeued.  Returns the parent map (root maps to None); `destination`
    is absent from the map when it is unreachable."""
    parent: Dict[int, Union[None, int]] = {source: None}
    seen = {source}
    frontier = deque([source])

    while frontier:
        current = frontier.popleft()
        if current == destination:
            break
        for neighbour in graph[current]:
            if neighbour not in seen:
                seen.add(neighbour)
                parent[neighbour] = current
                frontier.append(neighbour)

    return parent
def find_size(parent: Dict[int, Union[None, int]], destination: int) -> int:
    """Count the edges on the path from `destination` back to the BFS root
    by walking the parent chain until the root's None parent is reached."""
    hops = -1
    current = destination
    while current is not None:
        current = parent[current]
        hops += 1
    return hops
# --- Script driver: read the graph, then answer P "source-destination" queries.
nodes = int(input())
pairs = int(input())
graph = build_graph(nodes)

for _ in range(pairs):
    source, destination = [int(x) for x in input().split('-')]
    parent = bfs(graph, source, destination)
    if destination not in parent:
        # Destination never entered the BFS parent map: unreachable.
        print(f'{{{source}, {destination}}} -> -1')
        continue
    # Path length = number of parent hops from destination back to source.
    size = find_size(parent, destination)
    print(f'{{{source}, {destination}}} -> {size}')
# Test solution at:
# https://judge.softuni.org/Contests/Practice/Index/3465#0
| dandr94/Algorithms-with-Python | 04. Minimum-spanning-tree-and-Shortest-path-in-Graph/02. Exercise/01. distance_between_vertices.py | 01. distance_between_vertices.py | py | 2,005 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": ... |
73952557948 | import os
today = '02-06-19_'
import numpy as np
import treecorr
def parse_args():
    """Build and parse the command-line options for the tau-correlation run.

    NOTE(review): several defaults below are hard-coded absolute paths on a
    specific cluster; confirm they are still valid before running elsewhere.
    """
    import argparse
    parser = argparse.ArgumentParser(description='Produce Tau correlations, i.e correlation among galaxies and reserved stars')
    # Metacalibration (galaxy shear) master catalog.
    parser.add_argument('--metacal_cat',
                        #default='/home2/dfa/sobreira/alsina/catalogs/y3_master/Y3_mastercat_v2_6_20_18_subsampled.h5',
                        #default='/home2/dfa/sobreira/alsina/catalogs/y3_master/Y3fullmaster/Y3_mastercat_v2_6_20_18.h5',
                        default='/home/dfa/sobreira/alsina/catalogs/Y3_mastercat_7_24/Y3_mastercat_7_24_19.h5',
                        help='Full Path to the Metacalibration catalog')
    # Piff PSF (reserved-stars) catalog.
    parser.add_argument('--piff_cat',
                        default='/home/dfa/sobreira/alsina/catalogs/y3a1-v29',
                        help='Full Path to the Only stars Piff catalog')
    parser.add_argument('--exps_file',
                        default='/home/dfa/sobreira/alsina/Y3_shearcat_tests/alpha-beta-eta-test/code/ally3.grizY',
                        #default='/home/dfa/sobreira/alsina/DESWL/psf/testexp',
                        help='list of exposures (in lieu of separate exps)')
    parser.add_argument('--bands', default='riz', type=str,
                        help='Limit to the given bands')
    # NOTE(review): default=True combined with action='store_const', const=True
    # makes passing --use_reserved (and --mod below) a no-op; there is no way
    # to switch these off from the command line -- confirm that is intended.
    parser.add_argument('--use_reserved', default=True,
                        action='store_const', const=True,
                        help='just use the objects with the RESERVED flag')
    parser.add_argument('--frac', default=1.,
                        type=float,
                        help='Choose a random fraction of the input stars')
    parser.add_argument('--mod', default=True,
                        action='store_const', const=True,
                        help='If true it substracts the mean to each field before calculate correlations')
    parser.add_argument('--obs', default=False,
                        action='store_const', const=True,
                        help='If true it uses psf_e stars for tau0')
    parser.add_argument('--weights', default=False,
                        action='store_const', const=True,
                        help='Use weights in the reading of Metacal')
    # Optional external treecorr binning configuration; None -> built-in dict.
    parser.add_argument('--bin_config', default=None,
                        help='bin_config file for running taus')
    parser.add_argument('--outpath', default='/home/dfa/sobreira/alsina/Y3_shearcat_tests/alpha-beta-eta-test/measured_correlations/',
                        help='location of the output of the files')
    parser.add_argument('--filename', default='TAUS_zbin_n.fits', type=str,
                        help='filename of the tau output file')
    # None -> non-tomographic run; an int selects one tomographic redshift bin.
    parser.add_argument('--zbin', default=None,type=int,
                        help='Run particular tomobin')
    parser.add_argument('--nz_source',
                        #default='/home/dfa/sobreira/alsina/catalogs/y3_master/nz_source_zbin.h5',
                        default='/home/dfa/sobreira/alsina/catalogs/Y3_mastercat_7_24/nz_source_zbin.h5',
                        help='Indexes catalog to select galaxies in a particular redshift bin in Metacal')

    args = parser.parse_args()

    return args
def main():
    """Measure the tau correlation statistics (galaxy shapes x PSF star
    residuals) and write them, together with a diagonal covariance matrix,
    to a multi-extension FITS file."""
    import sys; sys.path.append(".")
    from src.read_cats import read_data_stars, toList, read_metacal
    from src.runcorr import measure_tau
    from astropy.io import fits
    import treecorr
    args = parse_args()
    # Make the directory where the output files will be written.
    outpath = os.path.expanduser(args.outpath)
    try:
        if not os.path.exists(outpath):
            os.makedirs(outpath)
    except OSError:
        # Re-raise only if the directory still does not exist
        # (another process may have created it concurrently).
        if not os.path.exists(outpath): raise
    # Columns read from the reserved-stars (PIFF) catalog.
    keys = ['ra', 'dec','obs_e1', 'obs_e2', 'obs_T',
            'piff_e1', 'piff_e2', 'piff_T', 'mag']
    # Columns read from the Metacal galaxy catalog.
    galkeys = ['ra','dec','e_1','e_2','R11','R22']
    data_stars = read_data_stars(toList(args.exps_file),args.piff_cat, keys,limit_bands=args.bands,use_reserved=args.use_reserved)
    if args.bin_config is not None:
        print("Using external bin config")
        bin_config = treecorr.read_config(args.bin_config)
        print(bin_config)
    else:
        # Default angular binning: 20 log-spaced bins over 0.1-250 arcmin.
        bin_config = dict( sep_units = 'arcmin', min_sep = 0.1, max_sep = 250, nbins = 20, )
    # Tomographic (per-redshift-bin) vs non-tomographic galaxy selection.
    if args.zbin is not None:
        print('STARTING TOMOPRAPHIC TAUS!, measuring tau for zbin=', args.zbin)
        data_galaxies = read_metacal(args.metacal_cat, galkeys, zbin=args.zbin,nz_source_file=args.nz_source, weights=args.weights)
    else:
        print("STARTING NON TOMOGRAPHIC TAUS")
        data_galaxies = read_metacal(args.metacal_cat, galkeys, weights=args.weights )
    tau0, tau2, tau5= measure_tau( data_stars , data_galaxies, bin_config,
                                   mod=args.mod)
    # xip/xim components of each tau statistic, kept in output order.
    tau0marr = tau0.xim; tau2marr = tau2.xim; tau5marr = tau5.xim;
    tau0parr = tau0.xip; tau2parr = tau2.xip; tau5parr = tau5.xip;
    taus = [tau0parr, tau0marr, tau2parr, tau2marr, tau5parr, tau5marr]
    taus_names = ['TAU0P', 'TAU0M','TAU2P','TAU2M', 'TAU5P', 'TAU5M']
    ## Format of the FITS table output (one row per angular bin).
    names=['BIN1', 'BIN2','ANGBIN', 'VALUE', 'ANG']
    forms = ['i4', 'i4', 'i4', 'f8', 'f8']
    dtype = dict(names = names, formats=forms)
    nrows = len(tau0marr)
    outdata = np.recarray((nrows, ), dtype=dtype)
    # Diagonal covariance assembled from the treecorr variance estimates,
    # block-ordered to match the six tau extensions below.
    covmat = np.diag(np.concatenate( (tau0.varxip, tau0.varxim, tau2.varxip, tau2.varxim, tau5.varxip, tau5.varxim ) ))
    hdu = fits.PrimaryHDU()
    hdul = fits.HDUList([hdu])
    covmathdu = fits.ImageHDU(covmat, name='COVMAT')
    hdul.insert(1, covmathdu)
    bin1array = np.array([ -999]*nrows)  # -999: tomographic bin not applicable here
    bin2array = np.array([ -999]*nrows)
    angbinarray = np.arange(nrows)
    angarray = np.exp(tau0.meanlogr)     # mean separation per bin (sep_units)
    # One BinTable extension per tau component.
    for j, nam in enumerate(taus_names):
        array_list = [bin1array, bin2array, angbinarray,np.array(taus[j]), angarray ]
        for array, name in zip(array_list, names): outdata[name] = array
        corrhdu = fits.BinTableHDU(outdata, name=nam)
        hdul.insert(j+2, corrhdu)
    # COVMAT header: start offset and length of each statistic's block
    # inside the concatenated covariance matrix.
    hdul[1].header['COVDATA'] = True
    hdul[1].header['EXTNAME'] = 'COVMAT'
    hdul[1].header['NAME_0'] = 'TAU0P'
    hdul[1].header['STRT_0'] = 0
    hdul[1].header['LEN_0'] = nrows
    hdul[1].header['NAME_1'] = 'TAU0M'
    hdul[1].header['STRT_1'] = nrows
    hdul[1].header['LEN_1'] = nrows
    hdul[1].header['NAME_2'] = 'TAU2P'
    hdul[1].header['STRT_2'] = 2*nrows
    hdul[1].header['LEN_2'] = nrows
    hdul[1].header['NAME_3'] = 'TAU2M'
    hdul[1].header['STRT_3'] = 3*nrows
    hdul[1].header['LEN_3'] = nrows
    hdul[1].header['NAME_4'] = 'TAU5P'
    hdul[1].header['STRT_4'] = 4*nrows
    hdul[1].header['LEN_4'] = nrows
    hdul[1].header['NAME_5'] = 'TAU5M'
    hdul[1].header['STRT_5'] = 5*nrows
    hdul[1].header['LEN_5'] = nrows
    # Quantity labels for each correlation extension (pairs share a label).
    hdul[2].header['QUANT1'] = 'GeR'; hdul[3].header['QUANT1'] = 'GeR'
    hdul[2].header['QUANT2'] = 'PeR'; hdul[3].header['QUANT2'] = 'PeR'
    hdul[4].header['QUANT1'] = 'GeR'; hdul[5].header['QUANT1'] = 'GeR'
    hdul[4].header['QUANT2'] = 'PqR'; hdul[5].header['QUANT2'] = 'PqR'
    hdul[6].header['QUANT1'] = 'GeR'; hdul[7].header['QUANT1'] = 'GeR'
    hdul[6].header['QUANT2'] = 'PwR'; hdul[7].header['QUANT2'] = 'PwR'
    filename = os.path.join(outpath, args.filename)
    print("Printing file:", filename)
    hdul.writeto(filename, overwrite=True)
if __name__ == "__main__":
    main()
| des-science/Y3_shearcat_tests | alpha-beta-eta-test/code/essentials/taus.py | taus.py | py | 7,792 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "os.path.expandus... |
23525022654 | from matplotlib import pyplot as plt
import numpy as np
import collections
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
import matplotlib.pyplot as plt
# Get cpu or gpu device for training.
device = "cuda" if torch.cuda.is_available() else "cpu"
print("Using {} device".format(device))

# CIFAR-10 training set, normalized with precomputed per-channel mean/std.
cifar10_train = datasets.CIFAR10(
    root='data', train=True, download=True,
    transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4915, 0.4823, 0.4468),
                             (0.2470, 0.2435, 0.2616))]))

# Split the official training set 80/20 into train and validation parts.
train_length = len(cifar10_train)
train_size = int(0.8 *train_length)
val_size = train_length - train_size
cifar10_train, cifar10_val = torch.utils.data.random_split(cifar10_train, [train_size, val_size])

# Held-out test set with the same normalization.
cifar10_test = datasets.CIFAR10(
    root='data', train=False, download=True,
    transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4915, 0.4823, 0.4468),
                             (0.2470, 0.2435, 0.2616))]))

# Reduce the task to binary classification: keep only airplanes (label 0)
# and birds (label 2), remapped to 0 and 1 respectively.
label_map = {0: 0, 2: 1}
class_names = ['airplane', 'bird']
cifar10_train_ = [(img, label_map[label])
                  for img, label in cifar10_train
                  if label in [0, 2]]
cifar10_val_ = [(img, label_map[label])
                for img, label in cifar10_val
                if label in [0, 2]]
cifar10_test_ = [(img, label_map[label])
                 for img, label in cifar10_test
                 if label in [0, 2]]

# Per-epoch histories, filled in by training_loop() and plotted at the end.
train_loss_list = []
val_loss_list = []
epoch_list = []
class Net(nn.Module):
    """Small two-stage CNN for 2-class 32x32 CIFAR images.

    Layout: two conv->batchnorm->tanh->maxpool stages (3->16->8 channels,
    spatial 32->16->8), then a 512->32->2 fully connected head.
    Returns raw logits (no softmax); pair with CrossEntropyLoss.
    """
    def __init__(self):
        super().__init__()
        # Stage 1: 3 -> 16 channels, spatial 32 -> 16 after pooling.
        self.conv1 = nn.Conv2d(3, 16, kernel_size=3, padding=1)
        self.conv1_batchnorm = nn.BatchNorm2d(16)
        self.act1 = nn.Tanh()
        self.pool1 = nn.MaxPool2d(2)
        # Stage 2: 16 -> 8 channels, spatial 16 -> 8 after pooling.
        self.conv2 = nn.Conv2d(16, 8, kernel_size=3, padding=1)
        self.conv2_batchnorm = nn.BatchNorm2d(8)
        self.act2 = nn.Tanh()
        self.pool2 = nn.MaxPool2d(2)
        # Classifier head: 8 channels * 8 * 8 spatial = 512 features.
        self.fc1 = nn.Linear(8 * 8 * 8, 32)
        self.act3 = nn.Tanh()
        self.fc2 = nn.Linear(32, 2)

    def forward(self, x):
        h = self.pool1(self.act1(self.conv1_batchnorm(self.conv1(x))))
        h = self.pool2(self.act2(self.conv2_batchnorm(self.conv2(h))))
        # Collapse (N, 8, 8, 8) feature maps into (N, 512) vectors.
        h = h.view(-1, 8 * 8 * 8)
        return self.fc2(self.act3(self.fc1(h)))
import datetime # to measure time
def training_loop(n_epochs, optimizer, model, loss_fn, train_loader, val_loader, epoch_num_of_no_improve):
    """Train ``model`` with a manual L2 penalty and patience-based early stopping.

    Side effects: appends per-epoch averages to the module-level
    ``epoch_list``, ``train_loss_list`` and ``val_loss_list``; relies on the
    module-level ``device`` for tensor placement.

    n_epochs: maximum number of epochs.
    epoch_num_of_no_improve: patience — stop once this many consecutive
        epochs pass without a validation-loss improvement.
    """
    epoch_no_improve = 0  # consecutive epochs without val-loss improvement
    for epoch in range(1, n_epochs+1):
        loss_train = 0.0
        for imgs, labels in train_loader:
            # Move tensors to the GPU if one is available.
            imgs = imgs.to(device=device)
            labels = labels.to(device=device)
            outputs = model(imgs)
            loss = loss_fn(outputs, labels)
            # Manual L2 regularization over all parameters (weight decay).
            l2_lambda = 0.001
            l2_norm = sum(p.pow(2.0).sum()
                          for p in model.parameters())
            loss = loss + l2_lambda * l2_norm
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            loss_train += loss.item()
        epoch_list.append(epoch)
        train_loss_list.append(loss_train / len(train_loader))  # mean batch loss
        # Validation pass — no gradients needed.
        with torch.no_grad():
            loss_val = 0.0
            for imgs, labels in val_loader:
                imgs = imgs.to(device=device)
                labels = labels.to(device=device)
                outputs = model(imgs)
                loss_v = loss_fn(outputs, labels)
                loss_val += loss_v.item()
            val_loss_list.append(loss_val / len(val_loader))
        # NOTE: `epoch % 1 == 0` is always true, so progress prints every
        # epoch; raise the modulus to print less often.
        if epoch == 1 or epoch % 1 == 0:
            print('Epoch {}, Training loss {}, Validation loss {}'.format(epoch,
                                                                          loss_train / len(train_loader),
                                                                          loss_val / len(val_loader)))
        # Early stopping on the validation-loss history.
        if epoch > 1:
            if val_loss_list[-1] >= val_loss_list[-2]:
                epoch_no_improve += 1
            else:
                epoch_no_improve = 0
            if epoch_no_improve == epoch_num_of_no_improve:
                print('Early stopping:')
                print('Epoch {}, Training loss {}, Validation loss {}'.format(epoch,
                                                                              loss_train / len(train_loader),
                                                                              loss_val / len(val_loader)))
                break
def validate_on_test(model, train_loader, val_loader, test_loader):
    """Print classification accuracy on the train, validation and test splits.

    Uses the module-level ``device`` for tensor placement; evaluation runs
    without gradient tracking.
    """
    splits = (("train", train_loader), ("val", val_loader), ("test", test_loader))
    for split_name, loader in splits:
        n_correct = 0
        n_seen = 0
        with torch.no_grad():
            for batch, targets in loader:
                batch = batch.to(device=device)
                targets = targets.to(device=device)
                logits = model(batch)
                # Predicted class = index of the largest logit per sample.
                _, predictions = torch.max(logits, dim=1)
                n_seen += targets.shape[0]
                n_correct += int((predictions == targets).sum())
        print("Accuracy {}: {:.2f} %".format(split_name, 100 * (n_correct / n_seen)))
# Hyperparameters and training objects.
n_epochs = 100
model = Net().to(device=device)
optimizer = optim.ASGD(model.parameters(), lr=1e-2)
loss_fn = nn.CrossEntropyLoss()
# shuffle=False keeps batch order deterministic between runs.
train_loader = torch.utils.data.DataLoader(cifar10_train_, batch_size=64, shuffle=False)
val_loader = torch.utils.data.DataLoader(cifar10_val_, batch_size=64, shuffle=False)
test_loader = torch.utils.data.DataLoader(cifar10_test_, batch_size=64, shuffle=False)
epoch_num_of_no_improve = 5  # early-stopping patience
training_loop(
    n_epochs = n_epochs,
    optimizer = optimizer,
    model = model,
    loss_fn = loss_fn,
    train_loader = train_loader,
    val_loader = val_loader,
    epoch_num_of_no_improve=epoch_num_of_no_improve)
validate_on_test(model, train_loader, val_loader, test_loader)
# Plot the loss curves recorded by training_loop().
plt.plot(epoch_list, train_loss_list, color='blue', label='train_loss')
plt.plot(epoch_list, val_loss_list, color='green', label='validation loss')
plt.xlabel('epoch')
plt.ylabel('loss')
plt.legend()
plt.show()
{
"api_name": "torch.cuda.is_available",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torchvision.datasets.CIFAR10",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "... |
23048935694 | from sqlalchemy.orm import Session
from .. import models, schemas
from fastapi.encoders import jsonable_encoder
def get_score(db: Session):
    """Return the single Score row, creating it lazily on first access."""
    score = db.query(models.Score).first()
    if score:
        return score
    # No row yet: persist a fresh zero-zero score and hand it back.
    new_score = create_score()
    db.add(new_score)
    db.commit()
    db.refresh(new_score)
    return new_score
def post_goal(request: schemas.Goal, db: Session):
    """Record a goal for one team and return the updated score row.

    request: Goal payload whose ``team`` field is "home" or "away"
        (anything other than "home" counts for the away side, as before).
    db: active database session.
    """
    # Reuse get_score's get-or-create logic instead of duplicating it here;
    # this also drops the redundant re-query the old code did right after
    # db.refresh() had already loaded the freshly created row.
    score = get_score(db)
    payload = jsonable_encoder(request)
    if payload["team"] == "home":
        score.home += 1
    else:
        score.away += 1
    db.commit()
    return score
def create_score():
    """Build a fresh Score row with both tallies at zero (not yet persisted)."""
    return models.Score(home=0, away=0)
| hooglander/fastapi-get-and-post | app/repository/score.py | score.py | py | 873 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "fastapi.encoders.jsonable_encoder",
"line_number": 25,
"usage_type": "call"
}
] |
71968698427 | import torch.nn as nn
from collections import OrderedDict
from graph_ter_seg.tools import utils
class EdgeConvolution(nn.Module):
    """Single EdgeConv layer: k-NN edge features -> 1x1 conv -> BN -> LeakyReLU,
    followed by a max over the neighbor dimension.

    k: number of nearest neighbors used to build edge features.
    in_features/out_features: per-point channel counts (the conv consumes
        2*in_features because edge features concatenate point and neighbor).
    """
    def __init__(self, k, in_features, out_features):
        super(EdgeConvolution, self).__init__()
        self.k = k
        # Edge features double the channel count, hence in_features * 2.
        self.conv = nn.Conv2d(
            in_features * 2, out_features, kernel_size=1, bias=False
        )
        self.bn = nn.BatchNorm2d(out_features)
        self.relu = nn.LeakyReLU(negative_slope=0.2)

    def forward(self, x):
        edges = utils.get_edge_feature(x, k=self.k)
        activated = self.relu(self.bn(self.conv(edges)))
        # Aggregate over the k neighbors with a channel-wise max.
        return activated.max(dim=-1, keepdim=False)[0]
class MultiEdgeConvolution(nn.Module):
    """EdgeConv block whose pointwise transform is an MLP of conv-bn-relu layers.

    k: number of nearest neighbors used to build edge features.
    in_features: per-point input channels (the first layer consumes
        2*in_features because edge features concatenate point and neighbor).
    mlp: sequence of output channel counts, one per conv-bn-relu layer.
    """
    def __init__(self, k, in_features, mlp):
        super(MultiEdgeConvolution, self).__init__()
        self.k = k
        self.conv = nn.Sequential()
        for index, feature in enumerate(mlp):
            # The two original branches differed only in the input channel
            # count, so compute it once: the first layer sees the doubled
            # edge features, later layers chain off the previous width.
            in_channels = in_features * 2 if index == 0 else mlp[index - 1]
            layer = nn.Sequential(OrderedDict([
                ('conv%d' % index, nn.Conv2d(
                    in_channels, feature, kernel_size=1, bias=False
                )),
                ('bn%d' % index, nn.BatchNorm2d(feature)),
                ('relu%d' % index, nn.LeakyReLU(negative_slope=0.2))
            ]))
            self.conv.add_module('layer%d' % index, layer)

    def forward(self, x):
        x = utils.get_edge_feature(x, k=self.k)
        x = self.conv(x)
        # Aggregate over the k neighbors with a channel-wise max.
        x = x.max(dim=-1, keepdim=False)[0]
        return x
class Flatten(nn.Module):
    """Collapse every non-batch dimension into one: (N, ...) -> (N, prod(...))."""
    def __init__(self):
        super(Flatten, self).__init__()

    def forward(self, x):
        batch_size = x.size(0)
        return x.view(batch_size, -1)
def main():
    # Smoke test: build a two-layer edge-conv block and print its structure.
    conv = MultiEdgeConvolution(k=20, mlp=(64, 64), in_features=64)
    print(conv)
if __name__ == '__main__':
    main()
| gyshgx868/graph-ter | graph_ter_seg/models/layers.py | layers.py | py | 2,158 | python | en | code | 56 | github-code | 6 | [
{
"api_name": "torch.nn.Module",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numb... |
12805757281 | import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
import random
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
# Root folder holding one sub-directory per class of training images.
Datadirectory = "train\\"
# The seven emotion classes, named after their folder names.
Classes = ["0", "1", "2", "3", "4", "5", "6"]
# MobileNetV2's expected input resolution.
img_size = 224
# Filled by createtrainingset() with [image_array, class_index] pairs.
training_data = []
counter = 0  # NOTE(review): never used below — candidate for removal
def createtrainingset():
    """Populate the module-level ``training_data`` list.

    Walks each class folder under ``Datadirectory`` and appends
    ``[resized_image, class_index]`` pairs, resizing every readable image
    to ``img_size`` x ``img_size``. Unreadable files are skipped.
    """
    # enumerate() yields the class index directly, replacing the O(n)
    # Classes.index() lookup the old code did per category.
    for class_num, category in enumerate(Classes):
        path = os.path.join(Datadirectory, category)
        for img in os.listdir(path):
            img_arr = cv2.imread(os.path.join(path, img))
            if img_arr is None:
                # cv2.imread signals unreadable/corrupt files by returning
                # None rather than raising — skip them explicitly instead of
                # relying on resize() to blow up inside a blanket except.
                continue
            try:
                new_arr = cv2.resize(img_arr, (img_size, img_size))
            except Exception:
                # Best-effort scan: ignore images resize cannot handle.
                continue
            training_data.append([new_arr, class_num])
# Build the in-memory dataset, then fine-tune MobileNetV2 on it.
createtrainingset()
print(len(training_data))
random.shuffle(training_data)  # mix the classes before training
X = []  # Images (features)
y = []  # Labels
for feature, label in training_data:
    X.append(feature)
    y.append(label)
y = np.array(y)
X = np.array(X)
X = X.reshape(-1, img_size, img_size, 3)
X = X / 255.0  # Normalize the image data between 0 and 1
print(X.shape)
print(y.shape)
plt.imshow(X[0])  # sanity-check one sample
plt.show()
model = tf.keras.applications.MobileNetV2()
# TRANSFER LEARNING: reuse the pretrained backbone, drop its final
# classification layer, and attach a small dense head for 7 classes.
base_input = model.layers[0].input
base_output = model.layers[-2].output  # features just before the old classifier
final_output = layers.Dense(128)(base_output)
final_output = layers.Activation('relu')(final_output)
final_output = layers.Dense(64)(final_output)
final_output = layers.Activation('relu')(final_output)
final_output = layers.Dense(7, activation = 'softmax')(final_output)
new_model = keras.Model(inputs = base_input, outputs = final_output)
new_model.compile(loss = "sparse_categorical_crossentropy", optimizer = "adam", metrics = ["accuracy"])
new_model.fit(X,y, epochs=10, batch_size = 8)
new_model.save('onbes_epoch.h5')
| Mudaferkaymak/Detecting-Faces-and-Analyzing-Them-with-Computer-Vision | Detecting-Faces-and-Analyzing-Them-with-Computer-Vision/training_themodel.py | training_themodel.py | py | 1,867 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_number": ... |
21840251334 | """Order views module"""
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework import status as st
from rest_framework import generics
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.parsers import JSONParser
from rest_framework.response import Response
from rest_framework.exceptions import MethodNotAllowed, NotFound
from rest_framework.decorators import api_view
from orders.models import Order, STATUS_CHOICES
from orders.serializers import OrderSerializer
from orders.pagination import CustomPagination
from order_flow.settings import DEBUG
class OrderAPIListCreate(generics.ListCreateAPIView):
    """List orders as JSON and create new ones.

    Supports filtering by ``external_id``/``status``, ordering by
    ``id``/``status``/``created_at``, and custom pagination.
    """

    # Expose the browsable HTML renderer only in DEBUG builds.
    renderer_classes = (
        [JSONRenderer, BrowsableAPIRenderer] if DEBUG else [JSONRenderer]
    )

    queryset = Order.objects.all()
    serializer_class = OrderSerializer
    pagination_class = CustomPagination
    filter_backends = [DjangoFilterBackend, filters.OrderingFilter]
    filterset_fields = ['external_id', 'status']
    ordering_fields = ['id', 'status', 'created_at']
class OrderAPIRetrieveUpdateDestroy(generics.RetrieveUpdateDestroyAPIView):
    """Retrieve, update (always partially) or delete a single order."""

    # Expose the browsable HTML renderer only in DEBUG builds.
    renderer_classes = (
        [JSONRenderer, BrowsableAPIRenderer] if DEBUG else [JSONRenderer]
    )
    parser_classes = [JSONParser]
    queryset = Order.objects.all()
    serializer_class = OrderSerializer

    def put(self, request, *args, **kwargs):
        """Treat PUT as a partial update so omitted fields are preserved."""
        return self.partial_update(request, *args, **kwargs)

    def perform_destroy(self, instance):
        """Delete the order unless it has already been accepted."""
        if instance.status == 'accepted':
            raise MethodNotAllowed(
                'delete',
                detail="You can not delete orders with status 'accepted'.",
            )
        instance.delete()
@api_view(['POST'])
def status_change(request, pk, status):
    """Transition order ``pk`` from 'new' to 'accepted' or 'failed'.

    Raises NotFound for a missing order and MethodNotAllowed when either
    the target status or the order's current status is not permitted.
    """
    try:
        order = Order.objects.get(id=pk)
    except Order.DoesNotExist:
        raise NotFound(f'Order with id {pk} does not exist.')
    # The old check accepted any value from STATUS_CHOICES — including
    # 'new' — which contradicted the error message below; validate against
    # the two statuses this endpoint actually documents.
    if status not in ('accepted', 'failed'):
        raise MethodNotAllowed(
            'post',
            detail="You can change order status"
                   " only to 'accepted' or 'failed'",
        )
    if order.status != 'new':
        raise MethodNotAllowed(
            'post',
            detail="You can not change order status if it is not 'new'",
        )
    order.status = status
    order.save()
    return Response(status=st.HTTP_200_OK)
| GunGalla/order-flow-test | orders/views.py | views.py | py | 2,855 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "order_flow.settings.DEBUG",
"line_number": 22,
"usage_type": "name... |
15047866942 | # from __future__ import absolute_import
import torch
import torch.nn as nn
import onnx
from typing import List, Dict, Union, Optional, Tuple, Sequence
import copy
from .util import*
from torch.autograd import Variable
class onnxTorchModel(nn.Module):
def __init__(self,onnx_model: onnx.ModelProto,cfg:dict):
super(onnxTorchModel,self).__init__()
self.onnx_model=onnx_model
self.nodes=self.onnx_model.graph.node
self.pad_split=cfg["pad_split"]
self.weights_in_constant_flg=False
if len(onnx_model.graph.initializer)==0:
self.weights_in_constant_flg=True
self.op_type_list=[]
self.current_id=0
self.forwardExcList=[]
self.onnxBlobNameTable={}
self.generateOnnxBlobNameTable()
self.parseOnnx()
def getOnnxNameFromTable(self,name):
for n in self.onnxBlobNameTable.keys():
if self.onnxBlobNameTable[n]==name:
return n
def forward(self, input):
net_input=self.onnx_model.graph.input
net_output=self.onnx_model.graph.output
if len(net_input)==1:
exc_str="{node_input}=input".format(node_input=self.onnxBlobNameTable[net_input[0].name])
exec(exc_str)
for exc_info in self.forwardExcList:
if "exec_pad" in exc_info.keys():
exec(exc_info["exec_pad"])
exc_str=exc_info["exec"]
exec(exc_str)
if len(net_output)==1:
exc_str="self.net_output={node_output}".format(node_output=self.onnxBlobNameTable[net_output[0].name])
exec(exc_str)
return self.net_output
def parseOnnx(self):
nodes = self.onnx_model.graph.node
for nid,node in enumerate(nodes):
self.current_id=nid
op_type=node.op_type
if op_type not in self.op_type_list:
self.op_type_list.append(op_type)
print("Parsing onnx:",op_type)
if op_type=="Conv":
self.parseConv(node)
elif op_type=="BatchNormalization":
self.parseBN(node)
elif op_type=="Flatten":
self.parseFlatten(node)
elif op_type=="Relu":
self.parseRelu(node)
elif op_type=="MaxPool":
self.parseMaxPool(node)
elif op_type=="Add":
self.parseAdd(node)
elif op_type=="GlobalAveragePool":
self.parseGlobalAveragePool(node)
elif op_type=="MatMul":
self.parseMatMul(node)
elif op_type=="Softmax":
self.parseSoftmax(node)
elif op_type=="Identity":
self.parseIdentity(node)
elif op_type=="Constant":
self.parseNonWeightsConstant(node)
# torch.nn.Conv2d(in_channels: int, out_channels: int,
# kernel_size: Union[T, Tuple[T, T]], stride: Union[T, Tuple[T, T]] = 1,
# padding: Union[T, Tuple[T, T]] = 0, dilation: Union[T, Tuple[T, T]] = 1,
# groups: int = 1, bias: bool = True, padding_mode: str = 'zeros')
def parseConv(self,node):
attr=attribute_to_dict(node.attribute)
if(self.weights_in_constant_flg):
wt,bt=get_conv_params_in_constant(node,self.onnx_model.graph.node)
has_bias=True
if len(node.input)==2:
has_bias=False
c,n,k_w,k_h=wt.shape
c=c*int(attr["group"])
n=n*int(attr["group"])
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
pad_t=attr["pads"][0]
pad_b=attr["pads"][2]
pad_l=attr["pads"][1]
pad_r=attr["pads"][3]
if(pad_t!=pad_b or pad_l!=pad_r or self.pad_split):
exc_str_pad="{var_name}_pad=nn.ConstantPad2d(padding={padding},value={value})".format(var_name=var_name,padding=(pad_l,pad_r,pad_t,pad_b),value=0)
exc_str_conv="{var_name}=nn.Conv2d(in_channels={in_channels},out_channels={out_channels},kernel_size={kernel_size},stride={stride},padding={padding},dilation={dilation},groups={groups},bias={bias})".format(var_name=var_name,\
in_channels=c,\
out_channels=n,\
kernel_size=tuple(attr["kernel_shape"]),\
stride=tuple(attr["strides"]),\
padding=(0,0),\
dilation=tuple(attr["dilations"]),\
groups=attr["group"],\
bias=True)
self.generateForwardExec(node,var_name,op_pad_split=True)
exec(exc_str_pad)
exec(exc_str_conv)
exc_init_weights_str="{var_name}.weight=torch.nn.Parameter(torch.Tensor(wt))".format(var_name=var_name)
exec(exc_init_weights_str)
else:
exc_str="{var_name}=nn.Conv2d(in_channels={in_channels},out_channels={out_channels},kernel_size={kernel_size},stride={stride},padding={padding},dilation={dilation},groups={groups},bias={bias})".format(var_name=var_name,\
in_channels=c,\
out_channels=n,\
kernel_size=tuple(attr["kernel_shape"]),\
stride=tuple(attr["strides"]),\
padding=tuple(attr["pads"][:2]),\
dilation=tuple(attr["dilations"]),\
groups=attr["group"],\
bias=True)
self.generateForwardExec(node,var_name)
exec(exc_str)
exc_init_weights_str="{var_name}.weight=torch.nn.Parameter(torch.Tensor(wt))".format(var_name=var_name)
exec(exc_init_weights_str)
if has_bias:
self.forwardExcList[len(self.forwardExcList)-1]["has_bias"]=True
exc_init_bias_str="{var_name}.bias=torch.nn.Parameter(torch.Tensor(bt))".format(var_name=var_name)
exec(exc_init_bias_str)
else:
self.forwardExcList[len(self.forwardExcList)-1]["has_bias"]=False
exc_init_bias_str="nn.init.constant_({var_name}.bias, 0)".format(var_name=var_name)
exec(exc_init_bias_str)
# torch.nn.BatchNorm2d(num_features, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
def parseBN(self,node):
attr=attribute_to_dict(node.attribute)
if(self.weights_in_constant_flg):
bn_scale,bn_B,bn_mean,bn_var=get_bn_params_in_constant(node,self.onnx_model.graph.node)
n=bn_scale.shape[0]
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
exc_str="{var_name}=nn.BatchNorm2d(num_features={num_features},eps={eps},momentum={momentum})".format(var_name=var_name,\
num_features=n,eps=attr["epsilon"],momentum=attr["momentum"])
exec(exc_str)
bn_scale,bn_B,bn_mean,bn_var=get_bn_params_in_constant(node, self.nodes)
exc_init_scale_str="{var_name}.weight=torch.nn.Parameter(torch.Tensor(bn_scale))".format(var_name=var_name)
exc_init_bias_str="{var_name}.bias=torch.nn.Parameter(torch.Tensor(bn_B))".format(var_name=var_name)
exc_init_mean_str="{var_name}.running_mean=torch.Tensor(bn_mean)".format(var_name=var_name)
exc_init_var_str="{var_name}.running_var=torch.Tensor(bn_var)".format(var_name=var_name)
exec(exc_init_scale_str)
exec(exc_init_bias_str)
exec(exc_init_mean_str)
exec(exc_init_var_str)
self.generateForwardExec(node,var_name)
def parseFlatten(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
exc_str="{var_name}=nn.Flatten(start_dim={start_dim})".format(var_name=var_name,start_dim=attr["axis"])
self.generateForwardExec(node,var_name)
exec(exc_str)
def parseRelu(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
exc_str="{var_name}=nn.ReLU()".format(var_name=var_name)
self.generateForwardExec(node,var_name)
exec(exc_str)
# torch.nn.MaxPool2d(kernel_size: Union[T, Tuple[T, ...]],
# stride: Optional[Union[T, Tuple[T, ...]]] = None,
# padding: Union[T, Tuple[T, ...]] = 0, dilation: Union[T, Tuple[T, ...]] = 1,
# return_indices: bool = False, ceil_mode: bool = False)
def parseMaxPool(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
pad_t=attr["pads"][0]
pad_b=attr["pads"][2]
pad_l=attr["pads"][1]
pad_r=attr["pads"][3]
if(pad_t!=pad_b or pad_l!=pad_r or pad_r!=pad_t or self.pad_split):
exc_str_pad="{var_name}_pad=nn.ConstantPad2d(padding={padding},value={value})".format(var_name=var_name,padding=(pad_l,pad_r,pad_t,pad_b),value=0)
exc_str="{var_name}=nn.MaxPool2d(kernel_size={kernel_shape},padding={pads},stride={strides})".format(var_name=var_name,\
kernel_shape=tuple(attr["kernel_shape"]),\
pads=0,\
strides=tuple(attr["strides"]))
exec(exc_str_pad)
exec(exc_str)
self.generateForwardExec(node,var_name,op_pad_split=True)
else:
exc_str="{var_name}=nn.MaxPool2d(kernel_size={kernel_shape},padding={pads},stride={strides})".format(var_name=var_name,\
kernel_shape=tuple(attr["kernel_shape"]),\
pads=attr["pads"][0],\
strides=tuple(attr["strides"]))
exec(exc_str)
self.generateForwardExec(node,var_name)
def parseAdd(self,node):
attr=attribute_to_dict(node.attribute)
var_name="torch.add"
self.generateForwardExecMultiInput(node,var_name,filter_const=False,is_instance=False)
def parseGlobalAveragePool(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
exc_str="{var_name}=nn.AdaptiveAvgPool2d((1, 1))".format(var_name=var_name)
self.generateForwardExec(node,var_name)
exec(exc_str)
def parseMatMul(self,node):
attr=attribute_to_dict(node.attribute)
var_name="torch.matmul"
self.generateForwardExecMultiInput(node,var_name,filter_const=False,is_instance=False)
def parseSoftmax(self,node):
attr=attribute_to_dict(node.attribute)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
if attr["axis"]==-1:
exc_str="{var_name}=nn.Softmax(dim=1)".format(var_name=var_name)
exec(exc_str)
else:
exc_str="{var_name}=nn.Softmax(dim={dim})".format(var_name=var_name,dim= attr["axis"])
exec(exc_str)
self.generateForwardExec(node,var_name)
def parseIdentity(self,node):
inputs=node.input
outputs=node.output
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
input_blob=self.onnxBlobNameTable[inputs[0]]
output_blob=self.onnxBlobNameTable[outputs[0]]
forwardExcStr="{output_name}={input_name}".format(output_name=output_blob,input_name=input_blob)
nodeInfoDict={"exec":forwardExcStr,"var_name":var_name,"type":"Identity","input":[input_blob],"output":[output_blob],"is_instance":False,"id":self.current_id}
self.forwardExcList.append(nodeInfoDict)
def parseNonWeightsConstant(self,node):
output_name=node.output[0]
next_type=get_node_type_by_input(output_name,self.nodes)
weight_node_list=["Conv","BatchNormalization"]
if next_type not in weight_node_list:
constant_tonser=get_tensor_in_constant(output_name,self.nodes)
var_name="self.{type}_{id}".format(type=node.op_type,id=self.current_id)
output_blob=self.onnxBlobNameTable[output_name]
exc_str="{var_name}=torch.nn.Parameter(torch.tensor(constant_tonser))".format(var_name=var_name)
exec(exc_str)
forwardExcStr="{output}={var_name}".format(output=output_blob,var_name=var_name)
nodeInfoDict={"exec":forwardExcStr,"var_name":var_name,"type":node.op_type,"input":[],"output":[output_blob],"is_instance":True}
self.forwardExcList.append(nodeInfoDict)
###################################### support func area
def generateForwardExec(self,node,var_name,filter_const=True,is_instance=True,op_pad_split=False):
inputs=node.input
outputs=node.output
# node_type=node.op_type
# next_type=
dynamic_input=[]
dynamic_output=[]
for inputname in inputs:
if filter_const and get_node_type_by_output(inputname,self.nodes)=="Constant":
continue
dynamic_input.append(self.onnxBlobNameTable[inputname])
for outputname in outputs:
dynamic_output.append(self.onnxBlobNameTable[outputname])
if len(dynamic_input)>1:
assert(0)
if len(dynamic_input)==0:
dynamic_input.append(self.onnxBlobNameTable[inputs[0]])
input_blob=dynamic_input[0]
output_blob=dynamic_output[0]
if op_pad_split:
forwardExcStrPad="{output_name}_pad={var_name}_pad({input_name})".format(output_name=input_blob,var_name=var_name,input_name=input_blob)
forwardExcStr="{output_name}={var_name}({input_name}_pad)".format(output_name=output_blob,var_name=var_name,input_name=input_blob)
nodeInfoDict={"exec":forwardExcStr,"exec_pad":forwardExcStrPad,"var_name":var_name,"type":node.op_type,"input":dynamic_input,"output":[output_blob],"is_instance":is_instance,"id":self.current_id}
else:
forwardExcStr="{output_name}={var_name}({input_name})".format(output_name=output_blob,var_name=var_name,input_name=input_blob)
nodeInfoDict={"exec":forwardExcStr,"var_name":var_name,"type":node.op_type,"input":dynamic_input,"output":[output_blob],"is_instance":is_instance,"id":self.current_id}
self.forwardExcList.append(nodeInfoDict)
for i in range(1,len(dynamic_output)):
forwardExcStr="{output_name}={input_name}".format(output_name=dynamic_output[i],input_name=dynamic_output[0])
nodeInfoDict={"exec":forwardExcStr,"var_name":"Copy","type":"Copy","input":[dynamic_output[0]],"output":[output_blob],"is_instance":False,"id":self.current_id}
self.forwardExcList.append(nodeInfoDict)
def generateForwardExecMultiInput(self,node,var_name,filter_const=True,is_instance=True):
    """Emit the forward-pass statement for a multi-input ONNX node.

    Same record format as ``generateForwardExec`` but all dynamic inputs are
    passed comma-separated to the generated layer call. Extra outputs get
    "Copy" records aliasing the first output.
    """
    dynamic_input = []
    dynamic_output = []
    for input_name in node.input:
        # Constant inputs are baked into the layer, not fed at runtime.
        if filter_const and get_node_type_by_output(input_name, self.nodes) == "Constant":
            continue
        dynamic_input.append(self.onnxBlobNameTable[input_name])
    for output_name in node.output:
        dynamic_output.append(self.onnxBlobNameTable[output_name])
    output_blob = dynamic_output[0]
    # Join the runtime inputs as "blob_a,blob_b,..." (replaces a manual
    # concatenate-then-strip-leading-comma loop that also shadowed input_blob).
    input_blob_str = ",".join(dynamic_input)
    forwardExcStr = "{output_name}={var_name}({input_name})".format(
        output_name=output_blob, var_name=var_name, input_name=input_blob_str)
    nodeInfoDict = {"exec": forwardExcStr, "var_name": var_name, "type": node.op_type,
                    "input": dynamic_input, "output": [output_blob],
                    "is_instance": is_instance, "id": self.current_id}
    self.forwardExcList.append(nodeInfoDict)
    for i in range(1, len(dynamic_output)):
        forwardExcStr = "{output_name}={input_name}".format(
            output_name=dynamic_output[i], input_name=dynamic_output[0])
        # BUG FIX: report the blob actually written instead of the first output.
        nodeInfoDict = {"exec": forwardExcStr, "var_name": "Copy", "type": "Copy",
                        "input": [dynamic_output[0]], "output": [dynamic_output[i]],
                        "is_instance": False, "id": self.current_id}
        self.forwardExcList.append(nodeInfoDict)
def generateOnnxBlobNameTable(self):
    """Assign a unique "self.blob_<k>" variable name to every distinct blob
    that appears as a node input or output, in graph traversal order."""
    next_id = 0
    for node in self.onnx_model.graph.node:
        # Inputs first, then outputs — ids follow first-seen order.
        for blob_name in list(node.input) + list(node.output):
            if blob_name not in self.onnxBlobNameTable:
                self.onnxBlobNameTable[blob_name] = "self.blob_" + str(next_id)
                next_id += 1
def getFeatureTensor(self,name):
    """Resolve a generated blob variable name (e.g. "self.blob_3") and return it.

    NOTE(review): uses exec on a generated expression; safe only as long as
    *name* always comes from onnxBlobNameTable, never from user input.
    """
    exec("self.outTensor= " + name)
    return self.outTensor
{
"api_name": "torch.nn.Module",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "onnx.ModelProto",
"line_number": 11,
"usage_type": "attribute"
}
] |
75188719226 | # 초기 거리를 1로 지정
# 가까운 곳부터 수행하는 bfs이기에 이미 최단거리가 기록된 경우에는 거리가 갱신되지 않도록 설정
from collections import deque
def bfs(x, y):
    """Breadth-first search over the global maze `graph`, writing shortest
    distances into it in place and returning the distance to the
    bottom-right cell.

    Cells hold 0 for walls and 1 for unvisited open cells; a visited cell is
    overwritten with (distance of its predecessor + 1), so it is never
    updated twice — BFS reaches every cell first along a shortest path.
    """
    frontier = deque([(x, y)])
    while frontier:
        cx, cy = frontier.popleft()
        # Probe the four orthogonal neighbours.
        for step_x, step_y in zip(dx, dy):
            nx, ny = cx + step_x, cy + step_y
            # Stay inside the grid, skip walls, and only record a distance
            # the first time an open cell is reached.
            if 0 <= nx < N and 0 <= ny < M and graph[nx][ny] == 1:
                graph[nx][ny] = graph[cx][cy] + 1
                frontier.append((nx, ny))
    # Shortest distance to the bottom-right corner.
    return graph[N - 1][M - 1]
# Read the maze dimensions: N rows, M columns.
N, M = map(int, input().split())
# Read the maze as a 2-D list (each input line is a string of 0/1 digits).
graph = []
for i in range(N):
    graph.append(list(map(int, input())))
# Row/column offsets for up / down / left / right moves.
dx = [-1, 1, 0, 0]
dy = [0, 0, -1, 1]
# Print the shortest escape-path length from the top-left cell.
print(bfs(0, 0))
| zacinthepark/Problem-Solving-Notes | na/02/DFS-BFS/미로탈출.py | 미로탈출.py | py | 1,348 | python | ko | code | 0 | github-code | 6 | [
{
"api_name": "collections.deque",
"line_number": 8,
"usage_type": "call"
}
] |
41152326339 | from tkinter import *
from datetime import datetime, timedelta
import tkinter as tk
from tkinter import Entry, Label, StringVar, ttk, Checkbutton, Button, messagebox
import numpy as np
import pandas as pd
def generarCodigo(texto):
    """Build a short code for *texto*: its first three characters, the two
    characters at the middle (shifted one position right when the middle
    character is a space), its last character, and its length."""
    mitad = len(texto) // 2
    desplazamiento = 1 if texto[mitad] == " " else 0
    partes = [
        texto[:3],
        texto[mitad + desplazamiento: mitad + 2 + desplazamiento],
        texto[len(texto) - 1],
        str(len(texto)),
    ]
    return "".join(partes)
def moda(lista):
    """Return the most frequent element of *lista* (its mode).

    Ties keep the element whose first occurrence comes earliest, matching the
    original left-to-right scan (Counter preserves insertion order and
    most_common's sort is stable). Raises IndexError on an empty list, like
    the original ``lista[0]`` access did.

    Replaces an O(n^2) ``list.count``-in-a-loop implementation with a single
    O(n) Counter pass.
    """
    from collections import Counter
    if not lista:
        # Preserve the original failure mode for empty input.
        raise IndexError("list index out of range")
    return Counter(lista).most_common(1)[0][0]
def nombreIncorrecto(texto):
    """Return True when *texto* contains a digit or a forbidden symbol."""
    invalidos = '1234567890!#$%&/()=?¡¿´*{¨]}[-_.:;,<>|°'
    return any(caracter in invalidos for caracter in texto)
class Tabla:
    """Read-only table of tkinter Entry widgets backed by a pandas DataFrame.

    The widget grid and ``self.df`` are kept in sync: ``ingresarDatos``
    appends a row to both, ``borrarUltimaFila`` drops the last row from both.
    """

    def __init__(self, root, dataFrame, anchos, fechas, bgColor, posX, posY):
        """Render *dataFrame* as a table placed at (posX, posY) inside *root*.

        anchos  -- per-column Entry widths.
        fechas  -- indices of columns to display as dd/mm/yy dates.
        bgColor -- background colour of the header row.
        """
        self.anchos = anchos
        self.fechas = fechas
        self.nuevoDatos = []
        self.componentes = []
        cont = 0
        self.df = dataFrame
        self.frm = ttk.Frame(root)
        # Header row: one Entry per DataFrame column name.
        for k in dataFrame:
            tmp = Entry(
                self.frm,
                width=anchos[cont],
                bg=bgColor,
                fg="black",
                font=("Arial", 12),
                highlightthickness=1,
                highlightbackground="#000000",
                highlightcolor="#000000",
            )
            tmp.grid(row=0, column=cont)
            tmp.insert(INSERT, k)
            cont += 1
        self.lista = list(dataFrame.to_records(index=False))
        self.filas = len(self.lista)
        self.columnas = cont
        # Data rows: read-only Entries; date columns are reformatted.
        for i in range(self.filas):
            row = []
            for j in range(self.columnas):
                aux = Entry(
                    self.frm,
                    width=anchos[j],
                    fg="black",
                    font=(
                        "Arial",
                        12,
                    ),
                    highlightthickness=1,
                    highlightbackground="#000000",
                    highlightcolor="#000000",
                )
                aux.grid(row=i + 1, column=j)
                if len(fechas) == 0:
                    aux.insert(INSERT, self.lista[i][j])
                else:
                    if j in fechas:
                        # Normalise whatever the sheet stored to dd/mm/yy.
                        aux.insert(
                            INSERT,
                            pd.to_datetime(self.lista[i][j])
                            .date()
                            .strftime("%d/%m/%y"),
                        )
                    else:
                        aux.insert(INSERT, self.lista[i][j])
                aux.configure(state="readonly")
                row.append(aux)
            self.componentes.append(row)
        self.frm.pack()
        self.frm.place(x=posX, y=posY)

    def ingresarDatos(self, datos):
        """Append *datos* (one value per column) as a new read-only row,
        updating the widget grid, ``self.lista`` and ``self.df``."""
        self.lista.append(datos)
        for i in range(self.columnas):
            aux = Entry(
                self.frm,
                width=self.anchos[i],
                fg="black",
                font=(
                    "Arial",
                    12,
                ),
                highlightthickness=1,
                highlightbackground="#000000",
                highlightcolor="#000000",
            )
            aux.grid(row=self.filas + 1, column=i)
            aux.insert(INSERT, datos[i])
            aux.configure(state="readonly")
        self.df.loc[self.df.shape[0]] = datos
        self.filas += 1
        return

    def borrarUltimaFila(self):
        """Remove the last data row from the widgets, ``self.df`` and
        ``self.lista``; shows an error box when the table is empty."""
        if self.filas < 1:
            messagebox.showerror(
                title="ERROR", message="No hay datos que puedan ser borrados"
            )
            return
        cont = 0
        # Children were created header-first, so indices >= columnas * filas
        # are exactly the widgets of the last data row.
        for i in self.frm.winfo_children():
            if cont >= self.columnas * self.filas:
                i.destroy()
            cont += 1
        self.df = self.df[:-1]
        self.lista.pop()
        self.filas -= 1
class FrmIngresoDeLibros:
    """Screen for registering book stock.

    Persists to two workbooks: Libros.xlsx (catalogue + intake) and
    EstadoLibros.xlsx (availability / borrowed / remaining counters).
    """

    def __init__(self, master, regresar):
        self.frm = ttk.Frame(master)
        self.nombreLibro = StringVar()    # book name input
        self.cantidadLibro = StringVar()  # quantity input
        self.tabla = None                 # Tabla showing Libros.xlsx
        self.agregarComponentes(master, regresar)

    def agregarComponentes(self, master, regresar):
        """Build the header, input fields and buttons, then show the table.

        NOTE(review): widgets are created without an explicit parent, so they
        attach to the default root window rather than ``self.frm`` — confirm
        this is intentional.
        """
        hoy = datetime.today().strftime("%d/%m/%y")
        Label(
            text="INGRESO DE LIBROS",
            font=("Arial", 24, "bold"),
            bg="#315E7A",
            fg="white",
            width="500",
            height="2",
        ).pack()
        Label(
            text=hoy,
            font=("Arial", 12),
            bg="#00E0FF",
            fg="white",
            width="20",
            height="1",
        ).pack()
        Label(text="Libro", font=("Arial", 12, "bold")).place(x=150, y=150)
        Entry(
            textvariable=self.nombreLibro,
            width="25",
            font=("Arial", 12),
            highlightthickness=2,
            highlightbackground="#000000",
            highlightcolor="#000000",
        ).place(x=250, y=150)
        Label(text="Cant", font=("Arial", 12, "bold")).place(x=520, y=150)
        Entry(
            textvariable=self.cantidadLibro,
            width="25",
            font=("Arial", 12),
            highlightthickness=2,
            highlightbackground="#000000",
            highlightcolor="#000000",
        ).place(x=590, y=150)
        Button(
            text="Borrar",
            font=("Arial", 12),
            width="20",
            bg="#D0A9F5",
            height="2",
            command=self.borrar,
        ).place(x=150, y=250)
        Button(
            text="Ingresar",
            font=("Arial", 12),
            width="20",
            bg="#D0A9F5",
            height="2",
            command=lambda: self.ingresar(master),
        ).place(x=400, y=250)
        Button(
            text="Regresar",
            font=("Arial", 12),
            width="20",
            bg="#D0A9F5",
            height="2",
            command=regresar,
        ).place(x=650, y=250)
        self.mostrarTabla(master)

    def borrar(self):
        """Drop the last registered book from the table and from both
        workbooks, then rewrite the files."""
        self.tabla.borrarUltimaFila()
        archivo = self.tabla.df
        archivo2 = pd.read_excel("EstadoLibros.xlsx", sheet_name="Hoja1")
        archivo2 = archivo2[:-1]
        archivo.to_excel("Libros.xlsx", sheet_name="Hoja1", index=False)
        archivo2.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)

    def mostrarTabla(self, master):
        """Load Libros.xlsx and display it (column 2 rendered as a date)."""
        archivo = pd.read_excel("Libros.xlsx", sheet_name="Hoja1")
        anchos = [5, 40, 20, 20, 5]
        fechas = [2]
        self.tabla = Tabla(master, archivo, anchos, fechas, "#154673", 100, 350)

    def ingresar(self, master):
        """Validate the form and register *stock* copies of the named book.

        If the book already exists its stock is increased in both workbooks;
        otherwise a new row is appended to each.
        """
        n = len(self.tabla.lista) + 1
        nombre = self.nombreLibro.get()
        if nombre == "":
            messagebox.showerror(
                title="ERROR", message="El nombre ingresado es incorrecto"
            )
            return
        fecha = datetime.now().date().strftime("%d/%m/%y")
        try:
            stock = int(self.cantidadLibro.get())
        except ValueError:
            messagebox.showerror(
                title="ERROR", message="La cantidad ingresada es incorrecta"
            )
            return
        if stock <= 0:
            messagebox.showerror(
                title="ERROR", message="Debe ingresar una cantidad mayor a 0"
            )
            return
        if len(self.tabla.df[self.tabla.df["Nombre del Libro"] == nombre]) > 0:
            # Existing book: bump its stock in Libros.xlsx ...
            index = self.tabla.df.index[
                self.tabla.df["Nombre del Libro"] == nombre
            ].tolist()[0]
            valores = self.tabla.df[self.tabla.df["Nombre del Libro"] == nombre].values[
                0
            ]
            valores[3] += stock
            self.tabla.df.loc[index] = valores
            self.tabla.frm.destroy()
            archivo = self.tabla.df
            archivo.to_excel("Libros.xlsx", sheet_name="Hoja1", index=False)
            # ... and in EstadoLibros.xlsx: [5] total, [4] available = total - borrowed.
            # NOTE(review): assumes both sheets share the same row index for
            # a given book — verify when rows are ever deleted independently.
            archivo2 = pd.read_excel("EstadoLibros.xlsx", sheet_name="Hoja1")
            valores2 = archivo2[archivo2["Nombre del Libro"] == nombre].values[0]
            valores2[5] += stock
            valores2[4] = valores2[5] - valores2[3]
            archivo2.loc[index] = valores2
            archivo2.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
            self.mostrarTabla(master)
            messagebox.showinfo(
                message="El libro se ha actualizado correctamente",
                title="LIBRO ACTUALIZADO",
            )
            self.nombreLibro.set("")
            self.cantidadLibro.set("")
            return
        # New book: append to the table/Libros.xlsx and to EstadoLibros.xlsx.
        datos = (n, nombre, fecha, stock)
        self.nombreLibro.set("")
        self.cantidadLibro.set("")
        self.tabla.ingresarDatos(datos)
        archivo = self.tabla.df
        archivo.to_excel("Libros.xlsx", sheet_name="Hoja1", index=False)
        archivo2 = pd.read_excel("EstadoLibros.xlsx", sheet_name="Hoja1")
        archivo2.loc[archivo2.shape[0]] = [n, nombre, "Disponible", 0, stock, stock]
        archivo2.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
class FrmRegistroEstudiante:
    """Screen where a student borrows ("Retiro") or returns ("Devolucion") a book.

    Reads/writes EstadoLibros.xlsx (per-book counters) and
    HistorialLibros.xlsx (loan history); also shows the reader of the month
    and the most requested book, both computed as modes of the history.
    """

    def __init__(self, master, regresar):
        self.frm = Frame(master)
        self.nombre = StringVar()
        self.apellido = StringVar()
        self.lectorDelMes = StringVar()
        self.libroMasSolicitado = StringVar()
        self.cbxOperacion = None
        self.cbxLibro = None
        self.tabla = None
        # Remembered so `borrar` can undo exactly one previous operation.
        self.ultimaOperacion = ""
        self.ultimoLibro = ""
        self.hallarDatos()
        self.agregarComponentes(master, regresar)

    def hallarDatos(self):
        """Recompute 'reader of the month' and 'most requested book' as the
        modes of the loan history sheet."""
        excel = pd.read_excel("HistorialLibros.xlsx", sheet_name="Hoja1")
        nombres = excel["Nombre"]
        apellidos = excel["Apellido"]
        nombreCompleto = []
        for i in range(len(nombres)):
            nombreCompleto.append(nombres[i] + " " + apellidos[i])
        self.lectorDelMes.set(moda(nombreCompleto))
        libros = excel["Nombre del Libro"]
        self.libroMasSolicitado.set(moda(list(libros)))

    def agregarComponentes(self, master, regresar):
        """Build the header, inputs, comboboxes, buttons, the two read-only
        statistics fields and the state table."""
        hoy = datetime.today().strftime("%d/%m/%y")
        Label(
            text="REGISTRO DEL ESTUDIANTE",
            font=("Arial", 24, "bold"),
            bg="#DF7401",
            fg="white",
            width="500",
            height="2",
        ).pack()
        Label(
            text=hoy,
            font=("Arial", 12),
            bg="#F5DA81",
            fg="white",
            width="25",
            height="1",
        ).pack()
        Label(text="Nombre", font=("Arial", 12, "bold")).place(x=150, y=150)
        Entry(
            textvariable=self.nombre,
            width="20",
            font=("Arial", 12),
            highlightthickness=2,
            highlightbackground="#000000",
            highlightcolor="#000000",
        ).place(x=250, y=150)
        Label(text="Apellido", font=("Arial", 12, "bold")).place(x=150, y=200)
        Entry(
            textvariable=self.apellido,
            width="20",
            font=("Arial", 12),
            highlightthickness=2,
            highlightbackground="#000000",
            highlightcolor="#000000",
        ).place(x=250, y=200)
        Label(text="Operacion", font=("Arial", 12, "bold")).place(x=520, y=150)
        self.cbxOperacion = ttk.Combobox(
            state="readonly",
            values=["Retiro", "Devolucion"],
            width=15,
            font=("Arial", 12),
        )
        self.cbxOperacion.place(x=630, y=150)
        Label(text="Libro", font=("Arial", 12, "bold")).place(x=520, y=200)
        self.cbxLibro = ttk.Combobox(values=["a"], width=20, font=("Arial", 12))
        self.cbxLibro.place(x=630, y=200)
        Button(
            text="Borrar",
            font=("Arial", 12),
            width="20",
            bg="#F7BE81",
            height="2",
            command=lambda: self.borrar(master),
        ).place(x=150, y=260)
        Button(
            text="Aceptar",
            font=("Arial", 12),
            width="20",
            bg="#F7BE81",
            height="2",
            command=lambda: self.aceptar(master),
        ).place(x=400, y=260)
        Button(
            text="Regresar",
            font=("Arial", 12),
            width="20",
            bg="#F7BE81",
            height="2",
            command=regresar,
        ).place(x=650, y=260)
        Label(text="Lector del mes", font=("Arial", 12, "bold")).place(x=50, y=350)
        Entry(
            textvariable=self.lectorDelMes,
            width="25",
            font=("Arial", 12),
            highlightthickness=2,
            highlightbackground="#000000",
            highlightcolor="#000000",
            state="readonly",
        ).place(x=180, y=350)
        Label(text="Libro mas solicitado", font=("Arial", 12, "bold")).place(
            x=450, y=350
        )
        Entry(
            textvariable=self.libroMasSolicitado,
            width="30",
            font=("Arial", 12),
            highlightthickness=2,
            highlightbackground="#000000",
            highlightcolor="#000000",
            state="readonly",
        ).place(x=620, y=350)
        self.mostrarTabla(master)
        self.cbxOperacion.current(0)
        # Offer only the books that exist in the state sheet.
        self.cbxLibro.configure(values=list(self.tabla.df["Nombre del Libro"]))
        self.cbxLibro.current(0)

    def mostrarTabla(self, master):
        """Show the per-book state sheet (EstadoLibros.xlsx) as a table."""
        archivo = pd.read_excel("EstadoLibros.xlsx", sheet_name="Hoja1")
        anchos = [5, 40, 20, 10, 10, 10]
        fechas = []
        self.tabla = Tabla(master, archivo, anchos, fechas, "#F5DA81", 50, 400)

    def borrar(self, master):
        """Undo the single most recent Retiro/Devolucion recorded by aceptar()."""
        if len(self.ultimaOperacion) == 0:
            messagebox.showerror(title='ERROR', message='No hay registros anteriores para borrar')
            return
        excel = self.tabla.df
        index = self.tabla.df.index[self.tabla.df["Nombre del Libro"] == self.ultimoLibro].tolist()[0]
        valores = self.tabla.df[self.tabla.df["Nombre del Libro"] == self.ultimoLibro].values[0]
        if self.ultimaOperacion == "Retiro":
            # Undo a loan: return the copy and drop the last history row.
            valores[4] += 1
            valores[3] -= 1
            if valores[4] > 0:
                valores[2] = 'Disponible'
            historial = pd.read_excel("HistorialLibros.xlsx", sheet_name="Hoja1")
            historial = historial[:-1]
            historial.to_excel("HistorialLibros.xlsx", sheet_name="Hoja1", index=False)
        else:
            # Undo a return: the copy is considered borrowed again.
            valores[3] += 1
            valores[4] -= 1
            if valores[4] == 0:
                valores[2] = 'No Disponible'
        excel.loc[index] = valores
        excel.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
        self.tabla.frm.destroy()
        self.mostrarTabla(master)
        self.hallarDatos()
        self.ultimaOperacion = ""
        self.ultimoLibro = ""

    def aceptar(self, master):
        """Validate the form and apply the selected operation to the book.

        EstadoLibros columns used: [2] availability label, [3] borrowed
        count, [4] available count, [5] total stock.
        """
        nombre = self.nombre.get()
        apellido = self.apellido.get()
        operacion = self.cbxOperacion.get()
        libro = self.cbxLibro.get()
        excel = self.tabla.df
        mensaje = ""
        if len(nombre) == 0:
            mensaje += "Debe ingresar el nombre del alumno\n"
        if len(apellido) == 0:
            # FIX: corrected the typo "apelldio" -> "apellido" in the message.
            mensaje += "Debe ingresar el apellido del alumno\n"
        if len(mensaje) > 0:
            messagebox.showerror(title="ERROR", message=mensaje)
            return
        mensaje = ""
        if nombreIncorrecto(nombre) is True:
            mensaje += 'El nombre del alumno es incorrecto\n'
        if nombreIncorrecto(apellido) is True:
            mensaje += 'El apellido del alumno es incorrecto\n'
        if len(mensaje) > 0:
            messagebox.showerror(title='ERROR', message=mensaje)
            return
        if len(self.tabla.df[self.tabla.df["Nombre del Libro"] == libro]) > 0:
            index = self.tabla.df.index[
                self.tabla.df["Nombre del Libro"] == libro
            ].tolist()[0]
            valores = self.tabla.df[self.tabla.df["Nombre del Libro"] == libro].values[
                0
            ]
            if operacion == "Retiro":
                if valores[4] > 0:
                    valores[3] += 1
                    valores[4] -= 1
                    if valores[4] == 0:
                        valores[2] = 'No Disponible'
                    # Append the loan to the history with a 7-day due date.
                    historial = pd.read_excel(
                        "HistorialLibros.xlsx", sheet_name="Hoja1"
                    )
                    n = len(list(historial.to_records(index=False))) + 1
                    codigo = generarCodigo(libro)
                    hoy = datetime.today()
                    entrega = timedelta(7)
                    datos = [
                        n,
                        nombre,
                        apellido,
                        libro,
                        codigo,
                        hoy.strftime("%d/%m/%y"),
                        datetime.date(hoy + entrega).strftime("%d/%m/%y"),
                    ]
                    historial.loc[historial.shape[0]] = datos
                    historial.to_excel(
                        "HistorialLibros.xlsx", sheet_name="Hoja1", index=False
                    )
                    self.nombre.set("")
                    self.apellido.set("")
                    messagebox.showinfo(
                        title="RETIRO EXITOSO",
                        message="El libro ha sido retirado satisfactoriamente",
                    )
                else:
                    messagebox.showerror(
                        title="ERROR", message="No quedan mas libros disponibles"
                    )
            else:
                if valores[4] < valores[5]:
                    valores[4] += 1
                    valores[3] -= 1
                    if valores[4] > 0:
                        valores[2] = 'Disponible'
                    self.nombre.set("")
                    self.apellido.set("")
                    messagebox.showinfo(
                        title="DEVOLUCION EXITOSA",
                        message="El libro ha sido devuelto satisfactoriamente",
                    )
                else:
                    messagebox.showerror(
                        title="ERROR", message="No existen devoluciones pendientes"
                    )
            self.ultimaOperacion = operacion
            self.ultimoLibro = libro
            excel.loc[index] = valores
            excel.to_excel("EstadoLibros.xlsx", sheet_name="Hoja1", index=False)
            self.tabla.frm.destroy()
            self.mostrarTabla(master)
            self.hallarDatos()
        else:
            messagebox.showerror(
                title="ERROR", message="El libro que estas solicitando no existe"
            )
class FrmRetirosDevoluciones:
    """Screen that lists the loan/return history of a chosen book."""

    def __init__(self, master, regresar):
        self.cbxLibro = None
        self.tabla = None
        self.agregarComponentes(master, regresar)

    def agregarComponentes(self, master, regresar):
        """Build the search bar and buttons; the combobox is filled with the
        book names read from Libros.xlsx."""
        Label(text="Libro", font=("Arial", 12, "bold")).place(x=50, y=40)
        self.cbxLibro = ttk.Combobox(values=["a"], width=30, font=("Arial", 12))
        self.cbxLibro.place(x=150, y=40)
        Button(
            text="Buscar",
            font=("Arial", 12),
            width="20",
            bg="#6C3483",
            height="2",
            command=lambda: self.actualizarTabla(master),
        ).place(x=500, y=20)
        Button(
            text="Regresar",
            font=("Arial", 12),
            width="20",
            bg="#6C3483",
            height="2",
            command=regresar,
        ).place(x=750, y=20)
        excel = pd.read_excel("Libros.xlsx", sheet_name="Hoja1")
        self.cbxLibro.configure(values=list(excel["Nombre del Libro"]))

    def actualizarTabla(self, master):
        """Replace the current table with the history rows of the selected
        book, validating that a book was chosen and has records."""
        # Idiomatic identity check (was: `self.tabla != None`).
        if self.tabla is not None:
            self.tabla.frm.destroy()
        libro = self.cbxLibro.get()
        if len(libro) == 0:
            messagebox.showerror(title='ERROR', message='Debe ingresar el nombre del libro que desea consultar')
            return
        excel = pd.read_excel("HistorialLibros.xlsx", sheet_name="Hoja1")
        if len(excel[excel["Nombre del Libro"] == libro]) > 0:
            filtrado = excel[excel["Nombre del Libro"] == libro]
            anchos = [5, 15, 15, len(libro), 10, 13, 13]
            fechas = []
            self.tabla = Tabla(
                master, filtrado, anchos, fechas, "#A569BD", 43 - len(libro), 100
            )
        else:
            messagebox.showerror(title='ERROR', message='No existen registros del libro ingresado')
class BibliotecaEscolar:
    """Main application window: centers a fixed 1000x600 Tk window and routes
    between the three sub-screens (register, requests, history)."""

    def __init__(self):
        self.root = tk.Tk()
        self.root.title("Biblioteca Escolar")
        # Center a fixed-size 1000x600 window on the screen.
        screen_width = self.root.winfo_screenwidth()
        screen_height = self.root.winfo_screenheight()
        w = 1000
        h = 600
        x = (screen_width/2) - (500)
        y = (screen_height/2) - (300)
        self.root.geometry('%dx%d+%d+%d' % (w, h, x, y))
        self.root.resizable(False, False)
        self.agregarComponentes()
        self.formulario = None  # currently displayed sub-screen
        self.root.mainloop()

    def regresar(self):
        """Return to the main menu: drop every widget and rebuild it."""
        for widget in self.root.winfo_children():
            widget.destroy()
        self.agregarComponentes()

    def limpiarFormulario(self, frm):
        """Destroy all child widgets of *frm* to make room for a sub-screen."""
        for widget in frm.winfo_children():
            widget.destroy()

    def agregarComponentes(self):
        """Build the main-menu header and the four navigation buttons."""
        hoy = datetime.today().strftime("%d-%m-%y")
        Label(
            text="BIBLIOTECA ESCOLAR",
            font=("Arial", 24, "bold"),
            bg="#27AE60",
            fg="white",
            width="500",
            height="2",
        ).pack()
        Label(
            text=hoy,
            font=("Arial", 12),
            bg="#82E0AA",
            fg="black",
            width="25",
            height="1",
        ).pack()
        Button(
            text="Registrar Libro",
            font=("Arial", 16),
            width="20",
            bg="#315E7A",
            height="4",
            fg="white",
            command=self.abrirFrmRegistrar,
        ).place(x=150, y=230)
        Button(
            text="Solicitudes Libro",
            font=("Arial", 16),
            width="20",
            bg="#DF7401",
            height="4",
            fg="white",
            command=self.abrirFrmSolicitud,
        ).place(x=600, y=230)
        Button(
            text="Salir del programa",
            font=("Arial", 16),
            width="20",
            bg="#A93226",
            height="4",
            fg="white",
            command=self.cerrarPrograma,
        ).place(x=600, y=400)
        Button(
            text="Retiros y devoluciones",
            font=("Arial", 16),
            width="20",
            bg="#5B2C6F",
            height="4",
            fg="white",
            command=self.abrirFrmRetirosDevoluciones,
        ).place(x=150, y=400)

    def abrirFrmRegistrar(self):
        """Switch to the book-registration screen."""
        self.limpiarFormulario(self.root)
        self.formulario = FrmIngresoDeLibros(self.root, self.regresar)

    def abrirFrmSolicitud(self):
        """Switch to the student borrow/return screen."""
        self.limpiarFormulario(self.root)
        self.formulario = FrmRegistroEstudiante(self.root, self.regresar)

    def abrirFrmRetirosDevoluciones(self):
        """Switch to the per-book history screen."""
        self.limpiarFormulario(self.root)
        self.formulario = FrmRetirosDevoluciones(self.root, self.regresar)

    def cerrarPrograma(self):
        """Destroy the root window, ending mainloop()."""
        self.root.destroy()
# Launch the application only when executed as a script, so the module can be
# imported (e.g. for testing) without opening the GUI and blocking in mainloop.
if __name__ == "__main__":
    a = BibliotecaEscolar()
| Moisesmp75/TkinterForms | Trabajo2/Biblioteca.py | Biblioteca.py | py | 23,516 | python | es | code | 0 | github-code | 6 | [
{
"api_name": "tkinter.ttk.Frame",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 44,
"usage_type": "name"
},
{
"api_name": "tkinter.Entry",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "tkinter.Entry",
"line... |
15362206849 | from generator import Generator
from discriminator import Discriminator
from speaker_encoder import SPEncoder
import torch
import torch.nn.functional as F
import os
from os.path import join, basename, exists
import time
import datetime
import numpy as np
from tqdm import tqdm
import numpy as np
import copy
class Solver(object):
    """Training driver for the voice-conversion GAN.

    Owns the generator, discriminator and speaker encoder, their Adam
    optimizers, checkpoint save/restore, optional EMA shadow copies, and the
    least-squares-GAN training loop in ``train``.
    """

    def __init__(self, train_loader, config):
        """Initialize configurations."""
        self.train_loader = train_loader
        self.sampling_rate = config.sampling_rate
        # Class names as strings; resolved via eval() in build_model().
        self.D_name = config.discriminator
        self.SPE_name = config.spenc
        self.G_name = config.generator
        self.g_hidden_size = config.g_hidden_size
        self.num_speakers = config.num_speakers
        self.spk_emb_dim = config.spk_emb_dim
        # Loss weights: cycle-reconstruction, identity-mapping, adversarial.
        self.lambda_rec = config.lambda_rec
        self.lambda_id = config.lambda_id
        self.lambda_adv = config.lambda_adv
        self.batch_size = config.batch_size
        self.num_iters = config.num_iters
        self.g_lr = config.g_lr
        self.d_lr = config.d_lr
        self.beta1 = config.beta1
        self.beta2 = config.beta2
        self.resume_iters = config.resume_iters
        self.use_ema = config.use_ema
        self.auto_resume = config.auto_resume
        self.kernel = config.kernel
        self.num_heads = config.num_heads
        self.num_res_blocks = config.num_res_blocks
        self.use_tensorboard = config.use_tensorboard
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.log_dir = config.log_dir
        self.model_save_dir = config.model_save_dir
        self.log_step = config.log_step
        self.sample_step = config.sample_step
        self.model_save_step = config.model_save_step
        self.build_model()
        if self.use_tensorboard:
            self.build_tensorboard()

    def build_model(self):
        """Create a generator and a discriminator.

        Instantiates the three networks from their configured class names,
        builds the two optimizers (G and speaker encoder share one), restores
        a checkpoint if requested, and sets up EMA copies when enabled.
        """
        # NOTE(review): eval() on config-provided class names — config must
        # be trusted; names must be importable in this module's namespace.
        self.generator = eval(self.G_name)(num_speakers=self.num_speakers,
                        kernel = self.kernel,
                        num_heads = self.num_heads,
                        num_res_blocks = self.num_res_blocks,
                        spk_emb_dim = self.spk_emb_dim,
                        )
        self.discriminator = eval(self.D_name)(num_speakers=self.num_speakers)
        self.sp_enc = eval(self.SPE_name)(num_speakers = self.num_speakers, spk_emb_dim = self.spk_emb_dim)
        self.sp_enc.to(self.device)
        self.generator.to(self.device)
        self.discriminator.to(self.device)
        # Generator and speaker encoder are updated by the same optimizer.
        g_params = list(self.generator.parameters())
        g_params += list(self.sp_enc.parameters())
        d_params = list(self.discriminator.parameters())
        self.g_optimizer = torch.optim.Adam(g_params, self.g_lr, [self.beta1, self.beta2])
        self.d_optimizer = torch.optim.Adam(d_params, self.d_lr, [self.beta1, self.beta2])
        # restore model
        if not self.auto_resume:
            # NOTE(review): self.resume_ft is never set in __init__, so this
            # branch raises AttributeError whenever resume_iters is truthy —
            # confirm whether a config.resume_ft field was intended.
            if self.resume_iters and not self.resume_ft:
                print("resuming step %d ..."% self.resume_iters, flush=True)
                self.restore_model(self.resume_iters)
        else:
            # Auto-resume: pick the checkpoint with the highest step number.
            ckpt_files = [ int(x.split('-')[0]) for x in os.listdir(self.model_save_dir)]
            last_step = sorted(ckpt_files, reverse = True)[0]
            print("auto resuming step %d ..."% last_step, flush=True)
            self.restore_model(last_step)
            self.resume_iters = last_step
        if self.use_ema:
            # EMA shadow copies start as exact clones of the live networks.
            self.generator_ema = copy.deepcopy(self.generator)
            self.sp_enc_ema = copy.deepcopy(self.sp_enc)
        self.print_network(self.generator, 'Generator')
        self.print_network(self.discriminator, 'Discriminator')
        self.print_network(self.sp_enc, 'SpeakerEncoder')
        if self.use_ema:
            self.generator_ema.to(self.device)
            self.sp_enc_ema.to(self.device)

    def print_network(self, model, name):
        """Print out the network information."""
        num_params = 0
        for p in model.parameters():
            num_params += p.numel()
        print(model, flush=True)
        print(name,flush=True)
        print("The number of parameters: {}".format(num_params), flush=True)

    def moving_average(self, model, model_test, beta = 0.999):
        """Exponential moving average: model_test <- lerp(model, model_test, beta)."""
        for param, param_test in zip(model.parameters(), model_test.parameters()):
            param_test.data = torch.lerp(param.data, param_test.data, beta)

    def restore_model(self, resume_iters, resume_ft = False):
        """Restore the trained generator and discriminator.

        Loads G, D and the speaker encoder at step *resume_iters*, plus the
        optimizer states when their checkpoint files exist.
        """
        print('Loading the trained models from step {}...'.format(resume_iters), flush=True)
        g_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(resume_iters))
        d_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(resume_iters))
        sp_path = os.path.join(self.model_save_dir, '{}-sp.ckpt'.format(resume_iters))
        g_opt_path = os.path.join(self.model_save_dir, '{}-g_opt.ckpt'.format(resume_iters))
        d_opt_path = os.path.join(self.model_save_dir, '{}-d_opt.ckpt'.format(resume_iters))
        self.generator.load_state_dict(torch.load(g_path, map_location=lambda storage, loc: storage))
        self.discriminator.load_state_dict(torch.load(d_path, map_location=lambda storage, loc: storage))
        self.sp_enc.load_state_dict(torch.load(sp_path, map_location=lambda storage, loc: storage))
        print("loading optimizer",flush=True)
        if exists(g_opt_path):
            self.g_optimizer.load_state_dict(torch.load(g_opt_path, map_location = lambda storage, loc: storage))
        if exists(d_opt_path):
            self.d_optimizer.load_state_dict(torch.load(d_opt_path, map_location = lambda storage, loc: storage))

    def build_tensorboard(self):
        """Build a tensorboard logger."""
        from logger import Logger
        self.logger = Logger(self.log_dir)

    def update_lr(self, g_lr, d_lr):
        """Decay learning rates of the generator and discriminator."""
        for param_group in self.g_optimizer.param_groups:
            param_group['lr'] = g_lr
        for param_group in self.d_optimizer.param_groups:
            param_group['lr'] = d_lr

    def reset_grad(self):
        """Reset the gradient buffers."""
        self.g_optimizer.zero_grad()
        self.d_optimizer.zero_grad()

    def label2onehot(self, labels, dim):
        """Convert label indices to one-hot vectors."""
        batch_size = labels.size(0)
        out = torch.zeros(batch_size, dim)
        out[np.arange(batch_size), labels.long()] = 1
        return out

    def sample_spk_c(self, size):
        """Sample *size* random speaker ids and their one-hot encodings.

        NOTE(review): to_categorical is not imported in this file — this
        method would raise NameError if called; confirm it is dead code.
        """
        spk_c = np.random.randint(0, self.num_speakers, size=size)
        spk_c_cat = to_categorical(spk_c, self.num_speakers)
        return torch.LongTensor(spk_c), torch.FloatTensor(spk_c_cat)

    def classification_loss(self, logit, target):
        """Compute softmax cross entropy loss."""
        return F.cross_entropy(logit, target)

    def load_wav(self, wavfile, sr=16000):
        """Load a mono wav and pad it to a multiple of 4 frames.

        NOTE(review): librosa / wav_padding are not imported here — this
        method would raise NameError if called; confirm it is dead code.
        """
        wav, _ = librosa.load(wavfile, sr=sr, mono=True)
        return wav_padding(wav, sr=16000, frame_period=5, multiple = 4)

    def load_mel(self, melfile):
        """Load a precomputed mel spectrogram from a .npy file."""
        tmp_mel = np.load(melfile)
        return tmp_mel

    def train(self):
        """Run the LSGAN training loop for num_iters steps.

        Per step: one discriminator update (real vs. converted fake), then
        one generator/speaker-encoder update with adversarial, cycle and
        identity losses; optional EMA update, periodic logging and
        checkpointing.
        """
        # Set data loader.
        train_loader = self.train_loader
        data_iter = iter(train_loader)
        g_lr = self.g_lr
        d_lr = self.d_lr
        start_iters = 0
        if self.resume_iters:
            start_iters = self.resume_iters
        print('Start training...', flush=True)
        start_time = time.time()
        for i in range(start_iters, self.num_iters):
            # Restart the loader when it is exhausted (end of an epoch).
            # NOTE(review): the bare except also hides unrelated errors.
            try:
                mc_src, spk_label_org, spk_c_org, mc_trg, spk_label_trg, spk_c_trg = next(data_iter)
            except:
                data_iter = iter(train_loader)
                mc_src, spk_label_org, spk_c_org, mc_trg, spk_label_trg, spk_c_trg = next(data_iter)
            # Add a channel dimension for the conv networks.
            mc_src.unsqueeze_(1)
            mc_trg.unsqueeze_(1)
            mc_src = mc_src.to(self.device)
            mc_trg = mc_trg.to(self.device)
            spk_label_org = spk_label_org.to(self.device)
            spk_c_org = spk_c_org.to(self.device)
            spk_label_trg = spk_label_trg.to(self.device)
            spk_c_trg = spk_c_trg.to(self.device)
            # ---------- Discriminator update (LSGAN: real -> 1, fake -> 0) ----------
            # The loader's speaker codes are replaced by learned embeddings.
            spk_c_trg = self.sp_enc(mc_trg, spk_label_trg)
            spk_c_org = self.sp_enc(mc_src, spk_label_org)
            d_out_src = self.discriminator(mc_src, spk_label_trg, spk_label_org)
            d_loss_real = torch.mean( (1.0 - d_out_src)**2 )
            mc_fake = self.generator(mc_src, spk_c_org, spk_c_trg)
            d_out_fake = self.discriminator(mc_fake.detach(), spk_label_org, spk_label_trg)
            d_loss_fake = torch.mean(d_out_fake ** 2)
            # Backward and optimize.
            d_loss = d_loss_real + d_loss_fake
            self.reset_grad()
            d_loss.backward()
            self.d_optimizer.step()
            # Logging.
            loss = {}
            loss['D/loss_real'] = d_loss_real.item()
            loss['D/loss_fake'] = d_loss_fake.item()
            loss['D/loss'] = d_loss.item()
            # ---------- Generator + speaker-encoder update ----------
            # Re-encode speakers so gradients flow into the encoder.
            spk_c_trg = self.sp_enc(mc_trg, spk_label_trg)
            spk_c_org = self.sp_enc(mc_src, spk_label_org)
            mc_fake = self.generator(mc_src, spk_c_org, spk_c_trg)
            g_out_src = self.discriminator(mc_fake, spk_label_org, spk_label_trg)
            g_loss_fake = torch.mean((1.0 - g_out_src)**2)
            # Cycle consistency: src -> trg -> src should reconstruct the input.
            mc_reconst = self.generator(mc_fake, spk_c_trg, spk_c_org)
            g_loss_rec = torch.mean(torch.abs(mc_src - mc_reconst))
            # Identity mapping: converting src to its own speaker is a no-op.
            mc_fake_id = self.generator(mc_src, spk_c_org, spk_c_org)
            g_loss_id = torch.mean(torch.abs(mc_src - mc_fake_id))
            # Backward and optimize.
            g_loss = self.lambda_adv * g_loss_fake \
                     + self.lambda_rec * g_loss_rec \
                     + self.lambda_id * g_loss_id
            self.reset_grad()
            g_loss.backward()
            self.g_optimizer.step()
            # Logging.
            loss['G/loss_fake'] = g_loss_fake.item()
            loss['G/loss_rec'] = g_loss_rec.item()
            loss['G/loss_id'] = g_loss_id.item()
            if self.use_ema:
                self.moving_average(self.generator, self.generator_ema)
                self.moving_average(self.sp_enc, self.sp_enc_ema)
            if (i+1) % self.log_step == 0:
                et = time.time() - start_time
                et = str(datetime.timedelta(seconds=et))[:-7]
                log = "Elapsed [{}], Iteration [{}/{}]".format(et, i+1, self.num_iters)
                for tag, value in loss.items():
                    log += ", {}: {:.4f}".format(tag, value)
                print(log, flush=True)
                if self.use_tensorboard:
                    for tag, value in loss.items():
                        self.logger.scalar_summary(tag, value, i+1)
            if (i+1) % self.model_save_step == 0:
                # Periodic checkpoint of all networks (and EMA copies) plus
                # both optimizer states.
                g_path = os.path.join(self.model_save_dir, '{}-G.ckpt'.format(i+1))
                g_path_ema = os.path.join(self.model_save_dir, '{}-G.ckpt.ema'.format(i+1))
                d_path = os.path.join(self.model_save_dir, '{}-D.ckpt'.format(i+1))
                sp_path = os.path.join(self.model_save_dir, '{}-sp.ckpt'.format(i+1))
                sp_path_ema = os.path.join(self.model_save_dir, '{}-sp.ckpt.ema'.format(i+1))
                g_opt_path = os.path.join(self.model_save_dir, '{}-g_opt.ckpt'.format(i+1))
                d_opt_path = os.path.join(self.model_save_dir, '{}-d_opt.ckpt'.format(i+1))
                torch.save(self.generator.state_dict(), g_path)
                if self.use_ema:
                    torch.save(self.generator_ema.state_dict(), g_path_ema)
                torch.save(self.discriminator.state_dict(), d_path)
                torch.save(self.sp_enc.state_dict(), sp_path)
                if self.use_ema:
                    torch.save(self.sp_enc_ema.state_dict(), sp_path_ema)
                torch.save(self.g_optimizer.state_dict(), g_opt_path)
                torch.save(self.d_optimizer.state_dict(), d_opt_path)
                print('Saved model checkpoints into {}...'.format(self.model_save_dir), flush=True)
| Mortyzhou-Shef-BIT/DYGANVC | solver.py | solver.py | py | 12,824 | python | en | code | null | github-code | 6 | [
{
"api_name": "torch.device",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "torch.optim.Adam... |
17536523132 | import pystan
import stan_utility
import matplotlib
import matplotlib.pyplot as plot
##################################################
##### Simulate data and write to file
##################################################
model = stan_utility.compile_model('gen_data.stan')
fit = model.sampling(seed=194838, algorithm='Fixed_param', iter=1, chains=1,n_jobs=1)
data = dict(N = 25, M = 3,
X=fit.extract()['X'][0,:,:], y = fit.extract()['y'][0,:])
pystan.stan_rdump(data, 'lin_regr.data.R')
##################################################
##### Fit model and check diagnostics
##################################################
# Read in data from Rdump file
data = pystan.read_rdump('lin_regr.data.R')
# Fit posterior with Stan
model = stan_utility.compile_model('lin_regr.stan')
fit = model.sampling(data=data, seed=194838,n_jobs=1)
# Check sampler diagnostics
print(fit)
sampler_params = fit.get_sampler_params(inc_warmup=False)
stan_utility.check_div(sampler_params)
stan_utility.check_treedepth(sampler_params)
stan_utility.check_energy(sampler_params)
# Check visual diagnostics
fit.plot()
plot.show()
##################################################
##### Visualize posterior
##################################################
light="#DCBCBC"
light_highlight="#C79999"
mid="#B97C7C"
mid_highlight="#A25050"
dark="#8F2727"
dark_highlight="#7C0000"
# Plot parameter posteriors
params = fit.extract()
f, axarr = plot.subplots(2, 3)
for a in axarr[0,:]:
a.xaxis.set_ticks_position('bottom')
a.yaxis.set_ticks_position('none')
for a in axarr[1,:]:
a.xaxis.set_ticks_position('bottom')
a.yaxis.set_ticks_position('none')
axarr[0, 0].set_title("beta_1")
axarr[0, 0].hist(params['beta'][:,0], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 0].axvline(x=5, linewidth=2, color=light)
axarr[0, 1].set_title("beta_2")
axarr[0, 1].hist(params['beta'][:,1], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 1].axvline(x=-3, linewidth=2, color=light)
axarr[0, 2].set_title("beta_3")
axarr[0, 2].hist(params['beta'][:,2], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 2].axvline(x=2, linewidth=2, color=light)
axarr[1, 0].set_title("alpha")
axarr[1, 0].hist(params['alpha'], bins = 25, color = dark, ec = dark_highlight)
axarr[1, 0].axvline(x=10, linewidth=2, color=light)
axarr[1, 1].set_title("sigma")
axarr[1, 1].hist(params['sigma'], bins = 25, color = dark, ec = dark_highlight)
axarr[1, 1].axvline(x=1, linewidth=2, color=light)
plot.show()
# Perform a posterior predictive check by plotting
# posterior predictive distributions against data
f, axarr = plot.subplots(2, 2)
for a in axarr[0,:]:
a.xaxis.set_ticks_position('bottom')
a.yaxis.set_ticks_position('none')
for a in axarr[1,:]:
a.xaxis.set_ticks_position('bottom')
a.yaxis.set_ticks_position('none')
axarr[0, 0].set_title("y_1")
axarr[0, 0].hist(params['y_ppc'][:,0], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 0].axvline(x=data['y'][0], linewidth=2, color=light)
axarr[0, 1].set_title("y_5")
axarr[0, 1].hist(params['y_ppc'][:,4], bins = 25, color = dark, ec = dark_highlight)
axarr[0, 1].axvline(x=data['y'][4], linewidth=2, color=light)
axarr[1, 0].set_title("y_10")
axarr[1, 0].hist(params['y_ppc'][:,9], bins = 25, color = dark, ec = dark_highlight)
axarr[1, 0].axvline(x=data['y'][9], linewidth=2, color=light)
axarr[1, 1].set_title("y_15")
axarr[1, 1].hist(params['y_ppc'][:,14], bins = 25, color = dark, ec = dark_highlight)
axarr[1, 1].axvline(x=data['y'][14], linewidth=2, color=light)
plot.show()
| MiyainNYC/Rose | stan/wimlds/1/lin_regr.py | lin_regr.py | py | 3,574 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "stan_utility.compile_model",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pystan.stan_rdump",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pystan.read_rdump",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "stan_u... |
16351053586 | from bs4 import BeautifulSoup as bs
import requests
from cardBeta import CardBeta
from cardWitj import CardWitj
urls = {
'beta':
'https://beta.gouv.fr/recrutement/developpement?',
'witj':
'https://www.welcometothejungle.com/fr/companies/communaute-beta-gouv/jobs'
}
divs = {'beta': 'fr-card__body', 'witj': 'sc-1peil1v-4'}
class Crawler:
    """Crawl one of the configured job-listing sites ('beta' or 'witj').

    `run()` fetches the site's listing page and dispatches the parsed soup
    to the matching `crawl_<type>` method by name.
    """
    def __init__(self, type):
        # type: site key into the module-level `urls`/`divs` dicts
        self.type = type
        # Per-technology occurrence counter; 'total' is the aggregate bucket.
        self.stack = { 'total' : 0 }
    def run(self):
        print('... start crawl ' + self.type)
        response = requests.get(urls[self.type])
        html = response.content
        soup = bs(html, "lxml")
        # Dispatch to crawl_beta / crawl_witj based on self.type.
        if hasattr(self, f'crawl_{self.type}'):
            getattr(self, f'crawl_{self.type}')(soup)
    def crawl_witj(self, soup):
        """Parse Welcome-to-the-Jungle cards and tally technologies per card."""
        myCards = []
        print(' title : ' + soup.title.get_text())
        cards = soup.find_all("div", class_=divs[self.type])
        print(' total found : {}'.format(len(cards)))
        for data in cards:
            myCard = CardWitj(data)
            myCards.append(myCard)
        print(' >>> loop myCards')
        for card in myCards:
            result = card.loadPage()
            for key in result:
                if key in self.stack :
                    self.stack[key] += 1
                    # NOTE(review): 'total' only increments for repeat keys,
                    # not on first sighting — confirm this is intended.
                    self.stack['total'] += 1
                else :
                    self.stack[key] = 1
        print(' resume stack ::::')
        for key in self.stack:
            print('   tech : {} : {}'.format(key, self.stack[key]))
    def crawl_beta(self, soup):
        """Parse beta.gouv.fr cards; loads each card page without tallying."""
        myCards = []
        print(' title : ' + soup.title.get_text())
        cards = soup.find_all("div", class_=divs[self.type])
        print(' total found : {}'.format(len(cards)))
        for data in cards:
            myCard = CardBeta(data)
            myCards.append(myCard)
        print(' >>> loop myCards')
        for card in myCards:
            card.loadPage()
| apimobi/witj-beta-replit | crawler.py | crawler.py | py | 1,963 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "requests.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "cardWitj.CardWitj",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "cardBeta.CardBeta",
... |
25070502975 | import pydoc
import logging
from typing import Generic, Type, Optional, Union, TypeVar, Any, NamedTuple
from django.db import models
from django.conf import settings
from django.forms.models import model_to_dict
from rest_framework import serializers
logger = logging.getLogger(__name__)
T = TypeVar("T")
class AbstractSerializer:
    """Mixin that forwards create/update to the next serializer in the MRO.

    Accepts extra ``**kwargs`` so subclasses can thread contextual data
    through without changing the base signature.
    """
    def create(self, validated_data, **kwargs):
        # Bug fix: the original dropped the return value, so callers
        # always received None instead of the created instance.
        return super().create(validated_data)
    def update(self, instance, validated_data, **kwargs):
        # Bug fix: likewise, return the updated instance.
        return super().update(instance, validated_data, **kwargs)
class Context(NamedTuple):
    """Request context: the acting user and an optional organization."""
    user: Any
    org: Any = None
    def __getitem__(self, item):
        # Dict-style access by field name, e.g. context["user"].
        # NOTE(review): this overrides tuple indexing, so context[0] no
        # longer works — confirm no caller relies on positional access.
        return getattr(self, item)
class Serializer(serializers.Serializer, AbstractSerializer):
    """Project base for plain (non-model) DRF serializers."""
    pass
class ModelSerializer(serializers.ModelSerializer, AbstractSerializer):
    """Project base ModelSerializer that persists via the model manager."""
    def create(self, data: dict, **kwargs):
        # Bypass DRF's default create and hit the Meta.model manager directly.
        return self.Meta.model.objects.create(**data)
    def update(self, instance, data: dict, **kwargs):
        for name, value in data.items():
            # "created_by" is treated as immutable after creation.
            if name != "created_by":
                setattr(instance, name, value)
        instance.save()
        return instance
"""
Custom serializer utilities functions
"""
def PaginatedResult(serializer_name: str, content_serializer: Type[Serializer]):
    """Dynamically build a Serializer class describing a paginated payload.

    The generated class (named *serializer_name*) exposes optional
    ``next``/``previous`` page URLs and a ``results`` list rendered with
    *content_serializer*.
    """
    return type(
        serializer_name,
        (Serializer,),
        dict(
            next=serializers.URLField(
                required=False, allow_blank=True, allow_null=True
            ),
            previous=serializers.URLField(
                required=False, allow_blank=True, allow_null=True
            ),
            results=content_serializer(many=True),
        ),
    )
class _SerializerDecoratorInitializer(Generic[T]):
    """Factory behind ``SerializerDecorator[SomeSerializer](...)``.

    Subscripting returns a Decorator class that wraps validation
    (``is_valid(raise_exception=True)``) and exposes the validated data,
    the instance, and a chainable ``save()``.
    """
    def __getitem__(self, serializer_type: Type[Serializer]):
        class Decorator:
            def __init__(self, instance=None, data: Union[str, dict] = None, **kwargs):
                self._instance = instance
                if data is None and instance is None:
                    # Nothing to validate or save.
                    self._serializer = None
                else:
                    # With an instance present, run a partial update.
                    self._serializer: serializer_type = (
                        serializer_type(data=data, **kwargs)
                        if instance is None
                        else serializer_type(
                            instance, data=data, **{**kwargs, "partial": True}
                        )
                    )
                    self._serializer.is_valid(raise_exception=True)
            @property
            def data(self) -> Optional[dict]:
                # Validated payload, or None when constructed empty.
                return (
                    self._serializer.validated_data
                    if self._serializer is not None
                    else None
                )
            @property
            def instance(self):
                return self._instance
            def save(self, **kwargs) -> "Decorator":
                # Persist (if there is a serializer) and return self for chaining.
                if self._serializer is not None:
                    self._instance = self._serializer.save(**kwargs)
                return self
        return Decorator
SerializerDecorator = _SerializerDecoratorInitializer()
def owned_model_serializer(serializer: Type[Serializer]):
    """Class decorator that injects ownership (user/org) context.

    The wrapped serializer resolves a ``Context`` from the ``context``
    kwarg, stamps ``created_by`` on create, and links the created object
    to the organization when supported.
    """
    class MetaSerializer(serializer):
        def __init__(self, *args, **kwargs):
            if "context" in kwargs:
                context = kwargs.get("context") or {}
                # Accept either a plain dict or a Context-like object.
                user = (
                    context.get("user") if isinstance(context, dict) else context.user
                )
                org = context.get("org") if isinstance(context, dict) else context.org
                if settings.MULTI_ORGANIZATIONS and org is None:
                    # Deferred import to avoid a circular dependency.
                    import purplship.server.orgs.models as orgs
                    org = orgs.Organization.objects.filter(
                        users__id=getattr(user, "id", None)
                    ).first()
                self.__context: Context = Context(user, org)
            else:
                # NOTE(review): the attribute is stored name-mangled
                # (_MetaSerializer__context), so getattr(self, "__context")
                # always yields None here — confirm intended fallback.
                self.__context: Context = getattr(self, "__context", None)
            kwargs.update({"context": self.__context})
            super().__init__(*args, **kwargs)
        def create(self, data: dict, **kwargs):
            # Stamp the acting user; explicit payload keys win via ** order?
            # (created_by listed first, so data can override it.)
            payload = {"created_by": self.__context.user, **data}
            try:
                instance = super().create(payload, context=self.__context)
                link_org(instance, self.__context)  # Link to organization if supported
            except Exception as e:
                logger.exception(e)
                raise e
            return instance
        def update(self, instance, data: dict, **kwargs):
            payload = {k: v for k, v in data.items()}
            return super().update(instance, payload, context=self.__context)
    # Preserve the wrapped serializer's public name.
    return type(serializer.__name__, (MetaSerializer,), {})
def link_org(entity: ModelSerializer, context: Context):
    """Attach *entity* to the context's organization when the model supports it.

    No-op unless the entity has an ``org`` relation, the context carries an
    org, and no link exists yet.
    """
    if hasattr(entity, "org") and context.org is not None and not entity.org.exists():
        # Create the through-model row linking the entity to the org.
        entity.link = entity.__class__.link.related.related_model.objects.create(
            org=context.org, item=entity
        )
        entity.save(
            update_fields=(["created_at"] if hasattr(entity, "created_at") else [])
        )
def save_many_to_many_data(
    name: str,
    serializer: ModelSerializer,
    parent: models.Model,
    payload: dict = None,
    **kwargs,
):
    """Sync the many-to-many collection *name* on *parent* from *payload*.

    - key absent from payload: no-op (returns None);
    - value is None: delete all existing related items (clear);
    - value is a list: update items carrying an "id", create the rest.
    Extra kwargs are forwarded to the item serializer.
    """
    if name not in payload:
        return None
    collection_data = payload.get(name)
    collection = getattr(parent, name)
    if collection_data is None:
        # Explicit None clears the collection. Bug fix: the original fell
        # through to `for data in None` and raised TypeError after deleting.
        for item in collection.all():
            item.delete()
        return None
    for data in collection_data:
        item_instance = (
            collection.filter(id=data.pop("id")).first() if "id" in data else None
        )
        if item_instance is None:
            item = SerializerDecorator[serializer](data=data, **kwargs).save().instance
        else:
            # Existing item: apply a partial update.
            item = (
                SerializerDecorator[serializer](
                    instance=item_instance, data=data, **{**kwargs, "partial": True}
                )
                .save()
                .instance
            )
        getattr(parent, name).add(item)
def save_one_to_one_data(
    name: str,
    serializer: ModelSerializer,
    parent: models.Model = None,
    payload: dict = None,
    **kwargs,
):
    """Sync the one-to-one related object *name* on *parent* from *payload*.

    - key absent: no-op (returns None);
    - value is None: delete the existing related object;
    - otherwise: create it if missing, else partial-update it.
    Returns the resulting related instance.
    """
    if name not in payload:
        return None
    data = payload.get(name)
    instance = getattr(parent, name, None)
    if data is None and instance is not None:
        # Explicit None removes the related object.
        # NOTE(review): `instance` still references the deleted row below,
        # so the update branch runs with data=None — confirm intended.
        instance.delete()
        setattr(parent, name, None)
    if instance is None:
        new_instance = (
            SerializerDecorator[serializer](data=data, **kwargs).save().instance
        )
        parent and setattr(parent, name, new_instance)
        return new_instance
    return (
        SerializerDecorator[serializer](
            instance=instance, data=data, partial=True, **kwargs
        )
        .save()
        .instance
    )
def allow_model_id(model_paths: []):
    """Serializer decorator accepting primary keys in place of nested dicts.

    *model_paths* is a list of ``(param, "dotted.model.Path")`` pairs; each
    string value found under ``data[param]`` is resolved via the model's
    manager and replaced by its field dict (minus "id").
    """
    def _decorator(serializer: Type[Serializer]):
        class ModelIdSerializer(serializer):
            def __init__(self, *args, **kwargs):
                for param, model_path in model_paths:
                    content = kwargs.get("data", {}).get(param)
                    # Normalize to a list so single values and lists share a path.
                    values = content if isinstance(content, list) else [content]
                    model = pydoc.locate(model_path)
                    if any([isinstance(val, str) for val in values]):
                        new_content = []
                        for value in values:
                            if isinstance(value, str) and (model is not None):
                                data = model_to_dict(model.objects.get(pk=value))
                                # Drop the pk so the nested serializer creates anew.
                                ("id" in data) and data.pop("id")
                                new_content.append(data)
                        kwargs.update(
                            data={
                                **kwargs["data"],
                                param: new_content
                                if isinstance(content, list)
                                else next(iter(new_content)),
                            }
                        )
                super().__init__(*args, **kwargs)
        # Preserve the wrapped serializer's public name.
        return type(serializer.__name__, (ModelIdSerializer,), {})
    return _decorator
def make_fields_optional(serializer: Type[ModelSerializer]):
    """Return a "Partial<Name>" variant with every model field optional.

    Existing ``extra_kwargs`` are kept; each model field gains
    ``required=False`` (useful for PATCH-style payloads).
    """
    _name = f"Partial{serializer.__name__}"
    class _Meta(serializer.Meta):
        extra_kwargs = {
            **getattr(serializer.Meta, "extra_kwargs", {}),
            **{
                field.name: {"required": False}
                for field in serializer.Meta.model._meta.fields
            },
        }
    return type(_name, (serializer,), dict(Meta=_Meta))
def exclude_id_field(serializer: Type[ModelSerializer]):
    """Return a variant of *serializer* whose Meta also excludes "id"."""
    class _Meta(serializer.Meta):
        exclude = [*getattr(serializer.Meta, "exclude", []), "id"]
    return type(serializer.__name__, (serializer,), dict(Meta=_Meta))
| danh91/purplship | server/modules/core/purplship/server/serializers/abstract.py | abstract.py | py | 8,956 | python | en | code | null | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.TypeVar",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "typing.NamedTuple",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"l... |
5769042811 | import hashlib
import json
import os
import pathlib
import shutil
import subprocess
from typing import Mapping, Any, List
class RunException(Exception):
    """Raised when running a test command or comparing its results fails."""
    pass
class ExecuteException(Exception):
    """Execution-error exception type (not raised anywhere in this chunk)."""
    pass
class style:
    # ANSI SGR text-attribute codes; pass to color() to build escape sequences.
    reset = 0
    bold = 1
    dim = 2
    italic = 3
    underline = 4
    blink = 5
    rblink = 6
    reversed = 7
    conceal = 8
    crossed = 9
class fg:
    # ANSI foreground color codes; pass to color() to build escape sequences.
    black = 30
    red = 31
    green = 32
    yellow = 33
    blue = 34
    magenta = 35
    cyan = 36
    gray = 37
    reset = 39
def color(value):
    """Return the ANSI escape sequence for the given SGR/color code."""
    return "\033[{}m".format(int(value))
def print_check():
    """Print a bold green check mark followed by a style reset."""
    prefix = color(fg.green) + color(style.bold)
    suffix = color(fg.reset) + color(style.reset)
    print("%s✓ %s" % (prefix, suffix))
def bname(base, cmd, filename):
    """Build a unique run name: base[-<file stem>]-<7-hex sha224 of cmd+filename>."""
    hashed = cmd + filename if filename else cmd
    digest = hashlib.sha224(hashed.encode()).hexdigest()[:7]
    if not filename:
        return "%s-%s" % (base, digest)
    stem, _ = os.path.splitext(os.path.basename(filename))
    return "%s-%s-%s" % (base, stem, digest)
def _compare_eq_dict(
left: Mapping[Any, Any], right: Mapping[Any, Any], verbose: int = 0
) -> List[str]:
explanation = [] # type: List[str]
set_left = set(left)
set_right = set(right)
common = set_left.intersection(set_right)
same = {k: left[k] for k in common if left[k] == right[k]}
if same and verbose < 2:
explanation += ["Omitting %s identical items" % len(same)]
elif same:
explanation += ["Common items:"]
explanation += pprint.pformat(same).splitlines()
diff = {k for k in common if left[k] != right[k]}
if diff:
explanation += ["Differing items:"]
for k in diff:
explanation += [repr({k: left[k]}) + " != " + repr({k: right[k]})]
extra_left = set_left - set_right
len_extra_left = len(extra_left)
if len_extra_left:
explanation.append(
"Left contains %d more item%s:"
% (len_extra_left, "" if len_extra_left == 1 else "s")
)
explanation.extend(
pprint.pformat({k: left[k] for k in extra_left}).splitlines()
)
extra_right = set_right - set_left
len_extra_right = len(extra_right)
if len_extra_right:
explanation.append(
"Right contains %d more item%s:"
% (len_extra_right, "" if len_extra_right == 1 else "s")
)
explanation.extend(
pprint.pformat({k: right[k] for k in extra_right}).splitlines()
)
return explanation
def fixdir(s):
    """Replace occurrences of the current working directory in *s* (bytes) with b"$DIR"."""
    cwd = os.getcwd().encode()
    return s.replace(cwd, b"$DIR")
def run(basename, cmd, out_dir, infile=None, extra_args=None):
    """
    Runs the `cmd` and collects stdout, stderr, exit code.
    The stdout, stderr and outfile are saved in the `out_dir` directory and
    all metadata is saved in a json file, whose path is returned from the
    function.
    The idea is to use this function to test the compiler by running it with
    an option to save the AST, ASR or LLVM IR or binary, and then ensure that
    the output does not change.
    Arguments:
    basename ... name of the run
    cmd ........ command to run, can use {infile} and {outfile}
    out_dir .... output directory to store output
    infile ..... optional input file. If present, it will check that it exists
                 and hash it.
    extra_args . extra arguments, not part of the hash
    Examples:
    >>> run("cat2", "cat tests/cat.txt > {outfile}", "output", "tests/cat.txt")
    >>> run("ls4", "ls --wrong-option", "output")
    """
    assert basename is not None and basename != ""
    pathlib.Path(out_dir).mkdir(parents=True, exist_ok=True)
    if infile and not os.path.exists(infile):
        raise RunException("The input file does not exist")
    outfile = os.path.join(out_dir, basename + "." + "out")
    cmd2 = cmd.format(infile=infile, outfile=outfile)
    if extra_args:
        cmd2 += " " + extra_args
    # shell=True: cmd may contain redirections (see examples above).
    r = subprocess.run(cmd2, shell=True,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE)
    # The command may legitimately produce no output file.
    if not os.path.exists(outfile):
        outfile = None
    # Save captured streams only when non-empty; cwd is normalized to $DIR
    # so results are machine-independent.
    if len(r.stdout):
        stdout_file = os.path.join(out_dir, basename + "." + "stdout")
        open(stdout_file, "wb").write(fixdir(r.stdout))
    else:
        stdout_file = None
    if len(r.stderr):
        stderr_file = os.path.join(out_dir, basename + "." + "stderr")
        open(stderr_file, "wb").write(fixdir(r.stderr))
    else:
        stderr_file = None
    # Hash every artifact so later runs can be compared cheaply.
    if infile:
        infile_hash = hashlib.sha224(open(infile, "rb").read()).hexdigest()
    else:
        infile_hash = None
    if outfile:
        outfile_hash = hashlib.sha224(open(outfile, "rb").read()).hexdigest()
        outfile = os.path.basename(outfile)
    else:
        outfile_hash = None
    if stdout_file:
        stdout_hash = hashlib.sha224(open(stdout_file, "rb").read()).hexdigest()
        stdout_file = os.path.basename(stdout_file)
    else:
        stdout_hash = None
    if stderr_file:
        stderr_hash = hashlib.sha224(open(stderr_file, "rb").read()).hexdigest()
        stderr_file = os.path.basename(stderr_file)
    else:
        stderr_hash = None
    data = {
        "basename": basename,
        "cmd": cmd,
        "infile": infile,
        "infile_hash": infile_hash,
        "outfile": outfile,
        "outfile_hash": outfile_hash,
        "stdout": stdout_file,
        "stdout_hash": stdout_hash,
        "stderr": stderr_file,
        "stderr_hash": stderr_hash,
        "returncode": r.returncode,
    }
    json_file = os.path.join(out_dir, basename + "." + "json")
    json.dump(data, open(json_file, "w"), indent=4)
    return json_file
def run_test(basename, cmd, infile=None, update_reference=False,
        extra_args=None):
    """
    Runs the test `cmd` and compare against reference results.
    The `cmd` is executed via `run` (passing in `basename` and `infile`) and
    the output is saved in the `output` directory. The generated json file is
    then compared against reference results and if it differs, the
    RunException is thrown.
    Arguments:
    basename ........... name of the run
    cmd ................ command to run, can use {infile} and {outfile}
    infile ............. optional input file. If present, it will check that
                         it exists and hash it.
    update_reference ... if True, it will copy the output into the reference
                         directory as reference results, overwriting old ones
    extra_args ......... Extra arguments to append to the command that are not
                         part of the hash
    Examples:
    >>> run_test("cat12", "cat {infile} > {outfile}", "cat.txt",
    ...     update_reference=True)
    >>> run_test("cat12", "cat {infile} > {outfile}", "cat.txt")
    """
    s = " * %-6s " % basename
    print(s, end="")
    # Make the run name unique per (cmd, infile) via a content hash.
    basename = bname(basename, cmd, infile)
    if infile:
        infile = os.path.join("tests", infile)
    jo = run(basename, cmd, os.path.join("tests", "output"), infile=infile,
            extra_args=extra_args)
    jr = os.path.join("tests", "reference", os.path.basename(jo))
    do = json.load(open(jo))
    if update_reference:
        # Overwrite the stored reference results with this run's output.
        shutil.copyfile(jo, jr)
        for f in ["outfile", "stdout", "stderr"]:
            if do[f]:
                f_o = os.path.join(os.path.dirname(jo), do[f])
                f_r = os.path.join(os.path.dirname(jr), do[f])
                shutil.copyfile(f_o, f_r)
        return
    if not os.path.exists(jr):
        raise RunException("The reference json file '%s' does not exist" % jr)
    dr = json.load(open(jr))
    if do != dr:
        # Explain the metadata mismatch, then diff each differing artifact.
        e = _compare_eq_dict(do, dr)
        print("The JSON metadata differs against reference results")
        print("Reference JSON:", jr)
        print("Output JSON:   ", jo)
        print("\n".join(e))
        if do["outfile_hash"] != dr["outfile_hash"]:
            if do["outfile_hash"] is not None and dr["outfile_hash"] is not None:
                fo = os.path.join("tests", "output", do["outfile"])
                fr = os.path.join("tests", "reference", dr["outfile"])
                if os.path.exists(fr):
                    print("Diff against: %s" % fr)
                    os.system("diff %s %s" % (fr, fo))
                else:
                    print("Reference file '%s' does not exist" % fr)
        if do["stdout_hash"] != dr["stdout_hash"]:
            if do["stdout_hash"] is not None and dr["stdout_hash"] is not None:
                fo = os.path.join("tests", "output", do["stdout"])
                fr = os.path.join("tests", "reference", dr["stdout"])
                if os.path.exists(fr):
                    print("Diff against: %s" % fr)
                    os.system("diff %s %s" % (fr, fo))
                else:
                    print("Reference file '%s' does not exist" % fr)
        if do["stderr_hash"] != dr["stderr_hash"]:
            if do["stderr_hash"] is not None and dr["stderr_hash"] is not None:
                fo = os.path.join("tests", "output", do["stderr"])
                fr = os.path.join("tests", "reference", dr["stderr"])
                if os.path.exists(fr):
                    print("Diff against: %s" % fr)
                    os.system("diff %s %s" % (fr, fo))
                else:
                    print("Reference file '%s' does not exist" % fr)
            elif do["stderr_hash"] is not None and dr["stderr_hash"] is None:
                # New stderr output with no stored reference: show it.
                fo = os.path.join("tests", "output", do["stderr"])
                print("No reference stderr output exists. Stderr:")
                os.system("cat %s" % fo)
        raise RunException("The reference result differs")
    print_check()
| Abdullahjavednesar/lpython | compiler_tester/tester.py | tester.py | py | 9,744 | python | en | code | null | github-code | 6 | [
{
"api_name": "hashlib.sha224",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 52,
"usage_type": "attribute"
},
{
"api_name": "os.path.splitext",
"... |
14956977226 | import argparse
import os
from scipy.interpolate import griddata
import numpy as np
from tqdm import tqdm
import cv2
import scipy.ndimage as sp
import matplotlib.pyplot as plt
from matplotlib import cm, patches
# Argument Parser
parser = argparse.ArgumentParser(description="Time-series Heatmap Generator")
parser.add_argument(
"--dataset_path",
type=str,
default="./data/dataset_08-12-2023_05-02-59",
help="Folder containing time-series data",
)
args = parser.parse_args()
# Data Load
dataset_path = args.dataset_path
data_file_path = os.path.join(dataset_path, "timeseries.txt")
data = np.loadtxt(data_file_path)
# Split Data
pos = data[:, :3]
force = data[:, -3:]
Frms = np.sqrt(np.sum(force**2, axis=1))
# Video Writer Setup
# fourcc = cv2.VideoWriter_fourcc(*"mp4v")
# out = cv2.VideoWriter("heatmap_video.mp4", fourcc, 20.0, (640, 480))
# Gaussian Smoothing
# Circle Data
point1 = [0.61346058, 0.07027999, 0.05241557] # magnet
radius1 = 0.01732 / 2
point2 = [0.60665408, 0.09511717, 0.05193599] # 3d print
radius2 = 0.005
pos_x = pos[Frms > 5, 1]
pos_y = pos[Frms > 5, 0]
pos_z = pos[Frms > 5, 2]
print("pos_y", pos_y.std())
print("pos_x", pos_x.std())
x_min, x_max = np.min(pos_x), np.max(pos_x)
y_min, y_max = np.min(pos_y), np.max(pos_y)
dim_x = 30
dim_y = 30
# Frms = Frms[pos[:, 2] < 0.055]
pos_palp = pos[pos[:, 2] < 0.06]
plt.axis("equal")
x = np.linspace(x_min, x_max, dim_x)
y = np.linspace(y_min, y_max, dim_y)
X, Y = np.meshgrid(x, y)
# Interpolate (x,y,z) points [mat] over a normal (x,y) grid [X,Y]
# Depending on your "error", you may be able to use other methods
Z = griddata((pos_x, pos_y), pos_z, (X, Y), method="nearest")
plt.pcolormesh(X, Y, Z)
# plt.scatter(pos_palp[:, 1], pos_palp[:, 0], marker="x")
# Add circles
circle1 = patches.Circle(
(point1[1], point1[0]),
radius1,
fill=False,
color="blue",
)
circle2 = patches.Circle(
(point2[1], point2[0]),
radius2,
fill=False,
color="green",
)
# plt.gca().add_patch(circle1)
# plt.gca().add_patch(circle2)
plt.title("Heatmap with smoothing")
plt.xlabel("Y (m)")
plt.ylabel("X (m)")
cbar = plt.colorbar()
cbar.set_label("Z (m)", rotation=270, labelpad=15)
plt.draw()
# Convert to OpenCV
fig = plt.gcf()
fig.canvas.draw()
img_arr = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
img_arr = img_arr.reshape(fig.canvas.get_width_height()[::-1] + (3,))
img_arr = cv2.cvtColor(img_arr, cv2.COLOR_RGB2BGR)
dataset_name = dataset_path.split("/")[-1]
cv2.imwrite(f"{dataset_path}/{dataset_name}_2d_heatmap.png", img_arr)
| raghavauppuluri13/robot-palpation | rpal/scripts/visualize_heatmap.py | visualize_heatmap.py | py | 2,568 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "numpy.loadtxt",
... |
26113397145 | __authors__ = ["T. Vincent"]
__license__ = "MIT"
__date__ = "08/09/2017"
import weakref
from silx.gui import qt
from silx.gui.icons import getQIcon
from .. import actions
class ViewpointToolButton(qt.QToolButton):
    """A toolbutton with a drop-down list of ways to reset the viewpoint.

    :param parent: See :class:`QToolButton`
    """
    def __init__(self, parent=None):
        super(ViewpointToolButton, self).__init__(parent)
        # Weak reference to the controlled Plot3DWidget (None when unset).
        self._plot3DRef = None
        menu = qt.QMenu(self)
        menu.addAction(actions.viewpoint.FrontViewpointAction(parent=self))
        menu.addAction(actions.viewpoint.BackViewpointAction(parent=self))
        menu.addAction(actions.viewpoint.TopViewpointAction(parent=self))
        menu.addAction(actions.viewpoint.BottomViewpointAction(parent=self))
        menu.addAction(actions.viewpoint.RightViewpointAction(parent=self))
        menu.addAction(actions.viewpoint.LeftViewpointAction(parent=self))
        menu.addAction(actions.viewpoint.SideViewpointAction(parent=self))
        self.setMenu(menu)
        self.setPopupMode(qt.QToolButton.InstantPopup)
        self.setIcon(getQIcon('cube'))
        self.setToolTip('Reset the viewpoint to a defined position')
    def setPlot3DWidget(self, widget):
        """Set the Plot3DWidget this toolbar is associated with

        :param ~silx.gui.plot3d.Plot3DWidget.Plot3DWidget widget:
            The widget to control
        """
        # Held weakly so the button does not keep the widget alive.
        self._plot3DRef = None if widget is None else weakref.ref(widget)
        for action in self.menu().actions():
            action.setPlot3DWidget(widget)
    def getPlot3DWidget(self):
        """Return the Plot3DWidget associated to this toolbar.

        If no widget is associated, it returns None.

        :rtype: ~silx.gui.plot3d.Plot3DWidget.Plot3DWidget or None
        """
        # Dereference the weakref (also None if the widget was garbage-collected).
        return None if self._plot3DRef is None else self._plot3DRef()
| silx-kit/silx | src/silx/gui/plot3d/tools/ViewpointTools.py | ViewpointTools.py | py | 1,903 | python | en | code | 106 | github-code | 6 | [
{
"api_name": "silx.gui.qt.QToolButton",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "silx.gui.qt",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "silx.gui.qt.QMenu",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "silx.gui.q... |
31108358568 | import tushare as ts
import pandas as pd
#当列太多时,显示不换行
pd.set_option('expand_frame_repr',False)
#显示所有的列
pd.set_option('display.max_columns', None)
'''
Created on 2020年12月24日
@author: My
'''
ts.set_token('b869861b624139897d87db589b6782ca0313e0e9378b2dd73a4baff5')
pro=ts.pro_api()
#data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,symbol,name,area,industry,list_date')
"""stock='300001.SZ'
df=pro.daily(ts_code=stock,
start_date='20091001',
end_date='20161214')
df.rename(columns={'trade_date':'date'},inplace=True)
print(df)
df.to_csv('./data/日行情_特锐德_tushare.csv',
encoding='gbk',
index=False)"""
df=pd.read_csv('./data/日行情_特锐德_tushare.csv',encoding='gbk')
df.sort_values(by=['date',],inplace=True)
df['pct_chg']=df['pct_chg']/100.0
df['pct_chg_2']=df['close'].pct_change()
print(df[abs(df['pct_chg_2']-df['pct_chg'])>0.0001])
del df['pct_chg_2']
df['factor']=(df['pct_chg']+1).cumprod()
#print(df)
initi_price=df.iloc[0]['close']/df['factor'].iloc[0]
#print(initi_price)
df['close_post']=initi_price*df['factor']
#print(df)
initi_price_pre=df.iloc[-1]['close']/df['factor'].iloc[-1]
df['close_pre']=initi_price_pre*df['factor']
#print(df)
#df.sort_values(by=['date'],inplace=True)
print(df)
| geekzhp/zhpLiangHua | tmp/tushareStudy.py | tushareStudy.py | py | 1,356 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.set_option",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tushare.set_token",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tushare.pro_api",... |
41969655941 | import cv2 as cv
src = cv.imread("./img_input/266679.png")  # read the image (BGR; None if missing)
# Create a window sized to the image and display it
cv.namedWindow("input image", cv.WINDOW_AUTOSIZE)
cv.imshow("input image", src)
cv.waitKey(0)  # block until any key is pressed
cv.destroyAllWindows()
print("hello")
{
"api_name": "cv2.imread",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "cv2.namedWindow",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.WINDOW_AUTOSIZE",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "cv2.imshow",
"li... |
3709328599 | import os
from cloudservice import add_file, add_dir, get_dir_subs, get_root_dir_id
from pathlib import Path
import pandas as pd
def test():
    """Smoke test: upload a sample .docx into directory 39 of project 36."""
    uploadfile(os.path.join('我文件夹', 'test1.docx'), dirid=39, projid=36)
    print()
def create_dir_test():
    """Smoke test: create remote subdirectory 'addsub' under dir 39, project 36."""
    add_dir('addsub', 39, 36)
def uploadfile(fpath, dirid, projid):
    """Register the local file *fpath* with the cloud service.

    Builds the file-metadata payload (name, size, extension, target
    directory) and posts it via cloudservice.add_file; returns its result.
    """
    # fpath = os.path.join(config.batch_file_upload_root, relative_fpath)
    fdir, fname = os.path.split(fpath)
    ftype = os.path.splitext(fname)[-1]
    fsize = os.path.getsize(fpath)
    fdata = {
        "name": fname,
        "remark": "",
        "keyWord": "",
        "abstract": "",
        "url": fpath,
        "fileSize": fsize,
        "fileType": ftype,
        "directoryId": dirid,
        "creatorId": 1,
        "uploaderId": 0,
        "newWords": "",
        "wordFrequency": "",
        "phrases": ""
    }
    r = add_file(fdata, projid)
    return r
def do_batch_upload(dpath: Path, projid, rootid):
    """Recursively upload the directory tree *dpath* under remote dir *rootid*.

    Sub-directories are created (or reused) remotely via get_dirid before
    recursing; directory names starting with '__' are skipped. Upload
    failures are reported and do not abort the walk (best-effort).
    """
    for thing in dpath.iterdir():
        # 是文件夹则递归 (recurse into directories)
        if thing.is_dir():
            # Bug fix: use Path.name instead of splitting on '\\', which
            # only worked on Windows paths.
            name = thing.name
            if name.startswith('__'):  # 双下划线跳过 (double underscore: skip)
                print('skip ' + str(thing))
                continue
            do_batch_upload(thing, projid, get_dirid(str(thing), rootid, projid))
        # 是文件则上传 (upload plain files)
        if thing.is_file():
            try:
                uploadfile(str(thing), rootid, projid)
                print('upload ' + str(thing))
            except Exception:
                # Best-effort: report and continue. str(thing) can itself
                # fail on undecodable names, hence the nested guard.
                try:
                    print('failed ' + str(thing))
                except Exception:
                    print('solid failed')
# if exist return id, if not exist create it then return id
def get_dirid(p, curdirid, projid):
    """Return the remote id of subdirectory *p* under *curdirid*, creating it if needed.

    Returns 0 when the directory can be neither found nor created.
    NOTE(review): the name is taken with split('\\\\'), so this assumes
    Windows path separators — confirm before running on POSIX.
    """
    subs = get_dir_subs(curdirid, projid)
    for sd in subs:
        if sd['name'] == p.split('\\')[-1]:
            return sd['id']
    # No return yet: the folder does not exist remotely — create it.
    createname = p.split('\\')[-1]
    add_dir(createname, curdirid, projid)
    print('create ' + p)
    # Re-list to find the id of the directory just created.
    subs = get_dir_subs(curdirid, projid)
    for sd in subs:
        if sd['name'] == createname:
            return sd['id']
    return 0
if __name__ == '__main__':
pass
# do_batch_upload(Path(r'F:\402\004 小洋山资料备份-晓莉'), 240, 42)
# do_batch_upload(Path(r'F:\402\testupload'), 36, 200)
# do_batch_upload(Path(r'F:\402\001 交响乐团20130311需合并'), 434, 202)
# do_batch_upload(Path(r'F:\dfyyfile\东方医院'), projid=230, rootid=2211)
# do_batch_upload(Path(r'D:\技术群文档'), projid=687, rootid=2370)
# http:\\10.6.0.50:6789\files\工程资料 01\01 工程资料\404\008 解放日报-张雷\1.txt
# do_batch_upload(Path(r'\\192.168.11.70\工程资料 02\03 工程资料\404\国金资料'), projid=183, rootid=4000)
# uploadfile(r'E:\work\论文\空调故障诊断与风险评估.pdf',projid=33,dirid=38292)
# proj_infos = [['401', '001 中国馆', 196]]
# proj_infos = pd.read_csv(r'.\projs.csv')
# for indx, info in proj_infos.iterrows():
# subdir = str(info['sub'])
# projname = info['name']
# projid = info['pid']
#
# pathstr = os.path.join(r'\\192.168.11.70\工程资料 01\01 工程资料', subdir, projname)
# test = Path(pathstr)
#
# try:
# add_dir(projname, None, projid)
# except:
# pass
# rootid = get_root_dir_id(projid)
#
# do_batch_upload(Path(pathstr), projid=projid, rootid=rootid)
| pengyang486868/PY-read-Document | batch_upload.py | batch_upload.py | py | 3,549 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cloudservice.add_dir",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"lin... |
75226774588 | import logging
from kiteconnect import KiteConnect
import datetime
import pymongo
# Historical-data query parameters for the kite API.
instrument_token = "738561"
from_date = "2021-04-01"
to_date = "2021-06-30"
interval = '5minute'
logging.basicConfig(level=logging.DEBUG)
# SECURITY: hard-coded API credentials and tokens — move to environment
# variables / a secrets store and rotate these before committing.
api_key = "kpgos7e4vbsaam5x"
api_secret = "t9092opsldr1huxk1bgopmitovurftto"
request_token = "qRQhzRYukvQetbXDhiRYJI4XgLhwX51k"
access_token = "gP5gr51tDMpYiPBKTH95oNluvzS20c6Y"
kite = KiteConnect(api_key=api_key)
# data = kite.generate_session(request_token, api_secret=api_secret)
# print(data)
kite.set_access_token(access_token)
print(kite.quote(['NSE:INFY']))
# Store a trading configuration row in the local MongoDB core.functional collection.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
functional_col =myclient["core"]["functional"]
functional_data = {}
functional_data['description'] = 'Price limit for trading'
functional_data['variable'] = 'price_limit'
functional_data['values'] = 20
functional_col.insert_one(functional_data)
#print(kite.historical_data(instrument_token, from_date, to_date, interval, continuous=False, oi=True))
# print(datetime.datetime.now().strftime('%H:%M'))
# print(datetime.datetime.strptime('13:19', '%H:%M').strftime(('%H:%M')))
# print(datetime.datetime.now().strftime('%H:%M') == datetime.datetime.strptime('13:19', '%H:%M').strftime(('%H:%M')))
# print(datetime.datetime.now().strftime('%H:%M') == datetime.datetime.strptime('13:19', '%H:%M').strftime(('%H:%M'))) | prashanth470/trading | source/sample.py | sample.py | py | 1,284 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "kiteconnect.KiteConnect",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pymong... |
10958770997 | import os
import csv
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from torch.utils.data import Dataset, DataLoader
from torchvision.io import read_image
import torchvision.datasets as datasets
import torchvision.transforms as transforms
from torchvision.io import read_image, ImageReadMode
np.random.seed(0)
DATA_FOLDER_PATH = "YOURPATH\\\Animals_with_Attributes2\\"
JPEGIMAGES_FOLDER_PATH = "YOURPATH\\JPEGImages\\"
labels_dirs = os.listdir(JPEGIMAGES_FOLDER_PATH)
ANNOTATIONS_FILENAME = 'annotations.csv'
def find_num_images_per_label(img_dir = JPEGIMAGES_FOLDER_PATH) -> tuple[dict,dict]:
"""
USEFUL FOR SAMPLING.
Return a dict with keys as the 50 labels, and values being the number of images in each subdirectory corresponding to label
and a second dict with the relative numbers (proportion) for every label compared to the total number of images (useful for sampling)"""
labels_dirs = os.listdir(img_dir)
num_images_per_label = dict.fromkeys(labels_dirs)
proportions_images_per_label = dict.fromkeys(labels_dirs)
total_num_images = 0
# Update absolute number of images per label
for i, label in enumerate(labels_dirs) :
specific_label_path = os.path.join(img_dir, labels_dirs[i])
num_images_label = len(os.listdir(specific_label_path))
total_num_images += num_images_label
num_images_per_label[label] = num_images_label
# Update relative number of images per label (proportion)
for i, label in enumerate(labels_dirs) :
num_images_label = num_images_per_label[label]
proportion_label = round(num_images_label / total_num_images, 4)
proportions_images_per_label[label] = proportion_label
return num_images_per_label, proportions_images_per_label
labels_dict = {}
with open(DATA_FOLDER_PATH+"classes.txt") as f:
for line in f:
(key,val) = line.split()
labels_dict[val] = int(key)-1
print(labels_dict)
def create_annotations_csv_file(annotations_filename = ANNOTATIONS_FILENAME, img_dir = JPEGIMAGES_FOLDER_PATH) :
"""
Create a csv annotations_file, annotations.csv, with two columns, in the format :
path/to/image, label
The annotation csv is necessary for DataLoader.
"""
labels_dirs:list = os.listdir(img_dir)
if os.path.exists(annotations_filename):
os.remove(annotations_filename)
print(f'Deleted existent {ANNOTATIONS_FILENAME} file.\n ---------------------------')
with open(annotations_filename, 'w', newline='') as file :
writer = csv.writer(file, dialect='excel', delimiter=',')
for i, label in enumerate(labels_dirs) :
specific_label_path = os.path.join(img_dir, label)
images_names = os.listdir(specific_label_path)
for j, image_name in enumerate(images_names):
full_path_to_img= os.path.join(specific_label_path, image_name)
full_path_to_img= os.path.join(label, image_name)
row = [full_path_to_img, label]
writer.writerow(row)
print(f'Sucessfully created {ANNOTATIONS_FILENAME} file.')
create_annotations_csv_file()
class AWA2Dataset(Dataset): # Dataset class to serve as input for the DataLoader.
"""
Dataset class to serve as input for the DataLoader.
Implements all the required methods and more.
"""
def __init__(self, annotations_file=ANNOTATIONS_FILENAME, img_dir=JPEGIMAGES_FOLDER_PATH,
transform=None, target_transform=None):
self.img_labels = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.transform = transform
self.target_transform = target_transform
numbers_infos_dicts: tuple[dict,dict] = find_num_images_per_label(img_dir=JPEGIMAGES_FOLDER_PATH)
self.num_images_per_label = numbers_infos_dicts[0]
self.proportions_images_per_label = numbers_infos_dicts[1]
def __len__(self):
return len(self.img_labels)
def __getitem__(self, idx):
img_path = os.path.join(self.img_dir, self.img_labels.iloc[idx, 0])
# img_path = self.img_labels.iloc[idx, 0]
key = self.img_labels.iloc[idx, 1]
# Mapping the labels from string to tensor
label = labels_dict[key]
image = read_image(path = img_path, mode = ImageReadMode.RGB)
if self.transform:
image = self.transform(image)
if self.target_transform:
label = self.target_transform(label)
return image, label
class Subset_(AWA2Dataset) :
def __init__(self, dataset, indices, transform=None):
super().__init__()
self.dataset = dataset
self.indices = indices
self.transform = transform
def __len__(self):
return len(self.indices)
def __getitem__(self, index):
original_index_in_AWA2Dataset = self.indices[index]
image, label = self.dataset[original_index_in_AWA2Dataset]
if self.transform:
image = self.transform(image)
return image, label
'''
Procedure to Create Dataloader objects, and train-test split
'''
# With Data augmentation to remedy overfitting
transforms_pipeline_train = transforms.Compose([
## Input size
transforms.ToPILImage(),
transforms.Resize((256,256)),
## Data augmentation
transforms.RandomRotation(15),
transforms.RandomHorizontalFlip(p=0.4),
transforms.ColorJitter(brightness=0.2,
contrast=0.2,
saturation=0.2,
hue=0.1),
transforms.RandomCrop((224,224)),
## Normalize
transforms.ToTensor(),
transforms.Normalize(mean = [0.4643, 0.4640, 0.3985] , std=[0.2521, 0.2425, 0.2538]) # real mean and std of AwA2
])
transforms_pipeline_test = transforms.Compose([
## Input size
transforms.ToPILImage(),
transforms.Resize((256,256)),
transforms.CenterCrop((224,224)),
## Normalize
transforms.ToTensor(), # Already a tensor as implemented in Dataset class with the
transforms.Normalize(mean = [0.4643, 0.4640, 0.3985] , std=[0.2521, 0.2425, 0.2538]) # real mean and std of AwA2
])
# Initialize dataset and train/valid/test split
from sklearn.model_selection import train_test_split
dataset = AWA2Dataset()
n_images = len(dataset)
# Split all indices into training/testing sets
train_indices, test_indices = train_test_split(range(n_images), test_size=0.2, random_state=1)
# Split training indices into training/validation sets.
train_indices, valid_indices = train_test_split(train_indices, test_size=0.2, random_state=1)
# Initialize the 3 DataSet objects (as Subset_) and apply the relevant Transforms to each subset (train/test/valid)
train_data = Subset_(dataset, train_indices, transform = transforms_pipeline_train)
valid_data = Subset_(dataset, valid_indices, transform = transforms_pipeline_test)
test_data = Subset_(dataset, test_indices, transform = transforms_pipeline_test)
# Initalize DataLoaders
batch_size = 32
train_loader = DataLoader(dataset = train_data, batch_size=batch_size, shuffle=True, num_workers=6, pin_memory=True)
valid_loader = DataLoader(dataset = valid_data, batch_size=batch_size, shuffle=False, num_workers=6, pin_memory=True)
test_loader = DataLoader(dataset = test_data, batch_size=batch_size, shuffle=False, num_workers=6, pin_memory=True) | K-kiron/animal-detect | Helpers/AWA2_Dataloader.py | AWA2_Dataloader.py | py | 7,864 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "numpy.random.seed",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line... |
23850509915 | from datasets import load_dataset,load_metric
from transformers import AutoTokenizer,AutoModelForSeq2SeqLM,Seq2SeqTrainingArguments,DataCollatorForSeq2Seq,Seq2SeqTrainer
import numpy as np
metric=load_metric("BLEU.py")
max_input_length = 64
max_target_length = 64
src_lang = "zh"
tag_lang = "en"
model_path = "Helsinki-NLP/opus-mt-zh-en"
# model_path = "translations/checkpoint-1500/"
batch_size = 4
learning_rate = 1e-5
output_dir = "translations"
def preprocess_function(examples):
inputs = [eval(ex)[src_lang] for ex in examples["text"]]
targets = [eval(ex)[tag_lang] for ex in examples["text"]]
model_inputs=tokenizer(inputs,max_length=max_input_length,truncation=True)
with tokenizer.as_target_tokenizer():
labels=tokenizer(targets,max_length=max_target_length,truncation=True)
model_inputs["labels"]=labels["input_ids"]
return model_inputs
def postprocess_text(preds, labels):
preds = [pred.strip() for pred in preds]
labels = [[label.strip()] for label in labels]
return preds, labels
def compute_metrics(eval_preds):
preds, labels = eval_preds
if isinstance(preds, tuple):
preds = preds[0]
decoded_preds = tokenizer.batch_decode(preds, skip_special_tokens=True)
labels = np.where(labels != -100, labels, tokenizer.pad_token_id)
decoded_labels = tokenizer.batch_decode(labels, skip_special_tokens=True)
decoded_preds, decoded_labels = postprocess_text(decoded_preds, decoded_labels)
result = metric.compute(predictions=decoded_preds, references=decoded_labels)
result = {"bleu": result["score"]}
print(result)
prediction_lens = [np.count_nonzero(pred != tokenizer.pad_token_id) for pred in preds]
result["gen_len"] = np.mean(prediction_lens)
result = {k: round(v, 4) for k, v in result.items()}
return result
train_dataset = load_dataset("text",data_files="data/train.txt")
val_dataset = load_dataset("text",data_files="data/val.txt")
tokenizer = AutoTokenizer.from_pretrained(model_path)
tokenized_train_datasets = train_dataset.map(preprocess_function, batched=True)
tokenized_val_datasets = val_dataset.map(preprocess_function, batched=True)
model = AutoModelForSeq2SeqLM.from_pretrained(model_path)
data_collator = DataCollatorForSeq2Seq(tokenizer, model=model)
args = Seq2SeqTrainingArguments(
auto_find_batch_size = True,
learning_rate = learning_rate,
output_dir = output_dir,
predict_with_generate=True
)
trainer = Seq2SeqTrainer(
model,
args,
train_dataset=tokenized_train_datasets["train"],
eval_dataset=tokenized_val_datasets["train"],
data_collator=data_collator,
tokenizer=tokenizer,
compute_metrics=compute_metrics
)
trainer.train()
trainer.predict(test_dataset=tokenized_val_datasets["train"]) | Scpjoker/NLP-Course-Homework-2022 | translate.py | translate.py | py | 2,866 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "datasets.load_metric",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.where",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.count_nonzero",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
... |
20463208050 | from collections import defaultdict
d = defaultdict(int)
n = int(input())
for _ in range(n):
d[input()] += 1
allwords = list(d)
allwords_str = d.values()
listofx = []
for x in allwords_str:
listofx.append(str(x))
print(len(allwords))
print(" ".join(listofx))
# This line is the same as the above block > print(*d.values()) except print(len(allwords))
| Ronen-EDH/Code-exercises | Python/Hackerrank/Hackrank_wordorder.py | Hackrank_wordorder.py | py | 361 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "collections.defaultdict",
"line_number": 2,
"usage_type": "call"
}
] |
24150027900 | from fastapi import FastAPI, APIRouter,status, Request
from fastapi.staticfiles import StaticFiles
from fastapi.templating import Jinja2Templates
from fastapi.responses import HTMLResponse
from services.connectionHobolink import Connection
from routers import login
app=FastAPI(title="WeatherStation")
#routers
app.include_router(login.router)
app.mount("/static", StaticFiles(directory="static"), name="static")
router=APIRouter(prefix="/home",
tags=["Home page"],
responses={status.HTTP_404_NOT_FOUND:{"message":"Page not found"}})
# Modificado por me!
template = Jinja2Templates(directory="templates")
@app.get("/", response_class=HTMLResponse)
async def root(request:Request):
return template.TemplateResponse("index.html", {"request": request})
@app.get("/graficas")
async def root(request:Request):
return template.TemplateResponse("graficas.html", {"request": request})
@app.get("/api_data")
async def root():
conn = Connection()
data = conn.dataSensors
data['times'] = conn.timeStation
return {"data":data}
""" @app.get("/login")
async def login(request: Request):
return templates.TemplateResponse("index.html", {"request": request})
""" | AlvaroCoder/WeatherStation | main.py | main.py | py | 1,214 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "routers.login.router",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "routers.login",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "fastapi.static... |
73400221629 | # Configuration file for the Sphinx documentation builder.
#
# For the full list of built-in configuration values, see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Project information -----------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#project-information
project = "Polaris' NoteBook"
copyright = '2023, PolarisXQ'
author = 'PolarisXQ'
release = '0.0'
# -- General configuration ---------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#general-configuration
extensions = [
'sphinx_markdown_tables',
# 'sphinxemoji.sphinxemoji',
'sphinx.ext.githubpages',
'sphinx_copybutton',
'sphinx.ext.mathjax',
# 'pallets_sphinx_themes'
'myst_parser'
]
myst_enable_extensions = [
"amsmath",
"attrs_inline",
"colon_fence",
"deflist",
"dollarmath",
"fieldlist",
"html_admonition",
"html_image",
"linkify",
"replacements",
"smartquotes",
"strikethrough",
"substitution",
"tasklist",
]
templates_path = ['_templates']
exclude_patterns = []
language = 'zh_CN'
# -- Options for HTML output -------------------------------------------------
# https://www.sphinx-doc.org/en/master/usage/configuration.html#options-for-html-output
html_theme = 'press'
html_static_path = ['_static']
html_sidebars = {
'***': ['util/searchbox.html', 'util/sidetoc.html'],
}
from recommonmark.parser import CommonMarkParser
source_parsers = {
'.md': CommonMarkParser,
}
source_suffix = ['.rst', '.md']
html_logo = '_static/madcat_mini.png'
html_favicon='_static/madcat_mini.png'
html_theme_options = {
"external_links": [
("Github", "https://github.com/PolarisXQ"),
# ("Other", "https://bla.com")
]
} | PolarisXQ/Polaris-NoteBook | source/conf.py | conf.py | py | 1,924 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "recommonmark.parser.CommonMarkParser",
"line_number": 62,
"usage_type": "name"
}
] |
34572128931 | import random,server,time,istatistik,settings
import sqlite3 as sql
server_list=server.Server()
patlayan_power=6.5;kartopu_power=7;oyuk_power=2
_35power=10;_25power=9;_15power=5
def randomplayer():
global first,two
while True:
first=random.choice(server_list)
two=random.choice(server_list)
if first!=two:
break
return [first,two]
def fight(a=0,b=0):
x=a;xx=b
firstall=list();twoall=list()
players=randomplayer()
connect=sql.connect("C:\\Users\path\PycharmProjects\pythonProject\dosya\\denemetaban.db")
cursor=connect.cursor()
cursor.execute("SELECT * FROM players WHERE id={}".format(players[0]))
first=cursor.fetchall()
for i in range(len(first[0])):
firstall.append(first[0][i])
cursor.execute("SELECT * FROM players WHERE id={}".format(players[1]))
two=cursor.fetchall()
for i in range(len(two[0])):
twoall.append(two[0][i])
first_name=firstall[1];two_name=twoall[1]
first_35=firstall[5];two_35=twoall[5];first_25=firstall[6];two_25=twoall[6];first_15=firstall[7];two_15=twoall[7];first_kartopu=firstall[9]
two_kartopu=twoall[9];first_patlayan=firstall[10];two_patlayan=twoall[10];first_oyuk=firstall[11];two_oyuk=twoall[11];first_batirma=firstall[13]
two_batirma=twoall[13]
firstpower=((int(first_35)*kartopu_power*_35power+int(first_25)*kartopu_power*_25power+int(first_15)*kartopu_power*_15power))
twopower=((int(two_35) * kartopu_power * _35power+int(two_25) * kartopu_power * _25power+int(two_15) * kartopu_power * _15power))
first_hp=10000
two_hp=10000
a=6;b=5
kazanan=""
while True:
if first_hp > 0 and two_hp > 0:
if a % 6 == 0:
time.sleep(x)
if two_hp <= firstpower:
#print("{} Oyuncusu {} vurdu rakip battı".format(first_name, two_hp))
two_hp=0
break
#print("{} Oyuncusu {} vurdu".format(first_name, firstpower))
two_hp-=int(firstpower)
#print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(first_name, first_hp, two_name, two_hp))
time.sleep(x)
if b % 5 == 0:
if first_hp <= twopower:
#print("{} Oyuncusu {} vurdu rakip battı".format(two_name, first_hp))
first_hp=0
break
#print("{} Oyuncusu {} vurdu".format(two_name, twopower))
first_hp-=int(twopower)
#print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(first_name, first_hp, two_name, two_hp))
time.sleep(xx)
a+=1
b+=1
else:
time.sleep(xx)
a+=1
b+=1
else:
break
if first_hp >= two_hp:
#print("Kazanan {} {} oyuncusunun gemisi battı".format(first_name, two_name))
kazanan=first_name
else:
#print("Kazanan {} {} oyuncusunun gemisi battı".format(two_name, first_name))
kazanan=two_name
return kazanan
def xpfight():
try:
loop=0
while True:
print(loop)
winner=fight()
connect=sql.connect("C:\\Users\path\PycharmProjects\pythonProject\dosya\\denemetaban.db")
cursor=connect.cursor()
cursor.execute("SELECT xp,sunk,money FROM players WHERE username='{}'".format(winner))
data=cursor.fetchall()
xp=int(data[0][0]) + random.randint(1000, 1400)
sunk=int(data[0][1]) + 1
money=data[0][2] + random.randint(4000, 8000)
xp=str(xp)
sunk=str(sunk)
cursor.execute(
"UPDATE players SET xp='{}',sunk='{}',money={} WHERE username='{}'".format(xp, sunk, money, winner))
connect.commit()
loop+=1
except KeyboardInterrupt:
print("you are not allowed to quit right now")
exit()
def GetMoney(a=0,b=0):
x=a;xx=b
loop=0
while True:
for i in range(len(server_list)):
print(loop)
connect=sql.connect("C:\\Users\path\PycharmProjects\pythonProject\dosya\\denemetaban.db")
cursor=connect.cursor()
cursor.execute("SELECT level,cannon1,cannon2,cannon3,username,xp,money,npcsunk FROM players WHERE id={}".format(server_list[i]))
data=cursor.fetchall()
level=(data[0][0])
cannon1=data[0][1]
cannon2=data[0][2]
cannon3=data[0][3]
playername=data[0][4]
playerxp=int(data[0][5])
money=int(data[0][6])
npcsunk=int(data[0][7])
playerhp=10000
power=((int(cannon1)*kartopu_power*_35power+int(cannon2)*kartopu_power*_25power+int(cannon3)*kartopu_power*_15power))
npc_name=npc_list[0][0]
npc_hp=int(npc_list[0][1])
npc_power=140
npc_prize=int(npc_list[0][2])
npc_xp=int(npc_list[0][3])
a=6
b=5
while True:
if playerhp > 0 and npc_hp > 0:
if a % 6 == 0:
time.sleep(x)
if npc_hp <= power:
#print("{} Oyuncusu {} vurdu rakip battı".format(playername, npc_hp))
npc_hp=0
break
#print("{} Oyuncusu {} vurdu".format(playername, power))
npc_hp-=int(power)
#print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(playername, playerhp, npc_name,npc_hp))
time.sleep(x)
if b % 5 == 0:
if playerhp <= npc_power:
#print("{} Oyuncusu {} vurdu rakip battı".format(npc_name, playerhp))
playerhp=0
break
#print("{} Oyuncusu {} vurdu".format(npc_name, npc_power))
playerhp-=int(npc_power)
#print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(playername, playerhp, npc_name,npc_hp))
time.sleep(xx)
a+=1
b+=1
else:
time.sleep(xx)
a+=1
b+=1
else:
break
if playerhp >= npc_hp:
playerxp+=npc_xp
money+=npc_prize
npcsunk+=1
#print("Kazanan {} {} oyuncusunun gemisi battı".format(playername, npc_name))
cursor.execute("UPDATE players SET money={},xp={},npcsunk={} WHERE username='{}'".format(money,playerxp,npcsunk,playername))
connect.commit()
else:
print("Kazanan {} {} oyuncusunun gemisi battı".format(npc_name, playername))
loop+=1
i=0
def Event(a=0,b=0):
x=a;xx=b
loop=0
try:
while True:
npc_list=server.Npc()
print(loop)
connect=sql.connect("C:\\Users\path\PycharmProjects\pythonProject\dosya\\denemetaban.db")
cursor=connect.cursor()
cursor.execute(
"SELECT level,cannon1,cannon2,cannon3,username,xp,money,npcsunk FROM players WHERE id={}".format(
random.choice(server_list)))
data=cursor.fetchall()
level=(data[0][0])
cannon1=data[0][1]
cannon2=data[0][2]
cannon3=data[0][3]
playername=data[0][4]
playerxp=int(data[0][5])
money=int(data[0][6])
npcsunk=int(data[0][7])
playerhp=10000
power=((int(cannon1) * kartopu_power * _35power + int(cannon2) * kartopu_power * _25power + int(
cannon3) * kartopu_power * _15power))
npc_name=npc_list[9][0]
npc_hp=int(npc_list[9][1])
npc_power=4200
npc_prize=int(npc_list[9][2])
npc_xp=int(npc_list[9][3])
a=6
b=5
while True:
if playerhp > 0 and npc_hp > 0:
if a % 6 == 0:
time.sleep(x)
if npc_hp <= power:
# print("{} Oyuncusu {} vurdu rakip battı".format(playername, npc_hp))
npc_hp=0
break
# print("{} Oyuncusu {} vurdu".format(playername, power))
npc_hp-=int(power)
# print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(playername, playerhp, npc_name,npc_hp))
time.sleep(x)
if b % 5 == 0:
if playerhp <= npc_power:
# print("{} Oyuncusu {} vurdu rakip battı".format(npc_name, playerhp))
playerhp=0
break
# print("{} Oyuncusu {} vurdu".format(npc_name, npc_power))
playerhp-=int(npc_power)
# print("{} oyuncusunun canı {}, {} oyuncusunun canı {}".format(playername, playerhp, npc_name,npc_hp))
time.sleep(xx)
a+=1
b+=1
else:
time.sleep(xx)
a+=1
b+=1
else:
break
if playerhp >= npc_hp:
playerxp+=npc_xp
money+=npc_prize
npcsunk+=1
print("Etkinliği Kazanan {} {} gemisi battı.{} {} altın ve {} xp kazandı".format(playername, npc_name,
playername,
npc_prize, npc_xp))
cursor.execute(
"UPDATE players SET money={},xp={},npcsunk={} WHERE username='{}'".format(money, playerxp, npcsunk,
playername))
connect.commit()
quit()
else:
npc_hp=npc_hp
print("Kazanan {} {} oyuncusunun gemisi battı".format(npc_name, playername))
cursor.execute("UPDATE npc SET hp={} WHERE npc='{}'".format(npc_hp, npc_name))
connect.commit()
loop+=1
except KeyboardInterrupt:
quit()
| zeminkat/Game | savas.py | savas.py | py | 11,157 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "server.Server",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_n... |
40070373372 | import boto3
import json
from tqdm import tqdm
dynamodb = boto3.resource('dynamodb',region_name='us-east-2')
table = dynamodb.Table('FSBP_tree')
print(table.creation_date_time)
'''
with open('/hdd/c3s/data/aws_data/breach_compilation-pw_tree_1000000.json') as f:
data = json.load(f)
with table.batch_writer() as batch:
for item in data:
batch.put_item(
Item={
'NodeId': item,
'Info': data[item]
}
)
'''
f = open('/hdd/c3s/data/aws_data/splits/intr_tree_lucy_0.txt','r')
t = 0
bar= tqdm(f)
with table.batch_writer() as batch:
for line in bar:
item = line.split('\t')
batch.put_item(
Item={
'NodeId': item[0],
'Info': item[1]
}
) | lucy7li/compromised-credential-checking | perfomance_simulations/fsbp/save_amazon.py | save_amazon.py | py | 793 | python | en | code | 6 | github-code | 6 | [
{
"api_name": "boto3.resource",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "tqdm.tqdm",
"line_number": 22,
"usage_type": "call"
}
] |
33381013184 | from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
from django.urls import path, include
from django.contrib.auth import views as auth_views
from polls.views import (
RegistrationView,
CreateBoardView,
BoardDetailView,
BoardDeleteView,
CreateListView,
# ListDetailView,
ListEditView,
ListDeleteView,
CreateCardView,
CardEditView,
CardDeleteView,
CardMoveView,
)
urlpatterns = [
path('admin/', admin.site.urls),
path("accounts/", include("django.contrib.auth.urls")),
path("accounts/register/", RegistrationView.as_view()),
path("", CreateBoardView.as_view(), name="board"),
path("board/detail/<id>/", BoardDetailView.as_view(), name="board_detail"),
path("board/delete<id>/", BoardDeleteView.as_view(), name="board_delete"),
path("list/<id>", CreateListView.as_view(), name="list_create"),
# path("list/detail/<id>/", ListDetailView.as_view(), name="list_detail"),
path("list/edit/<id>/", ListEditView.as_view(), name="list_edit"),
path("list/delete/<id>/", ListDeleteView.as_view(), name="list_delete"),
path("card/<id>/", CreateCardView.as_view(), name="card_create"),
path("card/edit/<id>/", CardEditView.as_view(), name="card_edit"),
path("card/delete/<id>/", CardDeleteView.as_view(), name="card_delete"),
path("card/<id>/move/", CardMoveView.as_view(), name="card_move"),
]
urlpatterns += staticfiles_urlpatterns()
| destinymalone/projectmanagement-capstone | mysite/urls.py | urls.py | py | 1,486 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.urls.path",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "django.contrib.admin.site",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.admin",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "... |
2665829226 | from heatSink import HeatSink
from waterPipes import WaterPipes
from solarPanel import SolarPanel
from system import System
import matplotlib.pyplot as plt
flow_rates = [0.00025, 0.0005, 0.001, 0.002, 0.003, 0.005]
panel_temp = []
no_pipes = []
inlet_temp = 30
for f in flow_rates:
temps = []
pipes = []
for p in [1, 2, 3, 4, 5]:
heat_sink = HeatSink()
solar_panel = SolarPanel()
water_pipes = WaterPipes(no_pipes=p)
final_temp = 0
for i in range(0, 40):
system = System(heat_sink=heat_sink,
solar_panel=solar_panel,
water_pipes=water_pipes,
ambient_temp=30,
flow_rate=f,
flow_temp=inlet_temp)
system.update()
inlet_temp = system.outletTemp
final_temp = system.T_2
temps.append(final_temp)
pipes.append(p)
panel_temp.append(temps)
no_pipes.append(pipes)
for i in range(0, len(flow_rates)):
plt.plot(no_pipes[i], panel_temp[i], 'o-', label='Flow rate: ' + str(flow_rates[i]) + ' m3/s')
plt.legend()
plt.xlabel('Number of Pipes')
plt.ylabel('Panel Surface Temperature (°C)')
plt.show()
| southwelljake/HeatSinkModelling | src/comparePipes.py | comparePipes.py | py | 1,260 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "heatSink.HeatSink",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "solarPanel.SolarPanel",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "waterPipes.WaterPipes",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "system.... |
12483812629 | import numpy as np
import matplotlib.pyplot as plt
from scipy.constants import degree
from FallingCat import FallingCat
JI = 0.25
alpha = 30*degree
plt.figure(figsize=(5,7))
c = FallingCat(JI, alpha)
t = c.theta/degree
psi = c.lean()/degree
gamma = c.bend()/degree
phi = c.twist()/degree
print(phi[-1])
print((c.alpha + c.beta)/degree)
print((c.beta - c.alpha)/degree)
plt.subplot(3,1,1)
plt.plot(t, psi)
plt.ylabel(r'$\psi$ / deg')
plt.subplot(3,1,2)
plt.plot(t, gamma)
plt.ylabel(r'$\gamma$ / deg')
plt.subplot(3,1,3)
plt.plot(t, phi)
plt.ylabel(r'$\phi$ / deg')
plt.xlabel(r'$\theta$ / deg')
plt.tight_layout()
plt.savefig('fig2.eps')
plt.show()
| tt-nakamura/cat | fig2.py | fig2.py | py | 660 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "scipy.constants.degree",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "Fallin... |
26664284885 | import json
import logging
import os
from http.client import HTTPConnection
from pathlib import Path
from typing import Dict, Any
from mmcc_framework import DictCallback, Framework
from mmcc_framework.nlu_adapters import NluAdapter
from tuning.mmcc_config.callbacks import my_callbacks
from tuning.types import Pipeline, PipelineCallback
# Load the process description and kb from file.
with open(Path(__file__).parent / 'mmcc_config' / 'process_desc.json', "r") as process_file:
proc = json.loads(process_file.read())
logging.getLogger(__name__).info('Reading process_desc file')
with open(Path(__file__).parent / 'mmcc_config' / 'process_kb.json', "r") as process_file:
kb = json.loads(process_file.read())
logging.getLogger(__name__).info('Reading process_kb file')
def get_framework(pipeline: Pipeline, result: str, start_work: PipelineCallback) -> Framework:
"""Creates a new framework object, remember to call `handle_data_input({})` to get the first sentence.
The framework will have no NLU and the kb will not be saved at the end of execution.
The context will contain the dataset and the pipeline.
:param pipeline: the pipeline used in the last analysis
:param result: base64 string representation of the previous analysis result
:param start_work: callback that takes the pipeline and starts the execution in another thread
"""
return Framework(process=proc,
kb=kb,
initial_context={'pipeline': pipeline, 'result': result, 'start_work': start_work},
callback_getter=DictCallback(callbacks=my_callbacks),
nlu=MyRasaNlu(),
on_save=lambda *args: None)
class MyRasaNlu(NluAdapter):
""" This adapter uses Rasa, to use this adapter it is necessary to first setup and train the interpreter.
The instructions on how to use Rasa are available on Rasa's website, and consist basically in the following steps:
- Install Rasa and its dependencies;
- Run `rasa init` in your folder of choice;
- Edit the `data/nlu` file with the utterances used for training;
- Run `rasa train nlu` to produce a model;
- Start rasa on port 5005 and pass the location of the model:
for example `rasa run --enable-api -m models/nlu-20201228-183937.tar.gz`
Example:
Suppose that the nlu is trained with, among the others, the intent "insert_name" with a entity "name".
Initialize the adapter: `my_adapter = RasaNlu()`
Suppose that it is time to insert the name. If it is necessary to insert it as text use:
`my_framework.handle_text_input("Mark")`. The callback corresponding to the current activity will receive
(if the intent is recognized): `{"intent": "insert_name", "name": "Mark"}`.
If it is necessary to insert the name as data use:
`my_framework.handle_data_input(RasaNlu.dict("insert_name", {"name": "Mark"}))`, which will pass to the callback
the same structure as above.
:ivar interpreter: the instance of the rasa interpreter used by this adapter
"""
def __init__(self):
self.host = os.getenv("RASA_IP", "localhost") # TODO(giubots): fix here (host.docker.internal)
self.port = int(os.getenv("RASA_PORT", "5005"))
def parse(self, utterance: str) -> Dict[str, Any]:
""" Runs the interpreter to parse the given utterance and returns a dictionary containing the parsed data.
If no intent can be extracted from the provided utterance, this returns an empty dictionary.
:param utterance: the text input from the user
:return: a dictionary containing the detected intent and corresponding entities if any exists.
"""
connection = HTTPConnection(host=self.host, port=self.port)
connection.request("POST", "/model/parse", json.dumps({"text": utterance}))
response = json.loads(connection.getresponse().read())
if response["intent"]["name"] is None:
return {"intent": ""}
res = self.dict(response["intent"]["name"], {item['entity']: item["value"] for item in response["entities"]})
logging.getLogger(__name__).info('Detected intent: %s', res)
return res
@staticmethod
def dict(intent: str, values: Dict[str, Any] = None) -> Dict[str, Any]:
""" Helper method that can be used to produce a dictionary equivalent to the one of the parse method.
Use this method with framework.handle_data_input.
:param intent: the intent corresponding to this input
:param values: an optional dictionary containing pairs of entity-value
:return: a dictionary equivalent to the one produced by the parse method
"""
if values is None:
values = {}
return {"intent": intent, **values}
| DEIB-GECO/DSBot | DSBot/tuning/mmcc_integration.py | mmcc_integration.py | py | 4,842 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_nu... |
1904177195 | from fastapi import FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
import json, os
app = FastAPI()
# Allow cross-origin requests so a frontend served elsewhere can call this API.
# NOTE(review): allow_origins=['*'] together with allow_credentials=True is very
# permissive — confirm this is intentional for production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=['*'],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
@app.get('/contents/{page_id}/{content_id}')
async def content(
        page_id: str,
        content_id: str):
    """Return the stored JSON for one content item of a page.

    Responds 404 when the file does not exist or when the identifiers
    attempt to escape the data directory (path traversal).
    """
    base_dir = os.path.realpath("./data/json")
    json_path = os.path.realpath(
        os.path.join(base_dir, page_id, f"{content_id}.json"))
    # Security: the resolved path must stay inside base_dir — path parameters
    # come from the client and must not reach arbitrary files via '..'.
    if os.path.commonpath([base_dir, json_path]) != base_dir \
            or not os.path.exists(json_path):
        raise HTTPException(status_code=404, detail="Page not found")
    with open(json_path, 'r', encoding='utf-8') as j:
        json_load = json.load(j)
    return json_load
@app.get('/contents/{page_id}')
async def contentslist(
        page_id: str,):
    """Return the content list JSON for a single page.

    Responds 404 when the file does not exist or when page_id attempts to
    escape the pagelist directory (path traversal).
    """
    base_dir = os.path.realpath("./data/pagelist")
    json_path = os.path.realpath(os.path.join(base_dir, f"{page_id}.json"))
    # Security: reject '..'-style traversal in the client-supplied page_id.
    if os.path.commonpath([base_dir, json_path]) != base_dir \
            or not os.path.exists(json_path):
        raise HTTPException(status_code=404, detail="Page not found")
    with open(json_path, 'r', encoding='utf-8') as j:
        json_load = json.load(j)
    return json_load
@app.get('/pagelist')
async def pagelist():
    """Return the master list of all pages (fixed file, no user input)."""
    path = "./data/pagelist/all.json"
    with open(path, 'r', encoding='utf-8') as fp:
        return json.load(fp)
{
"api_name": "fastapi.FastAPI",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "fastapi.middleware.cors.CORSMiddleware",
"line_number": 8,
"usage_type": "argument"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name"... |
25814131906 | import errno
from flask import current_app, request, render_template
from flask.views import MethodView
from werkzeug.exceptions import Forbidden, NotFound
from ..constants import COMPLETE, FILENAME, LOCKED, TYPE
from ..utils.date_funcs import delete_if_lifetime_over
from ..utils.http import redirect_next_referrer
from ..utils.permissions import ADMIN, CREATE, may
from ..utils.upload import Upload
class ModifyView(MethodView):
    """Flask view that updates the filename/content-type metadata of an item."""
    def error(self, item, error):
        # Render a conflict page; 409 signals the item cannot be modified now.
        return render_template('error.html', heading=item.meta[FILENAME], body=error), 409
    def response(self, name):
        # On success, bounce back to the display page (or the 'next' referrer).
        return redirect_next_referrer('bepasty.display', name=name)
    def get_params(self):
        # New metadata values from the submitted form (None when not supplied).
        return {
            FILENAME: request.form.get('filename'),
            TYPE: request.form.get('contenttype'),
        }
    def post(self, name):
        """Apply the requested metadata changes to the named item.

        Requires CREATE permission; incomplete or LOCKED items may only be
        modified with ADMIN permission.  Missing or expired items yield 404.
        """
        if not may(CREATE):
            raise Forbidden()
        try:
            with current_app.storage.openwrite(name) as item:
                if not item.meta[COMPLETE] and not may(ADMIN):
                    error = 'Upload incomplete. Try again later.'
                    return self.error(item, error)
                if item.meta[LOCKED] and not may(ADMIN):
                    raise Forbidden()
                # Expired items are deleted lazily on access.
                if delete_if_lifetime_over(item, name):
                    raise NotFound()
                params = self.get_params()
                if params[FILENAME]:
                    item.meta[FILENAME] = Upload.filter_filename(
                        params[FILENAME], name, params[TYPE], item.meta[TYPE]
                    )
                if params[TYPE]:
                    item.meta[TYPE], _ = Upload.filter_type(
                        params[TYPE], item.meta[TYPE]
                    )
            return self.response(name)
        except OSError as e:
            # The storage backend raises ENOENT for unknown item names.
            if e.errno == errno.ENOENT:
                raise NotFound()
            raise
| bepasty/bepasty-server | src/bepasty/views/modify.py | modify.py | py | 1,929 | python | en | code | 162 | github-code | 6 | [
{
"api_name": "flask.views.MethodView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "flask.render_template",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "constants.FILENAME",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "utils... |
40155982512 | # -*- coding: utf-8 -*-
"""
This module contains functions for losses of various types: soiling, mismatch,
snow cover, etc.
"""
import numpy as np
import pandas as pd
from pvlib.tools import cosd
def soiling_hsu(rainfall, cleaning_threshold, tilt, pm2_5, pm10,
                depo_veloc=None,
                rain_accum_period=pd.Timedelta('1h')):
    """
    Calculates soiling ratio given particulate and rain data using the model
    from Humboldt State University [1]_.

    Parameters
    ----------
    rainfall : Series
        Rain accumulated in each time period. [mm]
    cleaning_threshold : float
        Amount of rain in an accumulation period needed to clean the PV
        modules. [mm]
    tilt : float
        Tilt of the PV panels from horizontal. [degree]
    pm2_5 : numeric
        Concentration of airborne particulate matter (PM) with
        aerodynamic diameter less than 2.5 microns. [g/m^3]
    pm10 : numeric
        Concentration of airborne particulate matter (PM) with
        aerodynamic diameter less than 10 microns. [g/m^3]
    depo_veloc : dict, default {'2_5': 0.004, '10': 0.0009}
        Deposition or settling velocity of particulates. [m/s]
    rain_accum_period : Timedelta, default 1 hour
        Period for accumulating rainfall to check against `cleaning_threshold`
        It is recommended that `rain_accum_period` be between 1 hour and
        24 hours.

    Returns
    -------
    soiling_ratio : Series
        Values between 0 and 1. Equal to 1 - transmission loss.

    References
    -----------
    .. [1] M. Coello and L. Boyle, "Simple Model For Predicting Time Series
       Soiling of Photovoltaic Panels," in IEEE Journal of Photovoltaics.
       doi: 10.1109/JPHOTOV.2019.2919628
    .. [2] Atmospheric Chemistry and Physics: From Air Pollution to Climate
       Change. J. Seinfeld and S. Pandis. Wiley and Sons 2001.
    """
    try:
        from scipy.special import erf
    except ImportError:
        raise ImportError("The soiling_hsu function requires scipy.")
    # Avoid a mutable default argument; the effective default is unchanged.
    if depo_veloc is None:
        depo_veloc = {'2_5': 0.004, '10': 0.0009}
    # accumulate rainfall into periods for comparison with threshold
    accum_rain = rainfall.rolling(rain_accum_period, closed='right').sum()
    # cleaning is True for intervals with rainfall greater than threshold
    cleaning_times = accum_rain.index[accum_rain >= cleaning_threshold]
    horiz_mass_rate = pm2_5 * depo_veloc['2_5']\
        + np.maximum(pm10 - pm2_5, 0.) * depo_veloc['10']
    tilted_mass_rate = horiz_mass_rate * cosd(tilt)  # assuming no rain
    # tms -> tilt_mass_rate
    tms_cumsum = np.cumsum(tilted_mass_rate * np.ones(rainfall.shape))
    mass_no_cleaning = pd.Series(index=rainfall.index, data=tms_cumsum)
    # Explicit dtype and positional assignment: `mass_removed[0] = 0.` is
    # ambiguous/deprecated on a DatetimeIndex.
    mass_removed = pd.Series(index=rainfall.index, dtype=float)
    mass_removed.iloc[0] = 0.
    mass_removed[cleaning_times] = mass_no_cleaning[cleaning_times]
    # Accumulated mass since the last cleaning event drives transmission loss.
    accum_mass = mass_no_cleaning - mass_removed.ffill()

    soiling_ratio = 1 - 0.3437 * erf(0.17 * accum_mass**0.8473)
    return soiling_ratio
| Samuel-psa/pvlib-python | pvlib/losses.py | losses.py | py | 2,997 | python | en | code | null | github-code | 6 | [
{
"api_name": "pandas.Timedelta",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.maximum",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "pvlib.tools.cosd",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "numpy.cumsum",
"l... |
34248836732 | import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as ticker
plt.style.use("bmh")
def exact(r1, r2, w):
    """Exact Gaussian two-body density at (r1, r2) for oscillator frequency w."""
    gaussian = np.exp(-w * (r1 ** 2 + r2 ** 2))
    return 2.0 * np.sqrt(w / np.pi) * gaussian
def fmt(x, pos):
    """Format tick value x as LaTeX scientific notation; pos is unused
    (required by matplotlib's FuncFormatter signature)."""
    mantissa, exponent = '{:.1e}'.format(x).split('e')
    return r'${} \times 10^{{{}}}$'.format(mantissa, int(exponent))
if __name__ == "__main__":
    N = 1000
    radius = 3
    # Evaluate the exact density on an N x N grid over [-radius, radius]^2.
    r = np.linspace(-radius, radius, N)
    data = np.zeros((N, N))
    for i in range(N):
        for j in range(N):
            data[i, j] = exact(r[i], r[j], 1)
    #data /= np.sum(data)
    # Figure styling: font sizes and serif family for publication output.
    size = 28
    size_ticks = 20
    label_size = {"size":str(size)}
    plt.rcParams["font.family"] = "Serif"
    plt.rcParams.update({'figure.autolayout': True})
    fig, ax = plt.subplots(figsize=(8,6))
    # Heat map of the density with a colorbar labelled rho(r_i, r_j).
    img = ax.imshow(data, cmap=plt.cm.jet, extent=[-radius,radius,-radius,radius])
    cbar = fig.colorbar(img, fraction=0.046, pad=0.04) #, format=ticker.FuncFormatter(fmt))
    cbar.set_label(r'$\rho(r_i,r_j)$', rotation=90, labelpad=10, y=0.5, **label_size)
    cbar.ax.tick_params(labelsize=size_ticks)
    plt.tight_layout()
    ax.set_xlabel("$r_j$", **label_size)
    ax.set_ylabel("$r_i$", **label_size)
    ax.tick_params(labelsize=size_ticks)
    tick = [-3, -2, -1, 0, 1, 2, 3]
    ax.set_xticks(tick)
    ax.set_yticks(tick)
    plt.grid()
    plt.show()
| evenmn/Master-thesis | scripts/plot_exact_tb.py | plot_exact_tb.py | py | 1,391 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name"... |
70398650747 | """utilities for generation of CTRMs
Author: Keisuke Okumura
Affiliation: TokyoTech & OSX
"""
from __future__ import annotations
import numpy as np
from numba import f8, jit
from ..environment import Instance
from ..roadmap import TimedNode, TimedRoadmap
from ..roadmap.utils import valid_move
# Numba-compiled with an explicit float64 signature for speed.
# NOTE: despite the name, this returns *squared* Euclidean distances from
# loc to each candidate; callers compare against squared thresholds
# (e.g. max_speed ** 2) so no sqrt is needed.
@jit(f8[:](f8[:, :], f8[:]), nopython=True)
def get_dist_arr(cands_pos: np.ndarray, loc: np.ndarray) -> np.ndarray:
    return np.sum((cands_pos - loc) ** 2, axis=1)
def merge_samples(
    loc: np.ndarray,
    t: int,
    agent: int,
    trm: TimedRoadmap,
    ins: Instance,
    merge_distance: float = 0.01,
) -> np.ndarray:
    """find compatible sample, otherwise return loc

    Args:
        loc (np.ndarray): location
        t (int): timestep
        agent (int): target agent
        trm (TimedRoadmap): target timed roadmap
        ins (Instance): instance
        merge_distance (:obj:`float`, optional):
            distance regarding as spatially close enough

    Returns:
        np.ndarray: location of compatible sample if found, otherwise loc

    Todo:
        use efficient set operation
    """
    rad = ins.rads[agent]
    max_speed = ins.max_speeds[agent]
    goal = ins.goals[agent]
    # get necessary distance
    # Candidate positions are concatenated as [parents | children | same-layer],
    # so dist_arr below is indexed with matching offsets.
    cands_pos_arr = [u.pos for u in trm.V[t - 1]]  # parents
    if t + 1 <= len(trm.V) - 1:
        cands_pos_arr += [u.pos for u in trm.V[t + 1]]  # children
    if len(trm.V) > t:
        cands_pos_arr += [u.pos for u in trm.V[t]]  # merge
    dist_arr = get_dist_arr(np.array(cands_pos_arr), loc)
    # compute parents
    # A vertex is a parent when it is reachable within one step (squared
    # speed bound) and the straight-line move is collision-free.
    offset = len(trm.V[t - 1])
    parents_cands_index = np.where(dist_arr[:offset] <= max_speed ** 2)[0]
    parents = [
        i
        for i in parents_cands_index
        if not ins.objs.collide_continuous_sphere(
            trm.V[t - 1][i].pos, loc, rad
        )
    ]
    set_loc_parents = set(parents)
    # compute children
    if t + 1 <= len(trm.V) - 1:
        children_cands_index = np.where(
            dist_arr[offset : offset + len(trm.V[t + 1])] <= max_speed ** 2
        )[0]
        children = [
            i
            for i in children_cands_index
            if not ins.objs.collide_continuous_sphere(
                trm.V[t + 1][i].pos, loc, rad
            )
        ]
    else:
        children = []
    set_loc_children = set(children)
    if len(trm.V) > t:
        # Same-layer vertices close enough (within merge_distance) are
        # candidates for merging with loc.
        merge_cands_idx = np.where(
            dist_arr[-len(trm.V[t]) :] <= merge_distance ** 2
        )[0]
        # get heuristics
        h_loc = sum((loc - goal) ** 2)
        for u_ind in merge_cands_idx:
            u = trm.V[t][u_ind]
            u_parents = trm.get_parents(u)
            u_children = trm.E[t][u.index]
            set_u_parents = set(u_parents)
            set_u_children = set(u_children)
            # Identical connectivity: keep whichever location is closer
            # (squared distance heuristic) to the goal.
            if (
                set_u_parents == set_loc_parents
                and set_u_children == set_loc_children
            ):
                # merge to better one
                h_u = sum((u.pos - goal) ** 2)
                if h_loc < h_u:
                    # replace u by loc
                    trm.V[t][u.index] = TimedNode(t, u.index, loc)
                    return loc
                else:
                    # abandon loc
                    return u.pos
            # u's connectivity is a superset of loc's: loc adds nothing.
            if (
                set_u_parents >= set_loc_parents
                and set_u_children >= set_loc_children
            ):
                # abandon loc
                return u.pos
            # loc's connectivity is a superset of u's: move u to loc and
            # graft the extra edges onto u.
            if (
                set_u_parents <= set_loc_parents
                and set_u_children <= set_loc_children
            ):
                # replace u by loc
                trm.V[t][u.index] = TimedNode(t, u.index, loc)
                # append additional edge, children
                trm.E[t][u.index] += list(set_loc_children - set_u_children)
                # append parents
                for p in set_loc_parents - set_u_parents:
                    trm.E[t - 1][p].append(u.index)
                return loc
    # append new sample
    trm.append_sample(loc=loc, t=t, parents=parents, children=children)
    return loc
def format_trms(ins: Instance, trms: list[TimedRoadmap]) -> None:
    """align length of timed roadmaps

    Extends every roadmap to the length of the longest one (plus one extra
    layer), adding edges only where the move is valid for that agent.

    Args:
        ins (Instance): instance
        trms (list[TimedRoadmap]): timed roadmaps
    """
    T = max([len(trm.V) for trm in trms]) - 1
    for i, trm in enumerate(trms):
        # Per-agent edge validity: speed limit, body radius, static obstacles.
        def valid_edge(pos1: np.ndarray, pos2: np.ndarray) -> bool:
            return valid_move(
                pos1, pos2, ins.max_speeds[i], ins.rads[i], ins.objs
            )
        # technical point, add one additional layer
        trm.extend_until(T + 1, valid_edge)
def append_goals(ins: Instance, trms: list[TimedRoadmap]) -> None:
    """append goals to timed roadmaps

    Inserts each agent's goal position into every layer (t >= 1) of its
    roadmap so the goal is always reachable as a sample.

    Args:
        ins (Instance): instance
        trms (list[TimedRoadmap]): timed roadmaps
    """
    for i, (trm, goal) in enumerate(zip(trms, ins.goals)):
        # Per-agent edge validity: speed limit, body radius, static obstacles.
        def valid_edge(pos1: np.ndarray, pos2: np.ndarray) -> bool:
            return valid_move(
                pos1, pos2, ins.max_speeds[i], ins.rads[i], ins.objs
            )
        for t in range(1, len(trm.V)):
            trm.append_sample(goal, t, valid_edge)
| omron-sinicx/ctrm | src/ctrm/roadmap_learned/utils.py | utils.py | py | 5,210 | python | en | code | 21 | github-code | 6 | [
{
"api_name": "numpy.ndarray",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "numpy.sum",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numba.jit",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numba.f8",
"line_number": ... |
28663549378 | # Please develop your ingestion service in Python. You may select the delivery format (e.g., Jupyter
# Notebook, containerized microservice). For this exercise, you may assume that a scheduling service
# to regularly invoke your ingestion is provided.
# Where and how you process the data is at your discretion.
import os
import requests
# import psycopg2
import pandas as pd
import geopandas as gpd
from zipfile import ZipFile
from shapely.geometry import Point
from urllib.request import urlretrieve
from requests.exceptions import RequestException
from zipfile import BadZipFile
from psycopg2 import OperationalError
from mappings import event_root_codes, event_base_codes, event_codes, map_fips_to_iso2
def main():
    """ Main controller function

    Runs the ingestion pipeline end to end: download GDELT events,
    download US county geometry, clean and filter the events to the US,
    (fictionally) load the database, then remove downloaded files.
    """
    try:
        # add folders because git won't push empty folders
        try:
            os.mkdir('files')
            os.mkdir('extracted')
        except Exception:
            print('Folders already exist, no problem! Continuing...')
        extracted_file_path, zip_file_path = retrieve_event_data()
        geo_data = retrieve_geo_data()
        cleaned_data = clean_data(extracted_file_path)
        filtered_event_data = filter_data(cleaned_data, geo_data)
        load_db(filtered_event_data, event_root_codes, event_base_codes, event_codes, map_fips_to_iso2)
        cleanup(extracted_file_path, zip_file_path)
    # Each failure mode gets its own message; none re-raises, so the
    # scheduler simply retries on the next invocation.
    except RequestException as e:
        print(f"Error while retrieving data: {e}")
    except BadZipFile as e:
        print(f"Error while extracting the zip file: {e}")
    except OperationalError as e:
        print(f"Database connection error: {e}")
    except Exception as e:
        print(f"An unexpected error occurred: {e}")
def retrieve_event_data():
    """ Download and extract the latest 15-minute GDELT v2 export file.

    The lastupdate.txt index lists the newest files; the first entry
    containing "export" covers the latest 15 minutes of event data.

    Returns:
        tuple: (extracted_file_path, zip_file_path) — paths to the
        extracted CSV and the downloaded zip archive.
    """
    # Retrieve the index of the most recent data files.  A timeout keeps a
    # scheduled run from hanging forever on a stalled connection.
    data_files = requests.get(
        'http://data.gdeltproject.org/gdeltv2/lastupdate.txt',
        timeout=60).content.decode()
    # Selecting the first entry with "export" in it will
    # give you the latest 15 min worth of data
    file_download_location = data_files.replace("\n", " ").split(" ")[2]
    # get just the file name out of the url
    file_name = file_download_location.split("/")[-1]
    file_path = 'files/' + file_name
    # downloading the file to files/
    urlretrieve(file_download_location, file_path)
    # unzip and extract file to extracted/
    with ZipFile(file_path, 'r') as zip:
        zip.extractall('extracted/')
    # remove .zip suffix to get the extracted member's path
    extracted_file_path = 'extracted/' + file_name[0:-4]
    print('File downloaded')
    return extracted_file_path, file_path
def clean_data(extracted_file_path):
    """ Perform some foundational data prep and quality assurance.

    Loads the tab-separated GDELT export, keeps only the columns needed by
    the pipeline, names them per the GDELT codebook, and drops duplicates.

    Args:
        extracted_file_path: path to the extracted tab-separated export.

    Returns:
        pandas.DataFrame with the 11 columns used downstream.

    Raises:
        pandas.errors.EmptyDataError / ParserError on unreadable input
        (re-raised with context), Exception for anything unexpected.
    """
    try:
        # load event data into pandas df; GDELT v2 exports have 61 columns
        event_df = pd.read_csv(extracted_file_path, sep='\t')
        # name cols so df is easier to use
        event_df.columns = ['col_' + str(i) for i in range(61)]
        # Select cols needed in final output defined in assignment
        event_df = event_df[['col_0', 'col_1', 'col_26', 'col_27', 'col_28',
                             'col_52', 'col_53', 'col_56', 'col_57',
                             'col_59', 'col_60']]
        # name the columns according to the GDELT codebook
        event_df.columns = ['GLOBALEVENTID', 'SQLDATE', 'EventCode',
                            'EventBaseCode', 'EventRootCode',
                            'ActionGeo_FullName', 'ActionGeo_CountryCode',
                            'ActionGeo_Lat', 'ActionGeo_Long',
                            'DATEADDED', 'SOURCEURL']
        # Drop duplicates
        event_df = event_df.drop_duplicates()
        return event_df
    # Chain with `from e` so the original traceback is preserved.
    except pd.errors.EmptyDataError as e:
        raise pd.errors.EmptyDataError(f"Empty data error: {e}") from e
    except pd.errors.ParserError as e:
        raise pd.errors.ParserError(f"Parser error: {e}") from e
    except Exception as e:
        raise Exception(f"An unexpected error occurred during data cleaning: {e}") from e
def retrieve_geo_data():
    """ Download geometric location data (GeoJSON) for US counties.

    Returns:
        str: the raw GeoJSON document as decoded text.
    """
    print('Retrieving geo data')
    # Timeout keeps a scheduled run from hanging on a stalled connection.
    return requests.get(
        'https://raw.githubusercontent.com/plotly/datasets/master/geojson-counties-fips.json',
        timeout=60).content.decode()
def filter_data(event_df, geo_data):
    """ Please filter the event data to those events located within the US
        based on their lat/lon coordinates (lat: ActionGeo_Long, long:ActionGeo_Lat)

    NOTE(review): this mutates event_df in place (adds a 'geometry' column).
    """
    # Load choropleth data using geopandas
    choropleth_df = gpd.read_file(geo_data)
    # Convert the event dataframe to a GeoDataFrame using "ActionGeo_Lat" and "ActionGeo_Long" columns
    event_df['geometry'] = event_df.apply(lambda row: Point(row['ActionGeo_Long'], row['ActionGeo_Lat']), axis=1)
    # Specify the CRS for the event data (WGS84 lat/lon)
    event_gdf = gpd.GeoDataFrame(event_df, geometry='geometry', crs="EPSG:4326")
    # Ensure that both datasets have the same CRS
    event_gdf = event_gdf.to_crs(choropleth_df.crs)
    # Perform the spatial join to filter events in the U.S.
    us_events = gpd.sjoin(event_gdf, choropleth_df, how='inner', predicate='intersects')
    print('Data filtered - might add in specifics using variables here')
    return us_events
def load_db(filtered_event_data, event_root_codes, event_base_codes, event_codes, map_fips_to_iso2):
    """ Illustrate how the filtered events would be loaded into Postgres/PostGIS.

    No real database work happens here; the intended (not executed) flow is:

    1. Connect with psycopg2 using a database URI and open a cursor.
    2. CREATE TABLE events (GLOBALEVENTID PK, SQLDATE, EventCode,
       EventBaseCode, EventRootCode, ActionGeo_* columns, DATEADDED,
       SOURCEURL).
    3. Insert the mapping dicts (event_root_codes, event_base_codes,
       event_codes, map_fips_to_iso2) as dimension tables.
    4. Bulk-insert filtered_event_data, commit, and close cursor/connection.
    """
    print('DB fictionally loaded: fictionally variable number of rows inserted')
def cleanup(extracted_file_path, zip_file_path):
    """ Delete the downloaded archive and its extracted payload. """
    print('Removing files')
    for stale_path in (extracted_file_path, zip_file_path):
        os.remove(stale_path)
# Script entry point: run the full ingestion pipeline once.
if __name__ == "__main__":
    main()
| madelinepet/take_home_assignment | assignment.py | assignment.py | py | 7,586 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "os.mkdir",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "mappings.event_root_codes",
"line_number": 34,
"usage_type": "argument"
},
{
"api_name": "mappings.event_base_c... |
24168209609 | #!/usr/bin/env python
'''
summarise slurm job details
Usage: summarise.py --files slurm-*.log > summary.tsv
Time is in hours.
Memory is in GB.
'''
#(venv_somatic_2) spartan-login1 18:48:20 msi-evaluation$ sacct -j 18860471 --format="JobName,CPUTime,MaxRSS,Elapsed,MaxVMSize,Timelimit"
# JobName CPUTime MaxRSS Elapsed MaxVMSize Timelimit
#---------- ---------- ---------- ---------- ---------- ----------
# mantis 17:37:48 02:56:18 08:00:00
# batch 17:37:48 733264K 02:56:18 47907692K
# extern 17:37:48 1212K 02:56:18 144788K
import argparse
import logging
import subprocess
import sys
def to_hours(v):
    """Convert a slurm duration string ('d-hh:mm:ss' or 'hh:mm:ss') to hours."""
    if '-' in v:
        days, remainder = v.split('-', 1)
        return 24 * float(days) + to_hours(remainder)
    hours, minutes, seconds = (int(part) for part in v.split(':'))
    return hours + minutes / 60 + seconds / 3600
def to_g(v):
    """Convert a slurm memory string to gigabytes.

    Handles plain unit suffixes (K/M/G/T) as well as slurm's per-node ('n')
    and per-cpu ('c') qualifiers, e.g. '733264K', '4Gn', '500Mc'.
    Values with no recognised suffix are logged and returned as-is
    (interpreted as already being in GB), matching the old behaviour.
    """
    original = v
    # Strip slurm's per-node / per-cpu qualifier if present (e.g. '4Gn').
    if v and v[-1] in 'nc':
        v = v[:-1]
    # Scale factors to GB; powers of two so common values stay exact.
    scale = {'K': 1.0 / (1024 * 1024), 'M': 1.0 / 1024, 'G': 1.0, 'T': 1024.0}
    if v and v[-1] in scale:
        return float(v[:-1]) * scale[v[-1]]
    # logging.warn is deprecated; use logging.warning.
    logging.warning('tricky memory value: %s', original)
    return float(v)
def main(files, filter_name):
    """Write a CSV summary of requested vs used time/memory per slurm job.

    :param files: file names whose basename ends in the job id
        (e.g. slurm-12345.log)
    :param filter_name: if 'snakemake', shorten job names of the
        form *-name-* down to the rule name
    """
    logging.info('starting...')
    sys.stdout.write('ID,Name,TimeRequested,TimeUsed,MemoryRequested,MemoryUsed,TimeDiff,MemoryDiff\n')
    for f in files:
        logging.info('%s...', f)
        # job id is the trailing number of the basename: slurm-<id>.log
        i = f.split('/')[-1].split('.')[0].split('-')[-1]
        output = subprocess.check_output("sacct -j {} -p --format JobName,Elapsed,MaxRSS,ReqMem,TimeLimit".format(i), shell=True).decode()
        lines = output.split('\n')
        # line 1 is the job itself (name/limits); line 2 is its batch step,
        # which carries the MaxRSS measurement.
        jobname = lines[1].split('|')[0]
        time_requested = to_hours(lines[1].split('|')[4])
        time_used = to_hours(lines[2].split('|')[1])
        memory_used = to_g(lines[2].split('|')[2])
        memory_requested = to_g(lines[2].split('|')[3])
        if filter_name == 'snakemake':
            jobname = '-'.join(jobname.split('-')[-2:-1])
            logging.debug('new jobname is %s', jobname)
        sys.stdout.write('{},{},{:.1f},{:.1f},{:.1f},{:.1f},{:.1f},{:.1f}\n'.format(i, jobname, time_requested, time_used, memory_requested, memory_used, time_requested - time_used, memory_requested - memory_used))
    logging.info('done')
if __name__ == '__main__':
    # Command-line entry point: parse arguments, configure logging, run.
    parser = argparse.ArgumentParser(description='Slurm summariser')
    parser.add_argument('--files', required=True, nargs='+', help='files containing slurm ids')
    parser.add_argument('--filter_name', required=False, help='filter names in snakemake format *-name-*')
    parser.add_argument('--verbose', action='store_true', help='more logging')
    args = parser.parse_args()
    if args.verbose:
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.DEBUG)
    else:
        logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s', level=logging.INFO)
    main(args.files, args.filter_name)
| supernifty/slurm_util | summarise.py | summarise.py | py | 2,901 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.warn",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging.info",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "sys.stdout.write",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_num... |
# -*- coding: utf-8 -*-
"""
Created on Wed May 12 04:34:12 2021
@author: Zakaria

Train a random-forest classifier on a fraud-prediction dataset and report
its accuracy on a 30% hold-out split.
"""
import pandas as pd
# Load the dataset; 'isFraud' is the binary target column.
data = pd.read_csv('prediction_de_fraud_2.csv')
caracteristiques = data.drop('isFraud', axis=1).values
cible = data['isFraud'].values
from sklearn.preprocessing import LabelEncoder
# Label-encode the categorical feature columns (positions 1, 3 and 6).
# NOTE(review): the same LabelEncoder instance is refit on each column, so
# its fitted state only reflects the last column; typically each column
# gets its own encoder — confirm this is intended.
LabEncdr_X = LabelEncoder()
caracteristiques[:, 1] = LabEncdr_X.fit_transform(caracteristiques[:, 1])
caracteristiques[:, 3] = LabEncdr_X.fit_transform(caracteristiques[:, 3])
caracteristiques[:, 6] = LabEncdr_X.fit_transform(caracteristiques[:, 6])
from sklearn.model_selection import train_test_split
# 70/30 train/test split with a fixed seed for reproducibility.
x_train, x_test, y_train, y_test = train_test_split(caracteristiques, cible, test_size=.3, random_state=50)
from sklearn.ensemble import RandomForestClassifier
Random_frst_cls = RandomForestClassifier(random_state=50)
Random_frst_cls.fit(x_train, y_train)
Random_frst_cls.score(x_test, y_test) ## ==> 0.9550561797752809
| Baxx95/6-10-Programmes-Data-Science-SL-Random_Forest_Classifier | Random_Forest_Classifier.py | Random_Forest_Classifier.py | py | 961 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 26,
"usage_type": "call"
}... |
29186498876 | import numpy
import multiprocessing as mp
import scipy.fftpack as fft
import scipy.signal as signal
import h5py
from .utilities import working_dir
from .stationbandpass import lofar_station_subband_bandpass
def fir_filter_coefficients(num_chan, num_taps, cal_factor=1./50.0):
    '''
    Compute FIR filter coefficients for channel separation.

    A low-pass prototype of length ``num_taps*num_chan`` is designed with
    ``scipy.signal.firwin``, its sign is alternated (equivalent to an
    fftshift of the channel ordering), and the result is scaled by
    ``sqrt(num_chan) * cal_factor``.

    **Parameters**

    num_chan : int
        Required number of channels in PPF output.

    num_taps : int
        Number of PPF taps.

    cal_factor : float
        Overall scale factor applied to the coefficients.

    **Returns**

    A num_taps x num_chan numpy.array of float32.
    '''
    filter_length = num_taps * num_chan
    prototype = signal.firwin(filter_length, 1 / num_chan,
                              width=0.5 / num_chan)
    # Alternating signs shift the pass band so that channel ordering comes
    # out fftshift-ed automatically.
    shifted = prototype * (-1) ** numpy.arange(filter_length)
    coefficients = numpy.array(shifted * (num_chan ** 0.5),
                               dtype=numpy.float32)
    coefficients *= cal_factor
    return coefficients.reshape((num_taps, num_chan))
def channelize_ppf(timeseries_taps, fir_coefficients):
'''
Make a polyphase-filtered spectrum of a timeseries.
**Parameters**
timeseries_taps : 2D numpy.array of complex64
A `num_taps x num_chan` array containing the timeseries data,
where `timeseries_taps.ravel()` should yield the input (single
channel) timeseries data.
fir_coefficients : 2D numpy.array of float32
A `num_taps x num_chan` array containing the FIR coefficients,
where `fir_coefficients.ravel()` should yield the FIR filter to
multiply with the original (single channel) timeseries data.
**Returns**
A 1D numpy.array of complex64 with length num_chan containing the
PPF output.
**Example**
>>> fir = fir_filter_coefficients(num_chan=4, num_taps=2, cal_factor=1)
>>> fir.dtype
dtype('float32')
>>> timeseries = numpy.array(numpy.exp(2j*numpy.pi*2.8*numpy.arange(8)),
... dtype=numpy.complex64)
>>> timeseries
array([ 1.000000 +0.00000000e+00j, 0.309017 -9.51056540e-01j,
-0.809017 -5.87785244e-01j, -0.809017 +5.87785244e-01j,
0.309017 +9.51056540e-01j, 1.000000 -3.42901108e-15j,
0.309017 -9.51056540e-01j, -0.809017 -5.87785244e-01j], dtype=complex64)
>>> spectrum = channelize_ppf(timeseries.reshape(fir.shape), fir)
>>> spectrum
array([-0.03263591-0.01060404j, -0.00383157+0.00195229j,
-0.00848089+0.02610143j, 0.78864020+1.54779351j], dtype=complex64)
'''
return (fft.fft((timeseries_taps*fir_coefficients).sum(axis=0)))
def channelize_ppf_multi_ts(timeseries_taps, fir_coefficients):
    '''Vectorised PPF over many timeslots.

    fir_coefficients is num_taps x num_chan; timeseries_taps is a
    num_timeslots x num_taps x num_chan array.  Returns one spectrum per
    timeslot (num_timeslots x num_chan).
    '''
    weighted = timeseries_taps * fir_coefficients[numpy.newaxis, :, :]
    return fft.fft(weighted.sum(axis=1), axis=1)
def channelize_ppf_contiguous_block(timeseries_taps, fir_coefficients):
    '''Slide the PPF over a contiguous block of tap rows.

    Given num_ts_blocks rows of num_chan samples, produces
    num_ts_blocks - (num_taps - 1) spectra, one per window of num_taps
    consecutive rows.
    '''
    num_taps, num_chan = fir_coefficients.shape
    num_spectra = timeseries_taps.shape[0] - (num_taps - 1)
    spectra = numpy.zeros((num_spectra, num_chan), dtype=numpy.complex64)
    for row in range(num_spectra):
        spectra[row, :] += channelize_ppf(
            timeseries_taps[row:row + num_taps, :], fir_coefficients)
    return spectra
def samples_per_block(block_length_s, sample_duration_s, num_chan, num_taps):
    r'''
    Calculate the number of samples per correlator integration time, and
    the number of samples that must actually be read.  The latter is
    larger because (num_taps - 1) extra spectra worth of samples are
    needed to prime the PPF.

    **Parameters**

    block_length_s : float
        Number of seconds per correlator interval.

    sample_duration_s : float
        Number of seconds per sample in the time series data.

    num_chan : int
        Number of channels for the PPF.

    num_taps : int
        Number of taps in the PPF.

    **Returns**

    Tuple (block_length_samples, samples_to_read_per_block); both are
    integers and multiples of num_chan.

    **Examples**

    >>> samples_per_block(0.1, 1024/200e6, num_chan=256, num_taps=16)
    (19456, 23296)
    '''
    num_spectra = int(round(block_length_s/sample_duration_s/num_chan))
    samples_in_block = num_spectra * num_chan
    samples_to_read = (num_spectra + num_taps - 1) * num_chan
    return samples_in_block, samples_to_read
def read_and_process_antenna_worker(h5_names, sap_id,
                                    num_sb, fir_coefficients,
                                    connection):
    r'''
    Worker-process loop: read dual-polarization complex time series from
    four HDF5 files (X_re, X_im, Y_re, Y_im), channelize every sub band
    with the polyphase filter, divide out the station sub band bandpass,
    and ship the spectra back over a pipe.

    The worker blocks on ``connection.recv()``.  Each message is either the
    string ``'done'`` (close files and exit) or a tuple
    ``(first_timeslot, num_timeslots)`` selecting the slice to process.
    For each request the worker sends, per polarization, a metadata list
    ``[label, shape, dtype]`` followed by the raw bytes of a complex64
    array of shape (num_sb, num_spectra, num_chan).

    **Parameters**

    h5_names : sequence of strings
        The HDF5 file names of X_re, X_im, Y_re, and Y_im.

    sap_id : int
        Sub-array-pointing index used to locate the HDF5 groups.

    num_sb : int
        The number of sub bands expected in the data.

    fir_coefficients : 2D numpy.array of float32
        A `num_taps x num_chan` array containing the FIR coefficients,
        where `fir_coefficients.ravel()` should yield the FIR filter to
        multiply with the original (single channel) timeseries data.

    connection : multiprocessing Connection
        Pipe endpoint used for both control messages and result data.
    '''
    sap_fmt = 'SUB_ARRAY_POINTING_%03d/BEAM_000/STOKES_%d'
    num_pol = len(h5_names)
    num_taps, num_chan = fir_coefficients.shape
    bandpass = lofar_station_subband_bandpass(num_chan)
    # with working_dir(dir_name):
    h5_files = [h5py.File(file_name, mode='r') for file_name in h5_names]
    h5_groups = [h5_file[sap_fmt % (sap_id, pol)]
                 for pol, h5_file in enumerate(h5_files)]
    while True:
        message = connection.recv()
        if message == 'done':
            connection.close()
            [h5_file.close() for h5_file in h5_files]
            break
        first_timeslot, num_timeslots = message
        # One real-valued plane per file: X_re, X_im, Y_re, Y_im.
        time_series_real = numpy.zeros((4, num_timeslots, num_sb), dtype=numpy.float32)
        [h5_groups[pol].read_direct(time_series_real,
                                    numpy.s_[first_timeslot:first_timeslot+num_timeslots,:],
                                    numpy.s_[pol, :, :])
         for pol in range(num_pol)]
        time_series_complex_x = time_series_real[0,:,:] + 1j*time_series_real[1,:,:]
        time_series_complex_y = time_series_real[2,:,:] + 1j*time_series_real[3,:,:]
        # Channelize per sub band and correct for the station bandpass.
        result_x = numpy.array([channelize_ppf_contiguous_block(
            time_series_complex_x[:, sb].reshape((-1, num_chan)),
            fir_coefficients)/bandpass[numpy.newaxis,:]
                                for sb in range(num_sb)],
                               dtype=numpy.complex64)
        result_y = numpy.array([channelize_ppf_contiguous_block(
            time_series_complex_y[:, sb].reshape((-1, num_chan)),
            fir_coefficients)/bandpass[numpy.newaxis,:]
                                for sb in range(num_sb)],
                               dtype=numpy.complex64)
        # Metadata first, then raw bytes, so the receiver can reconstruct.
        connection.send(['x', result_x.shape, result_x.dtype])
        connection.send_bytes(result_x.tobytes())
        connection.send(['y', result_y.shape, result_y.dtype])
        connection.send_bytes(result_y.tobytes())
def time_and_freq_axes(h5_filename, sap_id=0):
r'''
'''
coordinate_fmt = 'SUB_ARRAY_POINTING_%03d/BEAM_000/COORDINATES/COORDINATE_%d'
h5_file = h5py.File(h5_filename, mode='r')
time_axis, freq_axis = [
dict([item
for item in h5_file[coordinate_fmt %
(sap_id, axis_id)].attrs.items()])
for axis_id in [0, 1]]
h5_file.close()
return time_axis, freq_axis
def read_and_process_antenna_block_mp(dir_name, sas_id_string, sap_ids,
fir_coefficients, interval_s=None,
interval_samples=None, num_samples=256*16,
max_duration_s=None):
sap_fmt = 'SUB_ARRAY_POINTING_%03d/BEAM_000/STOKES_%d'
with working_dir(dir_name):
sap_names = [[('%s_SAP%03d_B000_S%d_P000_bf.h5' % (sas_id_string, sap_id, pol))
for pol in [0, 1, 2, 3]]
for sap_id in sap_ids]
first_file = h5py.File(sap_names[0][0], mode='r')
timeslots_per_file = first_file[sap_fmt % (0, 0)].shape[0]
first_file.close()
time_axis, freq_axis = time_and_freq_axes(sap_names[0][0], sap_id=0)
num_sb = len(freq_axis['AXIS_VALUES_WORLD'])
sample_duration_s = time_axis['INCREMENT']
if interval_samples is None:
samples_per_interval = int(numpy.floor(interval_s/sample_duration_s))
else:
samples_per_interval = interval_samples
first_timeslot = 0
pipes = [mp.Pipe() for sap_id in sap_ids]
manager_ends = [pipe[0] for pipe in pipes]
worker_ends = [pipe[1] for pipe in pipes]
processes = [mp.Process(target=read_and_process_antenna_worker,
args=(h5_names, sap_id, num_sb, fir_coefficients, connection))
for h5_names, sap_id, connection in zip(sap_names, sap_ids, worker_ends)]
[process.start() for process in processes]
while first_timeslot < timeslots_per_file - samples_per_interval - num_samples:
time_axis['REFERENCE_VALUE'] = (first_timeslot + num_samples/2)*sample_duration_s
if max_duration_s is not None and (first_timeslot +num_samples)*sample_duration_s > max_duration_s:
break
[pipe.send([first_timeslot, num_samples]) for pipe in manager_ends]
x_metadata = [pipe.recv() for pipe in manager_ends]
x_data = [numpy.frombuffer(pipe.recv_bytes(), dtype=x_meta[2]).reshape(x_meta[1])
for x_meta, pipe in zip(x_metadata, manager_ends)]
y_metadata = [pipe.recv() for pipe in manager_ends]
y_data = [numpy.frombuffer(pipe.recv_bytes(), dtype=y_meta[2]).reshape(y_meta[1])
for y_meta, pipe in zip(y_metadata, manager_ends)]
first_timeslot += samples_per_interval
# Return X[sap, sb, time, chan], Y[sap, sb, time, chan], time, freq
yield (numpy.array(x_data, dtype=numpy.complex64),
numpy.array(y_data, dtype=numpy.complex64), time_axis, freq_axis)
[pipe.send('done') for pipe in manager_ends]
[pipe.close() for pipe in manager_ends]
[process.join() for process in processes]
return None
| brentjens/software-correlator | softwarecorrelator/stationprocessing.py | stationprocessing.py | py | 11,844 | python | en | code | 4 | github-code | 6 | [
{
"api_name": "scipy.signal.firwin",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scipy.signal",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "numpy.arange",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line... |
7796988085 | import xml.dom.minidom
import string;
import logging;
def LoadSession(system, FileName):
Logger = logging.getLogger("PPLT");
Logger.debug("Try to load Session from %s"%FileName);
doc = xml.dom.minidom.parse(FileName);
dev_tag = doc.getElementsByTagName("Devices")[0];
sym_tag = doc.getElementsByTagName("SymbolTree")[0];
srv_tag = doc.getElementsByTagName("Servers")[0];
LoadDevices(system, dev_tag);
LoadSymTree(system, sym_tag.firstChild);
LoadServers(system, srv_tag);
def LoadDevices(system, Tag):
devlst = Tag.getElementsByTagName("Device");
for dev in devlst:
Para = xmlFetchParameters(dev);
Alias = dev.getAttribute("alias");
FQDN = dev.getAttribute("fqdn");
system.LoadDevice(FQDN, Alias, Para);
def LoadServers(system, Tag):
srvlst = Tag.getElementsByTagName("Server");
for srv in srvlst:
Para = xmlFetchParameters(srv);
Alias = srv.getAttribute("alias");
FQSN = srv.getAttribute("fqsn");
DefUser = srv.getAttribute("user");
Root = srv.getAttribute("root");
if not Root:
Root = "/";
system.LoadServer(FQSN, Alias, DefUser, Para,Root);
def LoadSymTree(system, Tag, PathList=[]):
if not Tag:
return(None);
if Tag.nodeType != Tag.ELEMENT_NODE:
return(LoadSymTree(system, Tag.nextSibling, PathList));
if Tag.localName == "Symbol":
Name = Tag.getAttribute("name");
Slot = Tag.getAttribute("slot");
Refresh = Tag.getAttribute("refresh");
Group = Tag.getAttribute("group");
Owner = Tag.getAttribute("owner");
Modus = str(Tag.getAttribute("modus"));
Path = PathList2Str(PathList+[Name]);
system.CreateSymbol(Path, Slot, Refresh, Modus, Owner, Group);
if Tag.localName == "Folder":
Name = Tag.getAttribute("name");
Group = Tag.getAttribute("group");
Owner = Tag.getAttribute("owner");
Modus = Tag.getAttribute("modus");
Path = PathList2Str(PathList+[Name]);
system.CreateFolder(Path, Modus, Owner, Group);
if Tag.hasChildNodes():
LoadSymTree(system,Tag.firstChild,PathList+[Name]);
return(LoadSymTree(system,Tag.nextSibling,PathList));
def xmlFetchParameters(Node):
parameter = {};
parlst = Node.getElementsByTagName("Parameter");
for par in parlst:
name = par.getAttribute("name");
value = xmlFetchText(par.firstChild);
parameter.update( {name:value} );
return(parameter);
def xmlFetchText(Node,txt=""):
if not Node:
return(txt);
if Node.nodeType == Node.TEXT_NODE:
txt += string.strip(Node.data);
return(xmlFetchText(Node.nextSibling,txt));
def PathList2Str(PathLst):
p = "";
if len(PathLst) == 0:
return("/");
for item in PathLst:
p += "/"+item;
return(p);
| BackupTheBerlios/pplt-svn | PPLT/PPLT/LoadSession.py | LoadSession.py | py | 2,939 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom.minidom.parse",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "xml.dom.minidom.dom",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_nam... |
13610828545 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import taggit.managers
class Migration(migrations.Migration):
dependencies = [
('taggit', '0001_initial'),
('learn', '0003_project_photo'),
]
operations = [
migrations.CreateModel(
name='Area',
fields=[
('id', models.AutoField(auto_created=True, serialize=False, verbose_name='ID', primary_key=True)),
('created', models.DateTimeField(auto_now_add=True, verbose_name='Criado em')),
('modified', models.DateTimeField(auto_now=True, verbose_name='Modificado em')),
('name', models.CharField(max_length=100, verbose_name='Nome')),
('slug', models.SlugField(max_length=100, verbose_name='Identificador')),
('tags', taggit.managers.TaggableManager(help_text='A comma-separated list of tags.', verbose_name='Tags', through='taggit.TaggedItem', to='taggit.Tag')),
],
options={
'ordering': ['name'],
'verbose_name': 'Área de Estudo',
'verbose_name_plural': 'Áreas de Estudo',
},
bases=(models.Model,),
),
migrations.AddField(
model_name='project',
name='area',
field=models.ForeignKey(verbose_name='Área', blank=True, related_name='projects', null=True, to='learn.Area'),
preserve_default=True,
),
migrations.AddField(
model_name='project',
name='open_enrollment',
field=models.BooleanField(default=False, verbose_name='Inscrições Abertas'),
preserve_default=True,
),
]
| klebercode/sofia | sofia/apps/learn/migrations/0004_auto_20141215_1723.py | 0004_auto_20141215_1723.py | py | 1,769 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.CreateModel",
"line_number": 16,
"usage_type": "call"
},
... |
38474579179 | import argparse
import regex as re
from pathlib import Path
from textwrap import dedent
import yaml
from .validator import run_sigma_validator
from clint.textui import colored, puts
import logging
STANDARD_YAML_PATH = Path(__file__).resolve().parent.parent / Path('CCCS_SIGMA.yml')
SIGMA_FILENAME_REGEX = r'(\.yaml|\.yml)$'
SIGMA_VALID_PREFIX = r'valid_'
SIGMA_VALID_PREFIX_REG = re.compile(r'^' + SIGMA_VALID_PREFIX)
logger = logging.getLogger(__file__)
parser = argparse.ArgumentParser(description='CCCS SIGMA script to run the CCCS SIGMA validator, '
'use the -i or -c flags to generate the id, fingerprint, version, '
'first_imported, or last_modified (if not already present) and add them '
'to the file.')
parser.add_argument('paths', nargs='+', type=str, default=[],
help='A list of files or folders to be analyzed.')
parser.add_argument('-r', '--recursive', action='store_true', default=False, dest='recursive',
help='Recursively search folders provided.')
parser.add_argument('-v', '--verbose', action='store_true', default=False, dest='verbose',
help='Verbose mode, will print why a rule was invalid.')
parser.add_argument('-vv', '--very-verbose', action='store_true', default=False, dest='veryverbose',
help='Very-verbose mode, will printout what rule is about to be processed, '
'the invalid rules, the reasons they are invalid and all contents of the rule.')
parser.add_argument('-f', '--fail', action='store_true', default=False, dest='fail',
help='Fail mode, only prints messages about invalid rules.')
parser.add_argument('-w', '--warnings', action='store_true', default=False, dest='warnings',
help='This mode will ignore warnings and proceed with other behaviors if the rule is valid.')
parser.add_argument('-s', '--standard', action='store_true', default=False, dest='standard',
help='This prints the SIGMA standard to the screen.')
parser.add_argument('-st', '--strict', action='store_true', default=False, dest='strict',
help='This causes the cli to return a non-zero exit code for warnings.')
parser_group = parser.add_mutually_exclusive_group()
parser_group.add_argument('-i', '--in-place', action='store_true', default=False, dest='inplace', # removes comments
help='Modifies valid files in place, mutually exclusive with -c.') # and indentation
parser_group.add_argument('-c', '--create-files', action='store_true', default=False, dest='createfile',
help='Writes a new file for each valid file, mutually exclusive with -i.')
def parse_args(custom_args=None):
if isinstance(custom_args, list):
options = parser.parse_args(custom_args)
else:
options = parser.parse_args()
return options
def get_sigma_paths_from_dir(directory, recursive):
""" Recursively get SIGMA rules from a directory """
if directory.is_file() and re.fullmatch(SIGMA_FILENAME_REGEX, directory.suffix):
yield directory
elif directory.is_dir():
for path in list(directory.iterdir()):
if path.is_file() and re.fullmatch(SIGMA_FILENAME_REGEX, path.suffix):
yield path
elif path.is_dir() and recursive:
for sub_dir_path in get_sigma_paths_from_dir(path, recursive):
yield sub_dir_path
def get_paths_to_validate(options_paths, recursive):
""" Returns a set of pathlib.Path objects for all
SIGMA rules that will be validated """
paths_to_validate = set()
for path in [Path(path_name) for path_name in options_paths]:
if path.exists():
if path.is_dir():
paths_to_validate.update(get_sigma_paths_from_dir(path, recursive))
elif re.match(SIGMA_FILENAME_REGEX, path.suffix):
paths_to_validate.add(path)
else:
print('{message:40}{path}'.format(message='Path does not exist:', path=str(path)))
return sorted(paths_to_validate)
def get_sigma_file_new_path(path):
""" takes a path in argument, and return the same path with the
filename prefixed with SIGMA_VALID_PREFIX.
if the file already has the prefix, returns the path unchanged.
"""
if SIGMA_VALID_PREFIX_REG.match(path.name):
return path
else:
new_name = SIGMA_VALID_PREFIX + path.name
return path.parent / new_name
def overwrite_file(path, content):
# convert sigma rule from dict to str and write contents to disk
with open(path, 'w', encoding='utf-8') as f:
f.write(yaml.dump(content, sort_keys=False) + '\n')
def print_errors(sigma_file_processor, options):
if sigma_file_processor.return_file_error_state():
print(colored.red('{indent:>7}{message}'.format(indent='- ', message='Errors:')))
print(colored.white(sigma_file_processor.return_rule_errors_for_cmlt()))
def print_warnings(sigma_file_processor, options):
if sigma_file_processor.return_file_warning_state() and not options.warnings:
print(colored.yellow('{indent:>7}{message}'.format(indent='- ', message='Warnings:')))
print(colored.white(sigma_file_processor.return_rule_warnings_for_cmlt()))
def print_standard():
# TODO fix entries in standard
print('Printing the CCCS SIGMA Standard:')
with open(STANDARD_YAML_PATH, 'r') as yaml_file:
standard = yaml.safe_load(yaml_file)
for standard_key in standard:
standard_entry_name = standard_key
standard_entry_description = standard[standard_key]['description']
standard_entry_unique = standard[standard_key]['unique']
standard_entry_optional = standard[standard_key]['optional']
standard_entry_format = standard[standard_key]['format']
print('{se_name}{message}'.format(message=':',
se_name=standard_entry_name))
print('{preface:20}{se_text}'.format(preface=' - Description:',
se_text=standard_entry_description))
print('{preface:20}{se_text}'.format(preface=' - Format:',
se_text=standard_entry_format))
print('{preface:20}{se_text}'.format(preface=' - Unique:',
se_text=standard_entry_unique))
print('{preface:20}{se_text}'.format(preface=' - Optional:',
se_text=standard_entry_optional))
if 'validator' in standard[standard_key]:
standard_entry_validator = standard[standard_key]['validator']
print('{preface:20}{se_text}'.format(preface=' - Validator:',
se_text=standard_entry_validator))
if 'argument' in standard[standard_key]:
standard_entry_argument = standard[standard_key]['argument']
print('{preface:20}{se_text}'.format(preface=' - Argument:',
se_text=''))
for param in standard_entry_argument:
print('{preface:20}{se_text}'.format(preface=' - ' + param + ': ',
se_text=standard_entry_argument[param]))
print()
def _call_validator(options):
paths_to_validate = get_paths_to_validate(options.paths,
options.recursive)
all_invalid_rule_returns = []
all_warning_rule_returns = []
# if options.standard:
# print_standard()
# main loop : will iterate over every file the program has to validate,
# validate them and then print the output
for sigma_rule_path in list(paths_to_validate):
if options.veryverbose:
print('{message:40}{y_file}'.format(
message='Validating Rule file:',
y_file=sigma_rule_path,
))
# handle if we want to overwrite or create new files
if options.createfile:
generate_values = True
sigma_file_output = get_sigma_file_new_path(sigma_rule_path)
what_will_be_done = 'create a new file with the {} preface.'.format(SIGMA_VALID_PREFIX)
elif options.inplace:
generate_values = True
sigma_file_output = sigma_rule_path
what_will_be_done = 'modify the file in place.'
else:
generate_values = False
what_will_be_done = 'make no changes'
sigma_file_output = None
sigma_validator = run_sigma_validator(sigma_rule_path, generate_values)
# Prints the output of the validator.
file_message = '{message:39}{y_file}'
if sigma_validator.return_file_error_state():
# The rule is invalid
all_invalid_rule_returns.append((sigma_rule_path, sigma_validator))
puts(colored.red(file_message.format(
message='🍅 Invalid Rule File:',
y_file=sigma_rule_path)))
if options.inplace or options.createfile:
# TODO add these methods to SigmaValidator
sigma_validator.modify_values()
if sigma_validator.return_edited_file_string():
print('modifying file ', sigma_file_output)
overwrite_file(sigma_file_output, sigma_validator.return_edited_file_string())
else:
print('No fields were edited ')
if options.verbose or options.veryverbose:
print_errors(sigma_validator, options)
print_warnings(sigma_validator, options)
elif sigma_validator.return_file_warning_state() and not options.warnings:
# The rule is valid, has warnings and warning are turned on
all_warning_rule_returns.append((sigma_rule_path, sigma_validator))
puts(colored.yellow(file_message.format(
message=' Warnings in Rule File:',
y_file=sigma_rule_path
)))
if options.verbose or options.veryverbose:
print_warnings(sigma_validator, options)
elif not sigma_validator.return_file_error_state():
# The rule is valid with no warnings or has warnings and warnings are turned off
if not options.fail:
print(file_message.format(
message="🥦 Valid Rule File:",
y_file=sigma_rule_path
))
else:
print('Invalid Code Execution Block')
if options.veryverbose:
for invalid_rule_path, invalid_rule_return in all_invalid_rule_returns:
print(dedent('''
----------------------------------------------------------------------------
Invalid rule file:{invalid_rule_path}
Warnings:
{rule_warnings}
Errors:
{rule_errors}
{original_rule}
----------------------------------------------------------------------------
''').format(rule_warnings=invalid_rule_return.return_rule_warnings_for_cmlt(),
rule_errors=invalid_rule_return.return_rule_errors_for_cmlt(),
original_rule=invalid_rule_return.return_original_rule(),
invalid_rule_path=invalid_rule_path))
total_sigma_rule_paths = len(paths_to_validate)
total_invalid_sigma_rule_paths = len(all_invalid_rule_returns)
total_warning_sigma_rule_paths = len(all_warning_rule_returns)
total_valid_sigma_rule_paths = (total_sigma_rule_paths
- total_invalid_sigma_rule_paths
- total_warning_sigma_rule_paths)
print(dedent('''
----------------------------------------------------------------------------
All .yaml Rule files found have been passed through the CCCS Sigma Validator:
Total Sigma Rule Files to Analyze: {total_sigma_rule_paths}
Total Valid CCCS Sigma Rule Files: {total_valid_sigma_rule_paths}
Total Warning CCCS Sigma Rule Files: {total_warning_sigma_rule_paths}
Total Invalid CCCS Sigma Rule Files: {total_invalid_sigma_rule_paths}
---------------------------------------------------------------------------
''').format(total_sigma_rule_paths=str(total_sigma_rule_paths),
total_valid_sigma_rule_paths=colored.green(str(total_valid_sigma_rule_paths)),
total_warning_sigma_rule_paths=colored.yellow(str(total_warning_sigma_rule_paths)),
total_invalid_sigma_rule_paths=colored.red(str(total_invalid_sigma_rule_paths))))
if total_invalid_sigma_rule_paths >= 1:
exit(99)
elif total_warning_sigma_rule_paths >= 1 and options.strict:
exit(49)
def git_ci(changed_file_paths):
options = parser.parse_args(changed_file_paths)
_call_validator(options)
def main():
print('Sigma Rule Validator')
options = parse_args()
_call_validator(options)
if __name__ == '__main__':
main()
| CybercentreCanada/pysigma | pysigma/validator_cli.py | validator_cli.py | py | 13,374 | python | en | code | 7 | github-code | 6 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "regex.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser"... |
8677677831 | import xarray as xr
import xesmf as xe
import pandas as pd
import datetime
import os
first_date = '2021-01-01'
last_date = '2022-12-31'
lonmin,lonmax = 360-90,360-69
latmin,latmax = -40,-15
variables = [
'surf_el',
'water_temp',
'salinity',
'water_u',
'water_v']
renamedict = {'surf_el':'zos',
'water_temp':'thetao',
'salinity':'so',
'water_u':'uo',
'water_v':'vo'}
def get_hycom_filename(ftype):
if ftype=='hindcast':
url = 'https://<Lucas.Glasner:y4vkrp7lqcv>@tds.hycom.org/thredds/dodsC/GLBy0.08/expt_93.0'
return url
def get_hycom_hindcast(first_date, last_date, lonmin, lonmax, latmin, latmax, variables):
url = get_hycom_filename('hindcast')
data = xr.open_dataset(url, decode_times=False)
data = data[variables]
data = data.sel(lat=slice(latmin,latmax), lon=slice(lonmin, lonmax))
attrs = data.time.attrs
units,reference_date = data.time.attrs['units'].split('since')
time = [pd.Timedelta(hours=t)+pd.to_datetime(reference_date) for t in data.time.values]
data.coords['time'] = ('time',time, {'long_name':attrs['long_name'],
'axis':attrs['axis'],
'NAVO_code':attrs['NAVO_code']})
data = data.sel(time=slice(first_date, last_date))
return data
if __name__=='__main__':
data = get_hycom_hindcast(first_date=first_date,
last_date=last_date,
lonmin=lonmin, lonmax=lonmax,
latmin=latmin, latmax=latmax,
variables=variables)
data = data.rename(renamedict)
daterange = pd.date_range(first_date, last_date, freq='d')
for date in daterange:
datestr = date.strftime('%Y-%m-%d')
try:
print('Downloading data for ',datestr,'please wait...')
x = data.sel(time=datestr).resample({'time':'d'}).mean()
x.coords['time'] = x.time+pd.Timedelta(hours=12)
x.to_netcdf(
'HINDCAST/hycom_hindcast_0p08_{}.nc'.format(date.strftime('%Y%m%d')),
encoding={
'time':{'units':'hours since 2000-01-01', 'dtype':float},
'zos':{'zlib':True, 'complevel':3},
'so':{'zlib':True, 'complevel':3},
'uo':{'zlib':True, 'complevel':3},
'vo':{'zlib':True, 'complevel':3},
'thetao':{'zlib':True, 'complevel':3}
}
)
except Exception as e:
print('Download for ',datestr,' failed:',e)
| lucasglasner/DOWNLOADSCRIPTS | HYCOM/download_hycom_hindcast.py | download_hycom_hindcast.py | py | 2,754 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "xarray.open_dataset",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.Timedelta",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pandas.date_r... |
21645750883 | #Tutorial de Umbral OpenCV
import cv2
import numpy as np
img = cv2.imread('Pagina.jpg')
#Imagen a escala de grises
grayscaled = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
#Umbral de 10
retval, threshold = cv2.threshold(img, 12, 255, cv2.THRESH_BINARY)
#Umbral en escala de grises
retval, threshold2 = cv2.threshold(grayscaled, 10, 255, cv2.THRESH_BINARY)
#Umbral Adaptativo
th = cv2.adaptiveThreshold(grayscaled, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 115, 1)
cv2.imshow('Original',img)
cv2.imshow('Umbral',threshold)
cv2.imshow('Umbral en Escala de grises',threshold2)
cv2.imshow('Umbral Adaptativo',threshold2)
cv2.waitKey(0)
cv2.destroyAllWindows()
| Deniry/Practicas_OpenCV | Practica5.py | Practica5.py | py | 666 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"lin... |
38785952057 | import cv2 as cv
import sys
img = cv.imread("Photos/cat_large.jpg")
print(img.shape)
cv.imshow("Cat", img)
def rescale(frame, scale=0.75):
width = frame.shape[1] * scale
height = frame.shape[0] * scale
dimensions = (int(width), int(height))
new_frame = cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)
return new_frame
new_img = rescale(img, 0.2)
print(new_img.shape)
cv.imshow("Catnew", new_img)
cv.waitKey(0) | adamferencz/opencv-course-ghb | rescale.py | rescale.py | py | 446 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.resize",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "cv2.INTER_AREA",
"line_number": 15... |
1922022592 | from sklearn import preprocessing
import pandas as pd
import numpy as np
import pickle
data_path = './data/STT.csv'
window = 15
def normalize(df):
min_max_scaler = preprocessing.MinMaxScaler()
df['open'] = min_max_scaler.fit_transform(df.open.values.reshape(-1, 1))
df['close'] = min_max_scaler.fit_transform(df.close.values.reshape(-1, 1))
df['high'] = min_max_scaler.fit_transform(df.high.values.reshape(-1, 1))
df['low'] = min_max_scaler.fit_transform(df.low.values.reshape(-1, 1))
df['volume'] = min_max_scaler.fit_transform(
df.volume.values.reshape(-1, 1))
return df
def split_data(stock, window, percent=0.85):
amount_of_features = len(stock.columns) # 5
data = stock.values
sequence_length = window + 1 # index starting from 0
result = []
for index in range(len(data) - sequence_length):
result.append(data[index: index + sequence_length])
row = round(percent * data.shape[0])
result = np.array(result)
train = result[:int(row), :]
x_train = train[:, :-1]
y_train = np.array(train[:, -1][:, -1])
x_test = result[int(row):, :-1]
y_test = np.array(result[int(row):, -1][:, -1])
x_train = np.reshape(
x_train, (x_train.shape[0], x_train.shape[1], amount_of_features))
x_test = np.reshape(
x_test, (x_test.shape[0], x_test.shape[1], amount_of_features))
return [x_train, y_train, x_test, y_test]
if __name__ == "__main__":
df = pd.read_csv(data_path, index_col=0)
target_df = df[df.symbol == 'STT'].copy()
target_df.drop(['symbol'], 1, inplace=True)
target_df_normalized = normalize(target_df)
x_train, y_train, x_test, y_test = split_data(
target_df_normalized, window)
with open('./data/train.pickle', 'wb') as f:
pickle.dump((x_train, y_train), f)
with open('./data/test.pickle', 'wb') as f:
pickle.dump((x_test, y_test), f)
| sinlin0908/ML_course | hw4/prepro.py | prepro.py | py | 1,925 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "sklearn.preprocessing.MinMaxScaler",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "... |
41584679238 | # 윈도우에서는 한글 인코딩 오류가 발생할 수 있습니다.
# 한글 인코딩 오류가 발생한다면
# Message.log(message_type="info", msg="데이터를 저장했습니다.")
# 위의 코드 부분의 msg를 영어로 수정해서 사용해주세요.
import json
import sys
from eliot import Message, start_action, to_file, write_traceback
import requests
# 로그 출력을 표준 출력으로 설정(터미널에 출력하기)
to_file(sys.stdout)
# 크롤링 대상 URL 리스트
PAGE_URL_LIST = [
'https://eliot.readthedocs.io/en/1.0.0/',
'https://eliot.readthedocs.io/en/1.0.0/generating/index.html',
'https://example.com/notfound.html',
]
def fetch_pages():
"""페이지의 내용을 추출합니다."""
# 어떤 처리의 로그인지는 action_type으로 지정
with start_action(action_type="fetch_pages"):
page_contents = {}
for page_url in PAGE_URL_LIST:
# 어떤 처리의 로그인지 action_type으로 출력
with start_action(action_type="download", url=page_url):
try:
r = requests.get(page_url, timeout=30)
r.raise_for_status()
except requests.exceptions.RequestException as e:
write_traceback() # 예외가 발생하면 트레이스백 출력
continue
page_contents[page_url] = r.text
return page_contents
if __name__ == '__main__':
page_contents = fetch_pages()
with open('page_contents.json', 'w') as f_page_contents:
json.dump(page_contents, f_page_contents, ensure_ascii=False)
# 단순하게 로그 메시지만 출력할 수도 있음
Message.log(message_type="info", msg="데이터를 저장했습니다.")
| JSJeong-me/2021-K-Digital-Training | Web_Crawling/python-crawler/chapter_5/sample_eliot.py | sample_eliot.py | py | 1,833 | python | ko | code | 7 | github-code | 6 | [
{
"api_name": "eliot.to_file",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "eliot.start_action",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "eliot.start_action",... |
23748260373 | import os
import sys
from functools import partial
from PyQt5.QtGui import *
from PyQt5.QtCore import *
from PyQt5.QtWidgets import *
from toolBar import ToolBar
from Canvas.canvas import Canvas
import cv2
import numpy as np
from grab_cut import Grab_cut
from choiceDiaGen import ChoiceDiaGen
from choiceDiaStyle import ChoiceDiaStyle
from zoomWidget import ZoomWidget
from birdDialog import BirdDialog
from generator import Generator
from styleChanger import StyleChanger
__appname__ = 'grab_cut'
class ResizesQWidget(QWidget):
def sizeHint(self):
return QSize(100, 150)
class struct(object):
def __init__(self, **kwargs):
self.__dict__.update(kwargs)
# 菜单栏和工具栏
class WindowMixin(object):
# 根据名字和action列表创建一个菜单,比如File,[new,edit]
def menu(self, title, actions=None):
menu = self.menuBar().addMenu(title)
if actions:
addActions(menu, actions)
return menu
def toolbar(self, title, actions=None):
toolbar = ToolBar(title)
toolbar.setObjectName('{}ToolBar'.format(title))
toolbar.setToolButtonStyle(Qt.ToolButtonTextUnderIcon)
if actions:
addActions(toolbar, actions)
self.addToolBar(Qt.LeftToolBarArea, toolbar) # 加到布局左侧
return toolbar
# 创建一个新Action
def newAction(parent, text, slot=None, shortcut=None,
tip=None, icon=None, checkable=False,
enable=True):
a = QAction(text, parent)
if icon is not None:
a.setIcon(QIcon(icon))
if shortcut is not None:
a.setShortcut(shortcut)
if tip is not None:
a.setToolTip(tip)
a.setStatusTip(tip)
if slot is not None:
a.triggered.connect(slot)
if checkable:
a.setChecked(True)
a.setEnabled(enable)
return a
# 讲actions加入到父控件
def addActions(widget, actions):
for action in actions:
if action is None:
widget.addSeparator()
widget.addAction(action) # weidget is toolBar or menu
# 主界面
class MainWindow(QMainWindow, WindowMixin):
FIT_WINDOW, FIT_WIDTH, MANUAL_ZOOM = list(range(3))
def __init__(self, defaultFilename=None):
super().__init__()
self.dirty = True # 文件是否已保存
self.mImgList = [] # 图片列表
self.dirname = None # 文件名
self._beginner = True #
self.image_out_np = None # 提取结果
self.default_save_dir = None # 默认保存路径
self.filePath = None # 当前载入的图片路径
self.mattingFile = None
# 垂直布局,
listLayout = QVBoxLayout()
listLayout.setContentsMargins(0, 0, 0, 0)
# ---#显示图片的label pic
matResultShow = ResizesQWidget() # 返回是是Qwidget
matResultShow.resize(150, 150)
self.pic = QLabel(matResultShow)
self.pic.resize(150, 150)
self.setGeometry(50, 20, 150, 150)
matResultShow.setLayout(listLayout)
# 建一个dockwidget放图片label
self.resultdock = QDockWidget('输出结果', self)
self.resultdock.setObjectName('result')
self.resultdock.setWidget(matResultShow)
self.resultdock.resize(150, 150)
# self.resultdock.setFeatures(QDockWidget.DockWidgetFloatable)
# 建一个fileDoc放文件
self.fileListWidget = QListWidget() # 列表布局
self.fileListWidget.itemDoubleClicked.connect(
self.fileItemDoubleClicked)
fileListLayout = QVBoxLayout()
fileListLayout.setContentsMargins(0, 0, 0, 0)
fileListLayout.addWidget(self.fileListWidget)
fileListContainer = QWidget()
fileListContainer.setLayout(fileListLayout)
self.filedock = QDockWidget('导入文件列表', self)
self.filedock.setObjectName('Files')
self.filedock.setWidget(fileListContainer)
self.zoomWidget = ZoomWidget()
self.canvas = Canvas(parent=self)
scroll = QScrollArea()
scroll.setWidget(self.canvas)
scroll.setWidgetResizable(True)
self.scrollBars = {
Qt.Vertical: scroll.verticalScrollBar(),
Qt.Horizontal: scroll.horizontalScrollBar()
}
self.scrollArea = scroll
self.canvas.scrollRequest.connect(self.scrollRequest)
self.setCentralWidget(scroll)
self.addDockWidget(Qt.RightDockWidgetArea, self.resultdock)
self.addDockWidget(Qt.RightDockWidgetArea, self.filedock)
# self.filedock.setFeatures(QDockWidget.DockWidgetFloatable)
self.dockFeatures = QDockWidget.DockWidgetClosable | QDockWidget.DockWidgetFloatable
self.resultdock.setFeatures(
self.resultdock.features() ^ self.dockFeatures)
# Actions
action = partial(newAction, self)
open_file = action('导入图片', self.openFile, 'Ctrl+O', '导入图片')
open_dir = action('导入文件夹', self.openDir,
'Ctrl+D', '导入文件夹中的所有图片到列表')
change_save_dir = action('&更改预设的保存路径', self.changeSavedirDialog)
# open_next_img = action('&Next Image', self.openNextImg,
# 'Ctrl+N', 'Open next image')
# open_pre_img = action('&Previous Image', self.openPreImg,
# 'Ctrl+M', 'Open previous image')
save = action('保存结果', self.saveFile, 'Crl+S', '保存输出结果图')
create = action('指定区域', self.createShape,
'w', '框选ROI')
mark = action('标记微调', self.markDown, None, '左键白色,标记前景;右键黑色,标记后景')
matting = action('迭代一次', self.grabcutMatting,
'e', '用当前标记迭代一次获取前景算法')
# 用预训练模型生成图片
generate = action('生成图片', self.generate, None, '输入文字,生成图片素材')
# 用预训练模型进行风格迁移
style = action('风格转换', self.styleChange, None, '选择一个风格,进行图像风格转换')
# 字典,对应一个放缩比
self.scalers = {
self.FIT_WINDOW: self.scaleFitWindow,
self.FIT_WIDTH: self.scaleFitWidth,
# Set to one to scale to 100% when loading files.
self.MANUAL_ZOOM: lambda: 1,
}
# store actions for further handling
self.actions = struct(save=save, open_file=open_file,
open_dir=open_dir, change_save_dir=change_save_dir,
# open_next_img=open_next_img, open_pre_img=open_pre_img,
create=create, mark=mark, matting=matting, generate=generate, style=style)
# Auto saving: enable auto saving if pressing next
# self.autoSaving = QAction('Auto Saving', self)
# self.autoSaving.setCheckable(True)
# self.autoSaving.setChecked()
# set toolbar
self.tools = self.toolbar('Tools')
self.actions.all = (open_file, open_dir,
change_save_dir, create,
# open_pre_img, open_next_img,
mark, matting, generate, style, save)
addActions(self.tools, self.actions.all)
# set status
self.statusBar().showMessage('{} 已就绪.'.format(__appname__))
def okToContinue(self):
    """Ask about unsaved changes before a destructive action.

    Returns True when it is safe to proceed (nothing dirty, the user chose
    Yes and the result was saved, or the user chose No); False when the
    user cancelled.
    """
    if self.dirty:
        reply = QMessageBox.question(self, "Attention",
                                     "you have unsaved changes, proceed anyway?",
                                     QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)
        if reply == QMessageBox.Cancel:
            return False
        elif reply == QMessageBox.Yes:
            # Bug fix: the original returned the nonexistent attribute
            # ``self.fileSave`` (AttributeError at runtime) instead of
            # actually saving. Save, then allow the action to proceed.
            self.saveFile()
            return True
    return True
def resetState(self):
    """Delegate a full state reset to the canvas widget."""
    self.canvas.resetState()
def errorMessage(self, title, message):
    """Show a modal critical-error dialog; returns the button the user pressed."""
    body = '<p><b>%s</b></p>%s' % (title, message)
    return QMessageBox.critical(self, title, body)
def beginner(self):
    """Return the beginner-mode flag set at construction time."""
    return self._beginner
def advanced(self):
    """Return True when the UI is not in beginner mode."""
    return not self.beginner()
def openFile(self, _value=False):
    """Pop a file-open dialog and load the chosen image onto the canvas."""
    start_dir = os.path.dirname(self.filePath) if self.filePath else '.'
    patterns = ' '.join('*.%s' % fmt.data().decode("ascii").lower()
                        for fmt in QImageReader.supportedImageFormats())
    chosen = QFileDialog.getOpenFileName(
        self, '%s - Choose Image or Label file' % __appname__,
        start_dir, "Image (%s)" % patterns)
    if chosen:
        # Newer Qt bindings return (path, selected_filter); unwrap the path.
        if isinstance(chosen, (tuple, list)):
            chosen = chosen[0]
        self.loadFile(chosen)
def openDir(self, dirpath=None):
    """Ask for a directory and import every image it contains into the list."""
    start_dir = dirpath or '.'
    chosen = QFileDialog.getExistingDirectory(
        self, '%s - Open Directory' % __appname__, start_dir,
        QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
    self.importDirImages(chosen)
# Show every imported image in the file list panel.
def importDirImages(self, dirpath):
    """Rescan dirpath and repopulate the file list widget with its images."""
    self.fileListWidget.clear()
    self.mImgList = self.scanAllImages(dirpath)
    for path in self.mImgList:
        self.fileListWidget.addItem(QListWidgetItem(path))
# Walk a directory tree and return all image files found, sorted.
def scanAllImages(self, folderPath):
    """Return absolute paths of all Qt-readable images under folderPath,
    sorted case-insensitively."""
    suffixes = tuple('.%s' % fmt.data().decode("ascii").lower()
                     for fmt in QImageReader.supportedImageFormats())
    found = []
    for base, _dirs, names in os.walk(folderPath):
        for name in names:
            if name.lower().endswith(suffixes):
                found.append(os.path.abspath(os.path.join(base, name)))
    found.sort(key=str.lower)
    return found
def fileItemDoubleClicked(self, item=None):
    """Load the image whose entry in the file list was double-clicked."""
    idx = self.mImgList.index(item.text())  # position in the image list
    if idx < len(self.mImgList):
        chosen = self.mImgList[idx]
        if chosen:
            self.loadFile(chosen)
# Read an image file into the canvas.
def loadFile(self, filePath=None):
    """Load filePath onto the canvas and highlight it in the file list.

    Returns False when the bytes could not be decoded as an image,
    None otherwise (including the early return for a missing file).
    """
    self.resetState()  # clear any previous canvas state
    self.canvas.setEnabled(False)
    # Highlight the matching entry in the file list, if the list is populated.
    # NOTE(review): .index() raises ValueError when filePath is not in
    # mImgList — confirm callers only pass list members here.
    if filePath and self.fileListWidget.count() > 0:
        index = self.mImgList.index(filePath)
        fileWidgetItem = self.fileListWidget.item(index)
        fileWidgetItem.setSelected(True)
    if filePath and os.path.exists(filePath):
        # Read the raw file bytes via the module-level helper `read`.
        self.ImageData = read(filePath, None)
    else:
        return
    image = QImage.fromData(self.ImageData)
    # Qt could not decode the bytes into an image.
    if image.isNull():
        self.errorMessage(u'Error opening file',
                          u'<p>Make sure <i>%s</i> is a valid image file.' % filePath)
        self.status('Error reading %s' % filePath)
        return False
    self.status('Loaded %s' % os.path.basename(filePath))
    self.image = image  # decoded QImage
    self.filePath = filePath  # path of the currently loaded file
    self.canvas.loadPixmap(QPixmap.fromImage(image))  # hand the pixmap to the canvas
    self.canvas.setEnabled(True)
    self.adjustScale(initial=True)
    self.paintCanvas()
# Show a transient message in the status bar.
def status(self, message, delay=5000):
    """Show `message` in the status bar for `delay` milliseconds."""
    self.statusBar().showMessage(message, delay)
def adjustScale(self, initial=False):
    """Recompute the zoom factor and push it (as a percentage) into the zoom widget."""
    mode = self.FIT_WINDOW if initial else self.zoomMode
    factor = self.scalers[mode]()
    self.zoomWidget.setValue(int(100 * factor))
def scaleFitWindow(self):
    """Scale factor that fits the whole pixmap inside the central widget."""
    eps = 2.0  # leave a margin so no scrollbars are generated
    avail_w = self.centralWidget().width() - eps
    avail_h = self.centralWidget().height() - eps
    pix_w = float(self.canvas.pixmap.width())
    pix_h = float(self.canvas.pixmap.height())
    # Whichever dimension is proportionally larger is the binding constraint.
    if pix_w / pix_h >= avail_w / avail_h:
        return avail_w / pix_w
    return avail_h / pix_h
def scaleFitWidth(self):
    """Scale factor that matches the pixmap width to the central widget width."""
    # The epsilon does not seem to work too well here.
    avail = self.centralWidget().width() - 2.0
    return avail / self.canvas.pixmap.width()
def paintCanvas(self):
    """Repaint the canvas using the zoom percentage from the zoom widget."""
    assert not self.image.isNull(), "cannot paint null image"
    self.canvas.scale = 0.01 * self.zoomWidget.value()  # widget holds a percentage
    self.canvas.adjustSize()
    self.canvas.update()
def createShape(self):
    """Enter drawing mode so the user can drag out an ROI rectangle."""
    assert self.beginner()
    self.canvas.setEditing(False)  # drawing mode, not editing
    self.actions.create.setEnabled(False)  # one shape at a time
# Begin mark-up: switch the canvas mode to editing.
def markDown(self):
    """Enable mark mode (left-click white = foreground, right-click black = background)."""
    self.canvas.setEditing(True)
def toggleDrawMode(self, edit=True):
    """Switch the canvas between editing and drawing modes."""
    self.canvas.setEditing(edit)
    # NOTE(review): self.actions.createMode / editMode are not created in the
    # visible action setup (only create/mark/matting/...). If this method is
    # ever called it would raise AttributeError — confirm whether it is dead code.
    self.actions.createMode.setEnabled(edit)
    self.actions.editMode.setEnabled(not edit)
# Generate an image with a pretrained model.
def generate(self):
    """Ask the user for a category/attributes, run the generator, display the result."""
    # Category selection dialog.
    choiceDia = ChoiceDiaGen()
    choiceDia.show()
    choiceDia.hide()
    # NOTE(review): show() is non-blocking, and it is immediately hidden —
    # confirm the dialog collects its result modally inside ChoiceDiaGen.
    # The Generator class drives the generation; it receives the chosen
    # type and attribute list from the dialog.
    gen = Generator(choiceDia.type,choiceDia.attrList)
    gen.generate()
    # Display the freshly generated image on the main page.
    self.loadFile("StackGAN/resultImg/latest.png")
def styleChange(self):
    """Apply a user-selected style transfer to the current image and show it."""
    # Style selection dialog.
    choiceDia = ChoiceDiaStyle()
    choiceDia.show()
    choiceDia.hide()
    # NOTE(review): show()/hide() back-to-back is non-blocking — confirm the
    # dialog gathers its result elsewhere before `type` is read.
    print(choiceDia.type)
    changer = StyleChanger(choiceDia.type, self.filePath)
    changer.changeStyle()
    result = cv2.imread("CycleGAN/targetImg/latest.png")
    # Convert to four channels (add alpha) before display.
    result = self.addAchannel(result)
    self.showResultImg(result)
    self.image_out_np = result
# Accepts an image in OpenCV (numpy BGR) format.
def addAchannel(self, x):
    """Return x extended from BGR to BGRA with a fully-opaque alpha channel;
    pixels that were pure black are made fully transparent."""
    alpha = np.full(x.shape[:2] + (1,), 255, dtype=x.dtype)
    bgra = np.concatenate((x, alpha), axis=2)
    # Knock out opaque pure-black pixels so they become transparent.
    pure_black = np.all(bgra == [0, 0, 0, 255], axis=2)
    bgra[pure_black] = [0, 0, 0, 0]
    return bgra
# Foreground extraction (GrabCut).
def grabcutMatting(self):
    """Run one round of GrabCut matting on the current shape and user marks,
    then display the extracted foreground."""
    if self.mattingFile is None:
        self.mattingFile = Grab_cut()  # lazily create the matting engine
    def format_shape(s):
        # Bundle shape geometry/colors plus the user's fore/background marks.
        return dict(line_color=s.line_color.getRgb(),
                    fill_color=s.fill_color.getRgb(),
                    points=[(p.x(), p.y()) for p in s.points],
                    backMark=self.canvas.getBackMark(),
                    whiteMark=self.canvas.getForMark())
    # The most recent shape: four points (for a rectangle) plus fill/edge colors.
    shape = format_shape(self.canvas.shapes[-1])
    self.image_out_np = self.mattingFile.image_matting(self.filePath,
                                                       shape, iteration=10)
    self.showResultImg(self.image_out_np)
    self.actions.save.setEnabled(True)  # there is now a result worth saving
# Accepts an OpenCV-style numpy matrix and shows it in the result label.
def showResultImg(self, image_np):
    """Render image_np in the result label self.pic.

    Assumes image_np is a 4-channel array compatible with Format_ARGB32 —
    TODO confirm against addAchannel/image_matting output.
    """
    # NOTE(review): QImage is built without an explicit bytesPerLine, which
    # assumes the array is contiguous with no row padding — confirm.
    image = QImage(image_np, image_np.shape[1],
                   image_np.shape[0], QImage.Format_ARGB32)
    matImg = QPixmap(image)
    self.pic.setFixedSize(matImg.size())
    self.pic.setPixmap(matImg)
def saveFile(self):
    """Prompt for a destination and save the current result there."""
    self._saveFile(self.saveFileDialog())
def _saveFile(self, saved_path):
    """Write the current result image to saved_path and mark the state clean.

    Does nothing (beyond echoing the path) when saved_path is empty.
    """
    print(saved_path)
    if not saved_path:
        return
    Grab_cut.resultSave(saved_path, self.image_out_np)
    self.setClean()
    self.statusBar().showMessage('Saved to %s' % saved_path)
    self.statusBar().show()
def saveFileDialog(self):
    """Open a save dialog pre-filled with the current image's base name.

    Returns the chosen path, or '' when the user cancels.
    """
    caption = '%s - Choose File' % __appname__
    # Bug fix: the original built the filter as 'File (*png)' (missing dot);
    # use a proper glob so the dialog advertises .png files.
    filters = 'File (*.png)'
    if self.default_save_dir is not None and len(self.default_save_dir):
        openDialogPath = self.default_save_dir
    else:
        openDialogPath = self.currentPath()
    print(openDialogPath)
    dlg = QFileDialog(self, caption, openDialogPath, filters)
    dlg.setDefaultSuffix('png')  # append .png when the user omits it
    dlg.setAcceptMode(QFileDialog.AcceptSave)
    # Suggest the source image's name without its extension.
    filenameWithoutExtension = os.path.splitext(self.filePath)[0]
    dlg.selectFile(filenameWithoutExtension)
    dlg.setOption(QFileDialog.DontUseNativeDialog, False)
    if dlg.exec_():
        return dlg.selectedFiles()[0]
    return ''
def currentPath(self):
    """Directory of the currently loaded file, or '.' when none is loaded."""
    if self.filePath:
        return os.path.dirname(self.filePath)
    return '.'
def changeSavedirDialog(self, _value=False):
    """Let the user pick the default directory for saved results."""
    start_dir = self.default_save_dir if self.default_save_dir is not None else '.'
    chosen = QFileDialog.getExistingDirectory(
        self,
        '%s - Save annotations to the directory' % __appname__,
        start_dir,
        QFileDialog.ShowDirsOnly | QFileDialog.DontResolveSymlinks)
    if chosen is not None and len(chosen) > 1:
        self.default_save_dir = chosen
        self.statusBar().showMessage('%s . Annotation will be saved to %s' %
                                     ('Change saved folder', self.default_save_dir))
        self.statusBar().show()
def setClean(self):
    """Mark the current result as saved and reset action availability."""
    self.dirty = False
    self.actions.save.setEnabled(False)  # nothing new to save
    self.actions.create.setEnabled(True)  # allow selecting a new ROI
def openNextImg(self):
    """Placeholder: advancing to the next image is not implemented."""
    pass
def openPreImg(self):
    """Placeholder: returning to the previous image is not implemented."""
    pass
def scrollRequest(self, delta, orientation):
    """Scroll the bar for `orientation` in response to a wheel delta.

    A standard wheel notch is 120 (8 * 15); the sign is flipped so the
    content moves in the expected direction.
    """
    units = - delta / (8 * 15)
    bar = self.scrollBars[orientation]
    # Bug fix: true division produces a float, but QScrollBar.setValue
    # requires an int under PyQt5 — truncate as before.
    bar.setValue(int(bar.value() + bar.singleStep() * units))
# Read a file's raw bytes (e.g. a binary image); return them, or a fallback.
def read(filename, default=None):
    """Return the raw bytes of `filename`, or `default` if it cannot be read."""
    try:
        with open(filename, 'rb') as fh:
            return fh.read()
    except Exception:
        return default
# NOTE(review): duplicate of the resetState defined earlier in this file —
# presumably left over from a refactor; confirm which definition wins.
def resetState(self):
    """Delegate a full state reset to the canvas widget."""
    self.canvas.resetState()
# Application entry point: create the Qt application and show the main window.
if __name__ == "__main__":
    app = QApplication(sys.argv)
    ex = MainWindow()
    ex.show()
    sys.exit(app.exec_())
| kisstherain8677/Image_generate | app.py | app.py | py | 19,181 | python | en | code | 3 | github-code | 6 | [
{
"api_name": "toolBar.ToolBar",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "zoomWidget.ZoomWidget",
"line_number": 137,
"usage_type": "call"
},
{
"api_name": "Canvas.canvas.Canvas",
"line_number": 139,
"usage_type": "call"
},
{
"api_name": "functool... |
24916898593 | import time
from datetime import datetime
from bluepy.btle import BTLEDisconnectError
from miband import miband
from ibmcloudant.cloudant_v1 import CloudantV1
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibmcloudant.cloudant_v1 import CloudantV1, Document
import os
from dotenv import load_dotenv
# All necessary imports
# Load secrets/configuration from the local .env file.
load_dotenv()
SERVICE_URL = os.getenv("SERVICE_URL")  # Cloudant service endpoint
API_KEY = os.getenv("API_KEY")  # IBM Cloud IAM API key
AUTH_KEY = os.getenv("AUTH_KEY")  # Mi Band auth key as a hex string
MAC_ADDR = os.getenv("MAC_ADDR")  # Mi Band Bluetooth MAC address
# NOTE(review): bytes.fromhex raises if AUTH_KEY is absent from the
# environment — confirm the .env file always defines it.
AUTH_KEY = bytes.fromhex(AUTH_KEY)
alternate = True
authenticator = IAMAuthenticator(API_KEY)
client = CloudantV1(authenticator=authenticator)
client.set_service_url(SERVICE_URL)
# All private keys loaded from .env file
def general_info():
    """Print identifying details about the connected Mi Band 4.

    Reads the module-level ``band`` object, so it must be called after
    create_connection() has succeeded.
    """
    # Idiom fix: ``global band`` was unnecessary — the function only reads
    # the module-level name, never rebinds it.
    print("MiBand-4")
    print("Soft revision:", band.get_revision())
    print("Hardware revision:", band.get_hrdw_revision())
    print("Serial:", band.get_serial())
    print("Battery:", band.get_battery_info()["level"])
    print("Time:", band.get_current_time()["date"].isoformat())
# Connect to the band, retrying until it answers; returns the band object.
def create_connection():
    """Return an initialized miband connection, retrying on BTLE failures."""
    while True:
        try:
            device = miband(MAC_ADDR, AUTH_KEY, debug=True)
            device.initialize()
            return device
        except BTLEDisconnectError:
            print("Connection to the MIBand failed. Trying out again in 3 seconds")
            time.sleep(3)
        except KeyboardInterrupt:
            print("\nExit.")
            exit()
# Connect to the band and show its status before streaming heart-rate data.
band = create_connection()
general_info()
hr_list = {}  # timestamp string -> BPM reading collected this session
count = 0  # number of heart-rate callbacks seen so far
def get_realtime():
    """Start streaming real-time heart-rate data; each sample is passed to heart_logger."""
    try:
        band.start_heart_rate_realtime(heart_measure_callback=heart_logger)
    except KeyboardInterrupt:
        print("\nExit.")
def heart_logger(data):  # data is the heart-rate (BPM) value
    """Per-sample callback: record the reading and persist every 3rd to Cloudant."""
    data = abs(data)  # readings are treated as magnitudes
    global count  # running callback counter, incremented at the end
    print("Realtime heart BPM:", data)
    # Record the reading keyed by a second-resolution timestamp.
    # NOTE(review): readings within the same second overwrite each other,
    # and hr_list grows without bound — confirm this is acceptable.
    hr_list[
        datetime.now().strftime("%d/%m/%y %H:%M:%S")
    ] = data
    print(len(hr_list) // 2)
    if count % 3 == 0:  # persist every 3rd reading (original comment said 10th)
        time_ = str(datetime.now().strftime("%d/%m/%y %H:%M:%S"))
        data_entry: Document = Document(id=time_)
        # Attach the heart-rate reading as the document's value field.
        data_entry.value = data
        # Save the document in the "jxtin" database.
        create_document_response = client.post_document(
            db="jxtin", document=data_entry
        ).get_result()
        print(
            f"You have created the document:\n{data_entry}"
        )  # echo the document that was created
        print("Logged the data")
    else:
        print("Didnt log the data")
    count += 1
# Kick off the real-time heart-rate stream (runs until interrupted).
get_realtime()
| Rushour0/MSIT-The-New-Normal-Submission | WebVersions/web_v1/cloudant-module.py | cloudant-module.py | py | 2,911 | python | en | code | 1 | github-code | 6 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number":... |
19218028573 | from rest_framework import serializers
from api.v1.auth.schemas import LanguageChoiceField, TimeZoneNameChoiceField
from users.models import User
class CurrentUserOutputSchema(serializers.ModelSerializer):
    """Serializes the authenticated user for API output."""
    # Custom choice fields constrain these to known language codes / time zones.
    language_code = LanguageChoiceField()
    time_zone = TimeZoneNameChoiceField()
    class Meta:
        model = User
        fields = (
            "id",
            "email",
            "full_name",
            "notification_token",
            "language_code",
            "time_zone",
            "date_joined",
            "is_staff",
            "is_superuser",
        )
| plathanus-tech/django_boilerplate | src/api/v1/users/schemas.py | schemas.py | py | 591 | python | en | code | 2 | github-code | 6 | [
{
"api_name": "rest_framework.serializers.ModelSerializer",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "api.v1.auth.schemas.LanguageChoiceField",
"line_number": 8,
"usa... |
42156059489 | import pytest
import responses
from repositories.app import APP
@pytest.fixture
def client():
    """Flask test client; the app's cache is cleared before each test."""
    with APP.test_client() as client:
        APP.extensions["cache"].clear()
        yield client
@responses.activate
def test_get_repo(client):
    """A GitHub repo payload is reshaped into the service's camelCase JSON."""
    # Idiom fix: the original used an f-string with no placeholders.
    url = "https://api.github.com/repos/owner/repo"
    response = {
        "full_name": "test/name",
        "description": "description",
        "clone_url": "clone url",
        "stargazers_count": 500,
        "created_at": "2020-01-17T22:24:45Z",
    }
    responses.add(responses.GET, url, json=response)
    r = client.get("/repositories/owner/repo")
    assert r.get_json() == {
        "fullName": "test/name",
        "description": "description",
        "cloneUrl": "clone url",
        "stars": 500,
        "createdAt": "2020-01-17T22:24:45+00:00",
    }
    assert r.status_code == 200
    assert r.is_json
@responses.activate
def test_404_error(client):
    """An upstream 404 is relayed as the service's own 404 error body."""
    # Idiom fix: the original used an f-string with no placeholders.
    url = "https://api.github.com/repos/owner/repo"
    responses.add(responses.GET, url, status=404)
    r = client.get("/repositories/owner/repo")
    assert r.get_json() == {
        "status": 404,
        "error": "Not Found",
        "message": "requested repository does not exist",
    }
    assert r.status_code == 404
@responses.activate
def test_500_error(client):
    """An upstream 500 surfaces as the service's generic internal-error body."""
    # Idiom fix: the original used an f-string with no placeholders.
    url = "https://api.github.com/repos/owner/repo"
    responses.add(responses.GET, url, status=500)
    r = client.get("/repositories/owner/repo")
    assert r.get_json() == {
        "status": 500,
        "error": "Internal Server Error",
        "message": "the server encountered an unexpected internal server error",
    }
    assert r.status_code == 500
| lukaszmenc/get-repository-data | tests/test_app.py | test_app.py | py | 1,667 | python | en | code | 0 | github-code | 6 | [
{
"api_name": "repositories.app.APP.test_client",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "repositories.app.APP",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "repositories.app.APP.extensions",
"line_number": 10,
"usage_type": "attribute"
},
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.